instruction stringclasses 100
values | code stringlengths 78 193k | response stringlengths 259 170k | file stringlengths 59 203 |
|---|---|---|---|
Add structured docstrings to improve clarity | import copy
import functools
import json
import time
from collections import deque
from pathlib import Path
import tiktoken
import yaml
from pydantic import ValidationError
from extensions.openai.errors import InvalidRequestError
from extensions.openai.typing import ToolDefinition
from extensions.openai.utils import debug_msg
from modules.tool_parsing import get_tool_call_id, parse_tool_call, detect_tool_call_format
from modules import shared
from modules.reasoning import extract_reasoning
from modules.chat import (
generate_chat_prompt,
generate_chat_reply,
load_character_memoized,
load_instruction_template_memoized
)
from modules.image_utils import convert_openai_messages_to_images
from modules.logging_colors import logger
from modules.presets import load_preset_memoized
from modules.text_generation import decode, encode, generate_reply
@functools.cache
def load_chat_template_file(filepath):
    """Load an instruction template from a file, caching the result by path.

    Supports raw Jinja template files (returned verbatim) and YAML files
    containing an ``instruction_template`` key.

    Parameters:
        filepath: Path or string pointing to the template file.

    Returns:
        The template string; '' if a YAML file lacks the key.
    """
    filepath = Path(filepath)
    ext = filepath.suffix.lower()
    text = filepath.read_text(encoding='utf-8')
    if ext in ['.yaml', '.yml']:
        # safe_load returns None for an empty document; guard so .get() can't
        # raise AttributeError on None.
        data = yaml.safe_load(text) or {}
        return data.get('instruction_template', '')
    return text
def _get_raw_logprob_entries(offset=0):
    """Return logprob entries recorded by the backend since `offset`.

    Reads shared.model.last_completion_probabilities and returns a tuple
    (entries_after_offset, new_offset) so streaming callers can resume
    from where they left off.
    """
    history = getattr(shared.model, 'last_completion_probabilities', None)
    if not history:
        return [], offset
    return history[offset:], len(history)
def _dict_to_logprob_entries(token_dict):
if not token_dict:
return []
return [{"top_logprobs": [{"token": t, "logprob": lp} for t, lp in token_dict.items()]}]
def _parse_entry_top(entry):
return entry.get('top_logprobs', entry.get('top_probs', []))
def format_chat_logprobs(entries):
    """Convert raw logprob entries to the OpenAI chat-completions logprobs object.

    Each usable entry contributes one chosen token (the first alternative)
    plus its full list of alternatives. Returns None when there is nothing
    to report.
    """
    if not entries:
        return None

    content = []
    for entry in entries:
        # Backends differ on the key name for the alternatives list.
        alternatives = entry.get('top_logprobs', entry.get('top_probs', []))
        if not alternatives:
            continue

        formatted_alts = []
        for alt in alternatives:
            tok = alt.get('token', '')
            formatted_alts.append({
                "token": tok,
                "logprob": alt.get('logprob', alt.get('prob', 0)),
                "bytes": list(tok.encode('utf-8')) if tok else None,
            })

        chosen = formatted_alts[0]
        content.append({
            "token": chosen["token"],
            "logprob": chosen["logprob"],
            "bytes": chosen["bytes"],
            "top_logprobs": formatted_alts,
        })

    if not content:
        return None
    return {"content": content, "refusal": None}
def format_completion_logprobs(entries):
    """Convert raw logprob entries to the legacy completions logprobs object.

    Produces parallel lists of chosen tokens, their logprobs, per-token
    alternative maps, and character offsets into the generated text.
    Returns None when no usable entries exist.
    """
    if not entries:
        return None

    tokens, token_logprobs, top_logprobs, text_offset = [], [], [], []
    position = 0
    for entry in entries:
        # Backends differ on the key name for the alternatives list.
        alternatives = entry.get('top_logprobs', entry.get('top_probs', []))
        if not alternatives:
            continue

        chosen = alternatives[0]
        chosen_token = chosen.get('token', '')
        tokens.append(chosen_token)
        token_logprobs.append(chosen.get('logprob', chosen.get('prob', 0)))
        text_offset.append(position)
        position += len(chosen_token)
        top_logprobs.append({
            alt.get('token', ''): alt.get('logprob', alt.get('prob', 0))
            for alt in alternatives
        })

    if not tokens:
        return None
    return {
        "tokens": tokens,
        "token_logprobs": token_logprobs,
        "top_logprobs": top_logprobs,
        "text_offset": text_offset,
    }
def process_parameters(body, is_legacy=False):
    """Translate an OpenAI-style request body into internal generation parameters.

    NOTE: mutates `body` in place and returns it as the params dict. Handles
    the max_tokens rename, truncation/temperature defaults, preset loading,
    stop strings, and logprobs / logit_bias wiring for the active backend.
    """
    generate_params = body
    # The legacy completions API used 'length' instead of 'max_tokens'
    max_tokens_str = 'length' if is_legacy else 'max_tokens'
    generate_params['max_new_tokens'] = body.pop(max_tokens_str)
    if generate_params['truncation_length'] == 0:
        # 0 means "use the server-wide default"
        generate_params['truncation_length'] = shared.settings['truncation_length']
    if generate_params['temperature'] == 0:
        # temperature 0 => greedy decoding
        generate_params['do_sample'] = False
        generate_params['top_k'] = 1
    if body['preset'] is not None:
        preset = load_preset_memoized(body['preset'])
        generate_params.update(preset)

    generate_params['custom_stopping_strings'] = []
    if 'stop' in body:  # str or array, max len 4 (ignored)
        if isinstance(body['stop'], str):
            generate_params['custom_stopping_strings'] = [body['stop']]
        elif isinstance(body['stop'], list):
            generate_params['custom_stopping_strings'] = body['stop']

    # Resolve logprobs: for chat completions, logprobs is a bool and the count
    # comes from top_logprobs. Normalize to an int for all backends.
    logprobs = body.get('logprobs', None)
    top_logprobs = body.get('top_logprobs', None)
    if logprobs is True:
        logprobs = max(top_logprobs, 1) if top_logprobs is not None else 5
    generate_params['logprobs'] = logprobs

    # For llama.cpp and ExLlamav3 native, logit_bias and logprobs are forwarded natively
    if shared.args.loader not in ('llama.cpp', 'ExLlamav3'):
        # Transformers-based loaders implement these via logits processors instead;
        # imported lazily so native loaders never pay for transformers.
        from transformers import LogitsProcessorList
        from modules.transformers_loader import (
            LogitsBiasProcessor,
            LogprobProcessor
        )

        logits_processor = []
        logit_bias = body.get('logit_bias', None)
        if logit_bias:  # {str: float, ...}
            logits_processor = [LogitsBiasProcessor(logit_bias)]

        if logprobs is not None and logprobs > 0:
            generate_params['logprob_proc'] = LogprobProcessor(logprobs)
            logits_processor.extend([generate_params['logprob_proc']])

        if logits_processor:  # requires logits_processor support
            generate_params['logits_processor'] = LogitsProcessorList(logits_processor)

    return generate_params
def process_multimodal_content(content):
    """Flatten OpenAI multimodal message content to plain text.

    String content passes through unchanged. List content is reduced to its
    text parts (space-joined), with one "<__media__>" placeholder per image
    prepended ahead of a blank line. Anything else is stringified.
    """
    if isinstance(content, str):
        return content

    if isinstance(content, list):
        parts = [item for item in content if isinstance(item, dict)]
        texts = [p.get('text', '') for p in parts if p.get('type', '') == 'text']
        image_count = sum(1 for p in parts if p.get('type', '') == 'image_url')
        joined = ' '.join(texts)
        if image_count:
            return "<__media__>" * image_count + "\n\n" + joined
        return joined

    return str(content)
def convert_history(history):
    '''
    Convert an OpenAI `messages` list into the internal history format:
    rows of [user_message, assistant_reply, tool_message, metadata].

    Returns (user_input, system_message, history_dict) where user_input is
    the trailing user message (empty if the conversation does not end on a
    user turn), system_message is the concatenation of leading
    system/developer messages, and history_dict carries 'internal',
    'visible' (deep copy), and the original 'messages'.
    '''
    chat_dialogue = []
    current_message = ""
    current_reply = ""
    user_input = ""
    user_input_last = True
    system_message = ""
    seen_non_system = False

    for entry in history:
        content = entry["content"]
        role = entry["role"]
        if role == "user":
            seen_non_system = True
            # Extract text content (images handled by model-specific code)
            content = process_multimodal_content(content)
            user_input = content
            user_input_last = True
            if current_message:
                # Two user messages in a row: flush the previous one with an empty reply
                chat_dialogue.append([current_message, '', '', {}])
                current_message = ""
            current_message = content
        elif role == "assistant":
            seen_non_system = True
            meta = {}
            tool_calls = entry.get("tool_calls")
            if tool_calls and isinstance(tool_calls, list) and len(tool_calls) > 0:
                meta["tool_calls"] = tool_calls
            if content.strip() == "":
                content = ""  # keep empty content, don't skip
            current_reply = content
            user_input_last = False
            if current_message:
                # Pair the reply with the pending user message
                chat_dialogue.append([current_message, current_reply, '', meta])
                current_message = ""
                current_reply = ""
            else:
                # Assistant message with no preceding user message
                chat_dialogue.append(['', current_reply, '', meta])
        elif role == "tool":
            seen_non_system = True
            user_input_last = False
            meta = {}
            if "tool_call_id" in entry:
                meta["tool_call_id"] = entry["tool_call_id"]
            chat_dialogue.append(['', '', content, meta])
        elif role in ("system", "developer"):
            if not seen_non_system:
                # Leading system messages go to custom_system_message (placed at top)
                system_message += f"\n{content}" if system_message else content
            else:
                # Mid-conversation system messages: preserve position in history
                if current_message:
                    chat_dialogue.append([current_message, '', '', {}])
                    current_message = ""
                chat_dialogue.append([content, '', '', {"role": "system"}])

    if not user_input_last:
        # The conversation does not end on a user turn, so there is no pending input
        user_input = ""

    return user_input, system_message, {
        'internal': chat_dialogue,
        'visible': copy.deepcopy(chat_dialogue),
        'messages': history  # Store original messages for multimodal models
    }
def chat_completions_common(body: dict, is_legacy: bool = False, stream=False, prompt_only=False, stop_event=None) -> dict:
    """Shared implementation behind the /v1/chat/completions endpoint.

    Generator that yields either one chat.completion response (stream=False),
    a sequence of chat.completion.chunk deltas (stream=True), or only the
    formatted prompt (prompt_only=True). Validates the request, resolves the
    instruction template and character, converts the message history, runs
    generation, and optionally detects tool calls in the output.
    """
    if body.get('functions', []):
        raise InvalidRequestError(message="functions is not supported.", param='functions')

    if body.get('function_call', ''):
        raise InvalidRequestError(message="function_call is not supported.", param='function_call')

    if 'messages' not in body:
        raise InvalidRequestError(message="messages is required", param='messages')

    tools = None
    if 'tools' in body and body['tools'] is not None and isinstance(body['tools'], list) and len(body['tools']) > 0:
        tools = validateTools(body['tools'])  # raises InvalidRequestError if validation fails

    tool_choice = body.get('tool_choice', None)
    if tool_choice == "none":
        tools = None  # Disable tool detection entirely

    messages = body['messages']
    for m in messages:
        if 'role' not in m:
            raise InvalidRequestError(message="messages: missing role", param='messages')
        elif m['role'] == 'function':
            raise InvalidRequestError(message="role: function is not supported.", param='messages')

        # Handle multimodal content validation
        content = m.get('content')
        if content is None:
            # OpenAI allows content: null on assistant messages when tool_calls is present
            if m['role'] == 'assistant' and m.get('tool_calls'):
                m['content'] = ''
            else:
                raise InvalidRequestError(message="messages: missing content", param='messages')

        # Validate multimodal content structure
        if isinstance(content, list):
            for item in content:
                if not isinstance(item, dict) or 'type' not in item:
                    raise InvalidRequestError(message="messages: invalid content item format", param='messages')
                if item['type'] not in ['text', 'image_url']:
                    raise InvalidRequestError(message="messages: unsupported content type", param='messages')
                if item['type'] == 'text' and 'text' not in item:
                    raise InvalidRequestError(message="messages: missing text in content item", param='messages')
                if item['type'] == 'image_url' and ('image_url' not in item or 'url' not in item['image_url']):
                    raise InvalidRequestError(message="messages: missing image_url in content item", param='messages')

    # Chat Completions
    object_type = 'chat.completion' if not stream else 'chat.completion.chunk'
    created_time = int(time.time())
    cmpl_id = "chatcmpl-%d" % (int(time.time() * 1000000000))
    resp_list = 'data' if is_legacy else 'choices'

    # generation parameters
    generate_params = process_parameters(body, is_legacy=is_legacy)
    if stop_event is not None:
        generate_params['stop_event'] = stop_event

    continue_ = body['continue_']

    # Instruction template: explicit string > named template > server-wide
    # chat template file > settings default.
    if body['instruction_template_str']:
        instruction_template_str = body['instruction_template_str']
    elif body['instruction_template']:
        instruction_template = body['instruction_template']
        instruction_template = "Alpaca" if instruction_template == "None" else instruction_template
        instruction_template_str = load_instruction_template_memoized(instruction_template)
    elif shared.args.chat_template_file:
        instruction_template_str = load_chat_template_file(shared.args.chat_template_file)
    else:
        instruction_template_str = shared.settings['instruction_template_str']

    chat_template_str = body['chat_template_str'] or shared.default_settings['chat_template_str']
    chat_instruct_command = body['chat_instruct_command'] or shared.default_settings['chat-instruct_command']

    # Chat character
    character = body['character'] or shared.default_settings['character']
    character = "Assistant" if character == "None" else character
    name1 = body['user_name'] or shared.default_settings['name1']
    name1, name2, _, greeting, context = load_character_memoized(character, name1, '')
    name2 = body['bot_name'] or name2
    context = body['context'] or context
    greeting = body['greeting'] or greeting
    user_bio = body['user_bio'] or ''

    # History
    user_input, custom_system_message, history = convert_history(messages)

    generate_params.update({
        'mode': body['mode'],
        'name1': name1,
        'name2': name2,
        'context': context,
        'greeting': greeting,
        'user_bio': user_bio,
        'instruction_template_str': instruction_template_str,
        'custom_system_message': custom_system_message,
        'chat_template_str': chat_template_str,
        'chat-instruct_command': chat_instruct_command,
        'tools': tools,
        'history': history,
        'stream': stream
    })

    max_tokens = generate_params['max_new_tokens']
    if max_tokens in [None, 0]:
        # No explicit limit: default to 512 and let the backend auto-extend
        generate_params['max_new_tokens'] = 512
        generate_params['auto_max_new_tokens'] = True

    requested_model = generate_params.pop('model')
    logprob_proc = generate_params.pop('logprob_proc', None)
    if logprob_proc:
        logprob_proc.token_alternatives_history.clear()
    chat_logprobs_offset = [0]  # mutable for closure access in streaming

    def chat_streaming_chunk(content=None, chunk_tool_calls=None, include_role=False, reasoning_content=None):
        # begin streaming: build one chat.completion.chunk with the given delta
        delta = {}
        if include_role:
            delta['role'] = 'assistant'
            delta['refusal'] = None

        if content is not None:
            delta['content'] = content

        if reasoning_content is not None:
            delta['reasoning_content'] = reasoning_content

        if chunk_tool_calls:
            delta['tool_calls'] = chunk_tool_calls

        chunk = {
            "id": cmpl_id,
            "object": object_type,
            "created": created_time,
            "model": shared.model_name,
            "system_fingerprint": None,
            resp_list: [{
                "index": 0,
                "finish_reason": None,
                "delta": delta,
                "logprobs": None,
            }],
        }

        # Attach per-chunk logprobs from whichever source the backend provides
        if logprob_proc:
            entries = _dict_to_logprob_entries(logprob_proc.token_alternatives)
            formatted = format_chat_logprobs(entries)
            if formatted:
                chunk[resp_list][0]["logprobs"] = formatted
        elif shared.args.loader in ('llama.cpp', 'ExLlamav3'):
            entries, chat_logprobs_offset[0] = _get_raw_logprob_entries(chat_logprobs_offset[0])
            if entries:
                formatted = format_chat_logprobs(entries)
                if formatted:
                    chunk[resp_list][0]["logprobs"] = formatted

        return chunk

    # Check if usage should be included in streaming chunks per OpenAI spec
    stream_options = body.get('stream_options')
    include_usage = bool(stream_options) and bool(stream_options.get('include_usage') if isinstance(stream_options, dict) else getattr(stream_options, 'include_usage', False))

    # generate reply #######################################
    if prompt_only:
        prompt = generate_chat_prompt(user_input, generate_params, _continue=continue_)
        yield {'prompt': prompt}
        return

    if stream:
        # Initial chunk announcing the assistant role
        chunk = chat_streaming_chunk('', include_role=True)
        if include_usage:
            chunk['usage'] = None
        yield chunk

    generator = generate_chat_reply(
        user_input, generate_params, regenerate=False, _continue=continue_, loading_message=False)

    answer = ''
    seen_content = ''
    seen_reasoning = ''
    tool_calls = []
    end_last_tool_call = 0
    supported_tools = [x["function"]["name"] for x in tools] if tools is not None else None
    _tool_parsers = None

    # Filter supported_tools when tool_choice specifies a particular function
    if supported_tools and isinstance(tool_choice, dict):
        specified_func = tool_choice.get("function", {}).get("name")
        if specified_func and specified_func in supported_tools:
            supported_tools = [specified_func]

    if supported_tools is not None:
        _template_str = generate_params.get('instruction_template_str', '') if generate_params.get('mode') == 'instruct' else generate_params.get('chat_template_str', '')
        _tool_parsers, _, _ = detect_tool_call_format(_template_str)

    for a in generator:
        answer = a['internal'][-1][1]

        if supported_tools is not None:
            # Only scan the text generated since the last detected tool call
            tool_call = parse_tool_call(answer[end_last_tool_call:], supported_tools, parsers=_tool_parsers) if len(answer) > 0 else []
            if len(tool_call) > 0:
                for tc in tool_call:
                    tc["id"] = get_tool_call_id()
                    if stream:
                        tc["index"] = len(tool_calls)
                    tc["function"]["arguments"] = json.dumps(tc["function"]["arguments"])
                    tool_calls.append(tc)

                end_last_tool_call = len(answer)

            # Stop generation before streaming content if tool_calls were detected,
            # so that raw tool markup is not sent as content deltas.
            if len(tool_calls) > 0:
                break

        if stream:
            # Strip reasoning/thinking blocks so only final content is streamed.
            # Reasoning is emitted separately as reasoning_content deltas.
            reasoning, content = extract_reasoning(answer)
            if reasoning is not None:
                new_reasoning = reasoning[len(seen_reasoning):]
                new_content = content[len(seen_content):]
            else:
                new_reasoning = None
                new_content = answer[len(seen_content):]

            # chr(0xfffd): partial unicode character, hold the delta until complete
            if (not new_content and not new_reasoning) or chr(0xfffd) in (new_content or '') + (new_reasoning or ''):
                continue

            chunk = chat_streaming_chunk(
                content=new_content if new_content else None,
                reasoning_content=new_reasoning if new_reasoning else None,
            )
            if include_usage:
                chunk['usage'] = None

            if reasoning is not None:
                seen_reasoning = reasoning
                seen_content = content
            else:
                seen_content = answer

            yield chunk

    token_count = shared.model.last_prompt_token_count if hasattr(shared.model, 'last_prompt_token_count') else 0
    completion_token_count = len(encode(answer)[0])

    if len(tool_calls) > 0:
        stop_reason = "tool_calls"
    elif token_count + completion_token_count >= generate_params['truncation_length'] or completion_token_count >= generate_params['max_new_tokens']:
        stop_reason = "length"
    else:
        stop_reason = "stop"

    if stream:
        # Final chunk carries any tool calls and the finish reason
        chunk = chat_streaming_chunk(chunk_tool_calls=tool_calls)
        chunk[resp_list][0]['finish_reason'] = stop_reason
        usage = {
            "prompt_tokens": token_count,
            "completion_tokens": completion_token_count,
            "total_tokens": token_count + completion_token_count
        }
        if include_usage:
            chunk['usage'] = None
            yield chunk
            # Separate usage-only chunk with choices: [] per OpenAI spec
            yield {
                "id": cmpl_id,
                "object": object_type,
                "created": created_time,
                "model": shared.model_name,
                "system_fingerprint": None,
                resp_list: [],
                "usage": usage
            }
        else:
            yield chunk
    else:
        reasoning, content = extract_reasoning(answer)
        message = {
            "role": "assistant",
            "refusal": None,
            "content": None if tool_calls else content,
            **({"reasoning_content": reasoning} if reasoning else {}),
            **({"tool_calls": tool_calls} if tool_calls else {}),
        }

        resp = {
            "id": cmpl_id,
            "object": object_type,
            "created": created_time,
            "model": shared.model_name,
            "system_fingerprint": None,
            resp_list: [{
                "index": 0,
                "finish_reason": stop_reason,
                "message": message,
                "logprobs": None,
            }],
            "usage": {
                "prompt_tokens": token_count,
                "completion_tokens": completion_token_count,
                "total_tokens": token_count + completion_token_count
            }
        }

        # Attach logprobs for the whole completion, if any backend recorded them
        if logprob_proc:
            all_entries = []
            for alt in logprob_proc.token_alternatives_history:
                all_entries.extend(_dict_to_logprob_entries(alt))
            formatted = format_chat_logprobs(all_entries)
            if formatted:
                resp[resp_list][0]["logprobs"] = formatted
        elif shared.args.loader in ('llama.cpp', 'ExLlamav3'):
            raw = getattr(shared.model, 'last_completion_probabilities', None)
            if raw:
                formatted = format_chat_logprobs(raw)
                if formatted:
                    resp[resp_list][0]["logprobs"] = formatted

        yield resp
def completions_common(body: dict, is_legacy: bool = False, stream=False, stop_event=None):
    """Shared implementation behind the /v1/completions endpoint.

    Generator that yields a single text_completion response (stream=False)
    or a sequence of streaming chunks (stream=True). Supports string prompts,
    token-id prompts, batched prompts (non-streaming only), n>1 completions,
    and an OpenAI-style `messages` fallback for multimodal requests.
    """
    object_type = 'text_completion'
    created_time = int(time.time())
    cmpl_id = "cmpl-%d" % (int(time.time() * 1000000000))
    resp_list = 'data' if is_legacy else 'choices'
    prompt_str = 'context' if is_legacy else 'prompt'

    # Handle both prompt and messages format for unified multimodal support
    if prompt_str not in body or body[prompt_str] is None:
        if 'messages' in body:
            # Convert messages format to prompt for completions endpoint
            prompt_text = ""
            for message in body.get('messages', []):
                if isinstance(message, dict) and 'content' in message:
                    # Extract text content from multimodal messages
                    content = message['content']
                    if isinstance(content, str):
                        prompt_text += content
                    elif isinstance(content, list):
                        for item in content:
                            if isinstance(item, dict) and item.get('type') == 'text':
                                prompt_text += item.get('text', '')
            # Allow empty prompts for image-only requests
            body[prompt_str] = prompt_text
        else:
            raise InvalidRequestError("Missing required input", param=prompt_str)

    # common params
    generate_params = process_parameters(body, is_legacy=is_legacy)
    max_tokens = generate_params['max_new_tokens']
    generate_params['stream'] = stream
    if stop_event is not None:
        generate_params['stop_event'] = stop_event

    requested_model = generate_params.pop('model')
    logprob_proc = generate_params.pop('logprob_proc', None)
    if logprob_proc:
        logprob_proc.token_alternatives_history.clear()
    suffix = body['suffix'] if body['suffix'] else ''
    echo = body['echo']

    # Add messages to generate_params if present for multimodal processing
    if body.get('messages'):
        generate_params['messages'] = body['messages']
        raw_images = convert_openai_messages_to_images(generate_params['messages'])
        if raw_images:
            logger.info(f"Found {len(raw_images)} image(s) in request.")
            generate_params['raw_images'] = raw_images

    n_completions = body.get('n', 1) or 1

    if not stream:
        prompt_arg = body[prompt_str]
        # Handle empty/None prompts (e.g., image-only requests)
        if prompt_arg is None:
            prompt_arg = ""

        # Normalize a single prompt (string or token-id list) to a one-element batch
        if isinstance(prompt_arg, str) or (isinstance(prompt_arg, list) and len(prompt_arg) > 0 and isinstance(prompt_arg[0], int)):
            prompt_arg = [prompt_arg]

        resp_list_data = []
        total_completion_token_count = 0
        total_prompt_token_count = 0
        choice_index = 0

        for idx, prompt in enumerate(prompt_arg, start=0):
            if isinstance(prompt, list) and len(prompt) > 0 and isinstance(prompt[0], int):
                # token lists: decode with the local model, or with tiktoken if the
                # request names a different (OpenAI) model
                if requested_model == shared.model_name:
                    prompt = decode(prompt)[0]
                else:
                    try:
                        encoder = tiktoken.encoding_for_model(requested_model)
                        prompt = encoder.decode(prompt)
                    except KeyError:
                        prompt = decode(prompt)[0]

            prefix = prompt if echo else ''
            token_count = len(encode(prompt)[0])
            total_prompt_token_count += token_count

            original_seed = generate_params.get('seed', -1)
            for _n in range(n_completions):
                # Increment seed for each completion to ensure diversity (matches llama.cpp native behavior)
                if original_seed >= 0:
                    generate_params['seed'] = original_seed + _n

                if logprob_proc:
                    logprob_proc.token_alternatives_history.clear()

                # generate reply #######################################
                debug_msg({'prompt': prompt, 'generate_params': generate_params})
                generator = generate_reply(prompt, generate_params, is_chat=False)
                answer = ''

                # Drain the generator; only the final answer matters here
                for a in generator:
                    answer = a

                completion_token_count = len(encode(answer)[0])
                total_completion_token_count += completion_token_count

                stop_reason = "stop"
                if token_count + completion_token_count >= generate_params['truncation_length'] or completion_token_count >= max_tokens:
                    stop_reason = "length"

                # Collect logprobs from whichever source the backend provides
                if logprob_proc:
                    all_entries = []
                    for alt in logprob_proc.token_alternatives_history:
                        all_entries.extend(_dict_to_logprob_entries(alt))
                    completion_logprobs = format_completion_logprobs(all_entries)
                elif shared.args.loader in ('llama.cpp', 'ExLlamav3'):
                    raw = getattr(shared.model, 'last_completion_probabilities', None)
                    completion_logprobs = format_completion_logprobs(raw)
                else:
                    completion_logprobs = None

                respi = {
                    "index": choice_index,
                    "finish_reason": stop_reason,
                    "text": prefix + answer + suffix,
                    "logprobs": completion_logprobs,
                }
                resp_list_data.append(respi)
                choice_index += 1

        resp = {
            "id": cmpl_id,
            "object": object_type,
            "created": created_time,
            "model": shared.model_name,
            "system_fingerprint": None,
            resp_list: resp_list_data,
            "usage": {
                "prompt_tokens": total_prompt_token_count,
                "completion_tokens": total_completion_token_count,
                "total_tokens": total_prompt_token_count + total_completion_token_count
            }
        }

        yield resp
    else:
        prompt = body[prompt_str]
        if isinstance(prompt, list):
            if prompt and isinstance(prompt[0], int):
                # token-id prompt: decode it back to text
                try:
                    encoder = tiktoken.encoding_for_model(requested_model)
                    prompt = encoder.decode(prompt)
                except KeyError:
                    prompt = decode(prompt)[0]
            else:
                raise InvalidRequestError(message="API Batched generation not yet supported.", param=prompt_str)

        prefix = prompt if echo else ''
        token_count = len(encode(prompt)[0])

        # Check if usage should be included in streaming chunks per OpenAI spec
        stream_options = body.get('stream_options')
        include_usage = bool(stream_options) and bool(stream_options.get('include_usage') if isinstance(stream_options, dict) else getattr(stream_options, 'include_usage', False))
        cmpl_logprobs_offset = [0]  # mutable for closure access in streaming

        def text_streaming_chunk(content):
            # begin streaming: build one text_completion chunk for `content`
            if logprob_proc:
                chunk_logprobs = format_completion_logprobs(_dict_to_logprob_entries(logprob_proc.token_alternatives))
            elif shared.args.loader in ('llama.cpp', 'ExLlamav3'):
                entries, cmpl_logprobs_offset[0] = _get_raw_logprob_entries(cmpl_logprobs_offset[0])
                chunk_logprobs = format_completion_logprobs(entries) if entries else None
            else:
                chunk_logprobs = None

            chunk = {
                "id": cmpl_id,
                "object": object_type,
                "created": created_time,
                "model": shared.model_name,
                "system_fingerprint": None,
                resp_list: [{
                    "index": 0,
                    "finish_reason": None,
                    "text": content,
                    "logprobs": chunk_logprobs,
                }],
            }

            return chunk

        # First chunk echoes the prompt when echo=True, otherwise ''
        chunk = text_streaming_chunk(prefix)
        if include_usage:
            chunk['usage'] = None
        yield chunk

        # generate reply #######################################
        debug_msg({'prompt': prompt, 'generate_params': generate_params})
        generator = generate_reply(prompt, generate_params, is_chat=False)

        answer = ''
        seen_content = ''
        completion_token_count = 0

        for a in generator:
            answer = a
            len_seen = len(seen_content)
            new_content = answer[len_seen:]

            if not new_content or chr(0xfffd) in new_content:  # partial unicode character, don't send it yet.
                continue

            seen_content = answer
            chunk = text_streaming_chunk(new_content)
            if include_usage:
                chunk['usage'] = None
            yield chunk

        completion_token_count = len(encode(answer)[0])
        stop_reason = "stop"
        if token_count + completion_token_count >= generate_params['truncation_length'] or completion_token_count >= max_tokens:
            stop_reason = "length"

        # Final chunk carries the suffix and the finish reason
        chunk = text_streaming_chunk(suffix)
        chunk[resp_list][0]["finish_reason"] = stop_reason
        usage = {
            "prompt_tokens": token_count,
            "completion_tokens": completion_token_count,
            "total_tokens": token_count + completion_token_count
        }
        if include_usage:
            chunk['usage'] = None
            yield chunk
            # Separate usage-only chunk with choices: [] per OpenAI spec
            yield {
                "id": cmpl_id,
                "object": object_type,
                "created": created_time,
                "model": shared.model_name,
                "system_fingerprint": None,
                resp_list: [],
                "usage": usage
            }
        else:
            yield chunk
def chat_completions(body: dict, is_legacy: bool = False, stop_event=None) -> dict:
    """Run a non-streaming chat completion and return only the final response dict."""
    responses = deque(chat_completions_common(body, is_legacy, stream=False, stop_event=stop_event), maxlen=1)
    return responses.pop()
def stream_chat_completions(body: dict, is_legacy: bool = False, stop_event=None):
    """Yield streaming chat completion chunks."""
    yield from chat_completions_common(body, is_legacy, stream=True, stop_event=stop_event)
def completions(body: dict, is_legacy: bool = False, stop_event=None) -> dict:
    """Run a non-streaming text completion and return only the final response dict."""
    responses = deque(completions_common(body, is_legacy, stream=False, stop_event=stop_event), maxlen=1)
    return responses.pop()
def stream_completions(body: dict, is_legacy: bool = False, stop_event=None):
    """Yield streaming text completion chunks."""
    yield from completions_common(body, is_legacy, stream=True, stop_event=stop_event)
def validateTools(tools: list[dict]):
# Validate each tool definition in the JSON array
valid_tools = None
for idx in range(len(tools)):
tool = tools[idx]
try:
tool_definition = ToolDefinition(**tool)
# Backfill defaults so Jinja2 templates don't crash on missing fields
func = tool.get("function", {})
if "description" not in func:
func["description"] = ""
if "parameters" not in func:
func["parameters"] = {"type": "object", "properties": {}}
if valid_tools is None:
valid_tools = []
valid_tools.append(tool)
except ValidationError:
raise InvalidRequestError(message=f"Invalid tool specification at index {idx}.", param='tools')
return valid_tools | --- +++ @@ -29,6 +29,7 @@
@functools.cache
def load_chat_template_file(filepath):
+ """Load a chat template from a file path (.jinja, .jinja2, or .yaml/.yml)."""
filepath = Path(filepath)
ext = filepath.suffix.lower()
text = filepath.read_text(encoding='utf-8')
@@ -39,6 +40,10 @@
def _get_raw_logprob_entries(offset=0):
+ """Get raw logprob entries from llama.cpp/ExLlamav3 backend, starting from offset.
+
+ Returns (new_entries, new_offset).
+ """
if not hasattr(shared.model, 'last_completion_probabilities') or not shared.model.last_completion_probabilities:
return [], offset
@@ -48,6 +53,7 @@
def _dict_to_logprob_entries(token_dict):
+ """Convert a flat {token: logprob} dict (from LogprobProcessor) to raw entry format."""
if not token_dict:
return []
@@ -55,10 +61,15 @@
def _parse_entry_top(entry):
+ """Extract the top logprobs list from a raw entry, handling both key names."""
return entry.get('top_logprobs', entry.get('top_probs', []))
def format_chat_logprobs(entries):
+ """Format logprob entries into OpenAI chat completions logprobs format.
+
+ Output: {"content": [{"token", "logprob", "bytes", "top_logprobs": [...]}]}
+ """
if not entries:
return None
@@ -93,6 +104,10 @@
def format_completion_logprobs(entries):
+ """Format logprob entries into OpenAI completions logprobs format.
+
+ Output: {"tokens", "token_logprobs", "top_logprobs": [{token: prob}], "text_offset"}
+ """
if not entries:
return None
@@ -189,6 +204,7 @@
def process_multimodal_content(content):
+ """Extract text and add image placeholders from OpenAI multimodal format"""
if isinstance(content, str):
return content
@@ -215,6 +231,10 @@
def convert_history(history):
+ '''
+ Chat histories in this program are in the format [message, reply].
+ This function converts OpenAI histories to that format.
+ '''
chat_dialogue = []
current_message = ""
current_reply = ""
@@ -884,4 +904,4 @@ except ValidationError:
raise InvalidRequestError(message=f"Invalid tool specification at index {idx}.", param='tools')
- return valid_tools+ return valid_tools
| https://raw.githubusercontent.com/oobabooga/text-generation-webui/HEAD/extensions/openai/completions.py |
Document this script properly | import os
import gradio as gr
# Get the directory containing this script so bias_options.txt lives next to it
current_dir = os.path.dirname(os.path.abspath(__file__))

# Create bias_options.txt with a default set of options on first run
bias_file = os.path.join(current_dir, "bias_options.txt")
if not os.path.isfile(bias_file):
    with open(bias_file, "w") as f:
        f.write("*I am so happy*\n*I am so sad*\n*I am so excited*\n*I am so bored*\n*I am so angry*")

# Read the bias options from the text file, one per line
with open(bias_file, "r") as f:
    bias_options = [line.strip() for line in f.readlines()]

# Extension state shared between the UI callbacks and the modifier hooks
params = {
    "activate": True,
    "bias string": " *I am so happy*",
    "custom string": "",
}
def input_modifier(string):
    """Hook applied to the user input before it is fed to the model (pass-through here)."""
    return string
def output_modifier(string):
    """Hook applied to the model output (pass-through here)."""
    return string
def bot_prefix_modifier(string):
    """Append the configured character bias to the bot reply prefix.

    A non-empty custom string takes precedence over the dropdown selection;
    when the extension is deactivated the prefix passes through unchanged.
    """
    if not params['activate']:
        return string

    bias = params['custom string'].strip() or params['bias string'].strip()
    return f'{string} {bias} '
def ui():
    """Build the Gradio settings UI for the character bias extension."""
    # Gradio elements
    activate = gr.Checkbox(value=params['activate'], label='Activate character bias')
    dropdown_string = gr.Dropdown(choices=bias_options, value=params["bias string"], label='Character bias', info='To edit the options in this dropdown edit the "bias_options.txt" file')
    custom_string = gr.Textbox(value=params['custom string'], placeholder="Enter custom bias string", label="Custom Character Bias", info='If not empty, will be used instead of the value above')

    # Event functions to update the parameters in the backend
    def update_bias_string(x):
        # Fall back to the current dropdown value when the event payload is empty
        if x:
            params.update({"bias string": x})
        else:
            params.update({"bias string": dropdown_string.get()})
        return x

    def update_custom_string(x):
        params.update({"custom string": x})

    dropdown_string.change(update_bias_string, dropdown_string, None)
    custom_string.change(update_custom_string, custom_string, None)
activate.change(lambda x: params.update({"activate": x}), activate, None) | --- +++ @@ -23,14 +23,26 @@
def input_modifier(string):
+ """
+ This function is applied to your text inputs before
+ they are fed into the model.
+ """
return string
def output_modifier(string):
+ """
+ This function is applied to the model outputs.
+ """
return string
def bot_prefix_modifier(string):
+ """
+ This function is only applied in chat mode. It modifies
+ the prefix text for the Bot and can be used to bias its
+ behavior.
+ """
if params['activate']:
if params['custom string'].strip() != '':
return f'{string} {params["custom string"].strip()} '
@@ -59,4 +71,4 @@
dropdown_string.change(update_bias_string, dropdown_string, None)
custom_string.change(update_custom_string, custom_string, None)
- activate.change(lambda x: params.update({"activate": x}), activate, None)+ activate.change(lambda x: params.update({"activate": x}), activate, None)
| https://raw.githubusercontent.com/oobabooga/text-generation-webui/HEAD/extensions/character_bias/script.py |
Provide clean and structured docstrings | import functools
import pprint
from pathlib import Path
import yaml
from modules import shared
from modules.loaders import loaders_samplers
from modules.logging_colors import logger
default_preset_values = {
'temperature': 1,
'dynatemp_low': 1,
'dynatemp_high': 1,
'dynatemp_exponent': 1,
'smoothing_factor': 0,
'smoothing_curve': 1,
'top_p': 1,
'top_k': 0,
'min_p': 0,
'top_n_sigma': 0,
'typical_p': 1,
'xtc_threshold': 0.1,
'xtc_probability': 0,
'epsilon_cutoff': 0,
'eta_cutoff': 0,
'tfs': 1,
'top_a': 0,
'adaptive_target': 0,
'adaptive_decay': 0.9,
'dry_multiplier': 0,
'dry_allowed_length': 2,
'dry_base': 1.75,
'repetition_penalty': 1,
'frequency_penalty': 0,
'presence_penalty': 0,
'encoder_repetition_penalty': 1,
'no_repeat_ngram_size': 0,
'repetition_penalty_range': 1024,
'penalty_alpha': 0,
'guidance_scale': 1,
'mirostat_mode': 0,
'mirostat_tau': 5,
'mirostat_eta': 0.1,
'do_sample': True,
'dynamic_temperature': False,
'temperature_last': False,
'sampler_priority': 'repetition_penalty\npresence_penalty\nfrequency_penalty\ndry\ntop_n_sigma\ntemperature\ndynamic_temperature\nquadratic_sampling\ntop_k\ntop_p\ntypical_p\nepsilon_cutoff\neta_cutoff\ntfs\ntop_a\nmin_p\nadaptive_p\nmirostat\nxtc\nencoder_repetition_penalty\nno_repeat_ngram',
'dry_sequence_breakers': '"\\n", ":", "\\"", "*"',
}
def default_preset():
result = dict(default_preset_values)
if shared.args.portable:
samplers = result['sampler_priority'].split('\n')
samplers = [sampler for sampler in samplers if sampler in ["dry", "top_k", "top_p", "top_n_sigma", "min_p", "temperature", "xtc", "typical_p", "repetition_penalty"]]
result['sampler_priority'] = '\n'.join(samplers)
return result
def presets_params():
return [k for k in default_preset()]
def load_preset(name, verbose=False):
generate_params = default_preset()
if name not in ['None', None, '']:
path = shared.user_data_dir / 'presets' / f'{name}.yaml'
if path.exists():
with open(path, 'r') as infile:
preset = yaml.safe_load(infile)
for k in preset:
generate_params[k] = preset[k]
else:
logger.error(f"The preset \"{name}\" does not exist under \"{path}\". Using the default parameters.")
if verbose:
logger.info(f"\"{name}\" preset:")
pprint.PrettyPrinter(indent=4, width=1, sort_dicts=False).pprint(remove_defaults(generate_params))
return generate_params
@functools.cache
def load_preset_memoized(name):
return load_preset(name)
def load_preset_for_ui(name, state):
generate_params = load_preset(name, verbose=True)
state.update(generate_params)
return state, *[generate_params[k] for k in presets_params()]
def reset_preset_for_ui(name, state):
generate_params = load_preset(name, verbose=True)
state.update(generate_params)
return state, *[generate_params[k] for k in presets_params()]
def neutralize_samplers_for_ui(state):
generate_params = default_preset()
state.update(generate_params)
return state, *[generate_params[k] for k in presets_params()]
def loader_contains(sampler):
if sampler == 'dynamic_temperature' and 'dynatemp_low' in loaders_samplers[shared.args.loader]:
return True
else:
return sampler in loaders_samplers[shared.args.loader]
def remove_defaults(state):
defaults = default_preset()
data = {k: state[k] for k in presets_params()}
for k in list(data.keys()):
if data[k] == defaults[k]:
del data[k]
return data
def generate_preset_yaml(state):
data = remove_defaults(state)
return yaml.dump(data, sort_keys=False) | --- +++ @@ -98,12 +98,14 @@
def reset_preset_for_ui(name, state):
+ """Reset current preset to its saved values from file"""
generate_params = load_preset(name, verbose=True)
state.update(generate_params)
return state, *[generate_params[k] for k in presets_params()]
def neutralize_samplers_for_ui(state):
+ """Set all samplers to their default/neutral values"""
generate_params = default_preset()
state.update(generate_params)
return state, *[generate_params[k] for k in presets_params()]
@@ -129,4 +131,4 @@
def generate_preset_yaml(state):
data = remove_defaults(state)
- return yaml.dump(data, sort_keys=False)+ return yaml.dump(data, sort_keys=False)
| https://raw.githubusercontent.com/oobabooga/text-generation-webui/HEAD/modules/presets.py |
Add docstrings following best practices | import math
import re
import string
import nltk
import spacy
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from num2words import num2words
class TextPreprocessorBuilder:
# Define class variables as None initially
_stop_words = set(stopwords.words('english'))
_lemmatizer = WordNetLemmatizer()
# Some of the functions are expensive. We cache the results.
_lemmatizer_cache = {}
_pos_remove_cache = {}
def __init__(self, text: str):
self.text = text
def to_lower(self):
# Match both words and non-word characters
tokens = re.findall(r'\b\w+\b|\W+', self.text)
for i, token in enumerate(tokens):
# Check if token is a word
if re.match(r'^\w+$', token):
# Check if token is not an abbreviation or constant
if not re.match(r'^[A-Z]+$', token) and not re.match(r'^[A-Z_]+$', token):
tokens[i] = token.lower()
self.text = "".join(tokens)
return self
def num_to_word(self, min_len: int = 1):
# Match both words and non-word characters
tokens = re.findall(r'\b\w+\b|\W+', self.text)
for i, token in enumerate(tokens):
# Check if token is a number of length `min_len` or more
if token.isdigit() and len(token) >= min_len:
# This is done to pay better attention to numbers (e.g. ticket numbers, thread numbers, post numbers)
# 740700 will become "seven hundred and forty thousand seven hundred".
tokens[i] = num2words(int(token)).replace(",", "") # Remove commas from num2words.
self.text = "".join(tokens)
return self
def num_to_char_long(self, min_len: int = 1):
# Match both words and non-word characters
tokens = re.findall(r'\b\w+\b|\W+', self.text)
for i, token in enumerate(tokens):
# Check if token is a number of length `min_len` or more
if token.isdigit() and len(token) >= min_len:
# This is done to pay better attention to numbers (e.g. ticket numbers, thread numbers, post numbers)
# 740700 will become HHHHHHEEEEEAAAAHHHAAA
def convert_token(token):
return ''.join((chr(int(digit) + 65) * (i + 1)) for i, digit in enumerate(token[::-1]))[::-1]
tokens[i] = convert_token(tokens[i])
self.text = "".join(tokens)
return self
def num_to_char(self, min_len: int = 1):
# Match both words and non-word characters
tokens = re.findall(r'\b\w+\b|\W+', self.text)
for i, token in enumerate(tokens):
# Check if token is a number of length `min_len` or more
if token.isdigit() and len(token) >= min_len:
# This is done to pay better attention to numbers (e.g. ticket numbers, thread numbers, post numbers)
# 740700 will become HEAHAA
tokens[i] = ''.join(chr(int(digit) + 65) for digit in token)
self.text = "".join(tokens)
return self
def merge_spaces(self):
self.text = re.sub(' +', ' ', self.text)
return self
def strip(self):
self.text = self.text.strip()
return self
def remove_punctuation(self):
self.text = self.text.translate(str.maketrans('', '', string.punctuation))
return self
def remove_stopwords(self):
self.text = "".join([word for word in re.findall(r'\b\w+\b|\W+', self.text) if word not in TextPreprocessorBuilder._stop_words])
return self
def remove_specific_pos(self):
processed_text = TextPreprocessorBuilder._pos_remove_cache.get(self.text)
if processed_text:
self.text = processed_text
return self
# Match both words and non-word characters
tokens = re.findall(r'\b\w+\b|\W+', self.text)
# Exclude adverbs and interjections
excluded_tags = ['RB', 'RBR', 'RBS', 'UH']
for i, token in enumerate(tokens):
# Check if token is a word
if re.match(r'^\w+$', token):
# Part-of-speech tag the word
pos = nltk.pos_tag([token])[0][1]
# If the word's POS tag is in the excluded list, remove the word
if pos in excluded_tags:
tokens[i] = ''
new_text = "".join(tokens)
TextPreprocessorBuilder._pos_remove_cache[self.text] = new_text
self.text = new_text
return self
def lemmatize(self):
processed_text = TextPreprocessorBuilder._lemmatizer_cache.get(self.text)
if processed_text:
self.text = processed_text
return self
new_text = "".join([TextPreprocessorBuilder._lemmatizer.lemmatize(word) for word in re.findall(r'\b\w+\b|\W+', self.text)])
TextPreprocessorBuilder._lemmatizer_cache[self.text] = new_text
self.text = new_text
return self
def build(self):
return self.text
class TextSummarizer:
_nlp_pipeline = None
_cache = {}
@staticmethod
def _load_nlp_pipeline():
# Lazy-load it.
if TextSummarizer._nlp_pipeline is None:
TextSummarizer._nlp_pipeline = spacy.load('en_core_web_sm')
TextSummarizer._nlp_pipeline.add_pipe("textrank", last=True)
return TextSummarizer._nlp_pipeline
@staticmethod
def process_long_text(text: str, min_num_sent: int) -> list[str]:
# Attempt to get the result from cache
cache_key = (text, min_num_sent)
cached_result = TextSummarizer._cache.get(cache_key, None)
if cached_result is not None:
return cached_result
nlp_pipeline = TextSummarizer._load_nlp_pipeline()
doc = nlp_pipeline(text)
num_sent = len(list(doc.sents))
result = []
if num_sent >= min_num_sent:
limit_phrases = math.ceil(len(doc._.phrases) * 0.20) # 20% of the phrases, rounded up
limit_sentences = math.ceil(num_sent * 0.20) # 20% of the sentences, rounded up
result = [str(sent) for sent in doc._.textrank.summary(limit_phrases=limit_phrases, limit_sentences=limit_sentences)]
else:
result = [text]
# Store the result in cache before returning it
TextSummarizer._cache[cache_key] = result
return result | --- +++ @@ -1,3 +1,16 @@+"""
+This module contains utils for preprocessing the text before converting it to embeddings.
+
+- TextPreprocessorBuilder preprocesses individual strings.
+ * lowering cases
+ * converting numbers to words or characters
+ * merging and stripping spaces
+ * removing punctuation
+ * removing stop words
+ * lemmatizing
+ * removing specific parts of speech (adverbs and interjections)
+- TextSummarizer extracts the most important sentences from a long string using text-ranking.
+"""
import math
import re
import string
@@ -89,6 +102,10 @@ return self
def remove_specific_pos(self):
+ """
+ In the English language, adverbs and interjections rarely provide meaningul information.
+ Removing them improves the embedding precision. Don't tell JK Rowling, though.
+ """
processed_text = TextPreprocessorBuilder._pos_remove_cache.get(self.text)
if processed_text:
self.text = processed_text
@@ -145,6 +162,14 @@
@staticmethod
def process_long_text(text: str, min_num_sent: int) -> list[str]:
+ """
+ This function applies a text summarization process on a given text string, extracting
+ the most important sentences based on the principle that 20% of the content is responsible
+ for 80% of the meaning (the Pareto Principle).
+
+ Returns:
+ list: A list of the most important sentences
+ """
# Attempt to get the result from cache
cache_key = (text, min_num_sent)
@@ -169,4 +194,4 @@
# Store the result in cache before returning it
TextSummarizer._cache[cache_key] = result
- return result+ return result
| https://raw.githubusercontent.com/oobabooga/text-generation-webui/HEAD/extensions/superboogav2/data_preprocessor.py |
Create simple docstrings for beginners | import json
import random
import re
def get_tool_call_id() -> str:
letter_bytes = "abcdefghijklmnopqrstuvwxyz0123456789"
b = [random.choice(letter_bytes) for _ in range(8)]
return "call_" + "".join(b).lower()
# All known opening markers for tool calls across model formats.
TOOL_CALL_OPENING_MARKERS = [
'<tool_call>',
'<function_call>',
'<minimax:tool_call>',
'<|tool_call_begin|>',
'<|tool_calls_section_begin|>',
'<|tool▁call▁begin|>',
'<|tool▁calls▁begin|>',
'[TOOL_CALLS]',
'to=functions.',
'<|channel|>commentary',
]
def streaming_tool_buffer_check(text, markers=None, tool_names=None, check_bare_names=False):
# Full marker found in text → buffer permanently.
# Always checks ALL known markers regardless of template (cheap safety net).
for marker in TOOL_CALL_OPENING_MARKERS:
if marker in text:
return True
# Bare function-name full match: "get_weather{...}" or "get_weather {...}"
if tool_names:
for name in tool_names:
if name + '{' in text or name + ' {' in text:
return True
# Partial-prefix matching: only for template-specific markers.
for marker in (markers if markers is not None else TOOL_CALL_OPENING_MARKERS):
for prefix_len in range(min(len(marker) - 1, len(text)), 0, -1):
if text.endswith(marker[:prefix_len]):
return True
# Bare-name partial matching: only when template format is unknown.
if check_bare_names and tool_names:
for name in tool_names:
if text.endswith(name):
return True
for prefix_len in range(min(len(name) - 1, len(text)), 0, -1):
if text.endswith(name[:prefix_len]):
return True
return False
def check_and_sanitize_tool_call_candidate(candidate_dict: dict, tool_names: list[str]):
# check if property 'function' exists and is a dictionary, otherwise adapt dict
if 'function' not in candidate_dict and 'name' in candidate_dict and isinstance(candidate_dict['name'], str):
candidate_dict = {"type": "function", "function": candidate_dict}
if 'function' in candidate_dict and isinstance(candidate_dict['function'], str):
candidate_dict['name'] = candidate_dict['function']
del candidate_dict['function']
candidate_dict = {"type": "function", "function": candidate_dict}
if 'function' in candidate_dict and isinstance(candidate_dict['function'], dict):
# check if 'name' exists within 'function' and is part of known tools
if 'name' in candidate_dict['function'] and candidate_dict['function']['name'] in tool_names:
candidate_dict["type"] = "function" # ensure required property 'type' exists and has the right value
# map property 'parameters' used by some older models to 'arguments'
if "arguments" not in candidate_dict["function"] and "parameters" in candidate_dict["function"]:
candidate_dict["function"]["arguments"] = candidate_dict["function"]["parameters"]
del candidate_dict["function"]["parameters"]
return candidate_dict
return None
def _extract_balanced_json(text: str, start: int) -> str | None:
if start >= len(text) or text[start] != '{':
return None
depth = 0
in_string = False
escape_next = False
for i in range(start, len(text)):
c = text[i]
if escape_next:
escape_next = False
continue
if c == '\\' and in_string:
escape_next = True
continue
if c == '"':
in_string = not in_string
continue
if in_string:
continue
if c == '{':
depth += 1
elif c == '}':
depth -= 1
if depth == 0:
return text[start:i + 1]
return None
def _parse_channel_tool_calls(answer: str, tool_names: list[str]):
matches = []
start_pos = None
# Pattern 1: to=functions.NAME before <|channel|> (GPT-OSS primary format)
# Pattern 2: to=functions.NAME after <|channel|> (alternative format)
patterns = [
r'to=functions\.([^<\s]+)\s*<\|channel\|>[^<]*<\|message\|>',
r'<\|channel\|>\w+ to=functions\.([^<\s]+).*?<\|message\|>',
]
for pattern in patterns:
for m in re.finditer(pattern, answer):
func_name = m.group(1).strip()
if func_name not in tool_names:
continue
json_str = _extract_balanced_json(answer, m.end())
if json_str is None:
continue
try:
arguments = json.loads(json_str)
if start_pos is None:
prefix = answer.rfind('<|start|>assistant', 0, m.start())
start_pos = prefix if prefix != -1 else m.start()
matches.append({
"type": "function",
"function": {
"name": func_name,
"arguments": arguments
}
})
except json.JSONDecodeError:
pass
if matches:
break
return matches, start_pos
def _parse_mistral_token_tool_calls(answer: str, tool_names: list[str]):
matches = []
start_pos = None
for m in re.finditer(
r'\[TOOL_CALLS\]\s*(\S+?)\s*\[ARGS\]\s*',
answer
):
func_name = m.group(1).strip()
if func_name not in tool_names:
continue
json_str = _extract_balanced_json(answer, m.end())
if json_str is None:
continue
try:
arguments = json.loads(json_str)
if start_pos is None:
start_pos = m.start()
matches.append({
"type": "function",
"function": {
"name": func_name,
"arguments": arguments
}
})
except json.JSONDecodeError:
pass
return matches, start_pos
def _parse_bare_name_tool_calls(answer: str, tool_names: list[str]):
matches = []
start_pos = None
# Match tool name followed by opening brace, then extract balanced JSON
escaped_names = [re.escape(name) for name in tool_names]
pattern = r'(?:' + '|'.join(escaped_names) + r')\s*\{'
for match in re.finditer(pattern, answer):
text = match.group(0)
name = None
for n in tool_names:
if text.startswith(n):
name = n
break
if not name:
continue
brace_start = match.end() - 1
json_str = _extract_balanced_json(answer, brace_start)
if json_str is None:
continue
try:
arguments = json.loads(json_str)
if start_pos is None:
start_pos = match.start()
matches.append({
"type": "function",
"function": {
"name": name,
"arguments": arguments
}
})
except json.JSONDecodeError:
pass
return matches, start_pos
def _parse_xml_param_tool_calls(answer: str, tool_names: list[str]):
matches = []
start_pos = None
for tc_match in re.finditer(r'<tool_call>\s*(.*?)\s*</tool_call>', answer, re.DOTALL):
tc_content = tc_match.group(1)
func_match = re.search(r'<function=([^>]+)>', tc_content)
if not func_match:
continue
func_name = func_match.group(1).strip()
if func_name not in tool_names:
continue
arguments = {}
for param_match in re.finditer(r'<parameter=([^>]+)>\s*(.*?)\s*</parameter>', tc_content, re.DOTALL):
param_name = param_match.group(1).strip()
param_value = param_match.group(2).strip()
try:
param_value = json.loads(param_value)
except (json.JSONDecodeError, ValueError):
pass # keep as string
arguments[param_name] = param_value
if start_pos is None:
start_pos = tc_match.start()
matches.append({
"type": "function",
"function": {
"name": func_name,
"arguments": arguments
}
})
return matches, start_pos
def _parse_kimi_tool_calls(answer: str, tool_names: list[str]):
matches = []
start_pos = None
for m in re.finditer(
r'<\|tool_call_begin\|>\s*(?:functions\.)?(\S+?)(?::\d+)?\s*<\|tool_call_argument_begin\|>\s*',
answer
):
func_name = m.group(1).strip()
if func_name not in tool_names:
continue
json_str = _extract_balanced_json(answer, m.end())
if json_str is None:
continue
try:
arguments = json.loads(json_str)
if start_pos is None:
# Check for section begin marker before the call marker
section = answer.rfind('<|tool_calls_section_begin|>', 0, m.start())
start_pos = section if section != -1 else m.start()
matches.append({
"type": "function",
"function": {
"name": func_name,
"arguments": arguments
}
})
except json.JSONDecodeError:
pass
return matches, start_pos
def _parse_minimax_tool_calls(answer: str, tool_names: list[str]):
matches = []
start_pos = None
for tc_match in re.finditer(r'<minimax:tool_call>\s*(.*?)\s*</minimax:tool_call>', answer, re.DOTALL):
tc_content = tc_match.group(1)
# Split on <invoke> to handle multiple parallel calls in one block
for invoke_match in re.finditer(r'<invoke\s+name="([^"]+)">(.*?)</invoke>', tc_content, re.DOTALL):
func_name = invoke_match.group(1).strip()
if func_name not in tool_names:
continue
invoke_body = invoke_match.group(2)
arguments = {}
for param_match in re.finditer(r'<parameter\s+name="([^"]+)">\s*(.*?)\s*</parameter>', invoke_body, re.DOTALL):
param_name = param_match.group(1).strip()
param_value = param_match.group(2).strip()
try:
param_value = json.loads(param_value)
except (json.JSONDecodeError, ValueError):
pass # keep as string
arguments[param_name] = param_value
if start_pos is None:
start_pos = tc_match.start()
matches.append({
"type": "function",
"function": {
"name": func_name,
"arguments": arguments
}
})
return matches, start_pos
def _parse_deep_seek_tool_calls(answer: str, tool_names: list[str]):
matches = []
start_pos = None
for m in re.finditer(
r'<|tool▁call▁begin|>\s*(\S+?)\s*<|tool▁sep|>\s*',
answer
):
func_name = m.group(1).strip()
if func_name not in tool_names:
continue
json_str = _extract_balanced_json(answer, m.end())
if json_str is None:
continue
try:
arguments = json.loads(json_str)
if start_pos is None:
# Check for section begin marker before the call marker
section = answer.rfind('<|tool▁calls▁begin|>', 0, m.start())
start_pos = section if section != -1 else m.start()
matches.append({
"type": "function",
"function": {
"name": func_name,
"arguments": arguments
}
})
except json.JSONDecodeError:
pass
return matches, start_pos
def _parse_glm_tool_calls(answer: str, tool_names: list[str]):
matches = []
start_pos = None
for tc_match in re.finditer(r'<tool_call>\s*(.*?)\s*</tool_call>', answer, re.DOTALL):
tc_content = tc_match.group(1)
# First non-tag text is the function name
name_match = re.match(r'([^<\s]+)', tc_content.strip())
if not name_match:
continue
func_name = name_match.group(1).strip()
if func_name not in tool_names:
continue
# Extract arg_key/arg_value pairs
keys = [k.group(1).strip() for k in re.finditer(r'<arg_key>\s*(.*?)\s*</arg_key>', tc_content, re.DOTALL)]
vals = [v.group(1).strip() for v in re.finditer(r'<arg_value>\s*(.*?)\s*</arg_value>', tc_content, re.DOTALL)]
if len(keys) != len(vals):
continue
arguments = {}
for k, v in zip(keys, vals):
try:
v = json.loads(v)
except (json.JSONDecodeError, ValueError):
pass # keep as string
arguments[k] = v
if start_pos is None:
start_pos = tc_match.start()
matches.append({
"type": "function",
"function": {
"name": func_name,
"arguments": arguments
}
})
return matches, start_pos
def _parse_pythonic_tool_calls(answer: str, tool_names: list[str]):
matches = []
start_pos = None
# Match a bracketed list of function calls
bracket_match = re.search(r'\[([^\[\]]+)\]', answer)
if not bracket_match:
return matches, start_pos
inner = bracket_match.group(1)
# Build pattern for known tool names
escaped_names = [re.escape(name) for name in tool_names]
name_pattern = '|'.join(escaped_names)
for call_match in re.finditer(
r'(' + name_pattern + r')\(([^)]*)\)',
inner
):
func_name = call_match.group(1)
params_str = call_match.group(2).strip()
arguments = {}
if params_str:
# Parse key="value" pairs, handling commas inside quoted values
for param_match in re.finditer(
r'(\w+)\s*=\s*("(?:[^"\\]|\\.)*"|\'(?:[^\'\\]|\\.)*\'|[^,\)]+)',
params_str
):
param_name = param_match.group(1)
param_value = param_match.group(2).strip()
# Strip surrounding quotes
if (param_value.startswith('"') and param_value.endswith('"')) or \
(param_value.startswith("'") and param_value.endswith("'")):
param_value = param_value[1:-1]
# Try to parse as JSON for numeric/bool/null values
try:
param_value = json.loads(param_value)
except (json.JSONDecodeError, ValueError):
pass
arguments[param_name] = param_value
if start_pos is None:
start_pos = bracket_match.start()
matches.append({
"type": "function",
"function": {
"name": func_name,
"arguments": arguments
}
})
return matches, start_pos
# Format registry: maps template substrings to the parser and streaming
# markers for that format. When a format's hints are NOT found in the
# template, its parser and markers are excluded.
TOOL_CALL_FORMATS = [
{
'template_hints': ['tool▁call▁begin', 'tool▁calls▁begin'],
'parser': _parse_deep_seek_tool_calls,
'markers': ['<|tool▁call▁begin|>', '<|tool▁calls▁begin|>'],
},
{
'template_hints': ['<|tool_call_begin|>', 'tool_calls_section'],
'parser': _parse_kimi_tool_calls,
'markers': ['<|tool_call_begin|>', '<|tool_calls_section_begin|>'],
},
{
'template_hints': ['to=functions.', '<|channel|>'],
'parser': _parse_channel_tool_calls,
'markers': ['to=functions.', '<|channel|>commentary'],
},
{
'template_hints': ['minimax:tool_call'],
'parser': _parse_minimax_tool_calls,
'markers': ['<minimax:tool_call>'],
},
{
'template_hints': ['<arg_key>'],
'parser': _parse_glm_tool_calls,
'markers': ['<tool_call>'],
},
{
'template_hints': ['<tool_call>'],
'parser': _parse_xml_param_tool_calls,
'markers': ['<tool_call>'],
},
{
'template_hints': ['[TOOL_CALLS]'],
'parser': _parse_mistral_token_tool_calls,
'markers': ['[TOOL_CALLS]'],
},
{
'template_hints': ['<function_call>'],
'parser': None,
'markers': ['<function_call>'],
},
]
# Default ordered list of all specialized parsers.
ALL_PARSERS = [
_parse_deep_seek_tool_calls,
_parse_kimi_tool_calls,
_parse_channel_tool_calls,
_parse_minimax_tool_calls,
_parse_glm_tool_calls,
_parse_xml_param_tool_calls,
_parse_mistral_token_tool_calls,
_parse_bare_name_tool_calls,
_parse_pythonic_tool_calls,
]
def detect_tool_call_format(template_str):
if not template_str:
return None, TOOL_CALL_OPENING_MARKERS, True
matched_any = False
exclude_parsers = []
exclude_markers = []
matched_markers = []
for fmt in TOOL_CALL_FORMATS:
if any(hint in template_str for hint in fmt['template_hints']):
matched_any = True
matched_markers.extend(fmt['markers'])
else:
if fmt['parser'] is not None:
exclude_parsers.append(fmt['parser'])
exclude_markers.extend(fmt['markers'])
if not matched_any:
return None, TOOL_CALL_OPENING_MARKERS, True
parsers = [p for p in ALL_PARSERS if p not in exclude_parsers]
markers = [m for m in TOOL_CALL_OPENING_MARKERS if m not in exclude_markers or m in matched_markers]
return parsers, markers, False
def parse_tool_call(answer: str, tool_names: list[str], return_prefix: bool = False, parsers: list = None):
matches = []
start_pos = None
def _return(matches, start_pos):
if return_prefix:
prefix = answer[:start_pos] if matches and start_pos is not None else ''
return matches, prefix
return matches
# Try specialized parsers.
for parser in (parsers if parsers is not None else ALL_PARSERS):
matches, start_pos = parser(answer, tool_names)
if matches:
return _return(matches, start_pos)
# Generic fallback: regex pattern to find the JSON content wrapped in <function>, <tools>, <tool_call>, and other tags observed from various models
patterns = [r"(```[^\n]*)\n(.*?)```", r"<([^>]+)>(.*?)</\1>"]
for pattern in patterns:
for match in re.finditer(pattern, answer, re.DOTALL):
if match.group(2) is None:
continue
# remove backtick wraps if present
candidate = re.sub(r"^```(json|xml|python[^\n]*)\n", "", match.group(2).strip())
candidate = re.sub(r"```$", "", candidate.strip())
# unwrap inner tags
candidate = re.sub(pattern, r"\2", candidate.strip(), flags=re.DOTALL)
# llm might have generated multiple json objects separated by linebreaks, check for this pattern and try parsing each object individually
if re.search(r"\}\s*\n\s*\{", candidate) is not None:
candidate = re.sub(r"\}\s*\n\s*\{", "},\n{", candidate)
if not candidate.strip().startswith("["):
candidate = "[" + candidate + "]"
candidates = []
try:
# parse the candidate JSON into a dictionary
candidates = json.loads(candidate)
if not isinstance(candidates, list):
candidates = [candidates]
except json.JSONDecodeError:
# Ignore invalid JSON silently
continue
for candidate_dict in candidates:
checked_candidate = check_and_sanitize_tool_call_candidate(candidate_dict, tool_names)
if checked_candidate is not None:
if start_pos is None:
start_pos = match.start()
matches.append(checked_candidate)
# last resort if nothing has been mapped: LLM might have produced plain json tool call without xml-like tags
if len(matches) == 0:
try:
candidate = answer
# llm might have generated multiple json objects separated by linebreaks, check for this pattern and try parsing each object individually
if re.search(r"\}\s*\n\s*\{", candidate) is not None:
candidate = re.sub(r"\}\s*\n\s*\{", "},\n{", candidate)
if not candidate.strip().startswith("["):
candidate = "[" + candidate + "]"
# parse the candidate JSON into a dictionary
candidates = json.loads(candidate)
if not isinstance(candidates, list):
candidates = [candidates]
for candidate_dict in candidates:
checked_candidate = check_and_sanitize_tool_call_candidate(candidate_dict, tool_names)
if checked_candidate is not None:
matches.append(checked_candidate)
except json.JSONDecodeError:
# Ignore invalid JSON silently
pass
return _return(matches, start_pos) | --- +++ @@ -25,6 +25,18 @@
def streaming_tool_buffer_check(text, markers=None, tool_names=None, check_bare_names=False):
+ '''
+ Check whether streaming output should be withheld because it may
+ contain tool-call markup.
+
+ Args:
+ text: Full accumulated internal text.
+ markers: Template-specific markers for partial-prefix matching.
+ If None, falls back to TOOL_CALL_OPENING_MARKERS.
+ tool_names: List of tool function names.
+ check_bare_names: Whether to do partial-prefix matching on tool
+ names (for models with unknown template format).
+ '''
# Full marker found in text → buffer permanently.
# Always checks ALL known markers regardless of template (cheap safety net).
for marker in TOOL_CALL_OPENING_MARKERS:
@@ -76,6 +88,11 @@
def _extract_balanced_json(text: str, start: int) -> str | None:
+ """Extract a balanced JSON object from text starting at the given position.
+
+ Walks through the string tracking brace depth and string boundaries
+ to correctly handle arbitrary nesting levels.
+ """
if start >= len(text) or text[start] != '{':
return None
depth = 0
@@ -104,6 +121,13 @@
def _parse_channel_tool_calls(answer: str, tool_names: list[str]):
+ """Parse channel-based tool calls used by GPT-OSS and similar models.
+
+ Format:
+ <|start|>assistant to=functions.func_name<|channel|>commentary json<|message|>{"arg": "value"}
+ or:
+ <|channel|>commentary to=functions.func_name <|constrain|>json<|message|>{"arg": "value"}
+ """
matches = []
start_pos = None
# Pattern 1: to=functions.NAME before <|channel|> (GPT-OSS primary format)
@@ -140,6 +164,11 @@
def _parse_mistral_token_tool_calls(answer: str, tool_names: list[str]):
+ """Parse Mistral/Devstral-style tool calls with [TOOL_CALLS] and [ARGS] special tokens.
+
+ Format:
+ [TOOL_CALLS]func_name[ARGS]{"arg": "value"}
+ """
matches = []
start_pos = None
for m in re.finditer(
@@ -169,6 +198,12 @@
def _parse_bare_name_tool_calls(answer: str, tool_names: list[str]):
+ """Parse bare function-name style tool calls used by Mistral and similar models.
+
+ Format:
+ functionName{"arg": "value"}
+ Multiple calls are concatenated directly or separated by whitespace.
+ """
matches = []
start_pos = None
# Match tool name followed by opening brace, then extract balanced JSON
@@ -204,6 +239,15 @@
def _parse_xml_param_tool_calls(answer: str, tool_names: list[str]):
+ """Parse XML-parameter style tool calls used by Qwen3.5 and similar models.
+
+ Format:
+ <tool_call>
+ <function=function_name>
+ <parameter=param_name>value</parameter>
+ </function>
+ </tool_call>
+ """
matches = []
start_pos = None
for tc_match in re.finditer(r'<tool_call>\s*(.*?)\s*</tool_call>', answer, re.DOTALL):
@@ -236,6 +280,13 @@
def _parse_kimi_tool_calls(answer: str, tool_names: list[str]):
+ """Parse Kimi-K2-style tool calls using pipe-delimited tokens.
+
+ Format:
+ <|tool_calls_section_begin|>
+ <|tool_call_begin|>functions.func_name:index<|tool_call_argument_begin|>{"arg": "value"}<|tool_call_end|>
+ <|tool_calls_section_end|>
+ """
matches = []
start_pos = None
for m in re.finditer(
@@ -267,6 +318,15 @@
def _parse_minimax_tool_calls(answer: str, tool_names: list[str]):
+ """Parse MiniMax-style tool calls using invoke/parameter XML tags.
+
+ Format:
+ <minimax:tool_call>
+ <invoke name="function_name">
+ <parameter name="param_name">value</parameter>
+ </invoke>
+ </minimax:tool_call>
+ """
matches = []
start_pos = None
for tc_match in re.finditer(r'<minimax:tool_call>\s*(.*?)\s*</minimax:tool_call>', answer, re.DOTALL):
@@ -299,6 +359,11 @@
def _parse_deep_seek_tool_calls(answer: str, tool_names: list[str]):
+ """Parse DeepSeek-style tool calls using fullwidth Unicode token delimiters.
+
+ Format:
+ <|tool▁calls▁begin|><|tool▁call▁begin|>func_name<|tool▁sep|>{"arg": "value"}<|tool▁call▁end|><|tool▁calls▁end|>
+ """
matches = []
start_pos = None
for m in re.finditer(
@@ -330,6 +395,14 @@
def _parse_glm_tool_calls(answer: str, tool_names: list[str]):
+ """Parse GLM-style tool calls using arg_key/arg_value XML pairs.
+
+ Format:
+ <tool_call>function_name
+ <arg_key>key1</arg_key>
+ <arg_value>value1</arg_value>
+ </tool_call>
+ """
matches = []
start_pos = None
for tc_match in re.finditer(r'<tool_call>\s*(.*?)\s*</tool_call>', answer, re.DOTALL):
@@ -366,6 +439,11 @@
def _parse_pythonic_tool_calls(answer: str, tool_names: list[str]):
+ """Parse pythonic-style tool calls used by Llama 4 and similar models.
+
+ Format:
+ [func_name(param1="value1", param2="value2"), func_name2(...)]
+ """
matches = []
start_pos = None
# Match a bracketed list of function calls
@@ -480,6 +558,14 @@
def detect_tool_call_format(template_str):
+ """Inspect a chat/instruction template to determine which tool call
+ formats are relevant.
+
+ Uses an exclude-based approach: starts with all parsers/markers,
+ then removes the ones whose hints are not found in the template.
+
+ Returns (parsers, streaming_markers, check_bare_names).
+ """
if not template_str:
return None, TOOL_CALL_OPENING_MARKERS, True
@@ -578,4 +664,4 @@ # Ignore invalid JSON silently
pass
- return _return(matches, start_pos)+ return _return(matches, start_pos)
| https://raw.githubusercontent.com/oobabooga/text-generation-webui/HEAD/modules/tool_parsing.py |
Add docstrings to make code maintainable | import functools
import json
import re
from math import floor
from pathlib import Path
import yaml
from modules import loaders, metadata_gguf, shared
from modules.logging_colors import logger
from modules.utils import resolve_model_path
def get_fallback_settings():
return {
'bf16': False,
'ctx_size': 8192,
'truncation_length': shared.settings['truncation_length'],
'truncation_length_info': shared.settings['truncation_length'],
'skip_special_tokens': shared.settings['skip_special_tokens'],
}
def get_model_metadata(model):
model_path = resolve_model_path(model)
model_settings = {}
# Get settings from user_data/models/config.yaml and user_data/models/config-user.yaml
settings = shared.model_config
for pat in settings:
if re.match(pat.lower(), Path(model).name.lower()):
for k in settings[pat]:
model_settings[k] = settings[pat][k]
path = model_path / 'config.json'
if path.exists():
hf_metadata = json.loads(open(path, 'r', encoding='utf-8').read())
else:
hf_metadata = None
if 'loader' not in model_settings:
quant_method = None if hf_metadata is None else hf_metadata.get("quantization_config", {}).get("quant_method", None)
model_settings['loader'] = infer_loader(
model,
model_settings,
hf_quant_method=quant_method
)
# GGUF metadata
if model_settings['loader'] == 'llama.cpp':
path = model_path
if path.is_file():
model_file = path
else:
gguf_files = list(path.glob('*.gguf'))
if not gguf_files:
error_msg = f"No .gguf models found in directory: {path}"
logger.error(error_msg)
raise FileNotFoundError(error_msg)
model_file = gguf_files[0]
metadata = load_gguf_metadata_with_cache(model_file)
for k in metadata:
if k.endswith('.context_length'):
model_settings['ctx_size'] = 0
model_settings['truncation_length_info'] = metadata[k]
elif k.endswith('.block_count'):
model_settings['gpu_layers'] = -1
model_settings['max_gpu_layers'] = metadata[k] + 1
if 'tokenizer.chat_template' in metadata:
template = metadata['tokenizer.chat_template']
if 'tokenizer.ggml.eos_token_id' in metadata:
eos_token = metadata['tokenizer.ggml.tokens'][metadata['tokenizer.ggml.eos_token_id']]
else:
eos_token = ""
if 'tokenizer.ggml.bos_token_id' in metadata:
bos_token = metadata['tokenizer.ggml.tokens'][metadata['tokenizer.ggml.bos_token_id']]
else:
bos_token = ""
shared.bos_token = bos_token
shared.eos_token = eos_token
template = re.sub(r"\{\{-?\s*raise_exception\(.*?\)\s*-?\}\}", "", template, flags=re.DOTALL)
template = re.sub(r'raise_exception\([^)]*\)', "''", template)
model_settings['instruction_template'] = 'Custom (obtained from model metadata)'
model_settings['instruction_template_str'] = template
else:
# Transformers metadata
if hf_metadata is not None:
metadata = json.loads(open(path, 'r', encoding='utf-8').read())
if 'pretrained_config' in metadata:
metadata = metadata['pretrained_config']
for k in ['max_position_embeddings', 'model_max_length', 'max_seq_len']:
if k in metadata:
value = metadata[k]
elif k in metadata.get('text_config', {}):
value = metadata['text_config'][k]
else:
continue
model_settings['truncation_length'] = value
model_settings['truncation_length_info'] = value
model_settings['ctx_size'] = min(value, 8192)
break
if 'torch_dtype' in metadata and metadata['torch_dtype'] == 'bfloat16':
model_settings['bf16'] = True
# Try to find the Jinja instruct template
path = model_path / 'tokenizer_config.json'
template = None
# 1. Prioritize reading from chat_template.jinja if it exists
jinja_path = model_path / 'chat_template.jinja'
if jinja_path.exists():
with open(jinja_path, 'r', encoding='utf-8') as f:
template = f.read()
# 2. If no .jinja file, try chat_template.json
if template is None:
json_template_path = model_path / 'chat_template.json'
if json_template_path.exists():
with open(json_template_path, 'r', encoding='utf-8') as f:
json_data = json.load(f)
if 'chat_template' in json_data:
template = json_data['chat_template']
# 3. Fall back to tokenizer_config.json metadata
if path.exists():
metadata = json.loads(open(path, 'r', encoding='utf-8').read())
# Only read from metadata if we haven't already loaded from .jinja or .json
if template is None and 'chat_template' in metadata:
template = metadata['chat_template']
if isinstance(template, list):
template = template[0]['template']
# 4. If a template was found from any source, process it
if template:
shared.bos_token = '<s>'
shared.eos_token = '</s>'
for k in ['eos_token', 'bos_token']:
if k in metadata:
value = metadata[k]
if isinstance(value, dict):
value = value['content']
setattr(shared, k, value)
template = re.sub(r"\{\{-?\s*raise_exception\(.*?\)\s*-?\}\}", "", template, flags=re.DOTALL)
template = re.sub(r'raise_exception\([^)]*\)', "''", template)
model_settings['instruction_template'] = 'Custom (obtained from model metadata)'
model_settings['instruction_template_str'] = template
if 'instruction_template' not in model_settings:
model_settings['instruction_template'] = 'Alpaca'
# Apply user settings from user_data/models/config-user.yaml
settings = shared.user_config
for pat in settings:
if re.match(pat.lower(), Path(model).name.lower()):
for k in settings[pat]:
new_k = k
if k == 'n_gpu_layers':
new_k = 'gpu_layers'
model_settings[new_k] = settings[pat][k]
# Load instruction template if defined by name rather than by value
if model_settings['instruction_template'] != 'Custom (obtained from model metadata)':
model_settings['instruction_template_str'] = load_instruction_template(model_settings['instruction_template'])
return model_settings
def infer_loader(model_name, model_settings, hf_quant_method=None):
path_to_model = resolve_model_path(model_name)
if not path_to_model.exists():
loader = None
elif shared.args.portable:
loader = 'llama.cpp'
elif len(list(path_to_model.glob('*.gguf'))) > 0:
loader = 'llama.cpp'
elif re.match(r'.*\.gguf', model_name.lower()):
loader = 'llama.cpp'
elif hf_quant_method == 'exl3':
loader = 'ExLlamav3'
elif re.match(r'.*exl3', model_name.lower()):
loader = 'ExLlamav3'
else:
loader = 'Transformers'
return loader
def update_model_parameters(state, initial=False):
elements = loaders.list_model_elements() # the names of the parameters
for i, element in enumerate(elements):
if element not in state:
continue
value = state[element]
if initial and element in shared.provided_arguments:
continue
if element == 'cpu_memory' and value == 0:
value = vars(shared.args_defaults)[element]
setattr(shared.args, element, value)
def apply_model_settings_to_state(model, state):
import gradio as gr
model_settings = get_model_metadata(model)
if 'loader' in model_settings:
loader = model_settings.pop('loader')
if not (loader == 'ExLlamav3_HF' and state['loader'] == 'ExLlamav3'):
state['loader'] = loader
for k in model_settings:
if k in state and k != 'gpu_layers': # Skip gpu_layers, handle separately
state[k] = model_settings[k]
# Handle GPU layers and VRAM update for llama.cpp
if state['loader'] == 'llama.cpp' and 'gpu_layers' in model_settings:
gpu_layers = model_settings['gpu_layers'] # -1 (auto) by default, or user-saved value
max_layers = model_settings.get('max_gpu_layers', 256)
state['gpu_layers'] = gr.update(value=gpu_layers, maximum=max_layers)
vram_info = update_gpu_layers_and_vram(
state['loader'],
model,
gpu_layers,
state['ctx_size'],
state['cache_type'],
)
state['vram_info'] = vram_info
return state
def save_model_settings(model, state):
if model == 'None':
yield ("Not saving the settings because no model is selected in the menu.")
return
user_config = shared.load_user_config()
model_regex = Path(model).name + '$' # For exact matches
if model_regex not in user_config:
user_config[model_regex] = {}
for k in loaders.list_model_elements():
if k == 'loader' or k in loaders.loaders_and_params[state['loader']]:
user_config[model_regex][k] = state[k]
shared.user_config = user_config
output = yaml.dump(user_config, sort_keys=False)
p = Path(f'{shared.args.model_dir}/config-user.yaml')
with open(p, 'w') as f:
f.write(output)
yield (f"Settings for `{model}` saved to `{p}`.")
def save_instruction_template(model, template):
if model == 'None':
yield ("Not saving the template because no model is selected in the menu.")
return
user_config = shared.load_user_config()
model_regex = Path(model).name + '$' # For exact matches
if model_regex not in user_config:
user_config[model_regex] = {}
if template == 'None':
user_config[model_regex].pop('instruction_template', None)
else:
user_config[model_regex]['instruction_template'] = template
shared.user_config = user_config
output = yaml.dump(user_config, sort_keys=False)
p = Path(f'{shared.args.model_dir}/config-user.yaml')
with open(p, 'w') as f:
f.write(output)
if template == 'None':
yield (f"Instruction template for `{model}` unset in `{p}`, as the value for template was `{template}`.")
else:
yield (f"Instruction template for `{model}` saved to `{p}` as `{template}`.")
@functools.lru_cache(maxsize=1)
def load_gguf_metadata_with_cache(model_file):
return metadata_gguf.load_metadata(model_file)
def get_model_size_mb(model_file: Path) -> float:
filename = model_file.name
# Check for multipart pattern
match = re.match(r'(.+)-\d+-of-\d+\.gguf$', filename)
if match:
# It's a multipart file, find all matching parts
base_pattern = match.group(1)
part_files = sorted(model_file.parent.glob(f'{base_pattern}-*-of-*.gguf'))
total_size = sum(p.stat().st_size for p in part_files)
else:
# Single part
total_size = model_file.stat().st_size
return total_size / (1024 ** 2) # Return size in MB
def estimate_vram(gguf_file, gpu_layers, ctx_size, cache_type):
model_file = resolve_model_path(gguf_file)
metadata = load_gguf_metadata_with_cache(model_file)
size_in_mb = get_model_size_mb(model_file)
# Extract values from metadata
n_layers = None
n_kv_heads = None
n_attention_heads = None # Fallback for models without separate KV heads
embedding_dim = None
for key, value in metadata.items():
if key.endswith('.block_count'):
n_layers = value
elif key.endswith('.attention.head_count_kv'):
n_kv_heads = max(value) if isinstance(value, list) else value
elif key.endswith('.attention.head_count'):
n_attention_heads = max(value) if isinstance(value, list) else value
elif key.endswith('.embedding_length'):
embedding_dim = value
if n_kv_heads is None:
n_kv_heads = n_attention_heads
if gpu_layers > n_layers:
gpu_layers = n_layers
# Convert cache_type to numeric
if cache_type == 'q4_0':
cache_type = 4
elif cache_type == 'q8_0':
cache_type = 8
else:
cache_type = 16
# Derived features
size_per_layer = size_in_mb / max(n_layers, 1e-6)
kv_cache_factor = n_kv_heads * cache_type * ctx_size
embedding_per_context = embedding_dim / ctx_size
# Calculate VRAM using the model
# Details: https://oobabooga.github.io/blog/posts/gguf-vram-formula/
vram = (
(size_per_layer - 17.99552795246051 + 3.148552680382576e-05 * kv_cache_factor)
* (gpu_layers + max(0.9690636483914102, cache_type - (floor(50.77817218646521 * embedding_per_context) + 9.987899908205632)))
+ 1516.522943869404
)
return vram
def update_gpu_layers_and_vram(loader, model, gpu_layers, ctx_size, cache_type):
if loader != 'llama.cpp' or model in ["None", None] or not model.endswith(".gguf") or gpu_layers < 0 or ctx_size == 0:
return f"<div id=\"vram-info\"'>Estimated VRAM to load the model: <span class=\"value\">auto</span></div>"
vram_usage = estimate_vram(model, gpu_layers, ctx_size, cache_type)
return f"<div id=\"vram-info\"'>Estimated VRAM to load the model: <span class=\"value\">{vram_usage:.0f} MiB</span></div>"
def load_instruction_template(template):
if template == 'None':
return ''
for filepath in [shared.user_data_dir / 'instruction-templates' / f'{template}.yaml', shared.user_data_dir / 'instruction-templates' / 'Alpaca.yaml']:
if filepath.exists():
break
else:
return ''
with open(filepath, 'r', encoding='utf-8') as f:
file_contents = f.read()
data = yaml.safe_load(file_contents)
if 'instruction_template' in data:
return data['instruction_template']
else:
return _jinja_template_from_old_format(data)
def _jinja_template_from_old_format(params, verbose=False):
MASTER_TEMPLATE = """
{%- set ns = namespace(found=false) -%}
{%- for message in messages -%}
{%- if message['role'] == 'system' -%}
{%- set ns.found = true -%}
{%- endif -%}
{%- endfor -%}
{%- if not ns.found -%}
{{- '<|PRE-SYSTEM|>' + '<|SYSTEM-MESSAGE|>' + '<|POST-SYSTEM|>' -}}
{%- endif %}
{%- for message in messages %}
{%- if message['role'] == 'system' -%}
{{- '<|PRE-SYSTEM|>' + message['content'] + '<|POST-SYSTEM|>' -}}
{%- else -%}
{%- if message['role'] == 'user' -%}
{{-'<|PRE-USER|>' + message['content'] + '<|POST-USER|>'-}}
{%- else -%}
{{-'<|PRE-ASSISTANT|>' + message['content'] + '<|POST-ASSISTANT|>' -}}
{%- endif -%}
{%- endif -%}
{%- endfor -%}
{%- if add_generation_prompt -%}
{{-'<|PRE-ASSISTANT-GENERATE|>'-}}
{%- endif -%}
"""
if 'context' in params and '<|system-message|>' in params['context']:
pre_system = params['context'].split('<|system-message|>')[0]
post_system = params['context'].split('<|system-message|>')[1]
else:
pre_system = ''
post_system = ''
pre_user = params['turn_template'].split('<|user-message|>')[0].replace('<|user|>', params['user'])
post_user = params['turn_template'].split('<|user-message|>')[1].split('<|bot|>')[0]
pre_assistant = '<|bot|>' + params['turn_template'].split('<|bot-message|>')[0].split('<|bot|>')[1]
pre_assistant = pre_assistant.replace('<|bot|>', params['bot'])
post_assistant = params['turn_template'].split('<|bot-message|>')[1]
def preprocess(string):
return string.replace('\n', '\\n').replace('\'', '\\\'')
pre_system = preprocess(pre_system)
post_system = preprocess(post_system)
pre_user = preprocess(pre_user)
post_user = preprocess(post_user)
pre_assistant = preprocess(pre_assistant)
post_assistant = preprocess(post_assistant)
if verbose:
print(
'\n',
repr(pre_system) + '\n',
repr(post_system) + '\n',
repr(pre_user) + '\n',
repr(post_user) + '\n',
repr(pre_assistant) + '\n',
repr(post_assistant) + '\n',
)
result = MASTER_TEMPLATE
if 'system_message' in params:
result = result.replace('<|SYSTEM-MESSAGE|>', preprocess(params['system_message']))
else:
result = result.replace('<|SYSTEM-MESSAGE|>', '')
result = result.replace('<|PRE-SYSTEM|>', pre_system)
result = result.replace('<|POST-SYSTEM|>', post_system)
result = result.replace('<|PRE-USER|>', pre_user)
result = result.replace('<|POST-USER|>', post_user)
result = result.replace('<|PRE-ASSISTANT|>', pre_assistant)
result = result.replace('<|PRE-ASSISTANT-GENERATE|>', pre_assistant.rstrip(' '))
result = result.replace('<|POST-ASSISTANT|>', post_assistant)
result = result.strip()
return result | --- +++ @@ -202,6 +202,9 @@
def update_model_parameters(state, initial=False):
+ '''
+ UI: update the command-line arguments based on the interface values
+ '''
elements = loaders.list_model_elements() # the names of the parameters
for i, element in enumerate(elements):
@@ -219,6 +222,9 @@
def apply_model_settings_to_state(model, state):
+ '''
+ UI: update the state variable with the model settings
+ '''
import gradio as gr
model_settings = get_model_metadata(model)
if 'loader' in model_settings:
@@ -250,6 +256,9 @@
def save_model_settings(model, state):
+ '''
+ Save the settings for this model to user_data/models/config-user.yaml
+ '''
if model == 'None':
yield ("Not saving the settings because no model is selected in the menu.")
return
@@ -274,6 +283,9 @@
def save_instruction_template(model, template):
+ '''
+ Similar to the function above, but it saves only the instruction template.
+ '''
if model == 'None':
yield ("Not saving the template because no model is selected in the menu.")
return
@@ -376,6 +388,10 @@
def update_gpu_layers_and_vram(loader, model, gpu_layers, ctx_size, cache_type):
+ """
+ Compute the estimated VRAM usage for the given GPU layers and return
+ an HTML string for the UI display.
+ """
if loader != 'llama.cpp' or model in ["None", None] or not model.endswith(".gguf") or gpu_layers < 0 or ctx_size == 0:
return f"<div id=\"vram-info\"'>Estimated VRAM to load the model: <span class=\"value\">auto</span></div>"
@@ -480,4 +496,4 @@
result = result.strip()
- return result+ return result
| https://raw.githubusercontent.com/oobabooga/text-generation-webui/HEAD/modules/models_settings.py |
Document functions with clear intent |
import math
import torch
from transformers.generation.logits_process import LogitsProcessor
from transformers.utils import add_start_docstrings
LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
Return:
`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class GrammarConstrainedLogitsProcessor(LogitsProcessor):
def __init__(self, grammar_constraint):
self.last_size = None
self.grammar_constraint = grammar_constraint
self.batch_stacks = None
def filter_logits(self, logits, device):
# resolve each stack to a tensor of True/False for each token
# indicating acceptance
# acceptance = self.grammar_acceptor.filter_vocab(self.stacks, device)
acceptance = self.grammar_constraint.batch_filter_vocab(self.batch_stacks, device)
# logger.debug(acceptance)
# Logits to -inf where False
logits[~acceptance] = -math.inf
# TODO: batching
def process_logits(self, input_ids, scores, parse_start_index=None):
# we dynamically create stacks at the first call, so that we know the batch size and beam size
if self.batch_stacks is None:
self.batch_stacks = [self.grammar_constraint.init_stacks() for _ in range(len(input_ids))]
# if self.last_size is not set (which would be the case when processing the first token).
# In this case, do nothing.
if self.last_size is None:
prefix_to_parse = [
single_input_ids[parse_start_index:] if parse_start_index is not None else []
for single_input_ids in input_ids
]
# self.grammar_acceptor.accept_token_ids(prefix_to_parse, self.stacks)
self.batch_stacks = [
self.grammar_constraint.accept_token_ids(prefix, stack)
for prefix, stack in zip(prefix_to_parse, self.batch_stacks)
]
# if the length of the current input IDs (input_ids[0]) is exactly one more than self.last_size.
# This is expected in a scenario where inputs are processed incrementally, one token at a time.
elif len(input_ids[0]) == self.last_size + 1:
# self.stacks = self.grammar_acceptor.accept_token_id(input_ids[0][-1], self.stacks)
self.batch_stacks = [
self.grammar_constraint.accept_token_id(single_input_ids[-1], stack)
for single_input_ids, stack in zip(input_ids, self.batch_stacks)
]
# ensure that the input size is consistent with the expected incremental processing
# (i.e., one token at a time).
else:
# here we check if the input_ids are one token longer than the last time we processed
# but we don't check if input_ids are actually valid.
# Imagine a scenario where we generate 10 tokens, then we replace the 10 generated tokens with 10 new tokens.
# In this case, the input_ids will be consistent with the last_size, but the input_ids are not valid.
# However, should we really check if the input_ids are valid here?
# If we do, then we need to reparse the whole input_ids at each call, which is not efficient.
# Maybe we should just trust the user to provide valid input_ids?
# The conclusion is that, we assume the input_ids are valid, and our generation will be correct.
# If the input_ids are not valid, then the generation result will be wrong and we don't take responsibility for that.
raise RuntimeError(
"Input ID's length is inconsistent with the current state of "
"the GrammarConstrainedLogitsProcessor. If you want to process "
"another input sequence, please instantiate a new "
"GrammarConstrainedLogitsProcessor."
)
self.filter_logits(scores, scores.device)
self.last_size = len(input_ids[0])
return scores
@add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
return self.process_logits(input_ids, scores) | --- +++ @@ -1,3 +1,12 @@+'''
+This file has been 100% copied from this PR to the Transformers library:
+https://github.com/huggingface/transformers/pull/27557
+
+Author: Saibo-creator
+Author GitHub: https://github.com/Saibo-creator
+
+All credits go to the author.
+'''
import math
@@ -36,6 +45,12 @@
# TODO: batching
def process_logits(self, input_ids, scores, parse_start_index=None):
+ """
+ :param input_ids:
+ :param scores:
+ :param parse_start_index: default None, which means generate from scratch. Set to 0 to parse all input_ids
+ :return:
+ """
# we dynamically create stacks at the first call, so that we know the batch size and beam size
if self.batch_stacks is None:
self.batch_stacks = [self.grammar_constraint.init_stacks() for _ in range(len(input_ids))]
@@ -86,4 +101,4 @@
@add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
- return self.process_logits(input_ids, scores)+ return self.process_logits(input_ids, scores)
| https://raw.githubusercontent.com/oobabooga/text-generation-webui/HEAD/modules/grammar/logits_process.py |
Generate consistent documentation across files | import time
import html
import functools
import re
import gradio
import numpy as np
import torch
from transformers import LogitsProcessor
import colorsys
from modules import html_generator, shared
params = {
'active': True,
'color_by_perplexity': False,
'color_by_probability': False,
'ppl_scale': 15.0, # No slider for this right now, because I don't think it really needs to be changed. Very large perplexity scores don't show up often.
'probability_dropdown': False,
'verbose': False # For debugging mostly
}
class PerplexityLogits(LogitsProcessor):
def __init__(self, verbose=False):
self.generated_token_ids = []
self.selected_probs = []
self.top_token_ids_list = []
self.top_probs_list = []
self.perplexities_list = []
self.last_probs = None
self.verbose = verbose
def __call__(self, input_ids, scores):
#t0 = time.time()
probs = torch.softmax(scores, dim=-1, dtype=torch.float)
log_probs = torch.nan_to_num(torch.log(probs)) # Note: This is to convert log(0) nan to 0, but probs*log_probs makes this 0 not affect the perplexity.
entropy = -torch.sum(probs * log_probs)
entropy = entropy.cpu().numpy()
perplexity = round(float(np.exp(entropy)), 4)
self.perplexities_list.append(perplexity)
last_token_id = int(input_ids[0][-1].cpu().numpy().item())
# Store the generated tokens (not sure why this isn't accessible in the output endpoint!)
self.generated_token_ids.append(last_token_id)
# Get last probability, and add to the list if it wasn't there
if len(self.selected_probs) > 0:
# Is the selected token in the top tokens?
if self.verbose:
print(shared.tokenizer.decode(last_token_id), [shared.tokenizer.decode(token_id) for token_id in self.top_token_ids_list[-1][0]],
[round(float(prob), 4) for prob in self.top_probs_list[-1][0]])
if last_token_id in self.top_token_ids_list[-1][0]:
idx = self.top_token_ids_list[-1][0].index(last_token_id)
self.selected_probs.append(self.top_probs_list[-1][0][idx])
else:
self.top_token_ids_list[-1][0].append(last_token_id)
last_prob = round(float(self.last_probs[last_token_id]), 4)
self.top_probs_list[-1][0].append(last_prob)
self.selected_probs.append(last_prob)
else:
self.selected_probs.append(1.0) # Placeholder for the last token of the prompt
if self.verbose:
pplbar = "-"
if not np.isnan(perplexity):
pplbar = "*" * round(perplexity)
print(f"PPL for token after {shared.tokenizer.decode(last_token_id)}: {perplexity:.2f} {pplbar}")
# Get top 5 probabilities
top_tokens_and_probs = torch.topk(probs, 5)
top_probs = top_tokens_and_probs.values.cpu().numpy().astype(float).tolist()
top_token_ids = top_tokens_and_probs.indices.cpu().numpy().astype(int).tolist()
self.top_token_ids_list.append(top_token_ids)
self.top_probs_list.append(top_probs)
probs = probs.cpu().numpy().flatten()
self.last_probs = probs # Need to keep this as a reference for top probs
#t1 = time.time()
#print(f"PPL Processor: {(t1-t0):.3f} s")
# About 1 ms, though occasionally up to around 100 ms, not sure why...
# Doesn't actually modify the logits!
return scores
# Stores the perplexity and top probabilities
# global ppl_logits_processor
ppl_logits_processor = None
def logits_processor_modifier(logits_processor_list, input_ids):
global ppl_logits_processor
if params['active']:
ppl_logits_processor = PerplexityLogits(verbose=params['verbose'])
logits_processor_list.append(ppl_logits_processor)
def get_last_token(text, tokens_list, token_ids_list, token_probs_list):
for token, token_id, prob in zip(tokens_list, token_ids_list, token_probs_list):
if text.strip().endswith(token.strip()): # Whitespace could be a problem
return token, token_id, prob
# Unknown?
print("Last token not found in list:", tokens_list)
return '', -1, 0.0
def output_modifier(text):
global ppl_logits_processor
#t0 = time.time()
original_text = text
if not params['active'] or ppl_logits_processor is None:
return text
# Space at the beginning to account for tokenization spaces...
text = ' ' + html.unescape(text)
# TODO: It's probably more efficient to do this above rather than modifying all these lists
# Remove last element of perplexities_list, top_token_ids_list, top_tokens_list, top_probs_list since everything is off by one because this extension runs before generation
perplexities = ppl_logits_processor.perplexities_list
top_token_ids_list = ppl_logits_processor.top_token_ids_list
top_tokens_list = [[shared.tokenizer.decode(token_id) for token_id in top_token_ids[0]] for top_token_ids in top_token_ids_list]
top_probs_list = ppl_logits_processor.top_probs_list
# Remove first element of generated_token_ids, generated_tokens, selected_probs because they are for the last token of the prompt
gen_token_ids = ppl_logits_processor.generated_token_ids[1:]
# Add last sampled token, if possible (it could be past the end of the top 5 list)
last_token, last_token_id, last_prob = get_last_token(text, top_tokens_list[-1], top_token_ids_list[-1][0], top_probs_list[-1][0])
if last_token_id != -1:
gen_token_ids.append(last_token_id)
gen_tokens = [shared.tokenizer.decode(token_id) for token_id in gen_token_ids]
sel_probs = ppl_logits_processor.selected_probs[1:]
if last_token_id != -1:
sel_probs.append(last_prob)
end_part = '</div></div>' if params['probability_dropdown'] else '</span>' # Helps with finding the index after replacing part of the text.
# Initial space added to deal with some tokenizers...
# Used to find where the message started generating, for working with "continue" generations
# Doesn't work for longer messages... Not sure how I should handle this
full_msg = shared.tokenizer.decode([token_id for token_id in gen_token_ids[:-1]]).strip()
# There was an issue with tab lengths being off by one...
# Seems like it might be model-dependent...
#text = re.sub(r'( {3,})', r'\1 ', text)
# Subtracting 2 to hopefully help with the tokenization spaces and continue issues,
# Though it's possible it could overwrite the previous token if it's the same in the last 2 chars
i = text.find(full_msg) - 2
if i < 0:
# Backup, try removing the extra whitespace (needed for continue)
i = text.find(full_msg.strip()) - 2
if i < 0:
i = 0
#i = 0
# Add token index for ability to regenerate from there
nonwhitespace_token_found = False
missing_token_count = 0
for index, token, prob, ppl, top_tokens, top_probs in zip(range(len(gen_tokens)), gen_tokens, sel_probs, perplexities, top_tokens_list, top_probs_list):
# Somehow this works without issues, but not sure how...
if not nonwhitespace_token_found and token.strip() == '':
#print('Ignoring initial whitespace token...')
continue
nonwhitespace_token_found = True
max_prob = top_probs[0][0]
color = 'ffffff'
if params['color_by_probability'] and params['color_by_perplexity']:
color = probability_perplexity_color_scale(prob, max_prob, ppl)
elif params['color_by_perplexity']:
color = perplexity_color_scale(ppl)
elif params['color_by_probability']:
color = probability_color_scale(prob)
if token.strip() in text[i:]:
if params['probability_dropdown']:
text = text[:i] + text[i:].replace(token.replace('\n', ''), add_dropdown_html(token, index, i, color, top_tokens, top_probs[0], ppl), 1)
else:
text = text[:i] + text[i:].replace(token.replace('\n', ''), add_color_html(token, color), 1)
# This might be slightly inefficient
i += text[i:].find(end_part) + len(end_part)
else:
missing_token_count += 1
print('Missing token:', token, '...', text[i:i+20])
# If there are any missing tokens, then either the tokenization was off, or this is the start of a conversation, or something else went wrong
if missing_token_count > 5:
print("Canceling token coloring...")
return original_text
# Use full perplexity list for calculating the average here.
# Fix issue with mean of empty slice
if len(ppl_logits_processor.perplexities_list) > 1:
print('Average perplexity:', round(np.mean(ppl_logits_processor.perplexities_list[:-1]), 4))
#t1 = time.time()
#print(f"Output modifier: {(t1-t0):.3f} s")
# About 50 ms
return text.strip() # Remove extra beginning whitespace that some tokenizers add
def probability_color_scale(prob):
# hue (0.0 = red, 0.33 = green)
# saturation (0.0 = gray / white, 1.0 = normal, just leave at 1.0)
# brightness (0.0 = black, 1.0 = brightest, use something in between for better readability if you want...)
hue = prob * 0.33
rv, gv, bv = colorsys.hsv_to_rgb(hue, 1.0, 1.0)
# to hex
hex_col = f"{int(rv*255):02x}{int(gv*255):02x}{int(bv*255):02x}"
return hex_col
def perplexity_color_scale(ppl):
# hue (0.0 = red)
# saturation (1.0 = red)
# brightness (0.0 = black, 1.0 = red)
# scale saturation from white to red the higher the perplexity
ppl = min(ppl, params['ppl_scale']) # clip ppl to 0-params['ppl_scale'] for color scaling. 15 should be fine for clipping and scaling
sat = ppl / params['ppl_scale']
rv, gv, bv = colorsys.hsv_to_rgb(0.0, sat, 1.0)
# to hex
hex_col = f"{int(rv*255):02x}{int(gv*255):02x}{int(bv*255):02x}"
return hex_col
def probability_perplexity_color_scale(prob, max_prob, ppl):
hue = prob/max_prob * 0.33
rv, gv, _ = colorsys.hsv_to_rgb(hue, 1.0, 1.0)
ppl = min(ppl, params['ppl_scale']) # clip ppl to 0-params['ppl_scale'] for color scaling. 15 should be fine for clipping and scaling
bv = ppl / params['ppl_scale']
# to hex
hex_col = f"{int(rv*255):02x}{int(gv*255):02x}{int(bv*255):02x}"
return hex_col
def add_color_html(token, color):
output = ''
output += f'<span style="color: #{color}">{html.escape(repr(token)[1:-1])}</span>'
#if '\n' in token or '\r' in token: #token.isspace():
# output += '<br>'
return output
# TODO: Might also need message index for the click-to-regenerate feature to work... For now it only works in the last message, which I think is fine.
# TODO: Major issue: Applying this to too many tokens will cause a permanent slowdown in generation speed until the messages are removed from the history. The slowdown seems to be mostly resolved in the current version though
# I think the issue is from HTML elements taking up space in the visible history, and things like history deepcopy add latency proportional to the size of the history.
# Potential solution is maybe to modify the main generation code to send just the internal text and not the visible history, to avoid moving too much around.
# I wonder if we can also avoid using deepcopy here.
def add_dropdown_html(token, index, msg_position, color, top_tokens, top_probs, perplexity=0):
    """
    Render a token as a hoverable <div> whose dropdown table lists the top
    candidate tokens with their probabilities.

    Edge cases: perplexity == 0 is treated as "not available" and the
    perplexity row is omitted; a dropdown row whose token equals the chosen
    token is tagged with class "selected".
    """
    #print("Token:", token, token.isspace(), '\n' in token or '\r' in token)
    output = ''
    # Use the repr to get characters like \n visible. Exclude the quotes around it
    output += f'<div class="hoverable" name="tok_{index}_{msg_position}"><span style="color: #{color}">{html.escape(repr(token)[1:-1])}</span><div class="dropdown"><table class="dropdown-content"><tbody>'
    for i, token_option, prob in zip(range(len(top_tokens)), top_tokens, top_probs):
        # TODO: Bold for selected token?
        # Using divs prevented the problem of divs inside spans causing issues.
        # Now the problem is that divs show the same whitespace of one space between every token.
        # There is probably some way to fix this in CSS that I don't know about.
        row_color = probability_color_scale(prob)
        row_class = ' class="selected"' if token_option == token else ''
        # This time we want to include the quotes around it so that we can see where the spaces are.
        output += f'<tr{row_class}><td name="opt_{index}_{i}_{msg_position}" style="color: #{row_color}">{html.escape(repr(token_option))}</td><td style="color: #{row_color}">{prob:.4f}</td></tr>'
    if perplexity != 0:
        ppl_color = perplexity_color_scale(perplexity)
        output += f'<tr><td>Perplexity:</td><td style="color: #{ppl_color}">{perplexity:.4f}</td></tr>'
    output += '</tbody></table></div></div>'
    #if '\n' in token or '\r' in token: #token.isspace():
    #    output += '<br>'  # I imagine this will cause problems sometimes
    return output  # About 750 characters per token...
def custom_css():
    """Return the CSS injected into the UI for the hoverable token dropdowns."""
    return """
    .dropdown {
        display: none;
        position: absolute;
        z-index: 50;
        background-color: var(--background-fill-secondary);
        box-shadow: 0px 8px 16px 0px rgba(0,0,0,1.0);
        width: max-content;
        overflow: visible;
        padding: 5px;
        border-radius: 10px;
        border: 1px solid var(--border-color-primary);
    }

    .dropdown-content {
        border: none;
        z-index: 50;
    }

    .dropdown-content tr.selected {
        background-color: var(--background-fill-primary);
    }

    .dropdown-content td {
        color: var(--body-text-color);
    }

    .hoverable {
        color: var(--body-text-color);
        position: relative;
        display: inline-block;
        overflow: visible;
        font-size: 15px;
        line-height: 1.75;
        margin: 0;
        padding: 0;
    }

    .hoverable:hover .dropdown {
        display: block;
    }

    pre {
        white-space: pre-wrap;
    }

    # TODO: This makes the hover menus extend outside the bounds of the chat area, which is good.
    # However, it also makes the scrollbar disappear, which is bad.
    # The scroll bar needs to still be present. So for now, we can't see dropdowns that extend past the edge of the chat area.
    .chat {
        overflow-y: auto;
    }
    """
def custom_js():
    """Return the JS that implements click-to-replace on dropdown token options."""
    return """

    function sleep(ms) {
        return new Promise(resolve => setTimeout(resolve, ms));
    }

    // Note that this will only work as intended on the last agent message
    document.addEventListener("click", async function(event) {
        //console.log(event.target);
        const name = event.target.getAttribute("name");
        if (name != null && name.includes("opt_")) {
            const name_parts = name.split("_");
            const token_index = name_parts[1];
            const option_index = name_parts[2];
            const msg_pos = name_parts[3];
            // Exclude the quotes and convert newlines... Not sure about the newlines though
            // TODO: Seems like continuing generation from a newline causes problems whether you add it or not!
            const token_string = event.target.innerHTML.substring(1, event.target.innerHTML.length-1).replace(new RegExp(String.fromCharCode(92)+String.fromCharCode(92)+"r", "g"), '').replace(new RegExp(String.fromCharCode(92)+String.fromCharCode(92)+"n", "g"), '');
            //console.log(token_index + ", " + option_index + ", " + token_string);

            // Get all the previous text (I'm sure there is a more efficient way to do this)
            var msg_text = ""
            const msg_html = event.target.parentElement.parentElement.parentElement.parentElement.parentElement.parentElement;
            var msg_parts = msg_html.childNodes;
            for (var i = 0; i < msg_parts.length; i++) {
                var msg_part = msg_parts[i];
                if (msg_part.nodeType === Node.ELEMENT_NODE) {
                    if (msg_part.nodeName == "DIV") {
                        msg_part_name = msg_part.getAttribute("name")
                        if (msg_part_name != null) {
                            var current_token_index = msg_part_name.split("_")[1];
                            var current_message_pos = msg_part_name.split("_")[2];
                            if (current_token_index == token_index && current_message_pos == msg_pos) {
                                // Use the replacement token
                                // TODO: Don't have access to the tokenizer here, and sometimes there needs to be a space added before this token
                                msg_text += token_string //.replace(new RegExp(String.fromCharCode(92)+String.fromCharCode(92)+"r", "g"), '').replace(new RegExp(String.fromCharCode(92)+String.fromCharCode(92)+"n", "g"), '');
                                break;
                            }
                            else {
                                // Replace here or at the end?
                                var text = msg_part.firstChild.innerHTML.replace(new RegExp(String.fromCharCode(92)+String.fromCharCode(92)+"r", "g"), '').replace(new RegExp(String.fromCharCode(92)+String.fromCharCode(92)+"n", "g"), '')
                                msg_text += text;
                            }
                        }
                    }
                    else {
                        // Break tag (hacky workaround because the newline literal can't be parsed here)
                        //msg_text += String.fromCharCode(10);
                        // Do nothing???
                    }
                }
                else if (msg_part.nodeType === Node.TEXT_NODE) {
                    msg_text += msg_part.textContent;
                }
            }

            var textbox = document.querySelector("#chat-input textarea");
            textbox.focus();
            textbox.value = msg_text.trimStart() // Fix initial tokenization spaces
            //console.log(textbox.value);

            // Add some delays to make sure it's processed correctly. Without these, there's a chance the events don't go through correctly and it doesn't work
            // It's unknown how long this will take, and probably depends on the size of the message...
            // It would be better to somehow wait for gradio to update instead of waiting a fixed amount of time.
            // Hopefully 1 second of delay before starting generation isn't unacceptable.
            var inputEvent = new Event('input', {
                bubbles: true,
                cancelable: true,
            });
            textbox.dispatchEvent(inputEvent);
            var changeEvent = new Event('change', {
                bubbles: true,
                cancelable: true,
            });
            textbox.dispatchEvent(changeEvent);
            await sleep(250);
            document.getElementById("Replace-last").click();
            // This can take a while to execute
            await sleep(750);
            document.getElementById("Continue").click();
        }
    });
    console.log("Custom JS for perplexity_colors loaded");
    """
# Monkeypatch applied to html_generator.py
# We simply don't render markdown into HTML. We wrap everything in <pre> tags to preserve whitespace
# formatting. If you're coloring tokens by perplexity or probability, or especially if you're using
# the probability dropdown, you probably care more about seeing the tokens the model actually outputted
# rather than rendering ```code blocks``` or *italics*.
@functools.lru_cache(maxsize=4096)
def convert_to_markdown(string):
    """Monkeypatch replacement: wrap text in <pre> instead of rendering markdown."""
    return f'<pre>{string}</pre>'
def convert_to_markdown_wrapped(string, use_cache=True):
    """
    Variant of convert_to_markdown that can bypass the lru_cache.

    With use_cache=False it calls the undecorated function via __wrapped__,
    avoiding cache pollution for one-off conversions.
    """
    if use_cache:
        return convert_to_markdown(string)
    return convert_to_markdown.__wrapped__(string)
# This is still necessary for formatting to work correctly
html_generator.convert_to_markdown = convert_to_markdown
def ui():
def update_active_check(x):
params.update({'active': x})
def update_color_by_ppl_check(x):
params.update({'color_by_perplexity': x})
def update_color_by_prob_check(x):
params.update({'color_by_probability': x})
def update_prob_dropdown_check(x):
params.update({'probability_dropdown': x})
active_check = gradio.Checkbox(value=True, label="Compute probabilities and perplexity scores", info="Activate this extension. Note that this extension currently does not work with llama.cpp, but it does work with ExLlamav2_HF and llamacpp_HF when set up correctly")
color_by_ppl_check = gradio.Checkbox(value=False, label="Color by perplexity", info="Higher perplexity is more red. If also showing probability, higher perplexity has more blue component.")
color_by_prob_check = gradio.Checkbox(value=False, label="Color by probability", info="Green-yellow-red linear scale, with 100% green, 50% yellow, 0% red.")
prob_dropdown_check = gradio.Checkbox(value=False, label="Probability dropdown", info="Hover over a token to show a dropdown of top token probabilities. Currently slightly buggy with whitespace between tokens.")
active_check.change(update_active_check, active_check, None)
color_by_ppl_check.change(update_color_by_ppl_check, color_by_ppl_check, None)
color_by_prob_check.change(update_color_by_prob_check, color_by_prob_check, None)
prob_dropdown_check.change(update_prob_dropdown_check, prob_dropdown_check, None) | --- +++ @@ -198,6 +198,9 @@
def probability_color_scale(prob):
+ '''
+ Green-yellow-red color scale
+ '''
# hue (0.0 = red, 0.33 = green)
# saturation (0.0 = gray / white, 1.0 = normal, just leave at 1.0)
# brightness (0.0 = black, 1.0 = brightest, use something in between for better readability if you want...)
@@ -210,6 +213,9 @@
def perplexity_color_scale(ppl):
+ '''
+ Red component only, white for 0 perplexity (sorry if you're not in dark mode)
+ '''
# hue (0.0 = red)
# saturation (1.0 = red)
# brightness (0.0 = black, 1.0 = red)
@@ -226,6 +232,9 @@
def probability_perplexity_color_scale(prob, max_prob, ppl):
+ '''
+ Green-yellow-red for relative probability compared to maximum for the current token, and blue component for perplexity
+ '''
hue = prob/max_prob * 0.33
rv, gv, _ = colorsys.hsv_to_rgb(hue, 1.0, 1.0)
@@ -454,4 +463,4 @@ active_check.change(update_active_check, active_check, None)
color_by_ppl_check.change(update_color_by_ppl_check, color_by_ppl_check, None)
color_by_prob_check.change(update_color_by_prob_check, color_by_prob_check, None)
- prob_dropdown_check.change(update_prob_dropdown_check, prob_dropdown_check, None)+ prob_dropdown_check.change(update_prob_dropdown_check, prob_dropdown_check, None)
| https://raw.githubusercontent.com/oobabooga/text-generation-webui/HEAD/extensions/perplexity_colors/script.py |
Provide clean and structured docstrings | import argparse
import glob
import hashlib
import json
import os
import platform
import re
import signal
import site
import subprocess
import sys
# Define the required versions
TORCH_VERSION = "2.9.1"
PYTHON_VERSION = "3.13"
LIBSTDCXX_VERSION_LINUX = "12.1.0"
# Environment
script_dir = os.getcwd()
conda_env_path = os.path.join(script_dir, "installer_files", "env")
state_file = '.installer_state.json'
# Command-line flags
flags = f"{' '.join([flag for flag in sys.argv[1:] if flag != '--update-wizard'])}"
def signal_handler(sig, frame):
    """SIGINT handler: exit cleanly instead of printing a KeyboardInterrupt traceback."""
    raise SystemExit(0)
signal.signal(signal.SIGINT, signal_handler)
def is_linux():
    """Return True when running on a Linux platform."""
    return sys.platform[:5] == "linux"
def is_windows():
    """Return True when running on Windows."""
    return sys.platform[:3] == "win"
def is_macos():
    """Return True when running on macOS."""
    return sys.platform[:6] == "darwin"
def is_x86_64():
    """Return True on x86-64 machines as reported by platform.machine()."""
    arch = platform.machine()
    return arch == "x86_64"
def is_installed():
    """
    Heuristic check for whether the webui environment is already set up.

    Looks for torch inside the conda env's site-packages. Edge case: if no
    matching site-packages directory is found, falls back to checking that
    the env directory merely exists (which may report True before torch is
    actually installed).
    """
    site_packages_path = None
    for sitedir in site.getsitepackages():
        if "site-packages" in sitedir and conda_env_path in sitedir:
            site_packages_path = sitedir
            break

    if site_packages_path:
        return os.path.isfile(os.path.join(site_packages_path, 'torch', '__init__.py'))
    else:
        return os.path.isdir(conda_env_path)
def load_state():
    """
    Read the installer state from .installer_state.json.

    Returns {} when the file is missing or unreadable/corrupt, so callers
    never need to handle errors themselves.
    """
    if os.path.exists(state_file):
        try:
            with open(state_file, 'r') as f:
                return json.load(f)
        except Exception:
            return {}
    return {}
def save_state(state):
    """Write the installer state dict to .installer_state.json (overwrites)."""
    with open(state_file, 'w') as f:
        json.dump(state, f)
def get_gpu_choice():
    """
    Return the saved GPU choice, asking the user (or reading GPU_CHOICE from
    the environment) on first run and persisting the answer to the state file.

    Edge case: a GPU_CHOICE value outside A/B/C/D/N raises KeyError on the
    mapping below.
    """
    state = load_state()
    gpu_choice = state.get('gpu_choice')

    if not gpu_choice:
        if "GPU_CHOICE" in os.environ:
            choice = os.environ["GPU_CHOICE"].upper()
            print_big_message(f"Selected GPU choice \"{choice}\" based on the GPU_CHOICE environment variable.")
        else:
            choice = get_user_choice(
                "What is your GPU?",
                {
                    'A': 'NVIDIA',
                    'B': 'AMD - Linux only, ROCm 7.2',
                    'C': 'Apple M Series',
                    'D': 'Intel Arc (beta)',
                    'N': 'CPU mode'
                },
            )

        # Convert choice to GPU name
        gpu_choice = {"A": "NVIDIA_CUDA128", "B": "AMD", "C": "APPLE", "D": "INTEL", "N": "NONE"}[choice]

        # Save choice to state
        state['gpu_choice'] = gpu_choice
        save_state(state)

    return gpu_choice
def get_pytorch_install_command(gpu_choice):
    """
    Build the pip command that installs PyTorch for the given GPU choice.

    AMD installs a pinned ROCm wheel directly from repo.radeon.com; an
    unrecognized choice falls back to a plain PyPI install.
    """
    base_cmd = f"python -m pip install torch=={TORCH_VERSION} "
    pypi_fallback = " --extra-index-url https://pypi.org/simple/"
    if gpu_choice == "NVIDIA_CUDA128":
        return base_cmd + "--index-url https://download.pytorch.org/whl/cu128" + pypi_fallback
    elif gpu_choice == "AMD":
        py_tag = f"cp{PYTHON_VERSION.replace('.', '')}"
        return f"python -m pip install https://repo.radeon.com/rocm/manylinux/rocm-rel-7.2/torch-{TORCH_VERSION}%2Brocm7.2.0.lw.git7e1940d4-{py_tag}-{py_tag}-linux_x86_64.whl"
    elif gpu_choice in ["APPLE", "NONE"]:
        return base_cmd + "--index-url https://download.pytorch.org/whl/cpu" + pypi_fallback
    elif gpu_choice == "INTEL":
        return base_cmd + "--index-url https://download.pytorch.org/whl/xpu"
    else:
        return base_cmd
def get_pytorch_update_command(gpu_choice):
    """
    Build the pip command that upgrades PyTorch for the given GPU choice.

    Mirrors get_pytorch_install_command but adds --upgrade; keep the two in sync.
    """
    base_cmd = f"python -m pip install --upgrade torch=={TORCH_VERSION} "
    pypi_fallback = " --extra-index-url https://pypi.org/simple/"
    if gpu_choice == "NVIDIA_CUDA128":
        return f"{base_cmd}--index-url https://download.pytorch.org/whl/cu128" + pypi_fallback
    elif gpu_choice == "AMD":
        py_tag = f"cp{PYTHON_VERSION.replace('.', '')}"
        return f"python -m pip install --upgrade https://repo.radeon.com/rocm/manylinux/rocm-rel-7.2/torch-{TORCH_VERSION}%2Brocm7.2.0.lw.git7e1940d4-{py_tag}-{py_tag}-linux_x86_64.whl"
    elif gpu_choice in ["APPLE", "NONE"]:
        return f"{base_cmd}--index-url https://download.pytorch.org/whl/cpu" + pypi_fallback
    elif gpu_choice == "INTEL":
        return f"{base_cmd}--index-url https://download.pytorch.org/whl/xpu"
    else:
        return base_cmd
def get_requirements_file(gpu_choice):
    """
    Return the path of the requirements file matching the GPU choice.

    Raises ValueError for an unknown choice. INTEL currently shares the
    CPU-only requirements file.
    """
    requirements_base = os.path.join("requirements", "full")
    if gpu_choice == "NVIDIA_CUDA128":
        file_name = "requirements.txt"
    elif gpu_choice == "AMD":
        file_name = "requirements_amd.txt"
    elif gpu_choice == "APPLE":
        file_name = f"requirements_apple_{'intel' if is_x86_64() else 'silicon'}.txt"
    elif gpu_choice in ["INTEL", "NONE"]:
        file_name = "requirements_cpu_only.txt"
    else:
        raise ValueError(f"Unknown GPU choice: {gpu_choice}")

    return os.path.join(requirements_base, file_name)
def get_current_commit():
    """Return the current git HEAD commit hash of the repository as a string."""
    result = run_cmd("git rev-parse HEAD", capture_output=True, environment=True)
    return result.stdout.decode('utf-8').strip()
def get_extensions_names():
    """List extension folder names that ship their own requirements.txt."""
    return [foldername for foldername in os.listdir('extensions') if os.path.isfile(os.path.join('extensions', foldername, 'requirements.txt'))]
def check_env():
    """
    Verify we are inside a dedicated conda environment; exit(1) otherwise.

    Refuses to run in the 'base' environment to avoid polluting it.
    """
    # If we have access to conda, we are probably in an environment
    conda_exist = run_cmd("conda", environment=True, capture_output=True).returncode == 0
    if not conda_exist:
        print("Conda is not installed. Exiting...")
        sys.exit(1)

    # Ensure this is a new environment and not the base environment
    if os.environ.get("CONDA_DEFAULT_ENV", "") == "base":
        print("Create an environment for this project and activate it. Exiting...")
        sys.exit(1)
def clear_cache():
    """Purge the conda and pip download caches to free disk space."""
    run_cmd("conda clean -a -y", environment=True)
    run_cmd("python -m pip cache purge", environment=True)
def run_cmd(cmd, assert_success=False, environment=False, capture_output=False, env=None):
    """
    Run a shell command, optionally inside the project's conda environment.

    assert_success: exit(1) with a message if the command returns non-zero.
    environment: prefix the command with conda activation for conda_env_path;
        on Windows this also rewrites bare "python " to the env's python.exe.
    Returns the subprocess.CompletedProcess.
    """
    # Use the conda environment
    if environment:
        if is_windows():
            conda_bat_path = os.path.join(script_dir, "installer_files", "conda", "condabin", "conda.bat")
            python_path = os.path.join(conda_env_path, "python.exe")
            cmd = cmd.replace("python ", f'"{python_path}" ')
            cmd = f'"{conda_bat_path}" activate "{conda_env_path}" >nul && {cmd}'
        else:
            conda_sh_path = os.path.join(script_dir, "installer_files", "conda", "etc", "profile.d", "conda.sh")
            cmd = f'. "{conda_sh_path}" && conda activate "{conda_env_path}" && {cmd}'

    # Set executable to None for Windows, bash for everything else
    executable = None if is_windows() else 'bash'

    # Run shell commands
    result = subprocess.run(cmd, shell=True, capture_output=capture_output, env=env, executable=executable)

    # Assert the command ran successfully
    if assert_success and result.returncode != 0:
        print(f"Command '{cmd}' failed with exit status code '{str(result.returncode)}'.\n\nExiting now.\nTry running the start/update script again.")
        sys.exit(1)

    return result
def print_big_message(message):
    """Print a message framed by a prominent asterisk banner."""
    banner = "*******************************************************************"
    print("\n\n" + banner)
    for line in message.strip().split('\n'):
        print("*", line)
    print(banner + "\n\n")
def calculate_file_hash(file_path):
    """
    Return the SHA-256 hex digest of file_path (relative to script_dir).

    Edge case: returns '' when the file does not exist, so comparisons
    still work for missing files.
    """
    p = os.path.join(script_dir, file_path)
    if os.path.isfile(p):
        with open(p, 'rb') as f:
            return hashlib.sha256(f.read()).hexdigest()
    else:
        return ''
def generate_alphabetic_sequence(index):
    """Convert a 0-based index to spreadsheet-style letters (0->A, 25->Z, 26->AA)."""
    letters = []
    while index >= 0:
        index, remainder = divmod(index, 26)
        letters.append(chr(ord('A') + remainder))
        index -= 1
    return ''.join(reversed(letters))
def get_user_choice(question, options_dict):
    """
    Show a lettered menu and block until the user types one of the keys.

    Input is upper-cased before matching, so choices are case-insensitive.
    Loops forever on invalid input.
    """
    print()
    print(question)
    print()

    for key, value in options_dict.items():
        print(f"{key}) {value}")

    print()

    choice = input("Input> ").upper()
    while choice not in options_dict.keys():
        print("Invalid choice. Please try again.")
        choice = input("Input> ").upper()

    return choice
def update_pytorch_and_python():
    """Upgrade PyTorch to the pinned version for the saved GPU choice."""
    print_big_message("Checking for PyTorch updates.")
    gpu_choice = get_gpu_choice()
    install_cmd = get_pytorch_update_command(gpu_choice)
    run_cmd(install_cmd, assert_success=True, environment=True)
def clean_outdated_pytorch_cuda_dependencies():
    """
    Uninstall packages whose installed VERSION string matches an outdated
    CUDA/ROCm/torch tag, and return the list of package names removed.

    Note the patterns are matched against the version (substring match),
    not the package name.
    """
    patterns = ["cu121", "cu122", "rocm6", "torch2.4", "torch2.6", "torch2.7", "torchvision", "torchaudio"]
    result = run_cmd("python -m pip list --format=freeze", capture_output=True, environment=True)
    matching_packages = []

    for line in result.stdout.decode('utf-8').splitlines():
        if "==" in line:
            pkg_name, version = line.split('==', 1)
            if any(pattern in version for pattern in patterns):
                matching_packages.append(pkg_name)

    if matching_packages:
        print(f"\nUninstalling: {', '.join(matching_packages)}\n")
        run_cmd(f"python -m pip uninstall -y {' '.join(matching_packages)}", assert_success=True, environment=True)

    return matching_packages
def install_webui():
    """
    First-time setup: pick a GPU, install PyTorch, then install requirements.

    Resets any previous installer state file. Edge case: CPU mode appends
    --cpu to user_data/CMD_FLAGS.txt (the file must already exist, since it
    is opened with 'r+'); AMD on Windows aborts with exit(1).
    """
    if os.path.isfile(state_file):
        os.remove(state_file)

    # Get GPU choice and save it to state
    gpu_choice = get_gpu_choice()

    # Write a flag to CMD_FLAGS.txt for CPU mode
    if gpu_choice == "NONE":
        cmd_flags_path = os.path.join(script_dir, "user_data", "CMD_FLAGS.txt")
        with open(cmd_flags_path, 'r+') as cmd_flags_file:
            if "--cpu" not in cmd_flags_file.read():
                print_big_message("Adding the --cpu flag to user_data/CMD_FLAGS.txt.")
                cmd_flags_file.write("\n--cpu\n")

    # Handle CUDA version display
    elif any((is_windows(), is_linux())) and gpu_choice == "NVIDIA_CUDA128":
        print("CUDA: 12.8")

    # No PyTorch for AMD on Windows
    elif is_windows() and gpu_choice == "AMD":
        print("PyTorch setup on Windows is not implemented yet. Exiting...")
        sys.exit(1)

    # Install Git and then Pytorch
    print_big_message("Installing PyTorch.")
    install_pytorch = get_pytorch_install_command(gpu_choice)
    run_cmd(f"conda install -y ninja git && {install_pytorch}", assert_success=True, environment=True)

    # Install the webui requirements
    update_requirements(initial_installation=True, pull=False)
def update_requirements(initial_installation=False, pull=True):
    """
    Update the repository (optionally) and (re)install the webui requirements.

    initial_installation: skip the PyTorch upgrade step and install all wheels.
    pull: run "git pull" first; if any installer script changed during the
        pull, state is saved and the process exits(1) so the user reruns it.

    Also exits(0) when the Python version or a legacy CUDA 12.4 install is
    detected, since those require a clean reinstall.
    """
    # Create .git directory if missing
    if not os.path.exists(os.path.join(script_dir, ".git")):
        run_cmd(
            "git init -b main && git remote add origin https://github.com/oobabooga/text-generation-webui && "
            "git fetch && git symbolic-ref refs/remotes/origin/HEAD refs/remotes/origin/main && "
            "git reset --hard origin/main && git branch --set-upstream-to=origin/main",
            environment=True,
            assert_success=True
        )

    # Check for outdated Python version and refuse to update
    if '.'.join(map(str, sys.version_info[:2])) != PYTHON_VERSION:
        print_big_message(
            "Your current installation uses Python {}.{}, which is outdated.\n"
            "Python {} is now required. A clean installation is needed.\n\n"
            "INSTRUCTIONS:\n"
            "1. Delete the 'installer_files' folder in your text-generation-webui directory.\n"
            "2. Run the start script again (e.g., start_windows.bat).\n\n"
            "This will create a fresh environment with the latest software.".format(*sys.version_info[:2], PYTHON_VERSION)
        )
        sys.exit(0)

    # Check for outdated CUDA 12.4 installs and refuse to update
    state = load_state()
    if state.get('gpu_choice') == 'NVIDIA':
        print_big_message(
            "Your current installation uses CUDA 12.4, which has been removed.\n"
            "To update to the new default (CUDA 12.8), a clean installation is required.\n\n"
            "INSTRUCTIONS:\n"
            "1. Delete the 'installer_files' folder in your text-generation-webui directory.\n"
            "2. Run the start script again (e.g., start_windows.bat).\n\n"
            "This will create a fresh environment with the latest software."
        )
        sys.exit(0)

    current_commit = get_current_commit()
    wheels_changed = not os.path.exists(state_file)
    installed_wheels = set()
    if not wheels_changed:
        state = load_state()
        installed_wheels = set(state.get('installed_wheels', []))
        if 'wheels_changed' in state or state.get('last_installed_commit') != current_commit:
            wheels_changed = True

    gpu_choice = get_gpu_choice()
    requirements_file = get_requirements_file(gpu_choice)

    if pull:
        # Read .whl lines before pulling
        before_pull_whl_lines = []
        if os.path.exists(requirements_file):
            with open(requirements_file, 'r') as f:
                before_pull_whl_lines = [line for line in f if '.whl' in line]

        print_big_message('Updating the local copy of the repository with "git pull"')

        # Hash files before pulling
        files_to_check = [
            'start_linux.sh', 'start_macos.sh', 'start_windows.bat', 'start_wsl.bat',
            'update_wizard_linux.sh', 'update_wizard_macos.sh', 'update_wizard_windows.bat', 'update_wizard_wsl.bat',
            'one_click.py'
        ]
        before_hashes = {file: calculate_file_hash(file) for file in files_to_check}

        # Perform the git pull
        run_cmd("git pull --autostash", assert_success=True, environment=True)
        current_commit = get_current_commit()

        # Check hashes after pulling
        after_hashes = {file: calculate_file_hash(file) for file in files_to_check}
        if os.path.exists(requirements_file):
            with open(requirements_file, 'r') as f:
                after_pull_whl_lines = [line for line in f if '.whl' in line]

            wheels_changed = wheels_changed or (before_pull_whl_lines != after_pull_whl_lines)

        # Check for changes to installer files
        for file in files_to_check:
            if before_hashes[file] != after_hashes[file]:
                print_big_message(f"File '{file}' was updated during 'git pull'. Please run the script again.")

                # Save state before exiting
                state = load_state()
                state['last_installed_commit'] = current_commit
                if wheels_changed:
                    state['wheels_changed'] = True

                save_state(state)
                sys.exit(1)

    if os.environ.get("INSTALL_EXTENSIONS", "").lower() in ("yes", "y", "true", "1", "t", "on"):
        install_extensions_requirements()

    if is_linux():
        run_cmd(f"conda install -y -c conda-forge 'libstdcxx-ng>={LIBSTDCXX_VERSION_LINUX}'", assert_success=True, environment=True)

    # Update PyTorch
    if not initial_installation:
        update_pytorch_and_python()
        clean_outdated_pytorch_cuda_dependencies()

    print_big_message(f"Installing webui requirements from file: {requirements_file}")
    print(f"GPU Choice: {gpu_choice}\n")

    # Prepare the requirements file (use a context manager so the handle is closed)
    with open(requirements_file) as f:
        textgen_requirements = f.read().splitlines()

    all_whl_lines = [line.strip() for line in textgen_requirements if '.whl' in line]

    if not initial_installation:
        if installed_wheels:
            # Per-wheel comparison: only re-download wheels that changed
            textgen_requirements = [
                line for line in textgen_requirements
                if '.whl' not in line or line.strip() not in installed_wheels
            ]
        elif not wheels_changed:
            textgen_requirements = [line for line in textgen_requirements if '.whl' not in line]

    with open('temp_requirements.txt', 'w') as file:
        file.write('\n'.join(textgen_requirements))

    # Workaround for git+ packages not updating properly.
    git_requirements = [req for req in textgen_requirements if req.startswith("git+")]
    for req in git_requirements:
        url = req.replace("git+", "")
        # Bug fix: rstrip(".git") strips any trailing '.', 'g', 'i', 't' characters
        # (e.g. "digit.git" -> "d"); removesuffix removes only the literal ".git".
        package_name = url.split("/")[-1].split("@")[0].removesuffix(".git")
        run_cmd(f"python -m pip uninstall -y {package_name}", environment=True)
        print(f"Uninstalled {package_name}")

    # Install/update the project requirements
    run_cmd("python -m pip install -r temp_requirements.txt --upgrade", assert_success=True, environment=True)

    # Save state after successful installation
    state = load_state()
    state['last_installed_commit'] = current_commit
    state['installed_wheels'] = all_whl_lines
    state.pop('wheels_changed', None)
    save_state(state)

    # Clean up
    os.remove('temp_requirements.txt')
    clear_cache()
def install_extensions_requirements():
    """
    Install every extension's requirements.txt, one extension at a time.

    Failures are tolerated (assert_success=False) since some extensions are
    known not to install on Windows.
    """
    print_big_message("Installing extensions requirements.\nSome of these may fail on Windows.\nDon\'t worry if you see error messages, as they will not affect the main program.")
    extensions = get_extensions_names()
    for i, extension in enumerate(extensions):
        print(f"\n\n--- [{i + 1}/{len(extensions)}]: {extension}\n\n")
        extension_req_path = os.path.join("extensions", extension, "requirements.txt")
        run_cmd(f"python -m pip install -r {extension_req_path} --upgrade", assert_success=False, environment=True)
def launch_webui():
    """Start server.py inside the conda env, forwarding the CLI flags."""
    run_cmd(f"python server.py {flags}", environment=True)
if __name__ == "__main__":
# Verifies we are in a conda environment
check_env()
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--update-wizard', action='store_true', help='Launch a menu with update options.')
args, _ = parser.parse_known_args()
if args.update_wizard:
while True:
choice = get_user_choice(
"What would you like to do?",
{
'A': 'Update the web UI',
'B': 'Install/update extensions requirements',
'C': 'Revert local changes to repository files with \"git reset --hard\"',
'N': 'Nothing (exit)'
},
)
if choice == 'A':
update_requirements()
elif choice == 'B':
choices = {'A': 'All extensions'}
for i, name in enumerate(get_extensions_names()):
key = generate_alphabetic_sequence(i + 1)
choices[key] = name
choice = get_user_choice("What extension?", choices)
if choice == 'A':
install_extensions_requirements()
else:
extension_req_path = os.path.join("extensions", choices[choice], "requirements.txt")
run_cmd(f"python -m pip install -r {extension_req_path} --upgrade", assert_success=False, environment=True)
update_requirements(pull=False)
elif choice == 'C':
run_cmd("git reset --hard", assert_success=True, environment=True)
elif choice == 'N':
sys.exit()
else:
if not is_installed():
install_webui()
os.chdir(script_dir)
if os.environ.get("LAUNCH_AFTER_INSTALL", "").lower() in ("no", "n", "false", "0", "f", "off"):
print_big_message("Will now exit due to LAUNCH_AFTER_INSTALL.")
sys.exit()
# Check if a model has been downloaded yet
if '--model-dir' in flags:
# Splits on ' ' or '=' while maintaining spaces within quotes
flags_list = re.split(' +(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)|=', flags)
model_dir = [flags_list[(flags_list.index(flag) + 1)] for flag in flags_list if flag == '--model-dir'][0].strip('"\'')
else:
model_dir = 'user_data/models'
if len([item for item in glob.glob(f'{model_dir}/*') if not item.endswith(('.txt', '.yaml'))]) == 0:
print_big_message("You haven't downloaded any model yet.\nOnce the web UI launches, head over to the \"Model\" tab and download one.")
# Workaround for llama-cpp-python loading paths in CUDA env vars even if they do not exist
conda_path_bin = os.path.join(conda_env_path, "bin")
if not os.path.exists(conda_path_bin):
os.mkdir(conda_path_bin)
# Launch the webui
launch_webui() | --- +++ @@ -61,6 +61,7 @@
def load_state():
+ """Load installer state from JSON file"""
if os.path.exists(state_file):
try:
with open(state_file, 'r') as f:
@@ -71,11 +72,13 @@
def save_state(state):
+ """Save installer state to JSON file"""
with open(state_file, 'w') as f:
json.dump(state, f)
def get_gpu_choice():
+ """Get GPU choice from state file or ask user"""
state = load_state()
gpu_choice = state.get('gpu_choice')
@@ -106,6 +109,7 @@
def get_pytorch_install_command(gpu_choice):
+ """Get PyTorch installation command based on GPU choice"""
base_cmd = f"python -m pip install torch=={TORCH_VERSION} "
pypi_fallback = " --extra-index-url https://pypi.org/simple/"
@@ -123,6 +127,7 @@
def get_pytorch_update_command(gpu_choice):
+ """Get PyTorch update command based on GPU choice"""
base_cmd = f"python -m pip install --upgrade torch=={TORCH_VERSION} "
pypi_fallback = " --extra-index-url https://pypi.org/simple/"
@@ -140,6 +145,7 @@
def get_requirements_file(gpu_choice):
+ """Get requirements file path based on GPU choice"""
requirements_base = os.path.join("requirements", "full")
if gpu_choice == "NVIDIA_CUDA128":
@@ -538,4 +544,4 @@ os.mkdir(conda_path_bin)
# Launch the webui
- launch_webui()+ launch_webui()
| https://raw.githubusercontent.com/oobabooga/text-generation-webui/HEAD/one_click.py |
Add docstrings explaining edge cases | import base64
import copy
import functools
import html
import json
import pprint
import re
import shutil
import threading
import time
from datetime import datetime
from functools import partial
from pathlib import Path
import markupsafe
import yaml
from jinja2.ext import loopcontrols
from jinja2.sandbox import ImmutableSandboxedEnvironment
from PIL import Image
import modules.shared as shared
from modules import utils
from modules.extensions import apply_extensions
from modules.html_generator import (
chat_html_wrapper,
convert_to_markdown,
extract_thinking_block,
make_thumbnail
)
from modules.image_utils import open_image_safely
from modules.logging_colors import logger
from modules.reasoning import THINKING_FORMATS
from modules.text_generation import (
generate_reply,
get_encoded_length,
get_max_prompt_length
)
from modules.utils import (
delete_file,
get_available_characters,
get_available_users,
sanitize_filename,
save_file
)
from modules.web_search import add_web_search_attachments
_history_file_lock = threading.Lock()
def strftime_now(format):
    """Jinja2 global: format the current local time with *format*."""
    now = datetime.now()
    return now.strftime(format)
def get_current_timestamp():
    """Return the current time formatted like 'Jan 02, 2025 13:45'."""
    now = datetime.now()
    return now.strftime('%b %d, %Y %H:%M')
def update_message_metadata(metadata_dict, role, index, **fields):
    """Merge *fields* into the metadata entry keyed "{role}_{index}", creating it if absent."""
    entry = metadata_dict.setdefault(f"{role}_{index}", {})
    entry.update(fields)
jinja_env = ImmutableSandboxedEnvironment(
trim_blocks=True,
lstrip_blocks=True,
extensions=[loopcontrols]
)
def custom_tojson(value, indent=None, ensure_ascii=True):
    """
    Replacement for Jinja2's |tojson filter that exposes ensure_ascii.

    Wrapped in markupsafe.Markup so the JSON string is not HTML-escaped
    by the template engine.
    """
    return markupsafe.Markup(json.dumps(value, indent=indent, ensure_ascii=ensure_ascii))
jinja_env.filters["tojson"] = custom_tojson
jinja_env.globals["strftime_now"] = strftime_now
def _raise_exception(message):
    """Jinja2 'raise_exception' global: abort template rendering with a ValueError."""
    raise ValueError(message)
jinja_env.globals["raise_exception"] = _raise_exception
_template_cache = {}
def get_compiled_template(template_str):
    """
    Return a compiled Jinja2 template for template_str, caching by the raw
    template string.

    Edge case: _template_cache is never evicted, so it grows with the number
    of distinct template strings seen during the process lifetime.
    """
    compiled = _template_cache.get(template_str)
    if compiled is None:
        compiled = jinja_env.from_string(template_str)
        _template_cache[template_str] = compiled
    return compiled
def str_presenter(dumper, data):
    """
    YAML representer: emit multi-line strings in literal block style ('|')
    for readability; single-line strings use the default style.

    NOTE(review): PyYAML appears to fall back to a quoted style when a line
    has trailing whitespace — confirm if that matters for saved histories.
    """
    if data.count('\n') > 0:
        return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
    return dumper.represent_scalar('tag:yaml.org,2002:str', data)
yaml.add_representer(str, str_presenter)
yaml.representer.SafeRepresenter.add_representer(str, str_presenter)
class _JsonDict(dict):
def __str__(self):
return json.dumps(self, ensure_ascii=False)
def __add__(self, other):
return str(self) + other
def __radd__(self, other):
return other + str(self)
def _deserialize_tool_call_arguments(tool_calls):
    """
    Return a copy of tool_calls where each function's 'arguments' value is a
    _JsonDict, so templates can index it like a dict while it stringifies
    back to JSON.

    Edge cases:
    - 'arguments' given as a JSON string is parsed; if parsing fails, the
      original string is kept unchanged.
    - Tool calls are shallow-copied, so nested values other than 'function'
      are shared with the input.
    - A non-dict 'function' value is passed through untouched.
    """
    result = []
    for tc in tool_calls:
        tc = copy.copy(tc)
        func = tc.get('function', {})
        if isinstance(func, dict):
            func = dict(func)
            args = func.get('arguments')
            if isinstance(args, str):
                try:
                    func['arguments'] = _JsonDict(json.loads(args))
                except (json.JSONDecodeError, ValueError):
                    pass
            elif isinstance(args, dict) and not isinstance(args, _JsonDict):
                func['arguments'] = _JsonDict(args)

        tc['function'] = func
        result.append(tc)

    return result
def _expand_tool_sequence(tool_seq):
    """
    Flatten a stored tool-call sequence into a list of chat messages.

    Edge cases:
    - Items with neither 'tool_calls' nor role == 'tool' are silently dropped.
    - Tool-call IDs that never received a result get a synthetic empty "tool"
      message appended, so templates see a complete call/result pairing.
    - Tool results whose IDs match no call are passed through unchanged.
    """
    messages = []
    expected_ids = []
    seen_ids = set()

    for item in tool_seq:
        if 'tool_calls' in item:
            deserialized = _deserialize_tool_call_arguments(item['tool_calls'])
            messages.append({
                "role": "assistant",
                "content": item.get('content', ''),
                "tool_calls": deserialized
            })
            for tc in item['tool_calls']:
                tc_id = tc.get('id', '')
                if tc_id:
                    expected_ids.append(tc_id)
        elif item.get('role') == 'tool':
            messages.append({
                "role": "tool",
                "content": item['content'],
                "tool_call_id": item.get('tool_call_id', '')
            })
            seen_ids.add(item.get('tool_call_id', ''))

    # Fill in synthetic results for any orphaned tool call IDs
    for tc_id in expected_ids:
        if tc_id not in seen_ids:
            messages.append({
                "role": "tool",
                "content": "",
                "tool_call_id": tc_id
            })

    return messages
def generate_chat_prompt(user_input, state, **kwargs):
    """Assemble the full prompt string sent to the model from the chat history.

    Args:
        user_input: Latest user message text (may be empty on regenerate).
        state: Generation/UI state dict (mode, template strings, character
            names, context, tools, truncation settings, history, ...).
        **kwargs:
            impersonate (bool): Build a prompt for generating as the user.
            _continue (bool): Continue the last assistant message in place.
            also_return_rows (bool): Also return the list of message contents.
            history (dict): History to use instead of ``state['history']``.

    Returns:
        The rendered prompt string, or ``(prompt, rows)`` when
        ``also_return_rows`` is True.

    Raises:
        ValueError: When the prompt cannot be made to fit the available
            context length even after bisecting the user input.
    """
    impersonate = kwargs.get('impersonate', False)
    _continue = kwargs.get('_continue', False)
    also_return_rows = kwargs.get('also_return_rows', False)
    history_data = kwargs.get('history', state['history'])
    history = history_data['internal']
    metadata = history_data.get('metadata', {})

    # Templates
    chat_template_str = state['chat_template_str']
    if state['mode'] != 'instruct':
        chat_template_str = replace_character_names(chat_template_str, state['name1'], state['name2'])

    instruction_template = get_compiled_template(state['instruction_template_str'])
    chat_template = get_compiled_template(chat_template_str)

    instruct_renderer = partial(
        instruction_template.render,
        builtin_tools=None,
        tools=state['tools'] if 'tools' in state else None,
        tools_in_user_message=False,
        add_generation_prompt=False,
        enable_thinking=state['enable_thinking'],
        reasoning_effort=state['reasoning_effort'],
        thinking_budget=-1 if state.get('enable_thinking', True) else 0,
        bos_token=shared.bos_token,
        eos_token=shared.eos_token,
    )

    chat_renderer = partial(
        chat_template.render,
        add_generation_prompt=False,
        name1=state['name1'],
        name2=state['name2'],
        user_bio=replace_character_names(state['user_bio'], state['name1'], state['name2']),
        tools=state['tools'] if 'tools' in state else None,
    )

    messages = []

    # Optional system message comes first
    if state['mode'] == 'instruct':
        renderer = instruct_renderer
        if state['custom_system_message'].strip() != '':
            messages.append({"role": "system", "content": state['custom_system_message']})
    else:
        renderer = chat_renderer
        if state['context'].strip() != '' or state['user_bio'].strip() != '':
            context = replace_character_names(state['context'], state['name1'], state['name2'])
            messages.append({"role": "system", "content": context})

    # Walk the history newest-to-oldest, always inserting at a fixed position
    # right after the system message so the result ends up chronological.
    insert_pos = len(messages)
    for i, entry in enumerate(reversed(history)):
        user_msg = entry[0].strip()
        assistant_msg = entry[1].strip()
        tool_msg = entry[2].strip() if len(entry) > 2 else ''
        entry_meta = entry[3] if len(entry) > 3 else {}
        row_idx = len(history) - i - 1

        if tool_msg:
            tool_message = {"role": "tool", "content": tool_msg}
            if "tool_call_id" in entry_meta:
                tool_message["tool_call_id"] = entry_meta["tool_call_id"]
            messages.insert(insert_pos, tool_message)

        if not assistant_msg and entry_meta.get('tool_calls'):
            # Assistant message with only tool_calls and no text content
            messages.insert(insert_pos, {"role": "assistant", "content": "", "tool_calls": _deserialize_tool_call_arguments(entry_meta['tool_calls'])})
        elif assistant_msg:
            # Handle GPT-OSS as a special case
            if '<|channel|>analysis<|message|>' in assistant_msg or '<|channel|>final<|message|>' in assistant_msg:
                thinking_content = ""
                final_content = ""

                # Extract analysis content if present
                if '<|channel|>analysis<|message|>' in assistant_msg:
                    parts = assistant_msg.split('<|channel|>analysis<|message|>', 1)
                    if len(parts) > 1:
                        # The content is everything after the tag
                        potential_content = parts[1]
                        # Now, find the end of this content block
                        analysis_end_tag = '<|end|>'
                        if analysis_end_tag in potential_content:
                            thinking_content = potential_content.split(analysis_end_tag, 1)[0].strip()
                        else:
                            # Fallback: if no <|end|> tag, stop at the start of the final channel if it exists
                            final_channel_tag = '<|channel|>final<|message|>'
                            if final_channel_tag in potential_content:
                                thinking_content = potential_content.split(final_channel_tag, 1)[0].strip()
                            else:
                                thinking_content = potential_content.strip()

                # Extract final content if present
                final_tag_to_find = '<|channel|>final<|message|>'
                if final_tag_to_find in assistant_msg:
                    parts = assistant_msg.split(final_tag_to_find, 1)
                    if len(parts) > 1:
                        # The content is everything after the tag
                        potential_content = parts[1]
                        # Now, find the end of this content block
                        final_end_tag = '<|end|>'
                        if final_end_tag in potential_content:
                            final_content = potential_content.split(final_end_tag, 1)[0].strip()
                        else:
                            final_content = potential_content.strip()

                # Insert as structured message
                msg_dict = {"role": "assistant", "content": final_content}
                if '<|channel|>analysis<|message|>' in assistant_msg:
                    msg_dict["thinking"] = thinking_content

                messages.insert(insert_pos, msg_dict)
            # Handle Seed-OSS
            elif '<seed:think>' in assistant_msg:
                thinking_content = ""
                final_content = assistant_msg

                # Extract thinking content if present
                if '<seed:think>' in assistant_msg:
                    parts = assistant_msg.split('<seed:think>', 1)
                    if len(parts) > 1:
                        potential_content = parts[1]
                        if '</seed:think>' in potential_content:
                            thinking_content = potential_content.split('</seed:think>', 1)[0].strip()
                            final_content = parts[0] + potential_content.split('</seed:think>', 1)[1]
                        else:
                            thinking_content = potential_content.strip()
                            final_content = parts[0]

                # Insert as structured message
                msg_dict = {"role": "assistant", "content": final_content.strip()}
                if thinking_content:
                    msg_dict["reasoning_content"] = thinking_content

                messages.insert(insert_pos, msg_dict)
            else:
                # Default case (used by all other models)
                messages.insert(insert_pos, {"role": "assistant", "content": assistant_msg})

            # Attach tool_calls metadata to the assistant message if present
            if entry_meta.get('tool_calls') and messages[insert_pos].get('role') == 'assistant':
                messages[insert_pos]['tool_calls'] = _deserialize_tool_call_arguments(entry_meta['tool_calls'])

        # Expand tool_sequence from metadata (inserted AFTER assistant so that
        # the final order is: user → tool_calls → tool_results → final_answer)
        meta_key = f"assistant_{row_idx}"
        tool_seq = metadata.get(meta_key, {}).get('tool_sequence', [])
        if tool_seq:
            for msg in reversed(_expand_tool_sequence(tool_seq)):
                messages.insert(insert_pos, msg)

        if entry_meta.get('role') == 'system':
            if user_msg:
                messages.insert(insert_pos, {"role": "system", "content": user_msg})
        elif user_msg not in ['', '<|BEGIN-VISIBLE-CHAT|>']:
            # Check for user message attachments in metadata
            user_key = f"user_{row_idx}"
            enhanced_user_msg = user_msg

            # Add attachment content if present AND if past attachments are enabled
            if user_key in metadata and "attachments" in metadata[user_key]:
                attachments_text = ""
                image_refs = ""
                for attachment in metadata[user_key]["attachments"]:
                    if attachment.get("type") == "image":
                        # Add image reference for multimodal models
                        image_refs += "<__media__>"
                    elif state.get('include_past_attachments', True):
                        # Handle text/PDF attachments
                        filename = attachment.get("name", "file")
                        content = attachment.get("content", "")
                        if attachment.get("type") == "text/html" and attachment.get("url"):
                            attachments_text += f"\nName: (unknown)\nURL: {attachment['url']}\nContents:\n\n=====\n{content}\n=====\n\n"
                        else:
                            attachments_text += f"\nName: (unknown)\nContents:\n\n=====\n{content}\n=====\n\n"

                if image_refs:
                    enhanced_user_msg = f"{image_refs}\n\n{enhanced_user_msg}"
                if attachments_text:
                    enhanced_user_msg += f"\n\nATTACHMENTS:\n{attachments_text}"

            messages.insert(insert_pos, {"role": "user", "content": enhanced_user_msg})

    # Handle the current user input
    user_input = user_input.strip()

    # Check if we have attachments
    if not (impersonate or _continue):
        has_attachments = False
        if len(history_data.get('metadata', {})) > 0:
            current_row_idx = len(history)
            user_key = f"user_{current_row_idx}"
            has_attachments = user_key in metadata and "attachments" in metadata[user_key]

        if user_input or has_attachments:
            # For the current user input being processed, check if we need to add attachments
            if len(history_data.get('metadata', {})) > 0:
                current_row_idx = len(history)
                user_key = f"user_{current_row_idx}"

                if user_key in metadata and "attachments" in metadata[user_key]:
                    attachments_text = ""
                    image_refs = ""
                    for attachment in metadata[user_key]["attachments"]:
                        if attachment.get("type") == "image":
                            image_refs += "<__media__>"
                        else:
                            filename = attachment.get("name", "file")
                            content = attachment.get("content", "")
                            if attachment.get("type") == "text/html" and attachment.get("url"):
                                attachments_text += f"\nName: (unknown)\nURL: {attachment['url']}\nContents:\n\n=====\n{content}\n=====\n\n"
                            else:
                                attachments_text += f"\nName: (unknown)\nContents:\n\n=====\n{content}\n=====\n\n"

                    if image_refs:
                        user_input = f"{image_refs}\n\n{user_input}"
                    if attachments_text:
                        user_input += f"\n\nATTACHMENTS:\n{attachments_text}"

            messages.append({"role": "user", "content": user_input})

    # Expand tool_sequence for the current entry (excluded from the
    # history loop during regenerate — needed so the model sees prior
    # tool calls and results when re-generating the final answer).
    current_tool_seq = metadata.get(f"assistant_{len(history)}", {}).get('tool_sequence', [])
    messages.extend(_expand_tool_sequence(current_tool_seq))

    if impersonate and state['mode'] != 'chat-instruct':
        messages.append({"role": "user", "content": "fake user message replace me"})

    def make_prompt(messages):
        # Render `messages` with the active template, handling the
        # placeholder markers used for continue/impersonate modes.
        last_message = messages[-1].copy()
        if _continue:
            if state['mode'] == 'chat-instruct':
                messages = messages[:-1]
            else:
                messages[-1]["content"] = "fake assistant message replace me"
                messages.append({"role": "assistant", "content": "this will get deleted"})

        if state['mode'] != 'chat-instruct':
            add_generation_prompt = (not _continue and not impersonate)
        else:
            add_generation_prompt = False

        prompt = renderer(
            messages=messages,
            add_generation_prompt=add_generation_prompt
        )

        if state['mode'] == 'chat-instruct':
            # Wrap the chat prompt inside the chat-instruct command, then
            # render that command through the instruction template.
            command = state['chat-instruct_command']
            command = command.replace('<|character|>', state['name2'] if not impersonate else state['name1'])
            command = command.replace('<|prompt|>', prompt)
            command = replace_character_names(command, state['name1'], state['name2'])

            outer_messages = []
            if state['custom_system_message'].strip() != '':
                outer_messages.append({"role": "system", "content": state['custom_system_message']})

            outer_messages.append({"role": "user", "content": command})
            if _continue:
                outer_messages.append(last_message.copy())
                outer_messages[-1]["content"] = "fake assistant message replace me"
                outer_messages.append({"role": "assistant", "content": "this will get deleted"})

            prompt = instruct_renderer(
                messages=outer_messages,
                add_generation_prompt=not _continue
            )

        if _continue:
            prompt = prompt.split("fake assistant message replace me", 1)[0]
            content = last_message.get("content", "")
            partial_thought = last_message.get("thinking", "") or last_message.get("reasoning_content", "")

            # Handle partial thinking blocks (GPT-OSS and Seed-OSS)
            if not content and partial_thought and partial_thought.strip():
                search_string = partial_thought.strip()
                index = prompt.rfind(search_string)
                if index != -1:
                    prompt = prompt[:index] + partial_thought
                else:
                    # Fallback if search fails: just append the thought
                    prompt += partial_thought
            else:
                # All other cases
                prompt += content

        if impersonate:
            prompt = prompt.split("fake user message replace me", 1)[0]
            prompt += user_input

        if state['mode'] in ['chat', 'chat-instruct'] and not impersonate and not _continue:
            prompt += apply_extensions('bot_prefix', "", state)

        return prompt

    prompt = make_prompt(messages)

    # Handle truncation
    if shared.tokenizer is not None:
        max_length = get_max_prompt_length(state)
        encoded_length = get_encoded_length(prompt)
        while len(messages) > 0 and encoded_length > max_length:
            # Remove old message, save system message
            if len(messages) > 2 and messages[0]['role'] == 'system':
                messages.pop(1)

            # Remove old message when no system message is present
            elif len(messages) > 1 and messages[0]['role'] != 'system':
                messages.pop(0)

            # Resort to truncating the user input
            else:
                user_message = messages[-1]['content']

                # Bisect the truncation point
                left, right = 0, len(user_message)
                while left < right:
                    mid = (left + right + 1) // 2
                    messages[-1]['content'] = user_message[:mid]
                    prompt = make_prompt(messages)
                    encoded_length = get_encoded_length(prompt)

                    if encoded_length <= max_length:
                        left = mid
                    else:
                        right = mid - 1

                messages[-1]['content'] = user_message[:left]
                prompt = make_prompt(messages)
                encoded_length = get_encoded_length(prompt)
                if encoded_length > max_length:
                    logger.error(f"Failed to build the chat prompt. The input is too long for the available context length.\n\nTruncation length: {state['truncation_length']}\nmax_new_tokens: {state['max_new_tokens']} (is it too high?)\nAvailable context length: {max_length}\n")
                    raise ValueError
                else:
                    # Calculate token counts for the log message
                    original_user_tokens = get_encoded_length(user_message)
                    truncated_user_tokens = get_encoded_length(user_message[:left])
                    total_context = max_length + state['max_new_tokens']

                    logger.warning(
                        f"User message truncated from {original_user_tokens} to {truncated_user_tokens} tokens. "
                        f"Context full: {max_length} input tokens ({total_context} total, {state['max_new_tokens']} for output). "
                        f"Increase ctx-size while loading the model to avoid truncation."
                    )

                break

            prompt = make_prompt(messages)
            encoded_length = get_encoded_length(prompt)

    if also_return_rows:
        return prompt, [message['content'] for message in messages]
    else:
        return prompt
def count_prompt_tokens(text_input, state):
    """Return an HTML snippet reporting how many tokens the history plus
    *text_input* would occupy, relative to the truncation length.

    Accepts either a plain string or a ``{'text': ..., 'files': [...]}``
    dict. Works on deep copies so the real history/state stay untouched.
    Returns an error string instead of raising on failure.
    """
    if shared.tokenizer is None:
        return "Tokenizer not available"

    try:
        # Handle dict format with text and files
        if isinstance(text_input, dict):
            text = text_input.get('text', '')
            files = text_input.get('files', [])
        else:
            text = text_input
            files = []

        # Create temporary history copy to add attachments
        temp_history = copy.deepcopy(state['history'])
        temp_history.setdefault('metadata', {})

        # Register attachments on the row the pending message would occupy
        if files:
            row_idx = len(temp_history['internal'])
            for file_path in files:
                add_message_attachment(temp_history, row_idx, file_path, is_user=True)

        # Create temp state with modified history
        temp_state = copy.deepcopy(state)
        temp_state['history'] = temp_history

        # Build prompt using existing logic
        prompt = generate_chat_prompt(text, temp_state)
        current_tokens = get_encoded_length(prompt)
        max_tokens = temp_state['truncation_length']
        percentage = (current_tokens / max_tokens) * 100 if max_tokens > 0 else 0

        return f"History + Input:<br/>{current_tokens:,} / {max_tokens:,} tokens ({percentage:.1f}%)"
    except Exception as e:
        logger.error(f"Error counting tokens: {e}")
        return f"Error: {str(e)}"
def get_stopping_strings(state):
    """Derive stopping strings for generation from the active templates.

    Renders a fake two-turn conversation through the instruction and/or chat
    template, then inspects the text between the known message contents to
    discover the turn-delimiting tags. User-supplied stopping strings are
    merged in as well.

    Note: this pops 'stopping_strings' from *state* if present.

    Returns:
        list[str]: Deduplicated stopping strings.
    """
    stopping_strings = []
    renderers = []

    if state['mode'] in ['instruct', 'chat-instruct']:
        template = get_compiled_template(state['instruction_template_str'])
        renderer = partial(template.render, add_generation_prompt=False, bos_token=shared.bos_token, eos_token=shared.eos_token)
        renderers.append(renderer)

    if state['mode'] in ['chat']:
        template = get_compiled_template(state['chat_template_str'])
        renderer = partial(template.render, add_generation_prompt=False, name1=state['name1'], name2=state['name2'])
        renderers.append(renderer)

    fake_messages = [
        {"role": "user", "content": "first user message"},
        {"role": "assistant", "content": "first assistant message"},
        {"role": "user", "content": "second user message"},
        {"role": "assistant", "content": "second assistant message"},
    ]

    stopping_strings = []
    for renderer in renderers:
        prompt = renderer(messages=fake_messages)

        # Find positions of each message content
        first_user_end = prompt.find("first user message") + len("first user message")
        first_assistant_start = prompt.find("first assistant message")
        first_assistant_end = prompt.find("first assistant message") + len("first assistant message")
        second_user_start = prompt.find("second user message")
        second_assistant_end = prompt.find("second assistant message") + len("second assistant message")

        # Extract pieces of text potentially containing unique stopping strings
        texts = [
            prompt[first_user_end:first_assistant_start],
            prompt[first_assistant_end:second_user_start],
            prompt[second_assistant_end:]
        ]

        # Keep the first delimiter-like token found in each in-between span
        for text in texts:
            stripped_text = text.strip()
            if stripped_text.startswith("<") and ">" in stripped_text:
                stopping_strings.append(stripped_text.split(">")[0] + ">")
            elif stripped_text.startswith("[") and "]" in stripped_text:
                stopping_strings.append(stripped_text.split("]")[0] + "]")
            elif stripped_text.startswith("(") and ")" in stripped_text:
                stopping_strings.append(stripped_text.split(")")[0] + ")")
            elif stripped_text.startswith("{") and "}" in stripped_text:
                stopping_strings.append(stripped_text.split("}")[0] + "}")
            elif ":" in text:
                stopping_strings.append(text.split(":")[0] + ":")

    if 'stopping_strings' in state and isinstance(state['stopping_strings'], list):
        stopping_strings += state.pop('stopping_strings')

    # Remove redundant items that start with another item
    result = [item for item in stopping_strings if not any(item.startswith(other) and item != other for other in stopping_strings)]
    result = list(set(result))

    # Handle GPT-OSS as a special case
    if '<|channel|>final<|message|>' in state['instruction_template_str'] and "<|end|>" in result:
        result.remove("<|end|>")
        result.append("<|result|>")
        result = list(set(result))

    if shared.args.verbose:
        logger.info("STOPPING_STRINGS=")
        pprint.PrettyPrinter(indent=4, sort_dicts=False).pprint(result)
        print()

    return result
def add_message_version(history, role, row_idx, is_current=True):
    """Snapshot the current text of a message as a new version in metadata.

    Stores internal and visible text plus a timestamp under
    ``metadata["{role}_{row_idx}"]["versions"]``. When *is_current* is True,
    the new snapshot also becomes the active version.
    """
    key = f"{role}_{row_idx}"
    metadata = history.setdefault('metadata', {})
    entry = metadata.setdefault(key, {})
    versions = entry.setdefault("versions", [])

    # User text lives at column 0 of a history row, assistant text at column 1
    column = 0 if role == 'user' else 1
    versions.append({
        "content": history['internal'][row_idx][column],
        "visible_content": history['visible'][row_idx][column],
        "timestamp": get_current_timestamp()
    })

    if is_current:
        # Point the active version at the snapshot just appended (the last one)
        entry["current_version_index"] = len(versions) - 1
def add_message_attachment(history, row_idx, file_path, is_user=True):
    """Attach a file to the message at *row_idx*, recording it in history metadata.

    Images are embedded as base64 data URLs, PDF/DOCX files are converted to
    plain text, and anything else is read as UTF-8 text.

    Returns:
        dict | None: The attachment record appended to metadata, or None if
        processing failed (the error is logged).
    """
    if 'metadata' not in history:
        history['metadata'] = {}

    key = f"{'user' if is_user else 'assistant'}_{row_idx}"
    if key not in history['metadata']:
        history['metadata'][key] = {"timestamp": get_current_timestamp()}
    if "attachments" not in history['metadata'][key]:
        history['metadata'][key]["attachments"] = []

    # Get file info using pathlib
    path = Path(file_path)
    filename = path.name
    file_extension = path.suffix.lower()

    try:
        # Handle image files
        if file_extension in ['.jpg', '.jpeg', '.png', '.webp', '.bmp', '.gif']:
            # Convert image to base64
            with open(path, 'rb') as f:
                image_data = base64.b64encode(f.read()).decode('utf-8')

            # Determine MIME type from extension
            mime_type_map = {
                '.jpg': 'image/jpeg',
                '.jpeg': 'image/jpeg',
                '.png': 'image/png',
                '.webp': 'image/webp',
                '.bmp': 'image/bmp',
                '.gif': 'image/gif'
            }
            mime_type = mime_type_map.get(file_extension, 'image/jpeg')

            # Format as data URL
            data_url = f"data:{mime_type};base64,{image_data}"

            # Generate unique image ID (1-based count of images already on this message)
            image_id = len([att for att in history['metadata'][key]["attachments"] if att.get("type") == "image"]) + 1

            attachment = {
                "name": filename,
                "type": "image",
                "image_data": data_url,
                "image_id": image_id,
            }
        elif file_extension == '.pdf':
            # Process PDF file
            content = extract_pdf_text(path)
            attachment = {
                "name": filename,
                "type": "application/pdf",
                "content": content,
            }
        elif file_extension == '.docx':
            content = extract_docx_text(path)
            attachment = {
                "name": filename,
                "type": "application/docx",
                "content": content,
            }
        else:
            # Default handling for text files
            with open(path, 'r', encoding='utf-8') as f:
                content = f.read()

            attachment = {
                "name": filename,
                "type": "text/plain",
                "content": content,
            }

        history['metadata'][key]["attachments"].append(attachment)
        return attachment  # Return the attachment for reuse
    except Exception as e:
        logger.error(f"Error processing attachment (unknown): {e}")
        return None
def extract_pdf_text(pdf_path):
    """Extract plain text from a PDF, with a blank line between pages.

    Returns an error placeholder string instead of raising if the file
    cannot be read.
    """
    import pymupdf

    try:
        pages = []
        with pymupdf.open(pdf_path) as doc:
            for page in doc:
                pages.append(page.get_text() + "\n\n")
        return "".join(pages).strip()
    except Exception as e:
        logger.error(f"Error extracting text from PDF: {e}")
        return f"[Error extracting PDF text: {str(e)}]"
def extract_docx_text(docx_path):
    """Extract plain text from a .docx: section headers, then body blocks
    (paragraphs and tables in document order), then section footers.

    Table rows are flattened to tab-separated cell text. Returns an error
    placeholder string instead of raising on failure.
    """
    try:
        import docx

        doc = docx.Document(docx_path)
        pieces = []

        # 1) Non-empty header paragraphs from each section
        for section in doc.sections:
            for paragraph in section.header.paragraphs:
                stripped = paragraph.text.strip()
                if stripped:
                    pieces.append(stripped)

        # 2) Body blocks (paragraphs and tables) in document order
        for element in doc.element.body.iterchildren():
            if isinstance(element, docx.oxml.text.paragraph.CT_P):
                stripped = docx.text.paragraph.Paragraph(element, doc).text.strip()
                if stripped:
                    pieces.append(stripped)
            elif isinstance(element, docx.oxml.table.CT_Tbl):
                for row in docx.table.Table(element, doc).rows:
                    pieces.append("\t".join(cell.text.strip() for cell in row.cells))

        # 3) Non-empty footer paragraphs from each section
        for section in doc.sections:
            for paragraph in section.footer.paragraphs:
                stripped = paragraph.text.strip()
                if stripped:
                    pieces.append(stripped)

        return "\n".join(pieces)
    except Exception as e:
        logger.error(f"Error extracting text from DOCX: {e}")
        return f"[Error extracting DOCX text: {str(e)}]"
def generate_search_query(user_message, state):
    """Ask the model to condense *user_message* into a short web search query.

    Generation runs on a tweaked copy of *state* (thinking disabled, low
    reasoning effort). Any leading reasoning-block markup is stripped from
    the reply, as are surrounding double quotes.
    """
    # Augment the user message with search instruction
    augmented_message = f"{user_message}\n\n=====\n\nPlease turn the message above into a short web search query in the same language as the message. Respond with only the search query, nothing else."

    # Use a minimal state for search query generation but keep the full history
    search_state = state.copy()
    search_state['auto_max_new_tokens'] = True
    search_state['enable_thinking'] = False
    search_state['reasoning_effort'] = 'low'
    search_state['start_with'] = ""

    # Generate the full prompt using existing history + augmented message
    formatted_prompt = generate_chat_prompt(augmented_message, search_state)

    query = ""
    for partial_reply in generate_reply(formatted_prompt, search_state, stopping_strings=[], is_chat=True):
        query = partial_reply

    # Keep only the text after the first matching thinking-block delimiter
    for delimiter in ("</think>", "<|start|>assistant<|channel|>final<|message|>", "<|channel|>final<|message|>", "</seed:think>"):
        if delimiter in query:
            query = query.rsplit(delimiter, 1)[1]
            break

    # Strip and remove surrounding quotes if present
    query = query.strip()
    if len(query) >= 2 and query.startswith('"') and query.endswith('"'):
        query = query[1:-1]

    return query
def chatbot_wrapper(text, state, regenerate=False, _continue=False, loading_message=True, for_ui=False):
    """Core chat generation loop.

    Prepares the history and metadata, builds the prompt, streams the model
    reply, and yields progressively updated history dicts with 'visible',
    'internal', and 'metadata' keys.

    Args:
        text: New user message, or a dict with 'text' and 'files' keys.
        state: Generation/UI state dict.
        regenerate: Re-generate the last assistant reply.
        _continue: Continue the last assistant reply in place.
        loading_message: Yield an initial placeholder before generation.
        for_ui: Passed through to generate_reply.

    Yields:
        dict: The updated history after each streamed chunk.
    """
    # Handle dict format with text and files
    files = []
    if isinstance(text, dict):
        files = text.get('files', [])
        text = text.get('text', '')

    history = state['history']
    output = copy.deepcopy(history)
    output = apply_extensions('history', output)
    state = apply_extensions('state', state)

    # Handle GPT-OSS as a special case
    if '<|channel|>final<|message|>' in state['instruction_template_str']:
        state['skip_special_tokens'] = False

    # Let the jinja2 template handle the BOS token
    if state['mode'] in ['instruct', 'chat-instruct']:
        state['add_bos_token'] = False

    # Initialize metadata if not present
    if 'metadata' not in output:
        output['metadata'] = {}

    visible_text = None
    stopping_strings = get_stopping_strings(state)
    is_stream = state['stream']

    # Prepare the input
    if not (regenerate or _continue):
        visible_text = html.escape(text)

        # Process file attachments and store in metadata
        row_idx = len(output['internal'])

        # Add attachments to metadata only, not modifying the message text
        for file_path in files:
            add_message_attachment(output, row_idx, file_path, is_user=True)

        # Add web search results as attachments if enabled
        if state.get('enable_web_search', False):
            search_query = generate_search_query(text, state)
            add_web_search_attachments(output, row_idx, text, search_query, state)

        # Apply extensions
        text, visible_text = apply_extensions('chat_input', text, visible_text, state)
        text = apply_extensions('input', text, state, is_chat=True)

        # Current row index
        output['internal'].append([text, ''])
        output['visible'].append([visible_text, ''])

        # Add metadata with timestamp
        update_message_metadata(output['metadata'], "user", row_idx, timestamp=get_current_timestamp())

        # *Is typing...*
        if loading_message:
            yield {
                'visible': output['visible'][:-1] + [[output['visible'][-1][0], shared.processing_message]],
                'internal': output['internal'],
                'metadata': output['metadata']
            }
    else:
        text, visible_text = output['internal'][-1][0], output['visible'][-1][0]
        if regenerate and not state.get('_tool_turn'):
            row_idx = len(output['internal']) - 1

            # Store the old response as a version before regenerating
            if not output['metadata'].get(f"assistant_{row_idx}", {}).get('versions'):
                add_message_version(output, "assistant", row_idx, is_current=False)

            # Add new empty version (will be filled during streaming)
            key = f"assistant_{row_idx}"
            output['metadata'][key]["versions"].append({
                "content": "",
                "visible_content": "",
                "timestamp": get_current_timestamp()
            })
            output['metadata'][key]["current_version_index"] = len(output['metadata'][key]["versions"]) - 1

            if loading_message:
                yield {
                    'visible': output['visible'][:-1] + [[visible_text, shared.processing_message]],
                    'internal': output['internal'][:-1] + [[text, '']],
                    'metadata': output['metadata']
                }
        elif _continue:
            last_reply = [output['internal'][-1][1], output['visible'][-1][1]]
            if loading_message:
                yield {
                    'visible': output['visible'][:-1] + [[visible_text, last_reply[1] + '...']],
                    'internal': output['internal'],
                    'metadata': output['metadata']
                }

    row_idx = len(output['internal']) - 1

    # Collect image attachments for multimodal generation from the entire history
    all_image_attachments = []
    if 'metadata' in output:
        for i in range(len(output['internal'])):
            user_key = f"user_{i}"
            if user_key in output['metadata'] and "attachments" in output['metadata'][user_key]:
                for attachment in output['metadata'][user_key]["attachments"]:
                    if attachment.get("type") == "image":
                        all_image_attachments.append(attachment)

    # Add all collected image attachments to state for the generation
    if all_image_attachments:
        state['image_attachments'] = all_image_attachments

    # Generate the prompt
    kwargs = {
        '_continue': _continue,
        'history': output if _continue else {
            k: (v[:-1] if k in ['internal', 'visible'] else v)
            for k, v in output.items()
        }
    }

    prompt = apply_extensions('custom_generate_chat_prompt', text, state, **kwargs)
    if prompt is None:
        prompt = generate_chat_prompt(text, state, **kwargs)

    # Add timestamp for assistant's response at the start of generation
    update_message_metadata(output['metadata'], "assistant", row_idx, timestamp=get_current_timestamp(), model_name=shared.model_name)

    # Detect if the template appended a thinking start tag to the prompt
    thinking_prefix = None
    if not _continue:
        stripped_prompt = prompt.rstrip('\n')
        for start_tag, end_tag, content_tag in THINKING_FORMATS:
            if start_tag is not None and stripped_prompt.endswith(start_tag):
                thinking_prefix = start_tag
                break

    # When tools are active, buffer streaming output during potential tool
    # call generation to prevent raw markup from leaking into the display.
    _check_tool_markers = bool(state.get('tools'))
    _last_visible_before_tool_buffer = None
    if _check_tool_markers:
        from modules.tool_parsing import streaming_tool_buffer_check, detect_tool_call_format
        _tool_names = [t['function']['name'] for t in state['tools'] if 'function' in t and 'name' in t['function']]
        _template_str = state.get('instruction_template_str', '') if state.get('mode') == 'instruct' else state.get('chat_template_str', '')
        _, _streaming_markers, _check_bare_names = detect_tool_call_format(_template_str)

    # Generate
    reply = None
    for j, reply in enumerate(generate_reply(prompt, state, stopping_strings=stopping_strings, is_chat=True, for_ui=for_ui)):
        # Prepend thinking tag if the template appended it to the prompt
        if thinking_prefix:
            reply = thinking_prefix + reply

        # Extract the reply
        if state['mode'] in ['chat', 'chat-instruct']:
            if not _continue:
                reply = reply.lstrip()
                if reply.startswith(state['name2'] + ':'):
                    reply = reply[len(state['name2'] + ':'):]
                elif reply.startswith(state['name1'] + ':'):
                    reply = reply[len(state['name1'] + ':'):]

            visible_reply = re.sub("(<USER>|<user>|{{user}})", state['name1'], reply)
        else:
            visible_reply = reply

        visible_reply = html.escape(visible_reply)

        if shared.stop_everything:
            if not state.get('_skip_output_extensions'):
                output['visible'][-1][1] = apply_extensions('output', output['visible'][-1][1], state, is_chat=True)
            yield output
            return

        if _continue:
            output['internal'][-1] = [text, last_reply[0] + reply]
            output['visible'][-1] = [visible_text, last_reply[1] + visible_reply]
        elif not (j == 0 and visible_reply.strip() == ''):
            output['internal'][-1] = [text, reply.lstrip(' ')]
            output['visible'][-1] = [visible_text, visible_reply.lstrip(' ')]

        # Keep version metadata in sync during streaming (for regeneration)
        if regenerate and not state.get('_tool_turn'):
            row_idx = len(output['internal']) - 1
            key = f"assistant_{row_idx}"
            current_idx = output['metadata'][key]['current_version_index']
            output['metadata'][key]['versions'][current_idx].update({
                'content': output['internal'][row_idx][1],
                'visible_content': output['visible'][row_idx][1]
            })

        if is_stream:
            if _check_tool_markers:
                if streaming_tool_buffer_check(output['internal'][-1][1], markers=_streaming_markers, tool_names=_tool_names, check_bare_names=_check_bare_names):
                    continue

                _last_visible_before_tool_buffer = output['visible'][-1][1]

            yield output

    if _continue:
        # Reprocess the entire internal text for extensions (like translation).
        # Skip entirely when the visible text contains <tool_call> markers,
        # since those only exist in visible (internal is cleared after each tool
        # execution) and rebuilding from internal would destroy them. Output
        # extensions also can't handle the raw <tool_call> markup safely.
        if '<tool_call>' not in output['visible'][-1][1]:
            full_internal = output['internal'][-1][1]
            if state['mode'] in ['chat', 'chat-instruct']:
                full_visible = re.sub("(<USER>|<user>|{{user}})", state['name1'], full_internal)
            else:
                full_visible = full_internal

            full_visible = html.escape(full_visible)
            if not state.get('_skip_output_extensions'):
                output['visible'][-1][1] = apply_extensions('output', full_visible, state, is_chat=True)
    else:
        if not state.get('_skip_output_extensions'):
            output['visible'][-1][1] = apply_extensions('output', output['visible'][-1][1], state, is_chat=True)

    # Final sync for version metadata (in case streaming was disabled)
    if regenerate and not state.get('_tool_turn'):
        row_idx = len(output['internal']) - 1
        key = f"assistant_{row_idx}"
        current_idx = output['metadata'][key]['current_version_index']
        output['metadata'][key]['versions'][current_idx].update({
            'content': output['internal'][row_idx][1],
            'visible_content': output['visible'][row_idx][1]
        })

    # When tool markers were detected during streaming, restore the last
    # visible text from before buffering started so raw markup doesn't flash
    # in the UI. The internal text is left intact so the caller can still
    # parse tool calls from it.
    if is_stream and _check_tool_markers and streaming_tool_buffer_check(output['internal'][-1][1], markers=_streaming_markers, tool_names=_tool_names, check_bare_names=_check_bare_names):
        output['visible'][-1][1] = _last_visible_before_tool_buffer or ''

    yield output
def impersonate_wrapper(textbox, state):
    """Generate a user-voice reply ('impersonate'), streamed into the textbox.

    Yields (textbox, static chat HTML) tuples as generation progresses.
    """
    seed_text = textbox['text']
    static_output = chat_html_wrapper(state['history'], state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'])

    prompt = generate_chat_prompt('', state, impersonate=True)
    stopping_strings = get_stopping_strings(state)

    # Show the typing indicator before the first token arrives
    textbox['text'] = seed_text + '...'
    yield textbox, static_output

    reply = None
    for reply in generate_reply(prompt + seed_text, state, stopping_strings=stopping_strings, is_chat=True):
        textbox['text'] = (seed_text + reply).lstrip(' ')
        yield textbox, static_output

        if shared.stop_everything:
            return
def generate_chat_reply(text, state, regenerate=False, _continue=False, loading_message=True, for_ui=False):
    """Generate a chat reply, yielding successive history snapshots.

    On regenerate/continue the new input text is ignored (reset to ''). If
    there is nothing to regenerate or continue (only a greeting, or an empty
    history), the current history is yielded once unchanged.

    Yields:
        The history dict after each streamed update from chatbot_wrapper.
    """
    history = state['history']
    if regenerate or _continue:
        text = ''
        if (len(history['visible']) == 1 and not history['visible'][0][0]) or len(history['internal']) == 0:
            yield history
            return

    for history in chatbot_wrapper(text, state, regenerate=regenerate, _continue=_continue, loading_message=loading_message, for_ui=for_ui):
        yield history
def character_is_loaded(state, raise_exception=False):
    """Return True when a character is loaded (or the mode needs none).

    In 'chat'/'chat-instruct' modes an empty bot name means no character is
    loaded: an error is logged, ValueError is optionally raised, and False
    is returned. Instruct mode never requires a character.
    """
    needs_character = state['mode'] in ['chat', 'chat-instruct']
    if needs_character and state['name2'] == '':
        logger.error('It looks like no character is loaded. Please load one under Parameters > Character.')
        if raise_exception:
            raise ValueError

        return False

    return True
def generate_chat_reply_wrapper(text, state, regenerate=False, _continue=False):
    """Top-level UI entry point for chat generation, including the tool loop.

    Streams (chat HTML, history) pairs. Handles the 'start_with' feature and
    regenerate/continue flags. When tools are selected, runs an outer loop
    that detects tool calls in the model output, executes them, shows their
    results as <tool_call> accordions, and regenerates until the model stops
    calling tools.

    Args:
        text: New user input (ignored on regenerate/continue).
        state: UI/generation state dict; mutated in place ('history',
            'tools', and transient '_tool_turn'/'_skip_output_extensions'
            flags).
        regenerate: Regenerate the last assistant reply.
        _continue: Continue the last assistant reply.

    Yields:
        (chat HTML string, history dict) tuples.
    """
    if not character_is_loaded(state):
        return

    if state['start_with'] != '' and not _continue:
        if regenerate:
            text, state['history'] = remove_last_message(state['history'])
            regenerate = False

        _continue = True
        send_dummy_message(text, state)
        send_dummy_reply(state['start_with'], state)

    # On regenerate, clear old tool_sequence metadata so it gets rebuilt.
    # Save it first so it can be stored per-version below.
    # This must happen after the start_with logic above, which may remove
    # and re-add messages, changing which row we operate on.
    _old_tool_sequence = None
    if regenerate:
        history = state['history']
        meta = history.get('metadata', {})
        row_idx = len(history['internal']) - 1
        if row_idx >= 0:
            _old_tool_sequence = meta.get(f'assistant_{row_idx}', {}).pop('tool_sequence', None)

    # Load tools if any are selected
    selected = state.get('selected_tools', [])
    parse_tool_call = None
    _tool_parsers = None
    if selected:
        from modules.tool_use import load_tools, execute_tool
        from modules.tool_parsing import parse_tool_call, get_tool_call_id, detect_tool_call_format

    if selected:
        tool_defs, tool_executors = load_tools(selected)
        state['tools'] = tool_defs
        tool_func_names = [t['function']['name'] for t in tool_defs]
        _template_str = state.get('instruction_template_str', '') if state.get('mode') == 'instruct' else state.get('chat_template_str', '')
        _tool_parsers, _, _ = detect_tool_call_format(_template_str)
    else:
        tool_func_names = None

    visible_prefix = []  # Accumulated tool call summaries + results
    last_save_time = time.monotonic()
    save_interval = 8
    _tool_turn = 0

    while True:
        history = state['history']

        # Turn 0: use original flags; turns 2+: regenerate into the same entry.
        # _tool_turn tells chatbot_wrapper to skip version creation/sync so
        # that intermediate tool-loop regenerations don't pollute swipe history.
        if _tool_turn > 0:
            state['_tool_turn'] = True
            state['_skip_output_extensions'] = True

        regen = regenerate if _tool_turn == 0 else True
        cont = _continue if _tool_turn == 0 else False
        cur_text = text if _tool_turn == 0 else ''
        for i, history in enumerate(generate_chat_reply(cur_text, state, regen, cont, loading_message=True, for_ui=True)):
            # Prepend accumulated tool output to visible reply for display.
            # Save and restore the original to prevent the markers from leaking
            # back into chatbot_wrapper's shared output object, which would cause
            # duplication on the next yield.
            _original_visible = history['visible'][-1][1] if visible_prefix else None
            if visible_prefix:
                history['visible'][-1][1] = '\n\n'.join(visible_prefix + [_original_visible])

            yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'], last_message_only=(i > 0)), history

            if visible_prefix:
                history['visible'][-1][1] = _original_visible

            if i == 0:
                # Save old tool_sequence into version 0 (created by chatbot_wrapper
                # on the first yield). Only needed on the first regeneration when
                # versions didn't previously exist.
                if _old_tool_sequence is not None and _tool_turn == 0:
                    _ri = len(history['internal']) - 1
                    _versions = history.get('metadata', {}).get(f'assistant_{_ri}', {}).get('versions', [])
                    if _versions and 'tool_sequence' not in _versions[0]:
                        _versions[0]['tool_sequence'] = _old_tool_sequence

                    _old_tool_sequence = None

                time.sleep(0.125)

            current_time = time.monotonic()
            if i == 0 or (current_time - last_save_time) >= save_interval:
                save_history(history, state['unique_id'], state['character_menu'], state['mode'])
                last_save_time = current_time

            # Early stop on tool call detection
            if tool_func_names and parse_tool_call(history['internal'][-1][1], tool_func_names, parsers=_tool_parsers):
                break

        # Save the model's visible output before re-applying visible_prefix,
        # so we can extract thinking content from just this turn's output.
        _model_visible = history['visible'][-1][1]

        # Recover visible_prefix from existing visible text (e.g. on Continue
        # after a previous session had tool calls). Extract all <tool_call>
        # blocks and any text between them (thinking blocks, intermediate text).
        if tool_func_names and not visible_prefix and _model_visible:
            tc_matches = list(re.finditer(r'<tool_call>.*?</tool_call>', _model_visible, re.DOTALL))
            if tc_matches:
                prefix_end = tc_matches[-1].end()
                prefix = _model_visible[:prefix_end].strip()
                if prefix:
                    visible_prefix = [prefix]
                    _model_visible = _model_visible[prefix_end:].strip()

        # Re-apply visible prefix to the final state after streaming completes.
        # This is safe because we're no longer sharing the object with chatbot_wrapper.
        if visible_prefix:
            history['visible'][-1][1] = '\n\n'.join(visible_prefix + [_model_visible])
            if tool_func_names:
                save_history(history, state['unique_id'], state['character_menu'], state['mode'])

        # Check for tool calls
        if not tool_func_names or shared.stop_everything:
            break

        answer = history['internal'][-1][1]
        parsed_calls, content_prefix = parse_tool_call(answer, tool_func_names, return_prefix=True, parsers=_tool_parsers) if answer else (None, '')
        if not parsed_calls:
            break  # No tool calls — done

        # --- Process tool calls ---
        row_idx = len(history['internal']) - 1
        meta = history.get('metadata', {})
        seq = meta.setdefault(f'assistant_{row_idx}', {}).setdefault('tool_sequence', [])

        def _render():
            # Full chat HTML for the current history (helper for repeated yields)
            return chat_html_wrapper(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'])

        # Serialize tool calls and build display headers in one pass
        serialized = []
        tc_headers = []
        for tc in parsed_calls:
            tc['id'] = get_tool_call_id()
            fn_name = tc['function']['name']
            fn_args = tc['function'].get('arguments', {})
            serialized.append({
                'id': tc['id'],
                'type': 'function',
                'function': {
                    'name': fn_name,
                    'arguments': json.dumps(fn_args) if isinstance(fn_args, dict) else fn_args
                }
            })
            if isinstance(fn_args, dict) and fn_args:
                args_summary = ', '.join(f'{k}={json.dumps(v, ensure_ascii=False)}' for k, v in fn_args.items())
            elif isinstance(fn_args, dict):
                args_summary = ''
            else:
                args_summary = str(fn_args)

            tc_headers.append(f'{fn_name}({args_summary})')

        seq_entry = {'tool_calls': serialized}
        if content_prefix.strip():
            # Strip GPT-OSS channel tokens so they don't get double-wrapped
            # by the template (which adds its own channel markup).
            clean = content_prefix.strip()
            if '<|channel|>' in clean and '<|message|>' in clean:
                inner = clean.split('<|message|>', 1)[1]
                if '<|end|>' in inner:
                    inner = inner.split('<|end|>', 1)[0]

                clean = inner.strip()

            if clean:
                seq_entry['content'] = clean

        seq.append(seq_entry)

        # Clear internal (raw tool markup)
        history['internal'][-1][1] = ''

        # Preserve thinking block and intermediate text from this turn.
        # content_prefix is the raw text before tool call syntax (returned
        # by parse_tool_call); HTML-escape it and extract thinking to get
        # the content the user should see.
        content_text = html.escape(content_prefix)
        thinking_content, intermediate = extract_thinking_block(content_text)
        if thinking_content:
            visible_prefix.append(f'<think>\n{thinking_content}\n</think>')
        if intermediate and intermediate.strip():
            visible_prefix.append(intermediate.strip())

        # Show placeholder accordions with "..." before execution starts
        # (tool calls may be slow, e.g. web search).
        pending_placeholders = [f'<tool_call>{h}\n...\n</tool_call>' for h in tc_headers]
        history['visible'][-1][1] = '\n\n'.join(visible_prefix + pending_placeholders)
        yield _render(), history

        # Execute tools, store results, and replace placeholders with real results
        for i, tc in enumerate(parsed_calls):
            # Check for stop request before each tool execution
            if shared.stop_everything:
                for j in range(i, len(parsed_calls)):
                    seq.append({'role': 'tool', 'content': 'Tool execution was cancelled by the user.', 'tool_call_id': parsed_calls[j]['id']})
                    pending_placeholders[j] = f'<tool_call>{tc_headers[j]}\nCancelled\n</tool_call>'

                history['visible'][-1][1] = '\n\n'.join(visible_prefix + pending_placeholders)
                yield _render(), history
                break

            fn_name = tc['function']['name']
            fn_args = tc['function'].get('arguments', {})
            result = execute_tool(fn_name, fn_args, tool_executors)
            seq.append({'role': 'tool', 'content': result, 'tool_call_id': tc['id']})

            try:
                pretty_result = json.dumps(json.loads(result), indent=2, ensure_ascii=False)
            except (json.JSONDecodeError, TypeError):
                pretty_result = result

            # Replace the placeholder with the real result
            pending_placeholders[i] = f'<tool_call>{tc_headers[i]}\n{pretty_result}\n</tool_call>'
            history['visible'][-1][1] = '\n\n'.join(visible_prefix + pending_placeholders)
            yield _render(), history

        # Move completed tool calls into visible_prefix for next turns
        visible_prefix.extend(pending_placeholders)
        history['visible'][-1][1] = '\n\n'.join(visible_prefix)
        save_history(history, state['unique_id'], state['character_menu'], state['mode'])
        state['history'] = history
        _tool_turn += 1

    state.pop('_tool_turn', None)

    # If output extensions were deferred during tool turns, apply them now
    # to the final model response only (not to tool call markers).
    if state.pop('_skip_output_extensions', None):
        _model_visible = apply_extensions('output', _model_visible, state, is_chat=True)
        if visible_prefix:
            history['visible'][-1][1] = '\n\n'.join(visible_prefix + [_model_visible])
        else:
            history['visible'][-1][1] = _model_visible

        yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu']), history
        state['history'] = history

    # Sync version metadata so swipes show the full visible (with tool prefix)
    if visible_prefix and history.get('metadata'):
        row_idx = len(history['internal']) - 1
        key = f"assistant_{row_idx}"
        meta_entry = history['metadata'].get(key, {})
        if 'versions' in meta_entry and 'current_version_index' in meta_entry:
            current_idx = meta_entry['current_version_index']
            if current_idx < len(meta_entry['versions']):
                version_update = {
                    'content': history['internal'][row_idx][1],
                    'visible_content': history['visible'][row_idx][1]
                }
                ts = meta_entry.get('tool_sequence')
                if ts is not None:
                    version_update['tool_sequence'] = ts

                meta_entry['versions'][current_idx].update(version_update)
                save_history(history, state['unique_id'], state['character_menu'], state['mode'])
def remove_last_message(history):
    """Remove the last exchange from *history* and return its user text.

    The greeting row (marked '<|BEGIN-VISIBLE-CHAT|>') is never removed.

    Returns:
        (unescaped user message of the removed row, mutated history dict)
    """
    if 'metadata' not in history:
        history['metadata'] = {}

    can_remove = len(history['visible']) > 0 and history['internal'][-1][0] != '<|BEGIN-VISIBLE-CHAT|>'
    if not can_remove:
        return '', history

    row_idx = len(history['internal']) - 1
    last_visible = history['visible'].pop()
    history['internal'].pop()

    # Drop per-row metadata for both sides of the removed exchange
    for key in (f"user_{row_idx}", f"assistant_{row_idx}"):
        history['metadata'].pop(key, None)

    return html.unescape(last_visible[0]), history
def send_dummy_message(text, state):
    """Append a user message to the history without generating a reply.

    Args:
        text: Message text, or a dict with a 'text' key.
        state: UI state dict whose 'history' is mutated in place.

    Returns:
        The mutated history dict.
    """
    history = state['history']

    # Handle both dict and string inputs
    if isinstance(text, dict):
        text = text['text']

    # Initialize metadata if not present
    if 'metadata' not in history:
        history['metadata'] = {}

    row_idx = len(history['internal'])
    history['visible'].append([html.escape(text), ''])
    history['internal'].append([apply_extensions('input', text, state, is_chat=True), ''])
    update_message_metadata(history['metadata'], "user", row_idx, timestamp=get_current_timestamp())
    return history
def send_dummy_reply(text, state):
    """Append an assistant message to the history without calling the model.

    If the last row already has a reply, a fresh empty row is appended first
    so the dummy reply never overwrites an existing one.

    Args:
        text: Reply text, or a dict with a 'text' key.
        state: UI state dict whose 'history' is mutated in place.

    Returns:
        The mutated history dict.
    """
    history = state['history']

    # Handle both dict and string inputs
    if isinstance(text, dict):
        text = text['text']

    # Initialize metadata if not present
    if 'metadata' not in history:
        history['metadata'] = {}

    if len(history['visible']) > 0 and not history['visible'][-1][1] == '':
        row_idx = len(history['internal'])
        history['visible'].append(['', ''])
        history['internal'].append(['', ''])
        # We don't need to add system metadata

    row_idx = len(history['internal']) - 1
    history['visible'][-1][1] = html.escape(text)
    history['internal'][-1][1] = apply_extensions('input', text, state, is_chat=True)
    update_message_metadata(history['metadata'], "assistant", row_idx, timestamp=get_current_timestamp())
    return history
def redraw_html(history, name1, name2, mode, style, character, reset_cache=False):
    """Rebuild the chat HTML for *history*; thin wrapper over chat_html_wrapper."""
    return chat_html_wrapper(history, name1, name2, mode, style, character, reset_cache=reset_cache)
def start_new_chat(state, unique_id=None):
    """Create, persist, and return a fresh history for the current mode.

    Outside instruct mode, the character's greeting (with name placeholders
    substituted) is inserted as the first assistant message.

    Args:
        state: UI state dict (mode, greeting, names, character).
        unique_id: Optional chat id; defaults to a timestamp-based id.

    Returns:
        The new history dict ({'internal', 'visible', 'metadata'}).
    """
    mode = state['mode']

    # Initialize with empty metadata dictionary
    history = {'internal': [], 'visible': [], 'metadata': {}}

    if mode != 'instruct':
        greeting = replace_character_names(state['greeting'], state['name1'], state['name2'])
        if greeting != '':
            history['internal'] += [['<|BEGIN-VISIBLE-CHAT|>', greeting]]
            history['visible'] += [['', apply_extensions('output', html.escape(greeting), state, is_chat=True)]]

            # Add timestamp for assistant's greeting
            update_message_metadata(history['metadata'], "assistant", 0, timestamp=get_current_timestamp())

    if unique_id is None:
        unique_id = datetime.now().strftime('%Y%m%d-%H-%M-%S')

    save_history(history, unique_id, state['character_menu'], state['mode'])
    return history
def get_history_file_path(unique_id, character, mode):
    """Return the JSON log path for a chat.

    Instruct logs live in logs/instruct/; chat logs are grouped per character
    in logs/chat/<character>/.
    """
    if mode == 'instruct':
        p = shared.user_data_dir / 'logs' / 'instruct' / f'{unique_id}.json'
    else:
        p = shared.user_data_dir / 'logs' / 'chat' / character / f'{unique_id}.json'

    return p
def save_history(history, unique_id, character, mode):
    """Persist *history* as pretty-printed JSON under the chat log path.

    No-ops in multi-user mode and for incognito chats. Writes are serialized
    through a module-level lock to avoid interleaved partial files.
    """
    if shared.args.multi_user:
        return

    if unique_id and unique_id.startswith('incognito-'):
        return

    p = get_history_file_path(unique_id, character, mode)
    if not p.parent.is_dir():
        p.parent.mkdir(parents=True)

    with _history_file_lock:
        with open(p, 'w', encoding='utf-8') as f:
            f.write(json.dumps(history, indent=4, ensure_ascii=False))
def rename_history(old_id, new_id, character, mode):
    """Rename a chat log file, refusing moves across directories or overwrites.

    No-ops in multi-user mode. Errors are logged rather than raised.
    """
    if shared.args.multi_user:
        return

    old_p = get_history_file_path(old_id, character, mode)
    new_p = get_history_file_path(new_id, character, mode)
    if new_p.parent != old_p.parent:
        # A differing parent means the new id tried to escape the chat folder
        logger.error(f"The following path is not allowed: \"{new_p}\".")
    elif new_p == old_p:
        logger.info("The provided path is identical to the old one.")
    elif new_p.exists():
        logger.error(f"The new path already exists and will not be overwritten: \"{new_p}\".")
    else:
        logger.info(f"Renaming \"{old_p}\" to \"{new_p}\"")
        old_p.rename(new_p)
def get_paths(state):
    """Yield the JSON history files for the current mode/character.

    Also migrates legacy single-file logs ('<char>_persistent.json' /
    'persistent_<char>.json') into the per-chat layout before globbing.

    Returns:
        A generator of Path objects for all matching history files.
    """
    if state['mode'] == 'instruct':
        return (shared.user_data_dir / 'logs' / 'instruct').glob('*.json')
    else:
        character = state['character_menu']

        # Handle obsolete filenames and paths
        old_p = shared.user_data_dir / 'logs' / f'{character}_persistent.json'
        new_p = shared.user_data_dir / 'logs' / f'persistent_{character}.json'
        if old_p.exists():
            logger.warning(f"Renaming \"{old_p}\" to \"{new_p}\"")
            old_p.rename(new_p)
        if new_p.exists():
            unique_id = datetime.now().strftime('%Y%m%d-%H-%M-%S')
            p = get_history_file_path(unique_id, character, state['mode'])
            logger.warning(f"Moving \"{new_p}\" to \"{p}\"")
            p.parent.mkdir(exist_ok=True)
            new_p.rename(p)

        return (shared.user_data_dir / 'logs' / 'chat' / character).glob('*.json')
def find_all_histories(state):
    """Return history file stems for the current mode/character, newest first."""
    if shared.args.multi_user:
        return ['']

    paths = get_paths(state)
    histories = sorted(paths, key=lambda x: x.stat().st_mtime, reverse=True)
    return [path.stem for path in histories]
def find_all_histories_with_first_prompts(state):
    """List histories as (display label, file stem) pairs, newest first.

    The label is the first user prompt (truncated to 30 chars) for
    timestamp-named files, or the filename itself otherwise. Honors the
    'search_chat' filter by skipping files that don't contain the query.
    """
    if shared.args.multi_user:
        return []

    paths = get_paths(state)
    histories = sorted(paths, key=lambda x: x.stat().st_mtime, reverse=True)

    result = []
    for i, path in enumerate(histories):
        filename = path.stem
        file_content = ""
        with open(path, 'r', encoding='utf-8') as f:
            file_content = f.read()

        if state['search_chat'] and state['search_chat'] not in file_content:
            continue

        data = json.loads(file_content)
        if re.match(r'^[0-9]{8}-[0-9]{2}-[0-9]{2}-[0-9]{2}$', filename):
            first_prompt = ""
            if data and 'visible' in data and len(data['visible']) > 0:
                if len(data['internal']) > 0 and data['internal'][0][0] == '<|BEGIN-VISIBLE-CHAT|>':
                    # Row 0 is the greeting; the first real prompt is row 1
                    if len(data['visible']) > 1:
                        first_prompt = html.unescape(data['visible'][1][0])
                    elif i == 0:
                        first_prompt = "New chat"
                else:
                    first_prompt = html.unescape(data['visible'][0][0])
            elif i == 0:
                first_prompt = "New chat"
        else:
            first_prompt = filename

        first_prompt = first_prompt.strip()

        # Truncate the first prompt if it's longer than 30 characters
        if len(first_prompt) > 30:
            first_prompt = first_prompt[:30 - 3] + '...'

        result.append((first_prompt, filename))

    return result
def load_latest_history(state):
    """Load the last visited chat for this character/mode, or start a new one.

    Falls back to the most recently modified history when no "last visited"
    record exists (or it points to a deleted chat).

    Returns:
        (history dict, unique_id or None when a fresh chat was created)
    """
    if shared.args.multi_user:
        return start_new_chat(state), None

    histories = find_all_histories(state)
    if len(histories) > 0:
        # Try to load the last visited chat for this character/mode
        chat_state = load_last_chat_state()
        key = get_chat_state_key(state['character_menu'], state['mode'])
        last_chat_id = chat_state.get("last_chats", {}).get(key)

        # If we have a stored last chat and it still exists, use it
        if last_chat_id and last_chat_id in histories:
            unique_id = last_chat_id
        else:
            # Fall back to most recent (current behavior)
            unique_id = histories[0]

        history = load_history(unique_id, state['character_menu'], state['mode'])
        return history, unique_id
    else:
        return start_new_chat(state), None
def load_history_after_deletion(state, idx):
    """After deleting chat *idx*, load a neighbor (or a new chat) for the UI.

    Args:
        state: UI state dict.
        idx: Index of the deleted chat in the history list; clamped to range.

    Returns:
        (history dict, gr.update refreshing the history radio choices)
    """
    import gradio as gr
    if shared.args.multi_user:
        return start_new_chat(state)

    histories = find_all_histories_with_first_prompts(state)
    idx = min(int(idx), len(histories) - 1)
    idx = max(0, idx)

    if len(histories) > 0:
        history = load_history(histories[idx][1], state['character_menu'], state['mode'])
    else:
        history = start_new_chat(state)
        histories = find_all_histories_with_first_prompts(state)

    return history, gr.update(choices=histories, value=histories[idx][1])
def update_character_menu_after_deletion(idx):
    """Refresh the character dropdown after a deletion, selecting a neighbor.

    Args:
        idx: Index of the deleted character; clamped into the valid range.
    """
    import gradio as gr
    characters = utils.get_available_characters()
    idx = min(int(idx), len(characters) - 1)
    idx = max(0, idx)
    return gr.update(choices=characters, value=characters[idx])
def get_chat_state_key(character, mode):
    """Build the key used to record the last visited chat.

    Instruct mode shares a single slot; chat modes get one slot per
    character.
    """
    return 'instruct' if mode == 'instruct' else f"chat_{character}"
def load_last_chat_state():
    """Read logs/chat_state.json; return a default structure on any failure.

    Returns:
        Dict of the form {"last_chats": {key: unique_id, ...}}.
    """
    state_file = shared.user_data_dir / 'logs' / 'chat_state.json'
    if state_file.exists():
        try:
            with open(state_file, 'r', encoding='utf-8') as f:
                return json.loads(f.read())
        except Exception:
            # Corrupt/unreadable state file — fall through to the default
            pass

    return {"last_chats": {}}
def save_last_chat_state(character, mode, unique_id):
    """Record *unique_id* as the last visited chat for this character/mode.

    No-ops in multi-user mode and for incognito chats.
    """
    if shared.args.multi_user:
        return

    if unique_id and unique_id.startswith('incognito-'):
        return

    state = load_last_chat_state()
    key = get_chat_state_key(character, mode)
    state["last_chats"][key] = unique_id

    state_file = shared.user_data_dir / 'logs' / 'chat_state.json'
    state_file.parent.mkdir(exist_ok=True)
    with open(state_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(state, indent=2))
def load_history(unique_id, character, mode):
    """Load a chat history from disk, upgrading legacy formats in place.

    Supports both the current {'internal', 'visible'} layout and the legacy
    {'data', 'data_visible'} layout, and backfills an empty 'metadata' dict
    with placeholder timestamps for pre-metadata histories.

    Args:
        unique_id: Chat id (file stem).
        character: Character name (selects the log subfolder in chat modes).
        mode: 'instruct' or a chat mode.

    Returns:
        A history dict; empty ({'internal': [], 'visible': [], 'metadata': {}})
        when the file does not exist.
    """
    p = get_history_file_path(unique_id, character, mode)
    if not p.exists():
        return {'internal': [], 'visible': [], 'metadata': {}}

    # Use a context manager so the file handle is closed deterministically
    # (the previous bare open() leaked the handle until GC).
    with open(p, 'rb') as handle:
        f = json.loads(handle.read())

    if 'internal' in f and 'visible' in f:
        history = f
    else:
        # Legacy layout used by very old versions
        history = {
            'internal': f['data'],
            'visible': f['data_visible']
        }

    # Add metadata if it doesn't exist
    if 'metadata' not in history:
        history['metadata'] = {}

        # Add placeholder timestamps for existing messages
        for i, (user_msg, asst_msg) in enumerate(history['internal']):
            if user_msg and user_msg != '<|BEGIN-VISIBLE-CHAT|>':
                update_message_metadata(history['metadata'], "user", i, timestamp="")
            if asst_msg:
                update_message_metadata(history['metadata'], "assistant", i, timestamp="")

    return history
def load_history_json(file, history):
    """Parse an uploaded JSON history; return the old *history* on failure.

    Accepts both the current {'internal', 'visible'} layout and the legacy
    {'data', 'data_visible'} layout, backfilling empty metadata timestamps.

    Args:
        file: Raw uploaded file bytes (UTF-8 JSON).
        history: Fallback history returned when parsing fails.
    """
    try:
        file = file.decode('utf-8')
        f = json.loads(file)
        if 'internal' in f and 'visible' in f:
            history = f
        else:
            # Legacy layout used by very old versions
            history = {
                'internal': f['data'],
                'visible': f['data_visible']
            }

        # Add metadata if it doesn't exist
        if 'metadata' not in history:
            history['metadata'] = {}

            # Add placeholder timestamps
            for i, (user_msg, asst_msg) in enumerate(history['internal']):
                if user_msg and user_msg != '<|BEGIN-VISIBLE-CHAT|>':
                    update_message_metadata(history['metadata'], "user", i, timestamp="")
                if asst_msg:
                    update_message_metadata(history['metadata'], "assistant", i, timestamp="")

        return history
    except Exception:
        # Best-effort import: keep the current history on any parse error
        return history
def delete_history(unique_id, character, mode):
    """Delete the log file for the given chat id/character/mode."""
    p = get_history_file_path(unique_id, character, mode)
    delete_file(p)
def replace_character_names(text, name1, name2):
    """Substitute user/bot placeholder tokens in *text* with actual names.

    Handles both template-style ({{user}}/{{char}}) and legacy
    (<USER>/<BOT>) placeholders.
    """
    substitutions = (
        ('{{user}}', name1),
        ('{{char}}', name2),
        ('<USER>', name1),
        ('<BOT>', name2),
    )
    for placeholder, name in substitutions:
        text = text.replace(placeholder, name)

    return text
def generate_pfp_cache(character):
    """Cache the character's profile picture and thumbnail on disk.

    Looks for <character>.{png,jpg,jpeg} under user_data_dir/characters.

    Returns:
        Path string of the cached thumbnail, or None if no image was found.
    """
    cache_folder = Path(shared.args.disk_cache_dir)
    if not cache_folder.exists():
        cache_folder.mkdir()

    for path in [shared.user_data_dir / 'characters' / f"{character}.{extension}" for extension in ['png', 'jpg', 'jpeg']]:
        if path.exists():
            original_img = Image.open(path)

            # Define file paths
            pfp_path = Path(f'{cache_folder}/pfp_character.png')
            thumb_path = Path(f'{cache_folder}/pfp_character_thumb.png')

            # Save main picture and thumbnail
            original_img.save(pfp_path, format='PNG')
            thumb = make_thumbnail(original_img)
            thumb.save(thumb_path, format='PNG')

            # Return the path to the thumbnail, not the in-memory PIL Image object.
            return str(thumb_path)

    return None
def load_character(character, name1, name2):
    """Load a character card (yml/yaml/json) and refresh its picture cache.

    Searches user_data_dir/characters for the character under the supported
    extensions, regenerates the picture cache, and extracts the bot/user
    names, greeting, and context (supporting both native and Pygmalion-style
    card fields).

    Args:
        character: Character file stem to load.
        name1: Current user name (may be overridden by the card).
        name2: Current bot name (may be overridden by the card).

    Returns:
        (name1, name2, picture_path_or_None, greeting, context)

    Raises:
        ValueError: If no character file is found.
    """
    context = greeting = ""
    greeting_field = 'greeting'
    picture = None

    filepath = None
    for extension in ["yml", "yaml", "json"]:
        filepath = shared.user_data_dir / 'characters' / f'{character}.{extension}'
        if filepath.exists():
            break

    if filepath is None or not filepath.exists():
        logger.error(f"Could not find the character \"{character}\" inside {shared.user_data_dir}/characters. No character has been loaded.")
        raise ValueError

    # Read via a context manager so the handle is closed deterministically
    # (the previous bare open() leaked the handle until GC).
    with open(filepath, 'r', encoding='utf-8') as f:
        file_contents = f.read()

    data = json.loads(file_contents) if extension == "json" else yaml.safe_load(file_contents)
    cache_folder = Path(shared.args.disk_cache_dir)

    # Invalidate the old picture cache before regenerating it
    for path in [Path(f"{cache_folder}/pfp_character.png"), Path(f"{cache_folder}/pfp_character_thumb.png")]:
        if path.exists():
            path.unlink()

    picture = generate_pfp_cache(character)

    # Finding the bot's name
    for k in ['name', 'bot', '<|bot|>', 'char_name']:
        if k in data and data[k] != '':
            name2 = data[k]
            break

    # Find the user name (if any)
    for k in ['your_name', 'user', '<|user|>']:
        if k in data and data[k] != '':
            name1 = data[k]
            break

    if 'context' in data:
        context = data['context'].strip()
    elif "char_persona" in data:
        # Pygmalion-style card: assemble the context and switch greeting key
        context = build_pygmalion_style_context(data)
        greeting_field = 'char_greeting'

    greeting = data.get(greeting_field, greeting)
    return name1, name2, picture, greeting, context
def restore_character_for_ui(state):
    """Reload the selected character into *state* for the UI.

    Falls back to clearing the character (defaults) when none is selected or
    loading fails.

    Returns:
        (state, name2, context, greeting, picture)
    """
    if state['character_menu'] and state['character_menu'] != 'None':
        try:
            name1, name2, picture, greeting, context = load_character(state['character_menu'], state['name1'], state['name2'])
            state['name2'] = name2
            state['greeting'] = greeting
            state['context'] = context
            state['character_picture'] = picture  # This triggers cache update via generate_pfp_cache
            return state, name2, context, greeting, picture
        except Exception as e:
            logger.error(f"Failed to reset character '{state['character_menu']}': {e}")
            return clear_character_for_ui(state)
    else:
        return clear_character_for_ui(state)
def clear_character_for_ui(state):
    """Reset *state* to the default (no character) values and clear caches.

    Returns:
        (state, name2, context, greeting, None) — mirroring
        restore_character_for_ui's output shape.
    """
    state['name2'] = shared.settings['name2']
    state['context'] = shared.settings['context']
    state['greeting'] = shared.settings['greeting']
    state['character_picture'] = None

    # Clear the cache files
    cache_folder = Path(shared.args.disk_cache_dir)
    for cache_file in ['pfp_character.png', 'pfp_character_thumb.png']:
        cache_path = Path(f'{cache_folder}/{cache_file}')
        if cache_path.exists():
            cache_path.unlink()

    return state, state['name2'], state['context'], state['greeting'], None
@functools.cache
def load_character_memoized(character, name1, name2):
    """Cached wrapper around load_character (keyed on all three arguments)."""
    return load_character(character, name1, name2)
@functools.cache
def load_instruction_template_memoized(template):
    """Cached wrapper around load_instruction_template (keyed on name)."""
    from modules.models_settings import load_instruction_template
    return load_instruction_template(template)
def upload_character(file, img_path, tavern=False):
    """Import an uploaded character card and save it as YAML (+ image).

    Accepts JSON or YAML content in both native and Pygmalion-style field
    layouts. A numeric suffix is appended to avoid overwriting an existing
    character of the same name.

    Args:
        file: Card contents as str or bytes.
        img_path: Optional path to the character image.
        tavern: True when called from the TavernAI import path.

    Returns:
        gr.update selecting the newly saved character in the dropdown.
    """
    import gradio as gr
    img = open_image_safely(img_path)

    decoded_file = file if isinstance(file, str) else file.decode('utf-8')
    try:
        data = json.loads(decoded_file)
    except Exception:
        data = yaml.safe_load(decoded_file)

    if 'char_name' in data:
        # Pygmalion-style card
        name = sanitize_filename(data['char_name'])
        greeting = data['char_greeting']
        context = build_pygmalion_style_context(data)
        yaml_data = generate_character_yaml(name, greeting, context)
    else:
        name = sanitize_filename(data['name'])
        yaml_data = generate_character_yaml(data['name'], data['greeting'], data['context'])

    outfile_name = name
    i = 1
    # Find a free filename by appending a numeric suffix
    while (shared.user_data_dir / 'characters' / f'{outfile_name}.yaml').exists():
        outfile_name = f'{name}_{i:03d}'
        i += 1

    with open(shared.user_data_dir / 'characters' / f'{outfile_name}.yaml', 'w', encoding='utf-8') as f:
        f.write(yaml_data)

    if img is not None:
        img.save(shared.user_data_dir / 'characters' / f'{outfile_name}.png')

    logger.info(f'New character saved to "{shared.user_data_dir}/characters/{outfile_name}.yaml".')
    return gr.update(value=outfile_name, choices=get_available_characters())
def build_pygmalion_style_context(data):
    """Assemble a context string from Pygmalion-format character fields.

    Concatenates persona, scenario, and example dialogue (each only when
    present and non-empty), then normalizes surrounding whitespace so the
    result always ends with exactly one trailing newline.
    """
    parts = []
    if 'char_persona' in data and data['char_persona'] != '':
        parts.append(f"{data['char_name']}'s Persona: {data['char_persona']}\n")

    if 'world_scenario' in data and data['world_scenario'] != '':
        parts.append(f"Scenario: {data['world_scenario']}\n")

    if 'example_dialogue' in data and data['example_dialogue'] != '':
        parts.append(f"{data['example_dialogue'].strip()}\n")

    return f"{''.join(parts).strip()}\n"
def upload_tavern_character(img_path, _json):
    """Convert a TavernAI card's fields to Pygmalion keys and import it."""
    _json = {'char_name': _json['name'], 'char_persona': _json['description'], 'char_greeting': _json['first_mes'], 'example_dialogue': _json['mes_example'], 'world_scenario': _json['scenario']}
    return upload_character(json.dumps(_json), img_path, tavern=True)
def check_tavern_character(img_path):
    """Inspect an image for embedded TavernAI character data.

    TavernAI cards store base64-encoded JSON in the PNG 'chara' metadata
    field.

    Returns:
        (name_or_error, description_or_None, card_dict_or_None,
        gr.update toggling the import button)
    """
    import gradio as gr
    img = open_image_safely(img_path)
    if img is None:
        return "Invalid or disallowed image file.", None, None, gr.update(interactive=False)

    if "chara" not in img.info:
        return "Not a TavernAI card", None, None, gr.update(interactive=False)

    decoded_string = base64.b64decode(img.info['chara']).replace(b'\\r\\n', b'\\n')
    _json = json.loads(decoded_string)
    if "data" in _json:
        # V2 card format nests the fields under 'data'
        _json = _json["data"]

    return _json['name'], _json['description'], _json, gr.update(interactive=True)
def upload_your_profile_picture(img_path):
    """Cache the user's profile picture as a thumbnail (or remove the cache).

    Passing an invalid/None image deletes any existing cached picture.
    """
    img = open_image_safely(img_path)

    cache_folder = Path(shared.args.disk_cache_dir)
    if not cache_folder.exists():
        cache_folder.mkdir()

    if img is None:
        if Path(f"{cache_folder}/pfp_me.png").exists():
            Path(f"{cache_folder}/pfp_me.png").unlink()
    else:
        img = make_thumbnail(img)
        img.save(Path(f'{cache_folder}/pfp_me.png'))
        logger.info(f'Profile picture saved to "{cache_folder}/pfp_me.png"')
def generate_character_yaml(name, greeting, context):
    """Serialize a character's fields as YAML, omitting empty values.

    Returns:
        YAML string with keys in name/greeting/context order.
    """
    data = {
        'name': name,
        'greeting': greeting,
        'context': context,
    }

    data = {k: v for k, v in data.items() if v}  # Strip falsy
    return yaml.dump(data, sort_keys=False, width=float("inf"))
def generate_instruction_template_yaml(instruction_template):
    """Wrap an instruction template string in YAML block-scalar form."""
    data = {
        'instruction_template': instruction_template
    }

    return my_yaml_output(data)
def save_character(name, greeting, context, picture, filename):
    """Persist a character as YAML (plus an optional picture) under *filename*.

    The filename is sanitized before use; an empty result aborts with an
    error log instead of writing anything.

    Args:
        name: Character name stored inside the YAML.
        greeting: Greeting text (omitted from the YAML when empty).
        context: Context text (omitted from the YAML when empty).
        picture: Optional path to an image to copy alongside the YAML.
        filename: Target file stem inside user_data_dir/characters.
    """
    filename = sanitize_filename(filename)
    if filename == "":
        logger.error("The filename is empty, so the character will not be saved.")
        return

    data = generate_character_yaml(name, greeting, context)
    # Bug fix: use the sanitized filename for the output paths (it was
    # previously computed but never used).
    filepath = shared.user_data_dir / 'characters' / f'{filename}.yaml'
    save_file(filepath, data)
    path_to_img = shared.user_data_dir / 'characters' / f'{filename}.png'
    if picture is not None:
        # Copy the image file from its source path to the character folder
        shutil.copy(picture, path_to_img)
        logger.info(f'Saved {path_to_img}.')
def delete_character(name, instruct=False):
    """Delete a character's data and image files (all known extensions)."""
    name = sanitize_filename(name)

    # Check for character data files
    for extension in ["yml", "yaml", "json"]:
        delete_file(shared.user_data_dir / 'characters' / f'{name}.{extension}')

    # Check for character image files
    for extension in ["png", "jpg", "jpeg"]:
        delete_file(shared.user_data_dir / 'characters' / f'{name}.{extension}')
def generate_user_pfp_cache(user):
    """Cache the user's profile picture thumbnail on disk.

    Looks for <user>.{png,jpg,jpeg} under user_data_dir/users.

    Returns:
        Path string of the cached thumbnail, or None if no image was found.
    """
    cache_folder = Path(shared.args.disk_cache_dir)
    if not cache_folder.exists():
        cache_folder.mkdir()

    for path in [shared.user_data_dir / 'users' / f"{user}.{extension}" for extension in ['png', 'jpg', 'jpeg']]:
        if path.exists():
            original_img = Image.open(path)

            # Define file paths
            pfp_path = Path(f'{cache_folder}/pfp_me.png')

            # Save thumbnail
            thumb = make_thumbnail(original_img)
            thumb.save(pfp_path, format='PNG')
            logger.info(f'User profile picture cached to "{pfp_path}"')
            return str(pfp_path)

    return None
def load_user(user_name, name1, user_bio):
    """Load a user profile (yml/yaml/json) and refresh its picture cache.

    Args:
        user_name: Profile file stem to load.
        name1: Current user name (may be overridden by the profile).
        user_bio: Current bio (may be overridden by the profile).

    Returns:
        (name1, user_bio, picture_path_or_None)

    Raises:
        ValueError: If no profile file is found.
    """
    picture = None

    filepath = None
    for extension in ["yml", "yaml", "json"]:
        filepath = shared.user_data_dir / 'users' / f'{user_name}.{extension}'
        if filepath.exists():
            break

    if filepath is None or not filepath.exists():
        logger.error(f"Could not find the user \"{user_name}\" inside {shared.user_data_dir}/users. No user has been loaded.")
        raise ValueError

    with open(filepath, 'r', encoding='utf-8') as f:
        file_contents = f.read()

    extension = filepath.suffix[1:]  # Remove the leading dot
    data = json.loads(file_contents) if extension == "json" else yaml.safe_load(file_contents)

    # Clear existing user picture cache
    cache_folder = Path(shared.args.disk_cache_dir)
    pfp_path = Path(f"{cache_folder}/pfp_me.png")
    if pfp_path.exists():
        pfp_path.unlink()

    # Generate new picture cache
    picture = generate_user_pfp_cache(user_name)

    # Get user name
    if 'name' in data and data['name'] != '':
        name1 = data['name']

    # Get user bio
    if 'user_bio' in data:
        user_bio = data['user_bio']

    return name1, user_bio, picture
def generate_user_yaml(name, user_bio):
    """Serialize a user profile (name + bio) as YAML, preserving key order."""
    data = {
        'name': name,
        'user_bio': user_bio,
    }

    return yaml.dump(data, sort_keys=False, width=float("inf"))
def save_user(name, user_bio, picture, filename):
    """Persist a user profile as YAML (plus an optional picture).

    The filename is sanitized before use; an empty result aborts with an
    error log instead of writing anything.

    Args:
        name: User display name stored inside the YAML.
        user_bio: User bio stored inside the YAML.
        picture: Optional path to an image to copy alongside the YAML.
        filename: Target file stem inside user_data_dir/users.
    """
    filename = sanitize_filename(filename)
    if filename == "":
        logger.error("The filename is empty, so the user will not be saved.")
        return

    # Ensure the users directory exists
    users_dir = shared.user_data_dir / 'users'
    users_dir.mkdir(parents=True, exist_ok=True)

    data = generate_user_yaml(name, user_bio)
    # Bug fix: use the sanitized filename for the output paths (it was
    # previously computed but never used).
    filepath = shared.user_data_dir / 'users' / f'{filename}.yaml'
    save_file(filepath, data)
    path_to_img = shared.user_data_dir / 'users' / f'{filename}.png'
    if picture is not None:
        # Copy the image file from its source path to the users folder
        shutil.copy(picture, path_to_img)
        logger.info(f'Saved user profile picture to {path_to_img}.')
def delete_user(name):
    """Delete a user's profile data and image files (all known extensions)."""
    name = sanitize_filename(name)

    # Check for user data files
    for extension in ["yml", "yaml", "json"]:
        delete_file(shared.user_data_dir / 'users' / f'{name}.{extension}')

    # Check for user image files
    for extension in ["png", "jpg", "jpeg"]:
        delete_file(shared.user_data_dir / 'users' / f'{name}.{extension}')
def update_user_menu_after_deletion(idx):
    """Refresh the user dropdown after a deletion, selecting a neighbor.

    Recreates a default 'You' profile when the last user was deleted so the
    dropdown is never empty.
    """
    import gradio as gr
    users = get_available_users()
    if len(users) == 0:
        # Create a default user if none exist
        save_user('You', '', None, 'Default')
        users = get_available_users()

    idx = min(int(idx), len(users) - 1)
    idx = max(0, idx)
    return gr.update(choices=users, value=users[idx])
def handle_user_menu_change(state):
    """UI handler: load the selected user profile, keeping old values on error.

    Returns:
        [name1, user_bio, picture] for the corresponding UI components.
    """
    try:
        name1, user_bio, picture = load_user(state['user_menu'], state['name1'], state['user_bio'])
        return [
            name1,
            user_bio,
            picture
        ]
    except Exception as e:
        logger.error(f"Failed to load user '{state['user_menu']}': {e}")
        return [
            state['name1'],
            state['user_bio'],
            None
        ]
def handle_save_user_click(name1):
    """Open the save-user dialog, pre-filled with the current user name."""
    import gradio as gr
    return [
        name1,
        gr.update(visible=True)
    ]
def my_yaml_output(data):
    '''
    pyyaml is very inconsistent with multiline strings; for simple
    instruction-template outputs, emitting block literals ("|-") by
    hand is enough.
    '''
    chunks = []
    for key, value in data.items():
        chunks.append(key + ": |-\n")
        chunks.extend("  " + line.rstrip(' ') + "\n" for line in value.splitlines())
    return "".join(chunks)
def handle_send_dummy_message_click(text, state):
    """Append `text` as a user message without generating a reply, then save and redraw."""
    history = send_dummy_message(text, state)
    save_history(history, state['unique_id'], state['character_menu'], state['mode'])
    html = redraw_html(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'])
    # Clear the input textbox and its attachments after sending
    return [history, html, {"text": "", "files": []}]
def handle_send_dummy_reply_click(text, state):
    """Append `text` as a bot reply without generating, then save and redraw."""
    history = send_dummy_reply(text, state)
    save_history(history, state['unique_id'], state['character_menu'], state['mode'])
    html = redraw_html(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'])
    # Clear the input textbox and its attachments after sending
    return [history, html, {"text": "", "files": []}]
def handle_remove_last_click(state):
    """Delete the last exchange and put the removed user input back into the textbox."""
    last_input, history = remove_last_message(state['history'])
    save_history(history, state['unique_id'], state['character_menu'], state['mode'])
    html = redraw_html(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'])
    return [history, html, {"text": last_input, "files": []}]
def handle_unique_id_select(state):
    """Load the chat selected in the past-chats menu and remember it as last visited."""
    history = load_history(state['unique_id'], state['character_menu'], state['mode'])
    html = redraw_html(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'])
    # Save this as the last visited chat
    save_last_chat_state(state['character_menu'], state['mode'], state['unique_id'])
    convert_to_markdown.cache_clear()
    return [history, html]
def handle_start_new_chat_click(state):
    """Start a fresh chat and refresh the past-chats dropdown to select it."""
    import gradio as gr
    history = start_new_chat(state)
    histories = find_all_histories_with_first_prompts(state)
    html = redraw_html(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'])
    convert_to_markdown.cache_clear()
    if len(histories) > 0:
        # Select the newest chat (histories are (label, id) pairs, newest first)
        past_chats_update = gr.update(choices=histories, value=histories[0][1])
    else:
        past_chats_update = gr.update(choices=histories)
    return [history, html, past_chats_update]
def handle_start_incognito_chat_click(state):
    """Start a temporary chat whose id carries the 'incognito-' prefix."""
    import gradio as gr
    unique_id = 'incognito-' + datetime.now().strftime('%Y%m%d-%H-%M-%S')
    history = start_new_chat(state, unique_id=unique_id)
    html = redraw_html(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'])
    convert_to_markdown.cache_clear()
    histories = find_all_histories_with_first_prompts(state)
    past_chats_update = gr.update(choices=histories, value=unique_id)
    return [history, html, past_chats_update]
def handle_delete_chat_confirm_click(state):
    """Delete the current chat, then load the most sensible remaining chat."""
    filtered_histories = find_all_histories_with_first_prompts(state)
    filtered_ids = [h[1] for h in filtered_histories]
    if state['unique_id'] not in filtered_ids:
        # Incognito or unknown chat — just load the most recent saved chat
        index = '0'
    else:
        index = str(filtered_ids.index(state['unique_id']))
    delete_history(state['unique_id'], state['character_menu'], state['mode'])
    history, unique_id = load_history_after_deletion(state, index)
    html = redraw_html(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'])
    convert_to_markdown.cache_clear()
    return [history, html, unique_id]
def handle_branch_chat_click(state):
    """Branch the current chat into a new saved chat.

    `state['branch_index']` == -1 copies the whole history; any other index
    truncates the history after that message pair and prunes metadata entries
    beyond the branch point. Branches of incognito chats keep the
    'incognito-' prefix. Returns the history, redrawn HTML, the updated
    past-chats dropdown, and -1 to reset the branch index.

    Fix: the original `if/else` assigned `history = state['history']` in both
    branches; the assignment is hoisted and only the truncation is conditional.
    """
    import gradio as gr
    branch_from_index = state['branch_index']
    history = state['history']
    if branch_from_index != -1:
        history['visible'] = history['visible'][:branch_from_index + 1]
        history['internal'] = history['internal'][:branch_from_index + 1]
        # Prune the metadata dictionary to remove entries beyond the branch point
        if 'metadata' in history:
            history['metadata'] = {k: v for k, v in history['metadata'].items() if int(k.split('_')[-1]) <= branch_from_index}
    prefix = 'incognito-' if state['unique_id'] and state['unique_id'].startswith('incognito-') else ''
    new_unique_id = prefix + datetime.now().strftime('%Y%m%d-%H-%M-%S')
    save_history(history, new_unique_id, state['character_menu'], state['mode'])
    histories = find_all_histories_with_first_prompts(state)
    html = redraw_html(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'])
    convert_to_markdown.cache_clear()
    past_chats_update = gr.update(choices=histories, value=new_unique_id)
    return [history, html, past_chats_update, -1]
def handle_edit_message_click(state):
    """Apply an edit to one side of a message pair in the chat history.

    Before overwriting, the pre-edit content is archived as the first entry in
    the message's 'versions' metadata so it can be navigated back to. The new
    text goes through input extensions for the internal copy and HTML escaping
    for the visible copy. Saves and redraws afterwards.
    """
    history = state['history']
    message_index = int(state['edit_message_index'])
    new_text = state['edit_message_text']
    role = state['edit_message_role']  # "user" or "assistant"
    if message_index >= len(history['internal']):
        # Index out of range: nothing to edit, just redraw the current history
        html_output = redraw_html(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'])
        return [history, html_output]
    role_idx = 0 if role == "user" else 1
    if 'metadata' not in history:
        history['metadata'] = {}
    key = f"{role}_{message_index}"
    if key not in history['metadata']:
        history['metadata'][key] = {}
    # If no versions exist yet for this message, store the current (pre-edit) content as the first version.
    if "versions" not in history['metadata'][key] or not history['metadata'][key]["versions"]:
        original_content = history['internal'][message_index][role_idx]
        original_visible = history['visible'][message_index][role_idx]
        original_timestamp = history['metadata'][key].get('timestamp', get_current_timestamp())
        version_entry = {
            "content": original_content,
            "visible_content": original_visible,
            "timestamp": original_timestamp
        }
        # Preserve the tool_sequence with the archived version, if present
        ts = history['metadata'][key].get('tool_sequence')
        if ts is not None:
            version_entry['tool_sequence'] = ts
        history['metadata'][key]["versions"] = [version_entry]
    history['internal'][message_index][role_idx] = apply_extensions('input', new_text, state, is_chat=True)
    history['visible'][message_index][role_idx] = html.escape(new_text)
    # An edited message no longer corresponds to the recorded tool calls
    history['metadata'][key].pop('tool_sequence', None)
    add_message_version(history, role, message_index, is_current=True)
    save_history(history, state['unique_id'], state['character_menu'], state['mode'])
    html_output = redraw_html(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'])
    return [history, html_output]
def handle_navigate_version_click(state):
    """Step left/right through the stored versions of a message and load that version.

    Reads the target message from 'navigate_message_index'/'navigate_message_role'
    and the direction from 'navigate_direction'. If navigation is impossible
    (missing role, no versions, already at the boundary) the history is
    redrawn unchanged.
    """
    history = state['history']
    message_index = int(state['navigate_message_index'])
    direction = state['navigate_direction']
    role = state['navigate_message_role']
    if not role:
        logger.error("Role not provided for version navigation.")
        html = redraw_html(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'])
        return [history, html]
    key = f"{role}_{message_index}"
    if 'metadata' not in history or key not in history['metadata'] or 'versions' not in history['metadata'][key]:
        # No versions recorded for this message: nothing to navigate
        html = redraw_html(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'])
        return [history, html]
    metadata = history['metadata'][key]
    versions = metadata['versions']
    # Default to the last version if current_version_index is not set
    current_idx = metadata.get('current_version_index', len(versions) - 1 if versions else 0)
    if direction == 'left':
        new_idx = max(0, current_idx - 1)
    else:  # right
        new_idx = min(len(versions) - 1, current_idx + 1)
    if new_idx == current_idx:
        # Already at the first/last version: redraw without changes
        html = redraw_html(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'])
        return [history, html]
    msg_content_idx = 0 if role == 'user' else 1  # 0 for user content, 1 for assistant content in the pair
    version_to_load = versions[new_idx]
    history['internal'][message_index][msg_content_idx] = version_to_load['content']
    history['visible'][message_index][msg_content_idx] = version_to_load['visible_content']
    metadata['current_version_index'] = new_idx
    # Restore per-version tool_sequence so follow-up prompts see consistent context
    version_ts = version_to_load.get('tool_sequence')
    if version_ts is not None:
        metadata['tool_sequence'] = version_ts
    else:
        metadata.pop('tool_sequence', None)
    update_message_metadata(history['metadata'], role, message_index, timestamp=version_to_load['timestamp'])
    # Redraw and save
    html = redraw_html(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'])
    save_history(history, state['unique_id'], state['character_menu'], state['mode'])
    return [history, html]
def handle_rename_chat_click():
    """Open the rename-chat dialog with a placeholder name."""
    import gradio as gr
    return [
        gr.update(value="My New Chat"),
        gr.update(visible=True),
    ]
def handle_rename_chat_confirm(rename_to, state):
    """Rename the current chat and close the dialog; incognito chats are never renamed."""
    import gradio as gr
    if state['unique_id'] and state['unique_id'].startswith('incognito-'):
        # Incognito chats are not persisted under a user-chosen name
        return [
            gr.update(),
            gr.update(visible=False),
        ]
    rename_history(state['unique_id'], rename_to, state['character_menu'], state['mode'])
    histories = find_all_histories_with_first_prompts(state)
    return [
        gr.update(choices=histories, value=rename_to),
        gr.update(visible=False),
    ]
def handle_search_chat_change(state):
    """Re-filter the past-chats dropdown as the search text changes."""
    import gradio as gr
    histories = find_all_histories_with_first_prompts(state)
    return gr.update(choices=histories)
def handle_upload_chat_history(load_chat_history, state):
    """Import an uploaded JSON chat history into a new chat, save it, and redraw."""
    import gradio as gr
    history = start_new_chat(state)
    history = load_history_json(load_chat_history, history)
    save_history(history, state['unique_id'], state['character_menu'], state['mode'])
    histories = find_all_histories_with_first_prompts(state)
    html = redraw_html(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'])
    convert_to_markdown.cache_clear()
    if len(histories) > 0:
        past_chats_update = gr.update(choices=histories, value=histories[0][1])
    else:
        past_chats_update = gr.update(choices=histories)
    return [
        history,
        html,
        past_chats_update
    ]
def handle_character_menu_change(state):
    """Switch to the selected character: load its profile, latest history, and chat list."""
    import gradio as gr
    name1, name2, picture, greeting, context = load_character(state['character_menu'], state['name1'], state['name2'])
    # Propagate the character's fields into the interface state
    state['name1'] = name1
    state['name2'] = name2
    state['character_picture'] = picture
    state['greeting'] = greeting
    state['context'] = context
    history, loaded_unique_id = load_latest_history(state)
    histories = find_all_histories_with_first_prompts(state)
    html = redraw_html(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'])
    convert_to_markdown.cache_clear()
    if len(histories) > 0:
        past_chats_update = gr.update(choices=histories, value=loaded_unique_id or histories[0][1])
    else:
        past_chats_update = gr.update(choices=histories)
    return [
        history,
        html,
        name1,
        name2,
        picture,
        greeting,
        context,
        past_chats_update
    ]
def handle_character_picture_change(picture_path):
    """Update the character picture cache: save picture + thumbnail, or clear both."""
    picture = open_image_safely(picture_path)
    cache_folder = Path(shared.args.disk_cache_dir)
    if not cache_folder.exists():
        cache_folder.mkdir()
    if picture is not None:
        # Save to cache
        picture.save(Path(f'{cache_folder}/pfp_character.png'), format='PNG')
        thumb = make_thumbnail(picture)
        thumb.save(Path(f'{cache_folder}/pfp_character_thumb.png'), format='PNG')
    else:
        # Remove cache files when picture is cleared
        for cache_file in ['pfp_character.png', 'pfp_character_thumb.png']:
            cache_path = Path(f'{cache_folder}/{cache_file}')
            if cache_path.exists():
                cache_path.unlink()
def handle_mode_change(state):
    """Reload history and chat list for the new mode, toggling mode-specific UI panels."""
    import gradio as gr
    history, loaded_unique_id = load_latest_history(state)
    histories = find_all_histories_with_first_prompts(state)
    # Ensure character picture cache exists
    if state['mode'] in ['chat', 'chat-instruct'] and state['character_menu'] and state['character_menu'] != 'None':
        generate_pfp_cache(state['character_menu'])
    html = redraw_html(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'])
    convert_to_markdown.cache_clear()
    if len(histories) > 0:
        past_chats_update = gr.update(choices=histories, value=loaded_unique_id or histories[0][1])
    else:
        past_chats_update = gr.update(choices=histories)
    return [
        history,
        html,
        gr.update(visible=state['mode'] != 'instruct'),
        gr.update(visible=state['mode'] == 'chat-instruct'),
        past_chats_update
    ]
def handle_save_character_click(name2):
    """Open the save-character dialog, pre-filled with the current character name."""
    import gradio as gr
    return [
        name2,
        gr.update(visible=True)
    ]
def handle_load_template_click(instruction_template):
    """Load the chosen instruction template and reset the dropdown label."""
    from modules.models_settings import load_instruction_template
    output = load_instruction_template(instruction_template)
    return [
        output,
        "Select template to load..."
    ]
def handle_save_template_click(instruction_template_str):
    """Open the save-template dialog with generated YAML and a default filename."""
    import gradio as gr
    contents = generate_instruction_template_yaml(instruction_template_str)
    root = str(shared.user_data_dir / 'instruction-templates') + '/'
    return [
        "My Template.yaml",
        root,
        contents,
        root,
        gr.update(visible=True)
    ]
def handle_delete_template_click(template):
    """Prepare the delete-template dialog for the selected template file."""
    import gradio as gr
    root = str(shared.user_data_dir / 'instruction-templates') + '/'
    return [
        f"{template}.yaml",
        root,
        root,
        gr.update(visible=False)
    ]
def handle_your_picture_change(picture, state):
    """Store the uploaded user profile picture and redraw the chat with a fresh cache."""
    upload_your_profile_picture(picture)
    html = redraw_html(state['history'], state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'], reset_cache=True)
    return html
def handle_send_instruction_click(state):
    """Render an empty instruct-mode prompt and send it to the notebook/default tab."""
    import gradio as gr
    # Build the prompt from a clean instruct state, ignoring the current chat
    state['mode'] = 'instruct'
    state['history'] = {'internal': [], 'visible': [], 'metadata': {}}
    output = generate_chat_prompt("Input", state)
    if state["show_two_notebook_columns"]:
        return gr.update(), output, ""
    else:
        return output, gr.update(), gr.update()
def handle_send_chat_click(state):
import gradio as gr
output = generate_chat_prompt("", state, _continue=True)
if state["show_two_notebook_columns"]:
return gr.update(), output, ""
else:
return output, gr.update(), gr.update() | --- +++ @@ -52,10 +52,20 @@
def get_current_timestamp():
+ """Returns the current time in 24-hour format"""
return datetime.now().strftime('%b %d, %Y %H:%M')
def update_message_metadata(metadata_dict, role, index, **fields):
+ """
+ Updates or adds metadata fields for a specific message.
+
+ Args:
+ metadata_dict: The metadata dictionary
+ role: The role (user, assistant, etc)
+ index: The message index
+ **fields: Arbitrary metadata fields to update/add
+ """
key = f"{role}_{index}"
if key not in metadata_dict:
metadata_dict[key] = {}
@@ -90,6 +100,7 @@
def get_compiled_template(template_str):
+ """Cache compiled Jinja2 templates keyed by their source string."""
compiled = _template_cache.get(template_str)
if compiled is None:
compiled = jinja_env.from_string(template_str)
@@ -99,6 +110,10 @@
def str_presenter(dumper, data):
+ """
+ Copied from https://github.com/yaml/pyyaml/issues/240
+ Makes pyyaml output prettier multiline strings.
+ """
if data.count('\n') > 0:
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
@@ -111,6 +126,12 @@
class _JsonDict(dict):
+ """A dict that serializes as JSON when used in string concatenation.
+
+ Some Jinja2 templates (Qwen, GLM) iterate arguments with .items(),
+ requiring a dict. Others (DeepSeek) concatenate arguments as a
+ string, requiring JSON. This class satisfies both.
+ """
def __str__(self):
return json.dumps(self, ensure_ascii=False)
@@ -123,6 +144,12 @@
def _deserialize_tool_call_arguments(tool_calls):
+ """Convert tool_call arguments from JSON strings to _JsonDict.
+
+ The OpenAI API spec sends arguments as a JSON string, but Jinja2
+ templates may need a dict (.items()) or a string (concatenation).
+ _JsonDict handles both transparently.
+ """
result = []
for tc in tool_calls:
tc = copy.copy(tc)
@@ -143,6 +170,12 @@
def _expand_tool_sequence(tool_seq):
+ """Expand a tool_sequence list into API messages.
+
+ Returns a list of dicts (role: assistant with tool_calls, or role: tool).
+ If any tool_call IDs are missing a matching tool result, a synthetic
+ empty result is inserted so the prompt is never malformed.
+ """
messages = []
expected_ids = []
seen_ids = set()
@@ -548,6 +581,7 @@
def count_prompt_tokens(text_input, state):
+ """Count tokens for current history + input including attachments"""
if shared.tokenizer is None:
return "Tokenizer not available"
@@ -690,6 +724,7 @@
def add_message_attachment(history, row_idx, file_path, is_user=True):
+ """Add a file attachment to a message in history metadata"""
if 'metadata' not in history:
history['metadata'] = {}
@@ -769,6 +804,7 @@
def extract_pdf_text(pdf_path):
+ """Extract text from a PDF file"""
import pymupdf
text = ""
@@ -784,6 +820,10 @@
def extract_docx_text(docx_path):
+ """
+ Extract text from a .docx file, including headers,
+ body (paragraphs and tables), and footers.
+ """
try:
import docx
@@ -827,6 +867,7 @@
def generate_search_query(user_message, state):
+ """Generate a search query from user message using the LLM"""
# Augment the user message with search instruction
augmented_message = f"{user_message}\n\n=====\n\nPlease turn the message above into a short web search query in the same language as the message. Respond with only the search query, nothing else."
@@ -1144,6 +1185,13 @@
def generate_chat_reply_wrapper(text, state, regenerate=False, _continue=False):
+ '''
+ Same as above but returns HTML for the UI.
+ When tools are selected, wraps generation in a loop that detects
+ tool calls, executes them, and re-generates until the model stops.
+ All tool output is consolidated into a single visible chat bubble
+ using metadata['assistant_N']['tool_sequence'].
+ '''
if not character_is_loaded(state):
return
@@ -1621,6 +1669,10 @@
def load_latest_history(state):
+ '''
+ Loads the latest history for the given character in chat or chat-instruct
+ mode, or the latest instruct history for instruct mode.
+ '''
if shared.args.multi_user:
return start_new_chat(state), None
@@ -1647,6 +1699,10 @@
def load_history_after_deletion(state, idx):
+ '''
+ Loads the latest history for the given character in chat or chat-instruct
+ mode, or the latest instruct history for instruct mode.
+ '''
import gradio as gr
if shared.args.multi_user:
@@ -1674,6 +1730,7 @@
def get_chat_state_key(character, mode):
+ """Generate a key for storing last chat state"""
if mode == 'instruct':
return 'instruct'
else:
@@ -1681,6 +1738,7 @@
def load_last_chat_state():
+ """Load the last chat state from file"""
state_file = shared.user_data_dir / 'logs' / 'chat_state.json'
if state_file.exists():
try:
@@ -1693,6 +1751,7 @@
def save_last_chat_state(character, mode, unique_id):
+ """Save the last visited chat for a character/mode"""
if shared.args.multi_user:
return
@@ -1845,6 +1904,7 @@
def restore_character_for_ui(state):
+ """Reset character fields to the currently loaded character's saved values"""
if state['character_menu'] and state['character_menu'] != 'None':
try:
name1, name2, picture, greeting, context = load_character(state['character_menu'], state['name1'], state['name2'])
@@ -1864,6 +1924,7 @@
def clear_character_for_ui(state):
+ """Clear all character fields and picture cache"""
state['name2'] = shared.settings['name2']
state['context'] = shared.settings['context']
state['greeting'] = shared.settings['greeting']
@@ -2024,6 +2085,7 @@
def generate_user_pfp_cache(user):
+ """Generate cached profile picture for user"""
cache_folder = Path(shared.args.disk_cache_dir)
if not cache_folder.exists():
cache_folder.mkdir()
@@ -2045,6 +2107,7 @@
def load_user(user_name, name1, user_bio):
+ """Load user profile from YAML file"""
picture = None
filepath = None
@@ -2084,6 +2147,7 @@
def generate_user_yaml(name, user_bio):
+ """Generate YAML content for user profile"""
data = {
'name': name,
'user_bio': user_bio,
@@ -2093,6 +2157,7 @@
def save_user(name, user_bio, picture, filename):
+ """Save user profile to YAML file"""
filename = sanitize_filename(filename)
if filename == "":
logger.error("The filename is empty, so the user will not be saved.")
@@ -2114,6 +2179,7 @@
def delete_user(name):
+ """Delete user profile files"""
name = sanitize_filename(name)
# Check for user data files
for extension in ["yml", "yaml", "json"]:
@@ -2125,6 +2191,7 @@
def update_user_menu_after_deletion(idx):
+ """Update user menu after a user is deleted"""
import gradio as gr
users = get_available_users()
if len(users) == 0:
@@ -2138,6 +2205,7 @@
def handle_user_menu_change(state):
+ """Handle user menu selection change"""
try:
name1, user_bio, picture = load_user(state['user_menu'], state['name1'], state['user_bio'])
@@ -2156,6 +2224,7 @@
def handle_save_user_click(name1):
+ """Handle save user button click"""
import gradio as gr
return [
name1,
@@ -2164,6 +2233,10 @@
def my_yaml_output(data):
+ '''
+ pyyaml is very inconsistent with multiline strings.
+ for simple instruction template outputs, this is enough.
+ '''
result = ""
for k in data:
result += k + ": |-\n"
@@ -2473,6 +2546,7 @@
def handle_character_picture_change(picture_path):
+ """Update or clear cache when character picture changes"""
picture = open_image_safely(picture_path)
cache_folder = Path(shared.args.disk_cache_dir)
if not cache_folder.exists():
@@ -2586,4 +2660,4 @@ if state["show_two_notebook_columns"]:
return gr.update(), output, ""
else:
- return output, gr.update(), gr.update()+ return output, gr.update(), gr.update()
| https://raw.githubusercontent.com/oobabooga/text-generation-webui/HEAD/modules/chat.py |
Add missing documentation to my Python functions |
import logging
import re
import time
from abc import ABC
from functools import lru_cache
from typing import Dict, List
import torch
from modules import shared
logger = logging.getLogger(__name__)
########################
# EBNF Grammar Parsing #
########################
END_OF_ALTERNATE_MARKER = 0
END_OF_RULE_MARKER = 0
TO_BE_FILLED_MARKER = 0
REF_RULE_MARKER = 1
LITERAL_MARKER = 2
class ParseState:
    """Mutable parser state: the rule-name -> id table plus the flat integer
    grammar encoding accumulated while parsing."""
    def __init__(self):
        self.symbol_ids = {}  # rule name -> integer symbol id
        self.grammar_encoding = []  # old name: out_grammar
def get_symbol_id(state, src):
    """Return the integer id for rule name `src`, allocating a fresh one on first use."""
    table = state.symbol_ids
    # setdefault evaluates len(table) before inserting, so ids are dense 0..n-1
    return table.setdefault(src, len(table))
def generate_symbol_id(state, base_name):
    """Allocate a synthetic rule id, registered under the name '<base_name>_<id>'."""
    new_id = len(state.symbol_ids)
    state.symbol_ids[f"{base_name}_{new_id}"] = new_id
    return new_id
def is_word_char(c):
    """True for characters allowed in rule names: alphanumerics, '-' and '_'."""
    return c.isalnum() or c in "-_"
def hex_to_int(c):
    """Convert a single hex digit (0-9, a-f, A-F) to its integer value."""
    lowered = c.lower()
    if lowered.isdigit():
        return int(lowered)
    if "a" <= lowered <= "f":
        return ord(lowered) - ord("a") + 10
    raise RuntimeError("unknown hex char " + c)
def remove_leading_white_space(src, newline_ok):
    """Skip leading whitespace and '#' line comments.

    When `newline_ok` is False, skipping stops at the first '\\r'/'\\n'
    (the newline itself is left in place).
    """
    i = 0
    n = len(src)
    while i < n and (src[i].isspace() or src[i] == "#"):
        if src[i] == "#":
            # Consume the comment up to (not including) the line break
            while i < n and src[i] not in ("\r", "\n"):
                i += 1
        elif not newline_ok and src[i] in ("\r", "\n"):
            break
        else:
            i += 1
    return src[i:]
def parse_name(src):
    """Split a leading rule name off `src`; return (name, rest) or raise if absent.

    Name characters are alphanumerics plus '-' and '_' (the is_word_char set,
    inlined here).
    """
    end = 0
    while end < len(src) and (src[end].isalnum() or src[end] in "-_"):
        end += 1
    if end == 0:
        raise RuntimeError("expecting name at " + src)
    return src[:end], src[end:]
def read_hex(s):
    """Decode a string of hex digits into the character with that code point."""
    value = 0
    for ch in s:
        value <<= 4
        lowered = ch.lower()
        if lowered.isdigit():
            value += int(lowered)
        elif "a" <= lowered <= "f":
            value += ord(lowered) - ord("a") + 10
        else:
            raise RuntimeError("unknown hex char " + ch)
    return chr(value)
def parse_char(src):
    """Consume one character (or escape sequence) from `src`.

    Returns (char, remaining_src). Supports hex escapes \\xHH, \\uHHHH and
    \\UHHHHHHHH, the literal escapes " [ ] \\ -, and the control escapes
    \\r, \\n, \\t. Raises RuntimeError on an unknown escape or empty input.
    """
    # if we have a backslash, it's maybe an escape
    if src[0] == "\\":
        esc = src[1]
        if esc == "x":
            return read_hex(src[2:4]), src[4:]
        elif esc == "u":
            return read_hex(src[2:6]), src[6:]
        elif esc == "U":
            return read_hex(src[2:10]), src[10:]
        elif esc in ('"', "[", "]", "\\", "-"):
            return esc, src[2:]
        elif esc == "r":
            return "\r", src[2:]
        elif esc == "n":
            return "\n", src[2:]
        elif esc == "t":
            return "\t", src[2:]
        # NOTE: the original code also tested `esc == "\\"` here; that branch
        # was unreachable because backslash is handled by the tuple case above.
        raise RuntimeError("unknown escape at " + src)
    elif src:
        return src[0], src[1:]
    raise RuntimeError("unexpected end of input")
def parse_sequence(state, src, rule_name, outbuf, is_nested):
    """Parse one alternate (a sequence of symbols) into `outbuf`.

    Handles literal strings, [..] character classes, rule references,
    (..) groups, and the * + ? repetition operators (which are rewritten
    into synthesized recursive rules appended directly to
    state.grammar_encoding). Returns the unconsumed remainder of `src`.
    `outbuf` receives: [size, ...symbol encodings..., END_OF_ALTERNATE_MARKER].
    """
    out_start_pos = len(outbuf)
    # sequence size, will be replaced at end when known
    outbuf.append(TO_BE_FILLED_MARKER)
    last_sym_start = len(outbuf)
    remaining_src = src
    while remaining_src:
        if remaining_src[0] == '"':  # literal string
            remaining_src = remaining_src[1:]
            last_sym_start = len(outbuf)
            while remaining_src[0] != '"':
                char, remaining_src = parse_char(remaining_src)
                # each char of a literal is encoded as a "range" of char - char
                outbuf.append(LITERAL_MARKER)
                outbuf.append(ord(char))
                outbuf.append(ord(char))
            remaining_src = remove_leading_white_space(remaining_src[1:], is_nested)
        elif remaining_src[0] == "[":  # char range(s)
            remaining_src = remaining_src[1:]
            last_sym_start = len(outbuf)
            # num chars in range - replaced at end of loop
            outbuf.append(TO_BE_FILLED_MARKER)
            while remaining_src[0] != "]":
                char, remaining_src = parse_char(remaining_src)
                outbuf.append(ord(char))
                if remaining_src[0] == "-" and remaining_src[1] != "]":
                    endchar_pair, remaining_src = parse_char(remaining_src[1:])
                    outbuf.append(ord(endchar_pair))
                else:
                    # chars that aren't part of a c1-c2 range are just doubled (i.e., c-c)
                    outbuf.append(ord(char))
            # replace num chars with actual
            outbuf[last_sym_start] = len(outbuf) - last_sym_start - 1
            remaining_src = remove_leading_white_space(remaining_src[1:], is_nested)
        elif is_word_char(remaining_src[0]):  # rule reference
            name, remaining_src = parse_name(remaining_src)
            ref_rule_id = get_symbol_id(state, name)
            remaining_src = remove_leading_white_space(remaining_src, is_nested)
            last_sym_start = len(outbuf)
            outbuf.append(REF_RULE_MARKER)
            outbuf.append(ref_rule_id)
        elif remaining_src[0] == "(":  # grouping
            # parse nested alternates into synthesized rule
            remaining_src = remove_leading_white_space(remaining_src[1:], True)
            sub_rule_id = generate_symbol_id(state, rule_name)
            remaining_src = parse_alternates(state, remaining_src, rule_name, sub_rule_id, True)
            last_sym_start = len(outbuf)
            # output reference to synthesized rule
            outbuf.append(REF_RULE_MARKER)
            outbuf.append(sub_rule_id)
            if remaining_src[0] != ")":
                raise RuntimeError("expecting ')' at " + remaining_src)
            remaining_src = remove_leading_white_space(remaining_src[1:], is_nested)
        elif remaining_src[0] in ("*", "+", "?"):  # repetition operator
            if len(outbuf) - out_start_pos - 1 == 0:
                raise RuntimeError("expecting preceeding item to */+/? at " + remaining_src)
            out_grammar = state.grammar_encoding
            # apply transformation to previous symbol (last_sym_start -
            # end) according to rewrite rules:
            # S* --> S' ::= S S' |
            # S+ --> S' ::= S S' | S
            # S? --> S' ::= S |
            sub_rule_id = generate_symbol_id(state, rule_name)
            out_grammar.append(sub_rule_id)
            sub_rule_start = len(out_grammar)
            # placeholder for size of 1st alternate
            out_grammar.append(TO_BE_FILLED_MARKER)
            # add preceding symbol to generated rule
            out_grammar.extend(outbuf[last_sym_start:])
            if remaining_src[0] in ("*", "+"):
                # cause generated rule to recurse
                out_grammar.append(REF_RULE_MARKER)
                out_grammar.append(sub_rule_id)
            # apply actual size
            out_grammar[sub_rule_start] = len(out_grammar) - sub_rule_start
            # mark end of 1st alternate
            out_grammar.append(END_OF_ALTERNATE_MARKER)
            sub_rule_start = len(out_grammar)
            # placeholder for size of 2nd alternate
            out_grammar.append(TO_BE_FILLED_MARKER)
            if remaining_src[0] == "+":
                # add preceding symbol as alternate only for '+'
                out_grammar.extend(outbuf[last_sym_start:])
            # apply actual size of 2nd alternate
            out_grammar[sub_rule_start] = len(out_grammar) - sub_rule_start
            # mark end of 2nd alternate, then end of rule
            out_grammar.append(END_OF_ALTERNATE_MARKER)
            out_grammar.append(END_OF_RULE_MARKER)
            # in original rule, replace previous symbol with reference to generated rule
            outbuf[last_sym_start:] = [1, sub_rule_id]
            remaining_src = remove_leading_white_space(remaining_src[1:], is_nested)
        else:
            break
    # apply actual size of this alternate sequence
    outbuf[out_start_pos] = len(outbuf) - out_start_pos
    # mark end of alternate
    outbuf.append(END_OF_ALTERNATE_MARKER)
    return remaining_src
def parse_alternates(state, src, rule_name, rule_id, is_nested):
    """Parse '|'-separated alternates for one rule and append the encoded
    rule (id, alternates, 0 terminator) to state.grammar_encoding.
    Returns the unconsumed remainder of `src`."""
    buf = []
    rest = parse_sequence(state, src, rule_name, buf, is_nested)
    while rest and rest[0] == "|":
        rest = remove_leading_white_space(rest[1:], True)
        rest = parse_sequence(state, rest, rule_name, buf, is_nested)
    encoding = state.grammar_encoding
    encoding.append(rule_id)
    encoding.extend(buf)
    encoding.append(0)
    return rest
def parse_rule(state, src):
    """Parse one 'name ::= alternates' rule, consume its trailing newline,
    and return the remainder of the grammar source."""
    name, rest = parse_name(src)
    rest = remove_leading_white_space(rest, False)
    rule_id = get_symbol_id(state, name)
    if not rest.startswith("::="):
        raise RuntimeError("expecting ::= at " + rest)
    rest = remove_leading_white_space(rest[3:], True)
    rest = parse_alternates(state, rest, name, rule_id, False)
    # A rule must end at a newline (\r\n, \r, or \n) or end of input
    if rest and rest[0] == "\r":
        rest = rest[2:] if rest[1] == "\n" else rest[1:]
    elif rest and rest[0] == "\n":
        rest = rest[1:]
    elif rest:
        raise RuntimeError("expecting newline or end at " + rest)
    return remove_leading_white_space(rest, True)
def parse_ebnf(src):
    """Parse an EBNF grammar string into a ParseState.

    The returned state's grammar_encoding is terminated with the 0xFFFF
    sentinel. On a parse error, a warning is logged and an empty ParseState
    is returned instead of raising.
    """
    try:
        state = ParseState()
        grammar_repr = remove_leading_white_space(src, True)
        last_grammar_repr = ""
        while grammar_repr:
            if last_grammar_repr:
                last_parsed_rule_len = len(last_grammar_repr) - len(grammar_repr)
                logger.debug(f"last_parsed_rule: {last_grammar_repr[:last_parsed_rule_len]}")
            last_grammar_repr = grammar_repr
            grammar_repr = parse_rule(state, grammar_repr)
        state.grammar_encoding.append(0xFFFF)
        return state
    except RuntimeError as err:
        # Bug fix: the old call passed `err` as a %-style argument with no
        # placeholder in the message, which broke the log line formatting.
        logger.warning("error parsing grammar: %s", err)
        return ParseState()
def print_rule(file, grammar_encoding, index, symbol_id_names):
    """Pretty-print one encoded rule starting at offset `index` to `file`.

    Alternates are separated by '|'; rule references print as their names and
    character classes as bracketed c1-c2 pairs. Returns the offset of the
    next rule in `grammar_encoding`.
    """
    rule_id = grammar_encoding[index]
    print(f"<{index}>{symbol_id_names[rule_id]} ::=", end=" ", file=file)
    pos = index + 1
    # Each iteration of the outer loop prints one alternate
    while grammar_encoding[pos]:
        if pos - 1 > index:
            print("|", end=" ", file=file)
        pos += 1  # sequence size, not needed here
        while grammar_encoding[pos]:
            if grammar_encoding[pos] == REF_RULE_MARKER:
                ref_rule_id = grammar_encoding[pos + 1]
                print(
                    f"<{pos}>{symbol_id_names[ref_rule_id]}",
                    end=" ",
                    file=file,
                )
                pos += 2
            else:
                # A character-class symbol: count followed by range endpoints
                print("<{}>[".format(pos), end="", file=file)
                num_chars = grammar_encoding[pos]
                pos += 1
                for i in range(0, num_chars, 2):
                    print("{}-".format(chr(grammar_encoding[pos + i])), end="", file=file)
                    if i + 1 < num_chars:
                        print("{}".format(chr(grammar_encoding[pos + i + 1])), end="", file=file)
                print("]", end=" ", file=file)
                pos += num_chars
        pos += 1
    print(file=file)
    return pos + 1
def print_grammar(file, state):
    """Pretty-print the parsed grammar rules and their hex binary encoding to `file`."""
    pos = 0
    symbol_id_names = {v: k for k, v in state.symbol_ids.items()}
    print("Grammar Rules:", file=file)
    while state.grammar_encoding[pos] != 0xFFFF:
        pos = print_rule(file, state.grammar_encoding, pos, symbol_id_names)
    pos = 0
    print("\nBinary representation:", file=file)
    while state.grammar_encoding[pos] != 0xFFFF:
        print(f"{state.grammar_encoding[pos]:04x}", end=" ", file=file)
        pos += 1
    # Bug fix: the trailing sentinel was printed to stdout instead of `file`
    print("ffff\n", file=file)
###################################
# EBNF Grammar Parsing ends here #
###################################
class GrammarConstraint(ABC):
    def __init__(self, grammar_str, start_rule_name, tokenizer):
        """Parse `grammar_str` and index rule offsets for constrained decoding.

        Builds a TokenTrie over the tokenizer's vocabulary and records, for
        each rule id, its position inside the flat grammar encoding so rules
        can be resolved in O(1) while advancing parse stacks.
        """
        self.tt = 0
        self.nt = 0
        state = parse_ebnf(grammar_str)
        grammar_encoding = state.grammar_encoding
        self.start_rule_id = state.symbol_ids.get(start_rule_name)
        self.eos_token_id = tokenizer.eos_token_id
        self.token_trie = TokenTrie(tokenizer)
        self.tokenizer = tokenizer
        self.grammar_encoding = grammar_encoding
        pos = 0
        rules: Dict[int, int] = {}
        while grammar_encoding[pos] != 0xFFFF:
            rule_id = grammar_encoding[pos]
            # Store the current position in the 'rules' list at the index corresponding to rule_id.
            # This effectively maps each rule_id to its position in the grammar encoding.
            rules[rule_id] = pos
            pos += 1
            # Continue to the next rule in the encoding.
            # The loop advances by the size indicated at the current position (grammar_encoding[pos])
            # plus one for the size field itself.
            while grammar_encoding[pos]:
                pos += 1 + grammar_encoding[pos]
            # Now we're at the end of the rule,
            # so advance to the next rule by skipping the 0, which means 'end of rule'.
            pos += 1
        self.start_rule_pos = rules[self.start_rule_id]
        self.rules_pos_dict: Dict[int, int] = rules
def init_stacks(self):
# suppose the start rule position is 0, then grammar_encoding[0] = rule_id
# grammar_encoding[1] = rule_size
# grammar_encoding[2] = rule_type
# this is why we need to add 2 to the start rule position
stack = [self.start_rule_pos + 2]
# convert to tuple for caching(immutable)
return self.advance_stack(tuple(stack))
# For each stack, resolve rules to find the actual characters that are
# accepted by this stack (not the set of sub-rules).
# This is where the parsing happens.
# The parsing is a top-down, left-to-right, depth-first traversal of the
# grammar.
@lru_cache(maxsize=32768)
def advance_stack(self, stack):
stack = list(stack)
# If the stack is empty, we're done. Because no more tokens should be accepted.
if len(stack) == 0:
return [stack]
# Get the top of the stack.
pos = stack[-1]
# If the stack head is a terminal(literal), we can resolve it immediately.
# literal is marked with 2 in the grammar encoding.
if self.grammar_encoding[pos] > 1:
return [stack]
# The stack head is a nonterminal (a rule reference, 1 in the grammar encoding).
# Resolving this rule gives a set of one or more possible positions
# (e.g. two in `a ::= b | c`)
# We pop the current rule off the stack and, for each option, push:
# - the symbol following this symbol in the current rule; then
# - the first symbol of the resolved rule.
referenced_rule_id = self.grammar_encoding[pos + 1]
# subpos should points to the size of the subrule
subpos = self.rules_pos_dict[referenced_rule_id] + 1
stacks: List[List[int]] = []
# do depth-first search to find all possible rules and check the next terminal
# When this value is non-zero, it indicates that subpos is not yet at the end of the rule, so we can continue.
# here subpos is a pointer, and the value in the rule encoding can never be 0 except for the end of the rule.
while self.grammar_encoding[subpos]:
new_stack = stack[:-1]
if self.grammar_encoding[pos + 2]:
# check if there is a next symbol in the current rule, e.g. `a ::= b c | d`
# if yes, push the pos to rule_size to the stack
new_stack.append(pos + 2)
# if the type of the next symbol is not "empty", push the first symbol of the resolved rule to the stack
if self.grammar_encoding[subpos + 1]:
new_stack.append(subpos + 1)
stacks.extend(self.advance_stack(tuple(new_stack)))
# The increment subpos += self.grammar_encoding[subpos] + 1
# moves subpos forward in the grammar encoding array to the next alternative in the current rule.
subpos += self.grammar_encoding[subpos] + 1
return stacks
def accept_char(self, *args, **kwargs):
raise NotImplementedError
def accept_token_id(self, *args, **kwargs):
raise NotImplementedError
def filter_vocab(self, *args, **kwargs):
raise NotImplementedError
class IncrementalGrammarConstraint(GrammarConstraint):
def __init__(self, grammar_str, start_rule_name, tokenizer):
super().__init__(grammar_str, start_rule_name, tokenizer)
def accept_char(self, char, stacks):
byte = char if isinstance(char, int) else ord(char)
new_stacks = []
for stack in stacks:
# stack is empty
if not stack:
continue
pos = stack[-1]
num_chars = self.grammar_encoding[pos]
# to make pos point to the size of the char range rule
pos += 1
found = False
for i in range(0, num_chars, 2):
if self.grammar_encoding[pos + i] <= byte and byte <= self.grammar_encoding[pos + i + 1]:
found = True
break
if self.grammar_encoding[pos + i] >= byte and byte >= self.grammar_encoding[pos + i + 1]:
found = True
break
if not found:
continue
pos += num_chars
new_stack = stack[:-1]
if self.grammar_encoding[pos]:
new_stack.append(pos)
new_stacks.extend(self.advance_stack(tuple(new_stack)))
return new_stacks
def accept_string(self, string: str, stacks: List[List[int]]):
for char in string:
stacks = self.accept_char(char, stacks)
return stacks
def accept_token_id(self, token_id: int, stacks: List[List[int]]):
if token_id == self.eos_token_id:
if stacks and all(len(stack) != 0 for stack in stacks):
raise Exception(
f"At least one of the stack should be empty when EOS is reached. However, "
f"the stacks are {stacks}"
)
return []
for byte in self.token_trie.id2str(token_id):
stacks = self.accept_char(byte, stacks)
# check updated stacks
# TODO, I commented this out because it will fail when the stack is empty
# empty stack means the end of the grammar
# assert stacks != []
return stacks
def accept_token_ids(self, token_ids: List[int], stacks: List[List[int]], as_string=True):
if as_string:
string = self.tokenizer.decode(token_ids)
stacks = self.accept_string(string, stacks)
else:
for token_id in token_ids:
stacks = self.accept_token_id(token_id, stacks)
return stacks
def batch_filter_vocab(self, batch_stacks, device):
batch_acceptance = []
for stacks in batch_stacks:
batch_acceptance.append(self.filter_vocab(stacks, device))
return torch.stack(batch_acceptance)
def filter_vocab(self, stacks, device):
if not stacks: # Check if stacks is empty
# Handle the empty case: for example, return a tensor of False
# The size of the tensor should match the size of your vocabulary
vocab_size = len(self.token_trie)
logger.debug(f"sum of acceptance: {0}")
return torch.zeros(vocab_size, dtype=torch.bool, device=device)
acceptance_matrix = torch.cat([self.token_acceptance_for_stack(tuple(stack), device) for stack in stacks])
# Merge stacks: any True => True
acceptance = acceptance_matrix.reshape(len(stacks), -1).any(dim=0)
logger.debug(f"sum of acceptance: {acceptance.sum()}")
return acceptance
# For each sub-rule in the grammar, cache whether each byte is accepted.
@lru_cache(maxsize=None)
def pos_char_acceptance(self, pos, char):
byte = char if isinstance(char, int) else ord(char)
num_chars = self.grammar_encoding[pos]
pos += 1
for i in range(0, num_chars, 2):
start = self.grammar_encoding[pos + i]
end = self.grammar_encoding[pos + i + 1]
if byte >= start and byte <= end:
return True
if byte <= start and byte >= end:
return True
return False
# Probably this should be configurable. If the grammar has an exceedingly
# large number of states, the correct setting is a tradeoff between GPU
# RAM usage and recomputation time.
#
# The main variable that pushes usage up here is number of states in the
# grammar.
@lru_cache(maxsize=32768)
def token_acceptance_for_stack(self, stack, device):
st = time.time()
stack = list(stack) # needs to come in as a tuple for lru_cache
accepts = [False] * len(self.token_trie)
accepts[self.eos_token_id] = len(stack) == 0
if len(stack) == 0:
logger.debug("empty stack")
def traverse_trie(trie, stacks):
for byte, next_trie in trie.items():
if byte == LEAF:
token_id = next_trie
if token_id != self.eos_token_id:
accepts[token_id] = bool(stacks)
continue
new_stacks = []
for stk in stacks:
if not stk:
continue
pos = stk[-1]
num_chars = self.grammar_encoding[pos]
if not self.pos_char_acceptance(pos, byte):
continue
pos += num_chars + 1
new_stack = stk[:-1]
if self.grammar_encoding[pos]:
new_stack.append(pos)
new_stacks.extend(self.advance_stack(tuple(new_stack)))
if new_stacks:
traverse_trie(next_trie, new_stacks)
traverse_trie(self.token_trie.trie, [stack])
et = time.time() - st
x = torch.tensor(accepts, dtype=torch.bool, device=device)
self.tt += et
self.nt += 1
return x
class StaticGrammarConstraint(GrammarConstraint):
def __init__(self, grammar_str, start_rule_name, tokenizer):
super().__init__(grammar_str, start_rule_name, tokenizer)
def accept_char(self):
raise NotImplementedError
#################
# DATA STRUCTURES
#################
LEAF = -1
class TokenTrie:
def __init__(self, tokenizer):
self.eos_token_id = tokenizer.eos_token_id
self.tokens = []
self.trie = {}
self.load_tokens(tokenizer)
def id2str(self, token_id):
return self.tokens[token_id]
def __len__(self):
return len(self.tokens)
def load_tokens(self, tokenizer):
def replace_hex(match):
hex_value = match.group(1)
return chr(int(hex_value, 16))
if "gpt2" in tokenizer.__class__.__name__.lower():
special = tokenizer.additional_special_tokens_ids
# Here, the decoder does a string replace on a bunch of sequences
# like ' .' for '.'. This interferes with our assumptions, where a
# token should always have exactly one representation.
# Fortunately(?) text-generation-inference doesn't seem to run this
# cleanup, so we get extraneous spaces. So, in order to generate
# the right token set for TGI, we have to skip the space trimming.
# See:
# https://github.com/huggingface/transformers/blob/main/src/transformers/tokenization_utils_base.py#L3588-L3600
def fmt_token(id):
if id in special:
return None
return bytes(tokenizer.decode([id], clean_up_tokenization_spaces=False), "utf-8")
elif "llama" in tokenizer.__class__.__name__.lower():
def fmt_token(id):
token = tokenizer.convert_ids_to_tokens(id)
token = re.sub(r"<0x([0-9a-fA-F]{2})>", replace_hex, token)
token = token.replace("▁", " ")
return token
else:
print("Warning: unrecognized tokenizer: using default token formatting")
def fmt_token(id):
token = tokenizer.convert_ids_to_tokens(id)
return token
# note: vocab_size doesn't work here because there are also
# get_added_vocab() tokens
self.tokens = [fmt_token(i) for i in range(len(tokenizer.get_vocab()))]
for token_id, token_bytes in enumerate(self.tokens):
if token_bytes is not None:
self.insert_into_trie(self.trie, token_bytes, token_id)
def insert_into_trie(self, trie, token_bytes, token_id):
current = trie
for byte in token_bytes:
if byte not in current:
current[byte] = {}
current = current[byte]
current[LEAF] = token_id
@lru_cache(maxsize=5)
def initialize_grammar(grammar_string):
return IncrementalGrammarConstraint(grammar_string.strip(), start_rule_name="root", tokenizer=shared.tokenizer) | --- +++ @@ -1,3 +1,12 @@+'''
+This file has been 100% copied from this PR to the Transformers library:
+https://github.com/huggingface/transformers/pull/27557
+
+Author: Saibo-creator
+Author GitHub: https://github.com/Saibo-creator
+
+All credits go to the author.
+'''
import logging
import re
@@ -55,6 +64,21 @@
def remove_leading_white_space(src, newline_ok):
+ """
+ Skips over whitespace and comments in the input string.
+ This function processes the input string, skipping over any spaces, tabs,
+ and content following a '#' character, which denotes a comment. The parsing
+ of a comment continues until the end of the line (denoted by newline characters
+ '\r' or '\n'). If the 'newline_ok' parameter is set to False, the function
+ will stop processing and return the remaining string upon encountering a
+ newline character, otherwise it will skip over newline characters as well.
+ Parameters:
+ src (str): The input string to be processed.
+ newline_ok (bool): A flag indicating whether encountering a newline character
+ should stop the parsing (False) or if it should be skipped (True).
+ Returns:
+ str: The remaining portion of the input string after skipping whitespace and comments.
+ """
pos = 0
while pos < len(src) and (src[pos].isspace() or src[pos] == "#"):
if src[pos] == "#":
@@ -84,6 +108,11 @@
def parse_char(src):
+ """
+ parse the leading char from the input string
+ :param src:
+ :return: char, remaining_src
+ """
# if we have a backslash, it's maybe an escape
if src[0] == "\\":
@@ -418,9 +447,11 @@ return stacks
def accept_char(self, *args, **kwargs):
+ """Process a byte according to the grammar rules."""
raise NotImplementedError
def accept_token_id(self, *args, **kwargs):
+ """Process a token according to the grammar rules."""
raise NotImplementedError
def filter_vocab(self, *args, **kwargs):
@@ -666,4 +697,4 @@
@lru_cache(maxsize=5)
def initialize_grammar(grammar_string):
- return IncrementalGrammarConstraint(grammar_string.strip(), start_rule_name="root", tokenizer=shared.tokenizer)+ return IncrementalGrammarConstraint(grammar_string.strip(), start_rule_name="root", tokenizer=shared.tokenizer)
| https://raw.githubusercontent.com/oobabooga/text-generation-webui/HEAD/modules/grammar/grammar_utils.py |
Add structured docstrings to improve clarity | import torch
from modules import chat, shared
from modules.text_generation import (
decode,
encode,
generate_reply,
)
from transformers import LogitsProcessor
import gradio as gr
params = {
"display_name": "Long replies",
"is_tab": False,
"min_length": 120,
}
initial_size = 0
class MyLogits(LogitsProcessor):
def __init__(self):
self.newline_id = shared.tokenizer.encode('\n')[-1]
pass
def __call__(self, input_ids, scores):
if input_ids.shape[-1] - initial_size < params["min_length"]:
scores[...,self.newline_id] = -1000
# scores[...,shared.tokenizer.eos_token_id] = -1000
# probs = torch.softmax(scores, dim=-1, dtype=torch.float)
# probs[0] /= probs[0].sum()
# scores = torch.log(probs / (1 - probs))
return scores
def history_modifier(history):
return history
def state_modifier(state):
return state
def chat_input_modifier(text, visible_text, state):
return text, visible_text
def input_modifier(string, state):
return string
def bot_prefix_modifier(string, state):
return string
def tokenizer_modifier(state, prompt, input_ids, input_embeds):
global initial_size
initial_size = input_ids.shape[-1]
return prompt, input_ids, input_embeds
def logits_processor_modifier(processor_list, input_ids):
processor_list.append(MyLogits())
return processor_list
def output_modifier(string, state):
return string
def custom_generate_chat_prompt(user_input, state, **kwargs):
result = chat.generate_chat_prompt(user_input, state, **kwargs)
return result
def custom_css():
return ''
def custom_js():
return ''
def setup():
pass
def ui():
min_length = gr.Slider(0, 800, step=10, value=params['min_length'], label='Minimum reply length')
min_length.change(lambda x: params.update({'min_length': x}), min_length, None) | --- +++ @@ -17,6 +17,10 @@ initial_size = 0
class MyLogits(LogitsProcessor):
+ """
+ Manipulates the probabilities for the next token before it gets sampled.
+ Used in the logits_processor_modifier function below.
+ """
def __init__(self):
self.newline_id = shared.tokenizer.encode('\n')[-1]
pass
@@ -32,21 +36,49 @@ return scores
def history_modifier(history):
+ """
+ Modifies the chat history.
+ Only used in chat mode.
+ """
return history
def state_modifier(state):
+ """
+ Modifies the state variable, which is a dictionary containing the input
+ values in the UI like sliders and checkboxes.
+ """
return state
def chat_input_modifier(text, visible_text, state):
+ """
+ Modifies the user input string in chat mode (visible_text).
+ You can also modify the internal representation of the user
+ input (text) to change how it will appear in the prompt.
+ """
return text, visible_text
def input_modifier(string, state):
+ """
+ In default/notebook modes, modifies the whole prompt.
+
+ In chat mode, it is the same as chat_input_modifier but only applied
+ to "text", here called "string", and not to "visible_text".
+ """
return string
def bot_prefix_modifier(string, state):
+ """
+ Modifies the prefix for the next bot reply in chat mode.
+ By default, the prefix will be something like "Bot Name:".
+ """
return string
def tokenizer_modifier(state, prompt, input_ids, input_embeds):
+ """
+ Modifies the input ids and embeds.
+ Used by the multimodal extension to put image embeddings in the prompt.
+ Only used by loaders that use the transformers library for sampling.
+ """
global initial_size
initial_size = input_ids.shape[-1]
@@ -54,26 +86,58 @@ return prompt, input_ids, input_embeds
def logits_processor_modifier(processor_list, input_ids):
+ """
+ Adds logits processors to the list, allowing you to access and modify
+ the next token probabilities.
+ Only used by loaders that use the transformers library for sampling.
+ """
processor_list.append(MyLogits())
return processor_list
def output_modifier(string, state):
+ """
+ Modifies the LLM output before it gets presented.
+
+ In chat mode, the modified version goes into history['visible'],
+ and the original version goes into history['internal'].
+ """
return string
def custom_generate_chat_prompt(user_input, state, **kwargs):
+ """
+ Replaces the function that generates the prompt from the chat history.
+ Only used in chat mode.
+ """
result = chat.generate_chat_prompt(user_input, state, **kwargs)
return result
def custom_css():
+ """
+ Returns a CSS string that gets appended to the CSS for the webui.
+ """
return ''
def custom_js():
+ """
+ Returns a javascript string that gets appended to the javascript
+ for the webui.
+ """
return ''
def setup():
+ """
+ Gets executed only once, when the extension is imported.
+ """
pass
def ui():
+ """
+ Gets executed when the UI is drawn. Custom gradio elements and
+ their corresponding event handlers should be defined here.
+
+ To learn about gradio components, check out the docs:
+ https://gradio.app/docs/
+ """
min_length = gr.Slider(0, 800, step=10, value=params['min_length'], label='Minimum reply length')
- min_length.change(lambda x: params.update({'min_length': x}), min_length, None)+ min_length.change(lambda x: params.update({'min_length': x}), min_length, None)
| https://raw.githubusercontent.com/oobabooga/text-generation-webui/HEAD/extensions/long_replies/script.py |
Add docstrings to incomplete code | import asyncio
import json
import logging
import os
import socket
import threading
import traceback
from collections import deque
from threading import Thread
import uvicorn
from fastapi import Depends, FastAPI, Header, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.requests import Request
from fastapi.responses import JSONResponse
from pydub import AudioSegment
from sse_starlette import EventSourceResponse
from starlette.concurrency import iterate_in_threadpool
import extensions.openai.completions as OAIcompletions
import extensions.openai.logits as OAIlogits
import extensions.openai.models as OAImodels
from extensions.openai.tokens import token_count, token_decode, token_encode
from extensions.openai.errors import OpenAIError
from extensions.openai.utils import _start_cloudflared
from modules import shared
from modules.logging_colors import logger
from modules.models import unload_model
from modules.text_generation import stop_everything_event # used by /v1/internal/stop-generation
from .typing import (
ChatCompletionRequest,
ChatCompletionResponse,
ChatPromptResponse,
CompletionRequest,
CompletionResponse,
DecodeRequest,
DecodeResponse,
EmbeddingsRequest,
EmbeddingsResponse,
EncodeRequest,
EncodeResponse,
ImageGenerationRequest,
ImageGenerationResponse,
LoadLorasRequest,
LoadModelRequest,
LogitsRequest,
LogitsResponse,
LoraListResponse,
ModelInfoResponse,
ModelListResponse,
TokenCountResponse,
to_dict
)
params = {
'embedding_device': 'cpu',
'embedding_model': 'sentence-transformers/all-mpnet-base-v2',
'debug': 0
}
async def _wait_for_disconnect(request: Request, stop_event: threading.Event):
while True:
message = await request.receive()
if message["type"] == "http.disconnect":
stop_event.set()
return
def verify_api_key(authorization: str = Header(None)) -> None:
expected_api_key = shared.args.api_key
if expected_api_key and (authorization is None or authorization != f"Bearer {expected_api_key}"):
raise HTTPException(status_code=401, detail="Unauthorized")
def verify_admin_key(authorization: str = Header(None)) -> None:
expected_api_key = shared.args.admin_key
if expected_api_key and (authorization is None or authorization != f"Bearer {expected_api_key}"):
raise HTTPException(status_code=401, detail="Unauthorized")
app = FastAPI()
check_key = [Depends(verify_api_key)]
check_admin_key = [Depends(verify_admin_key)]
# Configure CORS settings to allow all origins, methods, and headers
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"]
)
@app.exception_handler(OpenAIError)
async def openai_error_handler(request: Request, exc: OpenAIError):
error_type = "server_error" if exc.code >= 500 else "invalid_request_error"
return JSONResponse(
status_code=exc.code,
content={"error": {
"message": exc.message,
"type": error_type,
"param": getattr(exc, 'param', None),
"code": None
}}
)
@app.middleware("http")
async def validate_host_header(request: Request, call_next):
# Be strict about only approving access to localhost by default
if not (shared.args.listen or shared.args.public_api):
host = request.headers.get("host", "").split(":")[0]
if host not in ["localhost", "127.0.0.1"]:
return JSONResponse(
status_code=400,
content={"detail": "Invalid host header"}
)
return await call_next(request)
@app.options("/", dependencies=check_key)
async def options_route():
return JSONResponse(content="OK")
@app.post('/v1/completions', response_model=CompletionResponse, dependencies=check_key)
async def openai_completions(request: Request, request_data: CompletionRequest):
path = request.url.path
is_legacy = "/generate" in path
if request_data.stream:
if (request_data.n or 1) > 1:
return JSONResponse(
status_code=400,
content={"error": {"message": "n > 1 is not supported with streaming.", "type": "invalid_request_error", "param": "n", "code": None}}
)
stop_event = threading.Event()
async def generator():
response = OAIcompletions.stream_completions(to_dict(request_data), is_legacy=is_legacy, stop_event=stop_event)
try:
async for resp in iterate_in_threadpool(response):
disconnected = await request.is_disconnected()
if disconnected:
break
yield {"data": json.dumps(resp)}
yield {"data": "[DONE]"}
finally:
stop_event.set()
response.close()
return EventSourceResponse(generator(), sep="\n") # SSE streaming
else:
stop_event = threading.Event()
monitor = asyncio.create_task(_wait_for_disconnect(request, stop_event))
try:
response = await asyncio.to_thread(
OAIcompletions.completions,
to_dict(request_data),
is_legacy=is_legacy,
stop_event=stop_event
)
finally:
stop_event.set()
monitor.cancel()
return JSONResponse(response)
@app.post('/v1/chat/completions', response_model=ChatCompletionResponse, dependencies=check_key)
async def openai_chat_completions(request: Request, request_data: ChatCompletionRequest):
path = request.url.path
is_legacy = "/generate" in path
if request_data.stream:
stop_event = threading.Event()
async def generator():
response = OAIcompletions.stream_chat_completions(to_dict(request_data), is_legacy=is_legacy, stop_event=stop_event)
try:
async for resp in iterate_in_threadpool(response):
disconnected = await request.is_disconnected()
if disconnected:
break
yield {"data": json.dumps(resp)}
yield {"data": "[DONE]"}
finally:
stop_event.set()
response.close()
return EventSourceResponse(generator(), sep="\n") # SSE streaming
else:
stop_event = threading.Event()
monitor = asyncio.create_task(_wait_for_disconnect(request, stop_event))
try:
response = await asyncio.to_thread(
OAIcompletions.chat_completions,
to_dict(request_data),
is_legacy=is_legacy,
stop_event=stop_event
)
finally:
stop_event.set()
monitor.cancel()
return JSONResponse(response)
@app.get("/v1/models", dependencies=check_key)
@app.get("/v1/models/{model}", dependencies=check_key)
async def handle_models(request: Request):
path = request.url.path
is_list = request.url.path.split('?')[0].split('#')[0] == '/v1/models'
if is_list:
response = OAImodels.list_models_openai_format()
else:
model_name = path[len('/v1/models/'):]
response = OAImodels.model_info_dict(model_name)
return JSONResponse(response)
@app.get('/v1/billing/usage', dependencies=check_key)
def handle_billing_usage():
return JSONResponse(content={"total_usage": 0})
@app.post('/v1/audio/transcriptions', dependencies=check_key)
async def handle_audio_transcription(request: Request):
import speech_recognition as sr
r = sr.Recognizer()
form = await request.form()
audio_file = await form["file"].read()
audio_data = AudioSegment.from_file(audio_file)
# Convert AudioSegment to raw data
raw_data = audio_data.raw_data
# Create AudioData object
audio_data = sr.AudioData(raw_data, audio_data.frame_rate, audio_data.sample_width)
whisper_language = form.getvalue('language', None)
whisper_model = form.getvalue('model', 'tiny') # Use the model from the form data if it exists, otherwise default to tiny
transcription = {"text": ""}
try:
transcription["text"] = r.recognize_whisper(audio_data, language=whisper_language, model=whisper_model)
except sr.UnknownValueError:
print("Whisper could not understand audio")
transcription["text"] = "Whisper could not understand audio UnknownValueError"
except sr.RequestError as e:
print("Could not request results from Whisper", e)
transcription["text"] = "Whisper could not understand audio RequestError"
return JSONResponse(content=transcription)
@app.post('/v1/images/generations', response_model=ImageGenerationResponse, dependencies=check_key)
async def handle_image_generation(request_data: ImageGenerationRequest):
import extensions.openai.images as OAIimages
response = await asyncio.to_thread(OAIimages.generations, request_data)
return JSONResponse(response)
@app.post("/v1/embeddings", response_model=EmbeddingsResponse, dependencies=check_key)
async def handle_embeddings(request: Request, request_data: EmbeddingsRequest):
import extensions.openai.embeddings as OAIembeddings
input = request_data.input
if not input:
raise HTTPException(status_code=400, detail="Missing required argument input")
if type(input) is str:
input = [input]
response = OAIembeddings.embeddings(input, request_data.encoding_format)
return JSONResponse(response)
@app.post("/v1/moderations", dependencies=check_key)
async def handle_moderations(request: Request):
import extensions.openai.moderations as OAImoderations
body = await request.json()
input = body["input"]
if not input:
raise HTTPException(status_code=400, detail="Missing required argument input")
response = OAImoderations.moderations(input)
return JSONResponse(response)
@app.get("/v1/internal/health", dependencies=check_key)
async def handle_health_check():
return JSONResponse(content={"status": "ok"})
@app.post("/v1/internal/encode", response_model=EncodeResponse, dependencies=check_key)
async def handle_token_encode(request_data: EncodeRequest):
response = token_encode(request_data.text)
return JSONResponse(response)
@app.post("/v1/internal/decode", response_model=DecodeResponse, dependencies=check_key)
async def handle_token_decode(request_data: DecodeRequest):
response = token_decode(request_data.tokens)
return JSONResponse(response)
@app.post("/v1/internal/token-count", response_model=TokenCountResponse, dependencies=check_key)
async def handle_token_count(request_data: EncodeRequest):
response = token_count(request_data.text)
return JSONResponse(response)
@app.post("/v1/internal/logits", response_model=LogitsResponse, dependencies=check_key)
async def handle_logits(request_data: LogitsRequest):
response = OAIlogits._get_next_logits(to_dict(request_data))
return JSONResponse(response)
@app.post('/v1/internal/chat-prompt', response_model=ChatPromptResponse, dependencies=check_key)
async def handle_chat_prompt(request: Request, request_data: ChatCompletionRequest):
path = request.url.path
is_legacy = "/generate" in path
generator = OAIcompletions.chat_completions_common(to_dict(request_data), is_legacy=is_legacy, prompt_only=True)
response = deque(generator, maxlen=1).pop()
return JSONResponse(response)
@app.post("/v1/internal/stop-generation", dependencies=check_key)
async def handle_stop_generation(request: Request):
stop_everything_event()
return JSONResponse(content="OK")
@app.get("/v1/internal/model/info", response_model=ModelInfoResponse, dependencies=check_key)
async def handle_model_info():
payload = OAImodels.get_current_model_info()
return JSONResponse(content=payload)
@app.get("/v1/internal/model/list", response_model=ModelListResponse, dependencies=check_admin_key)
async def handle_list_models():
payload = OAImodels.list_models()
return JSONResponse(content=payload)
@app.post("/v1/internal/model/load", dependencies=check_admin_key)
async def handle_load_model(request_data: LoadModelRequest):
try:
OAImodels._load_model(to_dict(request_data))
return JSONResponse(content="OK")
except Exception:
traceback.print_exc()
raise HTTPException(status_code=400, detail="Failed to load the model.")
@app.post("/v1/internal/model/unload", dependencies=check_admin_key)
async def handle_unload_model():
unload_model()
@app.get("/v1/internal/lora/list", response_model=LoraListResponse, dependencies=check_admin_key)
async def handle_list_loras():
response = OAImodels.list_loras()
return JSONResponse(content=response)
@app.post("/v1/internal/lora/load", dependencies=check_admin_key)
async def handle_load_loras(request_data: LoadLorasRequest):
try:
OAImodels.load_loras(request_data.lora_names)
return JSONResponse(content="OK")
except Exception:
traceback.print_exc()
raise HTTPException(status_code=400, detail="Failed to apply the LoRA(s).")
@app.post("/v1/internal/lora/unload", dependencies=check_admin_key)
async def handle_unload_loras():
OAImodels.unload_all_loras()
return JSONResponse(content="OK")
def find_available_port(starting_port):
try:
# Try to create a socket with the starting port
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(('', starting_port))
return starting_port
except OSError:
# Port is already in use, so find a new one
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(('', 0)) # Bind to port 0 to get an available port
new_port = s.getsockname()[1]
logger.warning(f"Port {starting_port} is already in use. Using port {new_port} instead.")
return new_port
def run_server():
# Parse configuration
port = int(os.environ.get('OPENEDAI_PORT', shared.args.api_port))
port = find_available_port(port)
ssl_certfile = os.environ.get('OPENEDAI_CERT_PATH', shared.args.ssl_certfile)
ssl_keyfile = os.environ.get('OPENEDAI_KEY_PATH', shared.args.ssl_keyfile)
# In the server configuration:
server_addrs = []
if shared.args.listen and shared.args.listen_host:
server_addrs.append(shared.args.listen_host)
else:
if os.environ.get('OPENEDAI_ENABLE_IPV6', shared.args.api_enable_ipv6):
server_addrs.append('[::]' if shared.args.listen else '[::1]')
if not os.environ.get('OPENEDAI_DISABLE_IPV4', shared.args.api_disable_ipv4):
server_addrs.append('0.0.0.0' if shared.args.listen else '127.0.0.1')
if not server_addrs:
raise Exception('you MUST enable IPv6 or IPv4 for the API to work')
# Log server information
if shared.args.public_api:
_start_cloudflared(
port,
shared.args.public_api_id,
max_attempts=3,
on_start=lambda url: logger.info(f'OpenAI-compatible API URL:\n\n{url}/v1\n')
)
else:
url_proto = 'https://' if (ssl_certfile and ssl_keyfile) else 'http://'
urls = [f'{url_proto}{addr}:{port}/v1' for addr in server_addrs]
if len(urls) > 1:
logger.info('OpenAI-compatible API URLs:\n\n' + '\n'.join(urls) + '\n')
else:
logger.info('OpenAI-compatible API URL:\n\n' + '\n'.join(urls) + '\n')
# Log API keys
if shared.args.api_key:
if not shared.args.admin_key:
shared.args.admin_key = shared.args.api_key
logger.info(f'OpenAI API key:\n\n{shared.args.api_key}\n')
if shared.args.admin_key and shared.args.admin_key != shared.args.api_key:
logger.info(f'OpenAI API admin key (for loading/unloading models):\n\n{shared.args.admin_key}\n')
# Start server
logging.getLogger("uvicorn.error").propagate = False
uvicorn.run(app, host=server_addrs, port=port, ssl_certfile=ssl_certfile, ssl_keyfile=ssl_keyfile, access_log=False)
def setup():
if shared.args.nowebui:
run_server()
else:
Thread(target=run_server, daemon=True).start() | --- +++ @@ -61,6 +61,7 @@
async def _wait_for_disconnect(request: Request, stop_event: threading.Event):
+ """Block until the client disconnects, then signal the stop_event."""
while True:
message = await request.receive()
if message["type"] == "http.disconnect":
@@ -234,6 +235,9 @@
@app.get('/v1/billing/usage', dependencies=check_key)
def handle_billing_usage():
+ '''
+ Ex. /v1/dashboard/billing/usage?start_date=2023-05-01&end_date=2023-05-31
+ '''
return JSONResponse(content={"total_usage": 0})
@@ -330,6 +334,10 @@
@app.post("/v1/internal/logits", response_model=LogitsResponse, dependencies=check_key)
async def handle_logits(request_data: LogitsRequest):
+ '''
+ Given a prompt, returns the top 50 most likely logits as a dict.
+ The keys are the tokens, and the values are the probabilities.
+ '''
response = OAIlogits._get_next_logits(to_dict(request_data))
return JSONResponse(response)
@@ -363,6 +371,32 @@
@app.post("/v1/internal/model/load", dependencies=check_admin_key)
async def handle_load_model(request_data: LoadModelRequest):
+ '''
+ This endpoint is experimental and may change in the future.
+
+ The "args" parameter can be used to modify flags like "--load-in-4bit"
+ or "--n-gpu-layers" before loading a model. Example:
+
+ ```
+ "args": {
+ "load_in_4bit": true,
+ "n_gpu_layers": 12
+ }
+ ```
+
+ Note that those settings will remain after loading the model. So you
+ may need to change them back to load a second model.
+
+ The "settings" parameter is also a dict but with keys for the
+ shared.settings object. It can be used to modify the default instruction
+ template like this:
+
+ ```
+ "settings": {
+ "instruction_template": "Alpaca"
+ }
+ ```
+ '''
try:
OAImodels._load_model(to_dict(request_data))
@@ -400,6 +434,7 @@
def find_available_port(starting_port):
+ """Try the starting port, then find an available one if it's taken."""
try:
# Try to create a socket with the starting port
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
@@ -469,4 +504,4 @@ if shared.args.nowebui:
run_server()
else:
- Thread(target=run_server, daemon=True).start()+ Thread(target=run_server, daemon=True).start()
| https://raw.githubusercontent.com/oobabooga/text-generation-webui/HEAD/extensions/openai/script.py |
Write docstrings for backend logic | import threading
import time
from pathlib import Path
import gradio as gr
from modules import logits, shared, ui, utils
from modules.prompts import count_tokens, load_prompt
from modules.text_generation import (
generate_reply_wrapper,
get_token_ids,
stop_everything_event
)
from modules.utils import gradio
_notebook_file_lock = threading.Lock()
_notebook_auto_save_timer = None
_last_notebook_text = None
_last_notebook_prompt = None
inputs = ('textbox-notebook', 'interface_state')
outputs = ('textbox-notebook', 'html-notebook')
def create_ui():
mu = shared.args.multi_user
with gr.Row(visible=not shared.settings['show_two_notebook_columns']) as shared.gradio['notebook-tab']:
shared.gradio['last_input-notebook'] = gr.State('')
with gr.Row():
with gr.Column(scale=4):
with gr.Tab('Raw'):
with gr.Row():
shared.gradio['textbox-notebook'] = gr.Textbox(label="", value="", lines=27, elem_id='textbox-notebook', elem_classes=['textbox', 'add_scrollbar'])
shared.gradio['token-counter-notebook'] = gr.HTML(value="<span>0</span>", elem_id="notebook-token-counter")
with gr.Tab('Markdown'):
shared.gradio['markdown_render-notebook'] = gr.Button('Render')
shared.gradio['markdown-notebook'] = gr.Markdown()
with gr.Tab('HTML'):
shared.gradio['html-notebook'] = gr.HTML()
with gr.Tab('Logits'):
with gr.Row():
with gr.Column(scale=10):
shared.gradio['get_logits-notebook'] = gr.Button('Get next token probabilities')
with gr.Column(scale=1):
shared.gradio['use_samplers-notebook'] = gr.Checkbox(label='Use samplers', value=True, elem_classes=['no-background'])
with gr.Row():
shared.gradio['logits-notebook'] = gr.Textbox(lines=23, label='Output', elem_classes=['textbox_logits_notebook', 'add_scrollbar'])
shared.gradio['logits-notebook-previous'] = gr.Textbox(lines=23, label='Previous output', elem_classes=['textbox_logits_notebook', 'add_scrollbar'])
with gr.Tab('Tokens'):
shared.gradio['get_tokens-notebook'] = gr.Button('Get token IDs for the input')
shared.gradio['tokens-notebook'] = gr.Textbox(lines=23, label='Tokens', elem_classes=['textbox_logits_notebook', 'add_scrollbar', 'monospace'])
with gr.Row():
shared.gradio['Undo'] = gr.Button('Undo', elem_classes='small-button')
shared.gradio['Regenerate-notebook'] = gr.Button('Regenerate', elem_classes='small-button')
shared.gradio['Stop-notebook'] = gr.Button('Stop', visible=False, elem_classes='small-button', elem_id='stop')
shared.gradio['Generate-notebook'] = gr.Button('Generate', variant='primary', elem_classes='small-button')
with gr.Column(scale=1):
gr.HTML('<div style="padding-bottom: 13px"></div>')
with gr.Row():
shared.gradio['prompt_menu-notebook'] = gr.Dropdown(choices=utils.get_available_prompts(), value=shared.settings['prompt-notebook'], label='Prompt', elem_classes='slim-dropdown')
with gr.Row():
ui.create_refresh_button(shared.gradio['prompt_menu-notebook'], lambda: None, lambda: {'choices': utils.get_available_prompts()}, ['refresh-button'], interactive=not mu)
shared.gradio['new_prompt-notebook'] = gr.Button('New', elem_classes=['refresh-button'], interactive=not mu)
shared.gradio['rename_prompt-notebook'] = gr.Button('Rename', elem_classes=['refresh-button'], interactive=not mu)
shared.gradio['delete_prompt-notebook'] = gr.Button('🗑️', elem_classes=['refresh-button'], interactive=not mu)
shared.gradio['delete_prompt-confirm-notebook'] = gr.Button('Confirm', variant='stop', elem_classes=['refresh-button'], visible=False)
shared.gradio['delete_prompt-cancel-notebook'] = gr.Button('Cancel', elem_classes=['refresh-button'], visible=False)
with gr.Row(visible=False) as shared.gradio['rename-row-notebook']:
shared.gradio['rename_prompt_to-notebook'] = gr.Textbox(label="New name", elem_classes=['no-background'])
shared.gradio['rename_prompt-cancel-notebook'] = gr.Button('Cancel', elem_classes=['refresh-button'])
shared.gradio['rename_prompt-confirm-notebook'] = gr.Button('Confirm', elem_classes=['refresh-button'], variant='primary')
def create_event_handlers():
shared.gradio['Generate-notebook'].click(
lambda x: x, gradio('textbox-notebook'), gradio('last_input-notebook')).then(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
lambda: [gr.update(visible=True), gr.update(visible=False)], None, gradio('Stop-notebook', 'Generate-notebook')).then(
generate_and_save_wrapper_notebook, gradio('textbox-notebook', 'interface_state', 'prompt_menu-notebook'), gradio(outputs), show_progress=False).then(
lambda state, text: state.update({'textbox-notebook': text}), gradio('interface_state', 'textbox-notebook'), None).then(
lambda: [gr.update(visible=False), gr.update(visible=True)], None, gradio('Stop-notebook', 'Generate-notebook')).then(
None, None, None, js=f'() => {{{ui.audio_notification_js}}}')
shared.gradio['textbox-notebook'].submit(
lambda x: x, gradio('textbox-notebook'), gradio('last_input-notebook')).then(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
lambda: [gr.update(visible=True), gr.update(visible=False)], None, gradio('Stop-notebook', 'Generate-notebook')).then(
generate_and_save_wrapper_notebook, gradio('textbox-notebook', 'interface_state', 'prompt_menu-notebook'), gradio(outputs), show_progress=False).then(
lambda state, text: state.update({'textbox-notebook': text}), gradio('interface_state', 'textbox-notebook'), None).then(
lambda: [gr.update(visible=False), gr.update(visible=True)], None, gradio('Stop-notebook', 'Generate-notebook')).then(
None, None, None, js=f'() => {{{ui.audio_notification_js}}}')
shared.gradio['Regenerate-notebook'].click(
lambda x: x, gradio('last_input-notebook'), gradio('textbox-notebook'), show_progress=False).then(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
lambda: [gr.update(visible=True), gr.update(visible=False)], None, gradio('Stop-notebook', 'Generate-notebook')).then(
generate_and_save_wrapper_notebook, gradio('textbox-notebook', 'interface_state', 'prompt_menu-notebook'), gradio(outputs), show_progress=False).then(
lambda state, text: state.update({'textbox-notebook': text}), gradio('interface_state', 'textbox-notebook'), None).then(
lambda: [gr.update(visible=False), gr.update(visible=True)], None, gradio('Stop-notebook', 'Generate-notebook')).then(
None, None, None, js=f'() => {{{ui.audio_notification_js}}}')
shared.gradio['Undo'].click(
lambda x: x, gradio('last_input-notebook'), gradio('textbox-notebook'), show_progress=False).then(
lambda state, text: state.update({'textbox-notebook': text}), gradio('interface_state', 'textbox-notebook'), None)
shared.gradio['markdown_render-notebook'].click(lambda x: x, gradio('textbox-notebook'), gradio('markdown-notebook'), queue=False)
shared.gradio['Stop-notebook'].click(stop_everything_event, None, None, queue=False)
shared.gradio['prompt_menu-notebook'].change(load_prompt, gradio('prompt_menu-notebook'), gradio('textbox-notebook'), show_progress=False)
shared.gradio['new_prompt-notebook'].click(handle_new_prompt, None, gradio('prompt_menu-notebook'), show_progress=False)
shared.gradio['delete_prompt-notebook'].click(
lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=True)],
None,
gradio('delete_prompt-notebook', 'delete_prompt-cancel-notebook', 'delete_prompt-confirm-notebook'),
show_progress=False)
shared.gradio['delete_prompt-cancel-notebook'].click(
lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)],
None,
gradio('delete_prompt-notebook', 'delete_prompt-cancel-notebook', 'delete_prompt-confirm-notebook'),
show_progress=False)
shared.gradio['delete_prompt-confirm-notebook'].click(
handle_delete_prompt_confirm_notebook,
gradio('prompt_menu-notebook'),
gradio('prompt_menu-notebook', 'delete_prompt-notebook', 'delete_prompt-cancel-notebook', 'delete_prompt-confirm-notebook'),
show_progress=False)
shared.gradio['rename_prompt-notebook'].click(
handle_rename_prompt_click_notebook,
gradio('prompt_menu-notebook'),
gradio('rename_prompt_to-notebook', 'rename_prompt-notebook', 'rename-row-notebook'),
show_progress=False)
shared.gradio['rename_prompt-cancel-notebook'].click(
lambda: [gr.update(visible=True), gr.update(visible=False)],
None,
gradio('rename_prompt-notebook', 'rename-row-notebook'),
show_progress=False)
shared.gradio['rename_prompt-confirm-notebook'].click(
handle_rename_prompt_confirm_notebook,
gradio('rename_prompt_to-notebook', 'prompt_menu-notebook'),
gradio('prompt_menu-notebook', 'rename_prompt-notebook', 'rename-row-notebook'),
show_progress=False)
shared.gradio['textbox-notebook'].input(lambda x: f"<span>{count_tokens(x)}</span>", gradio('textbox-notebook'), gradio('token-counter-notebook'), show_progress=False)
shared.gradio['textbox-notebook'].change(
store_notebook_state_and_debounce,
gradio('textbox-notebook', 'prompt_menu-notebook'),
None,
show_progress=False
)
shared.gradio['get_logits-notebook'].click(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
logits.get_next_logits, gradio('textbox-notebook', 'interface_state', 'use_samplers-notebook', 'logits-notebook'), gradio('logits-notebook', 'logits-notebook-previous'), show_progress=False)
shared.gradio['get_tokens-notebook'].click(get_token_ids, gradio('textbox-notebook'), gradio('tokens-notebook'), show_progress=False)
def generate_and_save_wrapper_notebook(textbox_content, interface_state, prompt_name):
last_save_time = time.monotonic()
save_interval = 8
output = textbox_content
# Initial autosave
safe_autosave_prompt(output, prompt_name)
for i, (output, html_output) in enumerate(generate_reply_wrapper(textbox_content, interface_state)):
yield output, html_output
current_time = time.monotonic()
# Save on first iteration or if save_interval seconds have passed
if i == 0 or (current_time - last_save_time) >= save_interval:
safe_autosave_prompt(output, prompt_name)
last_save_time = current_time
# Final autosave
safe_autosave_prompt(output, prompt_name)
def handle_new_prompt():
new_name = utils.current_time()
# Create the new prompt file
prompt_path = shared.user_data_dir / "logs" / "notebook" / f"{new_name}.txt"
prompt_path.parent.mkdir(parents=True, exist_ok=True)
prompt_path.write_text("In this story,", encoding='utf-8')
return gr.update(choices=utils.get_available_prompts(), value=new_name)
def handle_delete_prompt_confirm_notebook(prompt_name):
available_prompts = utils.get_available_prompts()
current_index = available_prompts.index(prompt_name) if prompt_name in available_prompts else 0
(shared.user_data_dir / "logs" / "notebook" / f"{prompt_name}.txt").unlink(missing_ok=True)
available_prompts = utils.get_available_prompts()
if available_prompts:
new_value = available_prompts[min(current_index, len(available_prompts) - 1)]
else:
new_value = utils.current_time()
(shared.user_data_dir / "logs" / "notebook").mkdir(parents=True, exist_ok=True)
(shared.user_data_dir / "logs" / "notebook" / f"{new_value}.txt").write_text("In this story,")
available_prompts = [new_value]
return [
gr.update(choices=available_prompts, value=new_value),
gr.update(visible=True),
gr.update(visible=False),
gr.update(visible=False)
]
def handle_rename_prompt_click_notebook(current_name):
return [
gr.update(value=current_name),
gr.update(visible=False),
gr.update(visible=True)
]
def handle_rename_prompt_confirm_notebook(new_name, current_name):
old_path = shared.user_data_dir / "logs" / "notebook" / f"{current_name}.txt"
new_path = shared.user_data_dir / "logs" / "notebook" / f"{new_name}.txt"
if old_path.exists() and not new_path.exists():
old_path.rename(new_path)
available_prompts = utils.get_available_prompts()
return [
gr.update(choices=available_prompts, value=new_name),
gr.update(visible=True),
gr.update(visible=False)
]
def autosave_prompt(text, prompt_name):
if prompt_name and text.strip():
prompt_path = shared.user_data_dir / "logs" / "notebook" / f"{prompt_name}.txt"
prompt_path.parent.mkdir(parents=True, exist_ok=True)
prompt_path.write_text(text, encoding='utf-8')
def safe_autosave_prompt(content, prompt_name):
with _notebook_file_lock:
autosave_prompt(content, prompt_name)
def store_notebook_state_and_debounce(text, prompt_name):
global _notebook_auto_save_timer, _last_notebook_text, _last_notebook_prompt
if shared.args.multi_user:
return
_last_notebook_text = text
_last_notebook_prompt = prompt_name
if _notebook_auto_save_timer is not None:
_notebook_auto_save_timer.cancel()
_notebook_auto_save_timer = threading.Timer(1.0, _perform_notebook_debounced_save)
_notebook_auto_save_timer.start()
def _perform_notebook_debounced_save():
try:
if _last_notebook_text is not None and _last_notebook_prompt is not None:
safe_autosave_prompt(_last_notebook_text, _last_notebook_prompt)
except Exception as e:
print(f"Notebook auto-save failed: {e}") | --- +++ @@ -169,6 +169,7 @@
def generate_and_save_wrapper_notebook(textbox_content, interface_state, prompt_name):
+ """Generate reply and automatically save the result for notebook mode with periodic saves"""
last_save_time = time.monotonic()
save_interval = 8
output = textbox_content
@@ -247,6 +248,7 @@
def autosave_prompt(text, prompt_name):
+ """Automatically save the text to the selected prompt file"""
if prompt_name and text.strip():
prompt_path = shared.user_data_dir / "logs" / "notebook" / f"{prompt_name}.txt"
prompt_path.parent.mkdir(parents=True, exist_ok=True)
@@ -254,11 +256,13 @@
def safe_autosave_prompt(content, prompt_name):
+ """Thread-safe wrapper for autosave_prompt to prevent file corruption"""
with _notebook_file_lock:
autosave_prompt(content, prompt_name)
def store_notebook_state_and_debounce(text, prompt_name):
+ """Store current notebook state and trigger debounced save"""
global _notebook_auto_save_timer, _last_notebook_text, _last_notebook_prompt
if shared.args.multi_user:
@@ -275,8 +279,9 @@
def _perform_notebook_debounced_save():
+ """Actually perform the notebook save using the stored state"""
try:
if _last_notebook_text is not None and _last_notebook_prompt is not None:
safe_autosave_prompt(_last_notebook_text, _last_notebook_prompt)
except Exception as e:
- print(f"Notebook auto-save failed: {e}")+ print(f"Notebook auto-save failed: {e}")
| https://raw.githubusercontent.com/oobabooga/text-generation-webui/HEAD/modules/ui_notebook.py |
Write proper docstrings for these functions | import json
import math
import pprint
import random
import torch
import transformers
from transformers.generation.logits_process import (
LogitNormalization,
LogitsProcessor,
LogitsProcessorList
)
from modules import shared
from modules.logging_colors import logger
from modules.torch_utils import get_device
original_init = transformers.GenerationConfig.__init__
original_get_logits_processor = transformers.GenerationMixin._get_logits_processor
global_scores = None
class TemperatureLogitsWarperCustom(LogitsProcessor):
def __init__(self, temperature: float):
if not isinstance(temperature, float) or not (temperature > 0):
except_msg = (
f"`temperature` (={temperature}) has to be a strictly positive float, otherwise your next token "
"scores will be invalid."
)
if isinstance(temperature, float) and temperature == 0.0:
except_msg += " If you're looking for greedy decoding strategies, set `do_sample=False`."
raise ValueError(except_msg)
self.temperature = temperature
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
scores = scores / self.temperature
return scores
class DynamicTemperatureLogitsWarper(LogitsProcessor):
def __init__(self, dynatemp_low: float, dynatemp_high: float, dynatemp_exponent: float):
self.dynatemp_low = dynatemp_low
self.dynatemp_high = dynatemp_high
self.dynatemp_exponent = dynatemp_exponent
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
min_temp = self.dynatemp_low
max_temp = self.dynatemp_high
exponent_val = self.dynatemp_exponent
# Convert logits to probabilities
probs = torch.softmax(scores, dim=-1)
# Calculate entropy of the softmax probabilities
entropy = -1.0 * torch.where(probs > 0, probs * torch.log(probs), torch.zeros_like(probs)).sum()
# Guard against future possible division by zero
entropy = max(entropy, torch.tensor(1e-10)) # Ensures entropy is slightly greater than 0
# Any logits which are not -Infinity will be considered for calculating max entropy.
num_valid_tokens = torch.sum(scores > -float('inf')).item()
# Now, calculate the max entropy by using only the valid tokens' count
max_entropy = math.log(num_valid_tokens)
# Guard against future possible division by zero
max_entropy = max_entropy if max_entropy > 0.0 else 1e-10
# Normalize the entropy
normalized_entropy = entropy / max_entropy
# Map the normalized entropy to the desired temperature range using the power function
dyn_temp = min_temp + (max_temp - min_temp) * (normalized_entropy.pow(exponent_val))
# Apply the dynamically calculated temperature scaling
scores = scores / dyn_temp
# print("----------------------\nTemperature from generation_config:", self.temperature)
# print("min_temp:", min_temp)
# print("max_temp:", max_temp)
# print("Entropy:", entropy.item())
# print("Max Possible Entropy considering valid tokens only:", max_entropy)
# print("Normalized Entropy:", normalized_entropy.item())
# print("Dynamic Temperature (dyn_temp):", dyn_temp.item())
# print("----------------------")
# max_prob_token_id = torch.argmax(scores, dim=-1) # Get the token ID with the highest probability
# max_prob_token = shared.tokenizer.convert_ids_to_tokens(int(max_prob_token_id)) # Convert ID to token
# print("--- T=", float(dyn_temp), "token=", max_prob_token, "min=", min_temp, "max=", max_temp, "exponent=", exponent_val)
return scores
class QuadraticSamplingLogitsWarper(LogitsProcessor):
def __init__(self, smoothing_factor, smoothing_curve):
self.smoothing_factor = smoothing_factor
self.smoothing_curve = smoothing_curve
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
# Compute necessary values
max_logit = scores.max()
diff = scores - max_logit
k = (3 - self.smoothing_curve) / 2
s = (self.smoothing_curve - 1) / 2
# Apply transformation to non-negative infinity values
transformed_logits = torch.where(
scores != float('-inf'),
-(k * self.smoothing_factor * diff**2) + (s * self.smoothing_factor * diff**3) + max_logit,
scores
)
return transformed_logits
class TailFreeLogitsWarper(LogitsProcessor):
def __init__(self, tfs: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
tfs = float(tfs)
if tfs < 0 or tfs > 1.0:
raise ValueError(f"`tfs` has to be a float >= 0 and <= 1, but is {tfs}")
self.tfs = tfs
self.filter_value = filter_value
self.min_tokens_to_keep = min_tokens_to_keep
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
sorted_logits, sorted_indices = torch.sort(scores, descending=True)
probs = sorted_logits.softmax(dim=-1)
# Compute second derivative normalized CDF
d2 = probs.diff().diff().abs()
normalized_d2 = d2 / d2.sum(dim=-1, keepdim=True)
normalized_d2_cdf = normalized_d2.cumsum(dim=-1)
# Remove tokens with CDF value above the threshold (token with 0 are kept)
sorted_indices_to_remove = normalized_d2_cdf > self.tfs
# Centre the distribution around the cutoff as in the original implementation of the algorithm
sorted_indices_to_remove = torch.cat(
(
torch.zeros(scores.shape[0], 1, dtype=torch.bool, device=scores.device),
sorted_indices_to_remove,
torch.ones(scores.shape[0], 1, dtype=torch.bool, device=scores.device),
),
dim=-1,
)
if self.min_tokens_to_keep > 1:
# Keep at least min_tokens_to_keep
sorted_indices_to_remove[..., : self.min_tokens_to_keep] = 0
indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
scores = scores.masked_fill(indices_to_remove, self.filter_value)
return scores
class TopALogitsWarper(LogitsProcessor):
def __init__(self, top_a: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
top_a = float(top_a)
if top_a < 0 or top_a > 1.0:
raise ValueError(f"`top_a` has to be a float >= 0 and <= 1, but is {top_a}")
self.top_a = top_a
self.filter_value = filter_value
self.min_tokens_to_keep = min_tokens_to_keep
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
sorted_logits, sorted_indices = torch.sort(scores, descending=True)
probs = sorted_logits.softmax(dim=-1)
# Remove tokens with probability less than top_a*(max(probs))^2 (token with 0 are kept)
probs_max = probs[..., 0, None]
sorted_indices_to_remove = probs < probs_max * probs_max * self.top_a
if self.min_tokens_to_keep > 1:
# Keep at least min_tokens_to_keep
sorted_indices_to_remove[..., : self.min_tokens_to_keep] = 0
indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
scores = scores.masked_fill(indices_to_remove, self.filter_value)
return scores
class TopNSigmaLogitsWarper(LogitsProcessor):
def __init__(self, n_sigma: float = 2.0, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
if n_sigma < 0:
raise ValueError(f"`n_sigma` must be a non-negative float, but is {n_sigma}")
self.n_sigma = n_sigma
self.filter_value = filter_value
self.min_tokens_to_keep = min_tokens_to_keep
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
# Calculate max of logits
max_logit = torch.max(scores, dim=-1, keepdim=True)[0]
# Calculate standard deviation only on finite values
finite_mask = torch.isfinite(scores)
finite_scores = scores.masked_fill(~finite_mask, 0.0)
std_logit = torch.std(finite_scores, dim=-1, keepdim=True)
# Create mask where tokens with logits >= max_logit - n_sigma * std_logit are kept
threshold = max_logit - self.n_sigma * std_logit
indices_to_remove = scores < threshold
if self.min_tokens_to_keep > 1:
# Keep at least min_tokens_to_keep tokens
top_k_indices = torch.topk(scores, self.min_tokens_to_keep, dim=-1)[1]
indices_to_remove.scatter_(-1, top_k_indices, False)
# Apply mask by setting filtered tokens to filter_value
scores = scores.masked_fill(indices_to_remove, self.filter_value)
return scores
class AdaptivePLogitsWarper(LogitsProcessor):
DISTRIBUTION_WIDTH = 0.3
PEAK_LOGIT_VALUE = 5.0
SHARPNESS = 10.0
INV_WIDTH = 1.0 / DISTRIBUTION_WIDTH
def __init__(self, adaptive_target, adaptive_decay, filter_value=-float("Inf"), min_tokens_to_keep=1):
self.target = adaptive_target
self.decay = min(adaptive_decay, 0.99)
self.filter_value = filter_value
self.min_tokens_to_keep = min_tokens_to_keep
# Initialize EMA at equilibrium (as if target was already achieved)
if self.decay < 1.0:
self.weighted_sum = self.target / (1.0 - self.decay)
self.total_weight = 1.0 / (1.0 - self.decay)
else:
self.weighted_sum = 0.0
self.total_weight = 0.0
def __call__(self, input_ids, scores):
logits = scores[0]
# Compute original probabilities (before transform)
probs = torch.softmax(logits, dim=-1)
# Compute adapted target using proportional control on the EMA
if self.total_weight > 0:
ema_avg = self.weighted_sum / self.total_weight
else:
ema_avg = self.target
adapted_target = max(0.0, min(1.0, 2.0 * self.target - ema_avg))
# Adaptive probability transform:
# quadratic near target for fine differentiation, transitioning
# to linear decay in the tails for proper suppression after softmax
dist = torch.abs((probs - adapted_target) * self.INV_WIDTH)
new_logits = self.PEAK_LOGIT_VALUE - self.SHARPNESS * dist * dist / (1.0 + dist)
# Preserve already-masked tokens (-inf logits from prior samplers)
new_logits = torch.where(torch.isfinite(logits), new_logits, logits)
# Softmax and sample from the transformed distribution
new_probs = torch.softmax(new_logits, dim=-1)
selected = torch.multinomial(new_probs, num_samples=1, replacement=True)
# Update EMA with the original probability of the selected token
original_prob = probs[selected[0]].item()
self.weighted_sum = original_prob + self.decay * self.weighted_sum
self.total_weight = 1.0 + self.decay * self.total_weight
# Mask all tokens except the selected one
indices_to_remove = torch.ones_like(scores[0], dtype=torch.bool)
indices_to_remove[selected[0]] = False
indices_to_remove = indices_to_remove.unsqueeze(0)
scores = scores.masked_fill(indices_to_remove, self.filter_value)
return scores
# Exclude Top Choices (XTC)
class XTCLogitsWarper(LogitsProcessor):
def __init__(self, threshold: float, probability: float, filter_value: float = -float("Inf")):
self.threshold = threshold
self.probability = probability
self.filter_value = filter_value
self.special_token_ids = [
shared.tokenizer.encode("\n")[-1],
]
if shared.tokenizer.eos_token_id is not None:
self.special_token_ids.append(shared.tokenizer.eos_token_id)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
# `random` returns values in the half-open range [0, 1), so setting `probability`
# to 0 means the sampler never takes action, while setting it to 1 means the sampler
# always takes action.
#
# Note that while XTC is most intuitively described as "if multiple tokens meet
# the threshold, then with probability...", reversing the two conditions is logically
# equivalent, and improves performance because processing can immediately be stopped
# if the random check fails.
if random.random() >= self.probability:
return scores
sorted_logits, sorted_indices = torch.sort(scores, descending=True)
probs = sorted_logits.softmax(dim=-1)
sorted_indices_to_remove = torch.full_like(probs, False, dtype=torch.bool)
# This operation sets exactly those indices to `True` for which the next index has
# probability above the threshold. Since `probs` is sorted, those are the indices
# of all tokens that meet the threshold, *except* the least probable one.
sorted_indices_to_remove[..., :-1] = probs[..., 1:] >= self.threshold
# Convert sorted_indices_to_remove to the original indices
indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
# If newline or EOS tokens would be removed, return the original scores
if indices_to_remove[:, self.special_token_ids].any():
return scores
# Otherwise, remove tokens with the mask
scores = scores.masked_fill(indices_to_remove, self.filter_value)
return scores
class DRYLogitsProcessor(LogitsProcessor):
def __init__(self, multiplier: float, base: float, allowed_length: int, sequence_breakers: set[int], _range: int):
self.multiplier = multiplier
self.base = base
self.allowed_length = allowed_length
self.sequence_breakers = sequence_breakers
self._range = _range
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
if self._range > 0:
input_ids = input_ids[:, -self._range:]
for input_ids_row, scores_row in zip(input_ids, scores):
# Use normal Python data types for improved performance
input_ids = input_ids_row.tolist()
last_token = input_ids[-1]
if last_token in self.sequence_breakers:
continue
# Exclude the last token as it always matches.
match_indices = []
for idx, val in enumerate(input_ids[:-1]):
if val == last_token:
match_indices.append(idx)
# Stores the maximum matching sequence length
# for each token immediately following the sequence in the input.
match_lengths = {}
for i in match_indices:
next_token = input_ids[i + 1]
if next_token in self.sequence_breakers:
continue
# We have already found that `last_token` matches at this index,
# so the match is at least of length 1.
match_length = 1
# Extend the match backwards (at most to 50 to prevent exponent overflow at penalty calculation) (this cap also improves performance on worst case)
while match_length < 50:
j = i - match_length
if j < 0:
# Start of input reached.
break
previous_token = input_ids[-(match_length + 1)]
if input_ids[j] != previous_token:
# Start of match reached.
break
if previous_token in self.sequence_breakers:
# Sequence-breaking token reached.
break
match_length += 1
if next_token in match_lengths:
match_lengths[next_token] = max(match_length, match_lengths[next_token])
else:
match_lengths[next_token] = match_length
# Apply penalties.
for token, match_length in match_lengths.items():
if match_length >= self.allowed_length:
penalty = self.multiplier * self.base ** (match_length - self.allowed_length)
scores_row[token] -= penalty
return scores
class MirostatLogitsWarper(LogitsProcessor):
def __init__(self, mirostat_mode: int, mirostat_tau: float, mirostat_eta: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
if mirostat_mode not in [2]:
raise ValueError(f"`mirostat` has to be a an integer 2, but is {mirostat_mode}")
self.mirostat_mode = mirostat_mode
self.mirostat_eta = mirostat_eta
self.mirostat_tau = mirostat_tau
self.filter_value = filter_value
self.min_tokens_to_keep = min_tokens_to_keep
self.mu = 2 * self.mirostat_tau
self.e = 0
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
logits = scores[0]
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
prob_original = torch.softmax(sorted_logits, dim=-1).tolist() # candidates
# Truncate the words with surprise values greater than mu
for i, candidate in enumerate(prob_original):
if candidate > 0 and -math.log2(candidate) > self.mu:
if (i == 0):
sorted_logits = sorted_logits[:1]
else:
sorted_logits = sorted_logits[:i]
break
# Normalize the probabilities of the remaining words
prob_topk = torch.softmax(sorted_logits, dim=0)
prev_i = torch.multinomial(prob_topk, num_samples=1, replacement=True)
device = get_device()
if device:
prob_topk = prob_topk.to(device)
prev_i = prev_i.to(device)
observed_surprise = -math.log2(prob_topk[prev_i])
self.e = observed_surprise - self.mirostat_tau
# Update mu using the learning rate and error
self.mu -= self.mirostat_eta * self.e
sorted_indices_to_remove = torch.ones_like(scores[0], dtype=torch.bool)
sorted_indices_to_remove[prev_i] = False
indices_to_remove = sorted_indices_to_remove.unsqueeze(0).scatter(1, sorted_indices.unsqueeze(0), sorted_indices_to_remove.unsqueeze(0))
scores = scores.masked_fill(indices_to_remove, self.filter_value)
return scores
class SpyLogitsWarper(LogitsProcessor):
def __init__(self):
pass
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
global global_scores
global_scores = scores
return scores
class RepetitionPenaltyLogitsProcessorWithRange(LogitsProcessor):
def __init__(self, penalty: float, _range: int):
if not (penalty > 0):
raise ValueError(f"`penalty` has to be strictly positive, but is {penalty}")
self.penalty = penalty
self._range = _range
def apply_repetition_penalty(self, input_ids_row, scores_row):
unique_ids = torch.unique(input_ids_row)
score = torch.gather(scores_row, 0, unique_ids)
# Apply multiplicative repetition penalty
score = torch.where(score < 0, score * self.penalty, score / self.penalty)
scores_row.scatter_(0, unique_ids, score)
return scores_row
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
input_ids = input_ids[:, -self._range:]
for input_ids_row, scores_row in zip(input_ids, scores):
scores_row = self.apply_repetition_penalty(input_ids_row, scores_row)
return scores
class PresencePenaltyLogitsProcessor(LogitsProcessor):
def __init__(self, presence_penalty: float, _range: int):
self.presence_penalty = presence_penalty
self._range = _range
def apply_presence_penalty(self, input_ids_row, scores_row):
unique_ids, counts = torch.unique(input_ids_row, return_counts=True)
# Apply presence penalty
raw_presence_penalty = (counts > 0).to(scores_row.dtype)
presence_penalty = raw_presence_penalty * self.presence_penalty
scores_row.scatter_add_(0, unique_ids, -presence_penalty)
return scores_row
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
input_ids = input_ids[:, -self._range:]
for input_ids_row, scores_row in zip(input_ids, scores):
scores_row = self.apply_presence_penalty(input_ids_row, scores_row)
return scores
class FrequencyPenaltyLogitsProcessor(LogitsProcessor):
def __init__(self, frequency_penalty: float, _range: int):
self.frequency_penalty = frequency_penalty
self._range = _range
def apply_frequency_penalty(self, input_ids_row, scores_row):
unique_ids, counts = torch.unique(input_ids_row, return_counts=True)
# Apply frequency penalty
raw_frequency_penalty = counts.to(scores_row.dtype)
frequency_penalty = raw_frequency_penalty * self.frequency_penalty
scores_row.scatter_add_(0, unique_ids, -frequency_penalty)
return scores_row
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
input_ids = input_ids[:, -self._range:]
for input_ids_row, scores_row in zip(input_ids, scores):
scores_row = self.apply_frequency_penalty(input_ids_row, scores_row)
return scores
def get_logits_processor_patch(self, **kwargs):
generation_config = kwargs['generation_config']
# Parameter sanitization
if isinstance(generation_config.temperature, int):
generation_config.temperature = float(generation_config.temperature) # Must be float
# Get the original warpers
warpers = original_get_logits_processor(self, **kwargs)
for i in range(len(warpers) - 1, -1, -1):
# Replace temperature with our modified class.
if warpers[i].__class__.__name__ == 'TemperatureLogitsWarper':
warpers[i] = TemperatureLogitsWarperCustom(
generation_config.temperature,
)
# Stuff we don't need
elif warpers[i].__class__.__name__ in ['RepetitionPenaltyLogitsProcessor']:
del warpers[i]
# Add custom warpers
warpers_to_add = LogitsProcessorList()
min_tokens_to_keep = 2 if generation_config.num_beams > 1 else 1
if generation_config.repetition_penalty is not None and generation_config.repetition_penalty != 1.0:
warpers_to_add.append(
RepetitionPenaltyLogitsProcessorWithRange(
penalty=generation_config.repetition_penalty,
_range=generation_config.repetition_penalty_range
)
)
if generation_config.presence_penalty is not None and generation_config.presence_penalty != 0.0:
warpers_to_add.append(
PresencePenaltyLogitsProcessor(
presence_penalty=generation_config.presence_penalty,
_range=generation_config.repetition_penalty_range
)
)
if generation_config.frequency_penalty is not None and generation_config.frequency_penalty != 0.0:
warpers_to_add.append(
FrequencyPenaltyLogitsProcessor(
frequency_penalty=generation_config.frequency_penalty,
_range=generation_config.repetition_penalty_range
)
)
if generation_config.dry_multiplier is not None and generation_config.dry_multiplier > 0.0:
dry_sequence_breakers = generation_config.dry_sequence_breakers
# Support both JSON array notation and comma-separated strings.
if not dry_sequence_breakers.startswith("["):
dry_sequence_breakers = "[" + dry_sequence_breakers + "]"
sequence_breaker_strings = json.loads(dry_sequence_breakers)
# Prefix with 'a' to get the correct encoding of the token at the end of a text.
sequence_breakers = {
shared.tokenizer.encode(f'a{s}')[-1] for s in sequence_breaker_strings
}
warpers.append(
DRYLogitsProcessor(
multiplier=generation_config.dry_multiplier,
base=generation_config.dry_base,
allowed_length=generation_config.dry_allowed_length,
sequence_breakers=sequence_breakers,
_range=generation_config.repetition_penalty_range,
)
)
if generation_config.tfs is not None and 0.0 <= generation_config.tfs < 1.0:
warpers_to_add.append(
TailFreeLogitsWarper(
tfs=generation_config.tfs,
min_tokens_to_keep=min_tokens_to_keep
)
)
if generation_config.top_a is not None and 0.0 < generation_config.top_a <= 1.0:
warpers_to_add.append(
TopALogitsWarper(
top_a=generation_config.top_a,
min_tokens_to_keep=min_tokens_to_keep
)
)
if generation_config.top_n_sigma is not None and generation_config.top_n_sigma > 0.0:
warpers_to_add.append(
TopNSigmaLogitsWarper(
n_sigma=generation_config.top_n_sigma,
min_tokens_to_keep=min_tokens_to_keep
)
)
if generation_config.adaptive_target is not None and generation_config.adaptive_target > 0.0:
warpers_to_add.append(
AdaptivePLogitsWarper(
adaptive_target=generation_config.adaptive_target,
adaptive_decay=generation_config.adaptive_decay,
min_tokens_to_keep=min_tokens_to_keep
)
)
if generation_config.xtc_probability is not None and generation_config.xtc_probability > 0:
warpers_to_add.append(
XTCLogitsWarper(
threshold=generation_config.xtc_threshold,
probability=generation_config.xtc_probability,
)
)
if generation_config.dynamic_temperature:
warpers_to_add.append(
DynamicTemperatureLogitsWarper(
dynatemp_low=generation_config.dynatemp_low,
dynatemp_high=generation_config.dynatemp_high,
dynatemp_exponent=generation_config.dynatemp_exponent,
)
)
if generation_config.smoothing_factor > 0:
warpers_to_add.append(
QuadraticSamplingLogitsWarper(
smoothing_factor=generation_config.smoothing_factor,
smoothing_curve=generation_config.smoothing_curve
)
)
if generation_config.mirostat_mode is not None and generation_config.mirostat_mode == 2:
warpers_to_add.append(
MirostatLogitsWarper(
mirostat_mode=generation_config.mirostat_mode,
mirostat_eta=generation_config.mirostat_eta,
mirostat_tau=generation_config.mirostat_tau,
min_tokens_to_keep=min_tokens_to_keep
)
)
if len(warpers) > 0 and isinstance(warpers[-1], LogitNormalization):
normalize = warpers.pop(-1)
else:
normalize = None
warpers += warpers_to_add
# Sort the samplers.
sampler_priority = generation_config.sampler_priority
# Handle temperature_last
if generation_config.temperature_last:
for param_name in ['temperature', 'dynamic_temperature', 'quadratic_sampling']:
if param_name in sampler_priority:
index = sampler_priority.index(param_name)
sampler_priority.append(sampler_priority.pop(index))
else:
sampler_priority.append(param_name)
class_name_to_nickname = {
'DynamicTemperatureLogitsWarper': 'dynamic_temperature',
'EpsilonLogitsWarper': 'epsilon_cutoff',
'EtaLogitsWarper': 'eta_cutoff',
'MinPLogitsWarper': 'min_p',
'MirostatLogitsWarper': 'mirostat',
'QuadraticSamplingLogitsWarper': 'quadratic_sampling',
'TailFreeLogitsWarper': 'tfs',
'TemperatureLogitsWarperCustom': 'temperature',
'TopALogitsWarper': 'top_a',
'TopNSigmaLogitsWarper': 'top_n_sigma',
'AdaptivePLogitsWarper': 'adaptive_p',
'TopKLogitsWarper': 'top_k',
'TopPLogitsWarper': 'top_p',
'TypicalLogitsWarper': 'typical_p',
'XTCLogitsWarper': 'xtc',
'RepetitionPenaltyLogitsProcessorWithRange': 'repetition_penalty',
'PresencePenaltyLogitsProcessor': 'presence_penalty',
'FrequencyPenaltyLogitsProcessor': 'frequency_penalty',
'DRYLogitsProcessor': 'dry',
'EncoderRepetitionPenaltyLogitsProcessor': 'encoder_repetition_penalty',
'NoRepeatNGramLogitsProcessor': 'no_repeat_ngram',
}
def custom_sort_key(obj):
class_name = obj.__class__.__name__
# Return -1 if class_name is not mapped
if class_name not in class_name_to_nickname or class_name_to_nickname[class_name] not in sampler_priority:
return -1
return sampler_priority.index(class_name_to_nickname[class_name])
# Sort the list using the custom key function
warpers = sorted(warpers, key=custom_sort_key)
if shared.args.verbose:
logger.info("WARPERS=")
pprint.PrettyPrinter(indent=4, sort_dicts=False).pprint([x.__class__.__name__ for x in warpers])
print()
if normalize is not None:
warpers.append(normalize)
warpers.append(SpyLogitsWarper())
warpers = LogitsProcessorList(warpers)
return warpers
def generation_config_init_patch(self, **kwargs):
original_init(self, **kwargs)
self.min_p = kwargs.pop("min_p", 0.0)
self.dynamic_temperature = kwargs.pop("dynamic_temperature", False)
self.dynatemp_low = kwargs.pop("dynatemp_low", 1)
self.dynatemp_high = kwargs.pop("dynatemp_high", 1)
self.dynatemp_exponent = kwargs.pop("dynatemp_exponent", 1)
self.smoothing_factor = kwargs.pop("smoothing_factor", 0.0)
self.smoothing_curve = kwargs.pop("smoothing_curve", 1.0)
self.tfs = kwargs.pop("tfs", 1.0)
self.top_a = kwargs.pop("top_a", 0.0)
self.top_n_sigma = kwargs.pop("top_n_sigma", 0.0)
self.adaptive_target = kwargs.pop("adaptive_target", 0.0)
self.adaptive_decay = kwargs.pop("adaptive_decay", 0.9)
self.mirostat_mode = kwargs.pop("mirostat_mode", 0)
self.mirostat_eta = kwargs.pop("mirostat_eta", 0.1)
self.mirostat_tau = kwargs.pop("mirostat_tau", 5)
self.repetition_penalty_range = kwargs.pop("repetition_penalty_range", 0)
self.presence_penalty = kwargs.pop("presence_penalty", 0)
self.frequency_penalty = kwargs.pop("frequency_penalty", 0)
self.dry_multiplier = kwargs.pop("dry_multiplier", 0.0)
self.dry_base = kwargs.pop("dry_base", 1.75)
self.dry_allowed_length = kwargs.pop("dry_allowed_length", 2)
self.dry_sequence_breakers = kwargs.pop("dry_sequence_breakers", '"\\n", ":", "\\"", "*"')
self.xtc_threshold = kwargs.pop("xtc_threshold", 0.1)
self.xtc_probability = kwargs.pop("xtc_probability", 0)
self.temperature_last = kwargs.pop("temperature_last", False)
self.sampler_priority = kwargs.pop("sampler_priority", ['repetition_penalty', 'presence_penalty', 'frequency_penalty', 'dry', 'temperature', 'dynamic_temperature', 'quadratic_sampling', 'top_n_sigma', 'top_k', 'top_p', 'typical_p', 'epsilon_cutoff', 'eta_cutoff', 'tfs', 'top_a', 'min_p', 'adaptive_p', 'mirostat', 'xtc', 'encoder_repetition_penalty', 'no_repeat_ngram'])
def hijack_samplers():
transformers.GenerationMixin._get_logits_processor = get_logits_processor_patch
transformers.GenerationConfig.__init__ = generation_config_init_patch | --- +++ @@ -22,6 +22,9 @@
class TemperatureLogitsWarperCustom(LogitsProcessor):
+ '''
+ A copy of the original Transformers temperature logits warper.
+ '''
def __init__(self, temperature: float):
if not isinstance(temperature, float) or not (temperature > 0):
@@ -42,6 +45,9 @@
class DynamicTemperatureLogitsWarper(LogitsProcessor):
+ '''
+ Dynamic temperature.
+ '''
def __init__(self, dynatemp_low: float, dynatemp_high: float, dynatemp_exponent: float):
self.dynatemp_low = dynatemp_low
@@ -97,6 +103,9 @@
class QuadraticSamplingLogitsWarper(LogitsProcessor):
+ '''
+ Quadratic sampling with smoothing factor and smoothing curve parameters.
+ '''
def __init__(self, smoothing_factor, smoothing_curve):
self.smoothing_factor = smoothing_factor
@@ -188,6 +197,14 @@
class TopNSigmaLogitsWarper(LogitsProcessor):
def __init__(self, n_sigma: float = 2.0, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
+ """
+ Initialize Top-nσ Sampling logits warper.
+
+ Args:
+ n_sigma: The threshold multiplier for standard deviation
+ filter_value: Value to assign to filtered logits
+ min_tokens_to_keep: Minimum number of tokens to keep
+ """
if n_sigma < 0:
raise ValueError(f"`n_sigma` must be a non-negative float, but is {n_sigma}")
self.n_sigma = n_sigma
@@ -219,6 +236,12 @@
class AdaptivePLogitsWarper(LogitsProcessor):
+ '''
+ Adaptive-p sampling. A stateful sampler that favors tokens near a target
+ probability, using an EMA-based control loop to adapt over time.
+
+ Matches the llama.cpp implementation from PR #17927.
+ '''
DISTRIBUTION_WIDTH = 0.3
PEAK_LOGIT_VALUE = 5.0
@@ -762,4 +785,4 @@
def hijack_samplers():
transformers.GenerationMixin._get_logits_processor = get_logits_processor_patch
- transformers.GenerationConfig.__init__ = generation_config_init_patch+ transformers.GenerationConfig.__init__ = generation_config_init_patch
| https://raw.githubusercontent.com/oobabooga/text-generation-webui/HEAD/modules/sampler_hijack.py |
Write docstrings describing each step | import datetime
import functools
import html
import os
import re
import time
from pathlib import Path
import markdown
from PIL import Image, ImageOps
from modules import shared
from modules.reasoning import extract_reasoning
from modules.sane_markdown_lists import SaneListExtension
from modules.utils import get_available_chat_styles
# This is to store the paths to the thumbnails of the profile pictures
image_cache = {}
def minify_css(css: str) -> str:
# Step 1: Remove comments
css = re.sub(r'/\*.*?\*/', '', css, flags=re.DOTALL)
# Step 2: Remove leading and trailing whitespace
css = re.sub(r'^[ \t]*|[ \t]*$', '', css, flags=re.MULTILINE)
# Step 3: Remove spaces after specific characters ({ : ; ,})
css = re.sub(r'([:{;,])\s+', r'\1', css)
# Step 4: Remove spaces before `{`
css = re.sub(r'\s+{', '{', css)
# Step 5: Remove empty lines
css = re.sub(r'^\s*$', '', css, flags=re.MULTILINE)
# Step 6: Collapse all lines into one
css = re.sub(r'\n', '', css)
return css
with open(Path(__file__).resolve().parent / '../css/html_readable_style.css', 'r', encoding='utf-8') as f:
readable_css = f.read()
with open(Path(__file__).resolve().parent / '../css/html_instruct_style.css', 'r', encoding='utf-8') as f:
instruct_css = f.read()
# Custom chat styles
chat_styles = {}
for k in get_available_chat_styles():
with open(Path(f'css/chat_style-{k}.css'), 'r', encoding='utf-8') as f:
chat_styles[k] = f.read()
# Handle styles that derive from other styles
for k in chat_styles:
lines = chat_styles[k].split('\n')
input_string = lines[0]
match = re.search(r'chat_style-([a-z\-]*)\.css', input_string)
if match:
style = match.group(1)
chat_styles[k] = chat_styles.get(style, '') + '\n\n' + '\n'.join(lines[1:])
# Reduce the size of the CSS sources above
readable_css = minify_css(readable_css)
instruct_css = minify_css(instruct_css)
for k in chat_styles:
chat_styles[k] = minify_css(chat_styles[k])
def fix_newlines(string):
string = string.replace('\n', '\n\n')
string = re.sub(r"\n{3,}", "\n\n", string)
string = string.strip()
return string
def replace_quotes(text):
# Define a list of quote pairs (opening and closing), using HTML entities
quote_pairs = [
('"', '"'), # Double quotes
('“', '”'), # Unicode left and right double quotation marks
('‘', '’'), # Unicode left and right single quotation marks
('«', '»'), # French quotes
('„', '“'), # German quotes
('‘', '’'), # Alternative single quotes
('“', '”'), # Unicode quotes (numeric entities)
('“', '”'), # Unicode quotes (hex entities)
('\u201C', '\u201D'), # Unicode quotes (literal chars)
]
# Create a regex pattern that matches any of the quote pairs, including newlines
pattern = '|'.join(f'({re.escape(open_q)})(.*?)({re.escape(close_q)})' for open_q, close_q in quote_pairs)
# Replace matched patterns with <q> tags, keeping original quotes
def replacer(m):
# Find the first non-None group set
for i in range(1, len(m.groups()), 3): # Step through each sub-pattern's groups
if m.group(i): # If this sub-pattern matched
return f'<q>{m.group(i)}{m.group(i + 1)}{m.group(i + 2)}</q>'
return m.group(0) # Fallback (shouldn't happen)
replaced_text = re.sub(pattern, replacer, text, flags=re.DOTALL)
return replaced_text
def replace_blockquote(m):
return m.group().replace('\n', '\n> ').replace('\\begin{blockquote}', '').replace('\\end{blockquote}', '')
def extract_thinking_block(string):
return extract_reasoning(string, html_escaped=True)
def build_tool_call_block(header, body, message_id, index):
block_id = f"tool-call-{message_id}-{index}"
if body == '...':
# Pending placeholder — no expandable body, just title with ellipsis
return f'''
<details class="thinking-block" data-block-id="{block_id}">
<summary class="thinking-header">
{tool_svg_small}
<span class="thinking-title">{html.escape(header)} ...</span>
</summary>
</details>
'''
# Build a plain <pre> directly to avoid highlight.js auto-detection
escaped_body = html.escape(body)
return f'''
<details class="thinking-block" data-block-id="{block_id}">
<summary class="thinking-header">
{tool_svg_small}
<span class="thinking-title">{html.escape(header)}</span>
</summary>
<div class="thinking-content pretty_scrollbar"><pre><code class="nohighlight">{escaped_body}</code></pre></div>
</details>
'''
def build_thinking_block(thinking_content, message_id, has_remaining_content, thinking_index=0):
if thinking_content is None:
return None
# Process the thinking content through markdown
thinking_html = process_markdown_content(thinking_content)
# Generate unique ID for the thinking block
block_id = f"thinking-{message_id}-{thinking_index}"
# Check if thinking is complete or still in progress
is_streaming = not has_remaining_content
title_text = "Thinking..." if is_streaming else "Thought"
return f'''
<details class="thinking-block" data-block-id="{block_id}" data-streaming="{str(is_streaming).lower()}">
<summary class="thinking-header">
{info_svg_small}
<span class="thinking-title">{title_text}</span>
</summary>
<div class="thinking-content pretty_scrollbar">{thinking_html}</div>
</details>
'''
def build_main_content_block(content):
if not content:
return ""
return process_markdown_content(content)
def process_markdown_content(string):
if not string:
return ""
# Define unique placeholders for LaTeX asterisks and underscores
LATEX_ASTERISK_PLACEHOLDER = "LATEXASTERISKPLACEHOLDER"
LATEX_UNDERSCORE_PLACEHOLDER = "LATEXUNDERSCOREPLACEHOLDER"
def protect_asterisks_underscores_in_latex(match):
# Check which delimiter group was captured
if match.group(1) is not None: # Content from $$...$$
content = match.group(1)
modified_content = content.replace('*', LATEX_ASTERISK_PLACEHOLDER)
modified_content = modified_content.replace('_', LATEX_UNDERSCORE_PLACEHOLDER)
return f'{modified_content}'
elif match.group(2) is not None: # Content from \[...\]
content = match.group(2)
modified_content = content.replace('*', LATEX_ASTERISK_PLACEHOLDER)
modified_content = modified_content.replace('_', LATEX_UNDERSCORE_PLACEHOLDER)
return f'\\[{modified_content}\\]'
elif match.group(3) is not None: # Content from \(...\)
content = match.group(3)
modified_content = content.replace('*', LATEX_ASTERISK_PLACEHOLDER)
modified_content = modified_content.replace('_', LATEX_UNDERSCORE_PLACEHOLDER)
return f'\\({modified_content}\\)'
return match.group(0) # Fallback
# Make \[ \] LaTeX equations inline
pattern = r'^\s*\\\[\s*\n([\s\S]*?)\n\s*\\\]\s*$'
replacement = r'\\[ \1 \\]'
string = re.sub(pattern, replacement, string, flags=re.MULTILINE)
# Escape backslashes
string = string.replace('\\', '\\\\')
# Quote to <q></q>
string = replace_quotes(string)
# Blockquote
string = re.sub(r'(^|[\n])>', r'\1>', string)
pattern = re.compile(r'\\begin{blockquote}(.*?)\\end{blockquote}', re.DOTALL)
string = pattern.sub(replace_blockquote, string)
# Code block standardization
string = string.replace('\\begin{code}', '```')
string = string.replace('\\end{code}', '```')
string = string.replace('\\begin{align*}', '$$')
string = string.replace('\\end{align*}', '$$')
string = string.replace('\\begin{align}', '$$')
string = string.replace('\\end{align}', '$$')
string = string.replace('\\begin{equation}', '$$')
string = string.replace('\\end{equation}', '$$')
string = string.replace('\\begin{equation*}', '$$')
string = string.replace('\\end{equation*}', '$$')
string = re.sub(r"(.)```", r"\1\n```", string)
# Protect asterisks and underscores within all LaTeX blocks before markdown conversion
latex_pattern = re.compile(r'((?:^|[\r\n\s])\$\$[^`]*?\$\$)|\\\[(.*?)\\\]|\\\((.*?)\\\)',
re.DOTALL)
string = latex_pattern.sub(protect_asterisks_underscores_in_latex, string)
result = ''
is_code = False
is_latex = False
# Manual line iteration for robust structure parsing
for line in string.split('\n'):
stripped_line = line.strip()
if stripped_line.startswith('```'):
is_code = not is_code
elif stripped_line.startswith('$$') and (stripped_line == "$$" or not stripped_line.endswith('$$')):
is_latex = not is_latex
elif stripped_line.endswith('$$'):
is_latex = False
elif stripped_line.startswith('\\\\[') and not stripped_line.endswith('\\\\]'):
is_latex = True
elif stripped_line.startswith('\\\\]'):
is_latex = False
elif stripped_line.endswith('\\\\]'):
is_latex = False
result += line
# Don't add an extra \n for code, LaTeX, or tables
if is_code or is_latex or line.startswith('|'):
result += '\n'
# Also don't add an extra \n for lists
elif stripped_line.startswith('-') or stripped_line.startswith('*') or stripped_line.startswith('+') or stripped_line.startswith('>') or re.match(r'\d+\.', stripped_line):
result += ' \n'
else:
result += ' \n'
result = result.strip()
if is_code:
result += '\n```' # Unfinished code block
# Unfinished list, like "\n1.". A |delete| string is added and then
# removed to force a <ol> or <ul> to be generated instead of a <p>.
list_item_pattern = r'(\n\d+\.?|\n\s*[-*+]\s*([*_~]{1,3})?)$'
if re.search(list_item_pattern, result):
delete_str = '|delete|'
if re.search(r'(\d+\.?)$', result) and not result.endswith('.'):
result += '.'
# Add the delete string after the list item
result = re.sub(list_item_pattern, r'\g<1> ' + delete_str, result)
# Convert to HTML using markdown
html_output = markdown.markdown(result, extensions=['fenced_code', 'tables', SaneListExtension()])
# Remove the delete string from the HTML output
pos = html_output.rfind(delete_str)
if pos > -1:
html_output = html_output[:pos] + html_output[pos + len(delete_str):]
else:
# Convert to HTML using markdown
html_output = markdown.markdown(result, extensions=['fenced_code', 'tables', SaneListExtension()])
# Restore the LaTeX asterisks and underscores after markdown conversion
html_output = html_output.replace(LATEX_ASTERISK_PLACEHOLDER, '*')
html_output = html_output.replace(LATEX_UNDERSCORE_PLACEHOLDER, '_')
# Remove extra newlines before </code>
html_output = re.sub(r'\s*</code>', '</code>', html_output)
# Unescape code blocks
pattern = re.compile(r'<code[^>]*>(.*?)</code>', re.DOTALL)
html_output = pattern.sub(lambda x: html.unescape(x.group()), html_output)
# Unescape backslashes
html_output = html_output.replace('\\\\', '\\')
# Wrap tables in a scrollable div
html_output = html_output.replace('<table>', '<div class="table-wrapper pretty_scrollbar"><table>').replace('</table>', '</table></div>')
return html_output
@functools.lru_cache(maxsize=None)
def convert_to_markdown(string, message_id=None):
if not string:
return ""
# Use a default message ID if none provided
if message_id is None:
message_id = "unknown"
# Find tool call blocks by position, then process the text segments
# between them using extract_thinking_block (which supports all
# THINKING_FORMATS, including end-only variants like Qwen's).
tool_call_pattern = re.compile(r'<tool_call>(.*?)\n(.*?)\n</tool_call>', re.DOTALL)
tool_calls = list(tool_call_pattern.finditer(string))
if not tool_calls:
# No tool calls — use original single-pass extraction
thinking_content, remaining_content = extract_thinking_block(string)
blocks = []
thinking_html = build_thinking_block(thinking_content, message_id, bool(remaining_content))
if thinking_html:
blocks.append(thinking_html)
main_html = build_main_content_block(remaining_content)
if main_html:
blocks.append(main_html)
return ''.join(blocks)
# Split string into text segments around tool_call blocks and
# run extract_thinking_block on each segment for full format support.
html_parts = []
last_end = 0
tool_idx = 0
think_idx = 0
def process_text_segment(text, is_last_segment):
nonlocal think_idx
if not text.strip():
return
while text.strip():
thinking_content, remaining = extract_thinking_block(text)
if thinking_content is None:
break
has_remaining = bool(remaining.strip()) or not is_last_segment
html_parts.append(build_thinking_block(thinking_content, message_id, has_remaining, think_idx))
think_idx += 1
text = remaining
if text.strip():
html_parts.append(process_markdown_content(text))
for tc in tool_calls:
# Process text before this tool_call
process_text_segment(string[last_end:tc.start()], is_last_segment=False)
# Add tool call accordion
header = tc.group(1).strip()
body = tc.group(2).strip()
html_parts.append(build_tool_call_block(header, body, message_id, tool_idx))
tool_idx += 1
last_end = tc.end()
# Process text after the last tool_call
process_text_segment(string[last_end:], is_last_segment=True)
return ''.join(html_parts)
def convert_to_markdown_wrapped(string, message_id=None, use_cache=True):
if use_cache:
return convert_to_markdown(string, message_id=message_id)
return convert_to_markdown.__wrapped__(string, message_id=message_id)
def generate_basic_html(string):
convert_to_markdown.cache_clear()
string = convert_to_markdown(string)
string = f'<style>{readable_css}</style><div class="readable-container">{string}</div>'
return string
def make_thumbnail(image):
image = image.resize((350, round(image.size[1] / image.size[0] * 350)), Image.Resampling.LANCZOS)
if image.size[1] > 470:
image = ImageOps.fit(image, (350, 470), Image.LANCZOS)
return image
def get_image_cache(path):
cache_folder = Path(shared.args.disk_cache_dir)
if not cache_folder.exists():
cache_folder.mkdir()
mtime = os.stat(path).st_mtime
if (path in image_cache and mtime != image_cache[path][0]) or (path not in image_cache):
img = make_thumbnail(Image.open(path))
old_p = Path(f'{cache_folder}/{path.name}_cache.png')
p = Path(f'{cache_folder}/cache_{path.name}.png')
if old_p.exists():
old_p.rename(p)
output_file = p
img.convert('RGBA').save(output_file, format='PNG')
image_cache[path] = [mtime, output_file.as_posix()]
return image_cache[path][1]
copy_svg = '''<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="tabler-icon tabler-icon-copy"><path d="M8 8m0 2a2 2 0 0 1 2 -2h8a2 2 0 0 1 2 2v8a2 2 0 0 1 -2 2h-8a2 2 0 0 1 -2 -2z"></path><path d="M16 8v-2a2 2 0 0 0 -2 -2h-8a2 2 0 0 0 -2 2v8a2 2 0 0 0 2 2h2"></path></svg>'''
refresh_svg = '''<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="tabler-icon tabler-icon-repeat"><path d="M4 12v-3a3 3 0 0 1 3 -3h13m-3 -3l3 3l-3 3"></path><path d="M20 12v3a3 3 0 0 1 -3 3h-13m3 3l-3 -3l3 -3"></path></svg>'''
continue_svg = '''<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="icon icon-tabler icons-tabler-outline icon-tabler-player-play"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M7 4v16l13 -8z" /></svg>'''
remove_svg = '''<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="icon icon-tabler icons-tabler-outline icon-tabler-trash"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M4 7l16 0" /><path d="M10 11l0 6" /><path d="M14 11l0 6" /><path d="M5 7l1 12a2 2 0 0 0 2 2h8a2 2 0 0 0 2 -2l1 -12" /><path d="M9 7v-3a1 1 0 0 1 1 -1h4a1 1 0 0 1 1 1v3" /></svg>'''
branch_svg = '''<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="icon icon-tabler icons-tabler-outline icon-tabler-git-branch"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M7 18m-2 0a2 2 0 1 0 4 0a2 2 0 1 0 -4 0" /><path d="M7 6m-2 0a2 2 0 1 0 4 0a2 2 0 1 0 -4 0" /><path d="M17 6m-2 0a2 2 0 1 0 4 0a2 2 0 1 0 -4 0" /><path d="M7 8l0 8" /><path d="M9 18h6a2 2 0 0 0 2 -2v-5" /><path d="M14 14l3 -3l3 3" /></svg>'''
edit_svg = '''<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="tabler-icon tabler-icon-pencil"><path d="M4 20h4l10.5 -10.5a2.828 2.828 0 1 0 -4 -4l-10.5 10.5v4"></path><path d="M13.5 6.5l4 4"></path></svg>'''
info_svg = '''<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="thinking-icon tabler-icon tabler-icon-info-circle"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M12 2a10 10 0 0 1 0 20a10 10 0 0 1 0 -20z" /><path d="M12 16v-4" /><path d="M12 8h.01" /></svg>'''
info_svg_small = '''<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="thinking-icon tabler-icon tabler-icon-info-circle"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M12 2a10 10 0 0 1 0 20a10 10 0 0 1 0 -20z" /><path d="M12 16v-4" /><path d="M12 8h.01" /></svg>'''
tool_svg_small = '''<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="thinking-icon tabler-icon tabler-icon-tool"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M7 10h3v-3l-3.5 -3.5a6 6 0 0 1 8 8l6 6a2 2 0 0 1 -3 3l-6 -6a6 6 0 0 1 -8 -8l3.5 3.5" /></svg>'''
attachment_svg = '''<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><path d="M21.44 11.05l-9.19 9.19a6 6 0 0 1-8.48-8.48l9.19-9.19a4 4 0 0 1 5.66 5.66l-9.2 9.19a2 2 0 0 1-2.83-2.83l8.49-8.48"></path></svg>'''
copy_button = f'<button class="footer-button footer-copy-button" title="Copy" onclick="copyToClipboard(this)">{copy_svg}</button>'
branch_button = f'<button class="footer-button footer-branch-button" title="Branch here" onclick="branchHere(this)">{branch_svg}</button>'
edit_button = f'<button class="footer-button footer-edit-button" title="Edit" onclick="editHere(this)">{edit_svg}</button>'
refresh_button = f'<button class="footer-button footer-refresh-button" title="Regenerate" onclick="regenerateClick()">{refresh_svg}</button>'
continue_button = f'<button class="footer-button footer-continue-button" title="Continue" onclick="continueClick()">{continue_svg}</button>'
remove_button = f'<button class="footer-button footer-remove-button" title="Remove last reply" onclick="removeLastClick()">{remove_svg}</button>'
info_button = f'<button class="footer-button footer-info-button" title="message">{info_svg}</button>'
def format_message_timestamp(history, role, index, tooltip_include_timestamp=True):
key = f"{role}_{index}"
if 'metadata' in history and key in history['metadata'] and history['metadata'][key].get('timestamp'):
timestamp = history['metadata'][key]['timestamp']
tooltip_text = get_message_tooltip(history, role, index, include_timestamp=tooltip_include_timestamp)
title_attr = f' title="{html.escape(tooltip_text)}"' if tooltip_text else ''
return f"<span class='timestamp'{title_attr}>{timestamp}</span>"
return ""
def format_message_attachments(history, role, index):
key = f"{role}_{index}"
if 'metadata' in history and key in history['metadata'] and 'attachments' in history['metadata'][key]:
attachments = history['metadata'][key]['attachments']
if not attachments:
return ""
attachments_html = '<div class="message-attachments">'
for attachment in attachments:
name = html.escape(attachment["name"])
if attachment.get("type") == "image":
image_data = attachment.get("image_data", "")
attachments_html += (
f'<div class="attachment-box image-attachment">'
f'<img src="{image_data}" alt="{name}" class="image-preview" />'
f'<div class="attachment-name">{name}</div>'
f'</div>'
)
else:
# Make clickable if URL exists (web search)
if "url" in attachment:
name = f'<a href="{html.escape(attachment["url"])}" target="_blank" rel="noopener noreferrer">{name}</a>'
attachments_html += (
f'<div class="attachment-box">'
f'<div class="attachment-icon">{attachment_svg}</div>'
f'<div class="attachment-name">{name}</div>'
f'</div>'
)
attachments_html += '</div>'
return attachments_html
return ""
def get_message_tooltip(history, role, index, include_timestamp=True):
key = f"{role}_{index}"
if 'metadata' not in history or key not in history['metadata']:
return ""
meta = history['metadata'][key]
tooltip_parts = []
if include_timestamp and meta.get('timestamp'):
tooltip_parts.append(meta['timestamp'])
if meta.get('model_name'):
tooltip_parts.append(f"Model: {meta['model_name']}")
return " | ".join(tooltip_parts)
def get_version_navigation_html(history, i, role):
key = f"{role}_{i}"
metadata = history.get('metadata', {})
if key not in metadata or 'versions' not in metadata[key]:
return ""
versions = metadata[key]['versions']
# Default to the last version if current_version_index isn't set in metadata
current_idx = metadata[key].get('current_version_index', len(versions) - 1 if versions else 0)
if len(versions) <= 1:
return ""
left_disabled = ' disabled' if current_idx == 0 else ''
right_disabled = ' disabled' if current_idx >= len(versions) - 1 else ''
left_arrow = f'<button class="footer-button version-nav-button"{left_disabled} onclick="navigateVersion(this, \'left\')" title="Previous version"><</button>'
right_arrow = f'<button class="footer-button version-nav-button"{right_disabled} onclick="navigateVersion(this, \'right\')" title="Next version">></button>'
position = f'<span class="version-position">{current_idx + 1}/{len(versions)}</span>'
return f'<div class="version-navigation">{left_arrow}{position}{right_arrow}</div>'
def actions_html(history, i, role, info_message=""):
action_buttons = ""
version_nav_html = ""
if role == "assistant":
action_buttons = (
f'{copy_button}'
f'{edit_button}'
f'{refresh_button if i == len(history["visible"]) - 1 else ""}'
f'{continue_button if i == len(history["visible"]) - 1 else ""}'
f'{remove_button if i == len(history["visible"]) - 1 else ""}'
f'{branch_button}'
)
version_nav_html = get_version_navigation_html(history, i, "assistant")
elif role == "user":
action_buttons = (
f'{copy_button}'
f'{edit_button}'
)
version_nav_html = get_version_navigation_html(history, i, "user")
return (f'<div class="message-actions">'
f'{action_buttons}'
f'{info_message}'
f'</div>'
f'{version_nav_html}')
def generate_instruct_html(history, last_message_only=False):
if not last_message_only:
output = f'<style>{instruct_css}</style><div class="chat" id="chat" data-mode="instruct"><div class="messages">'
else:
output = ""
def create_message(role, content, raw_content):
class_name = "user-message" if role == "user" else "assistant-message"
# Get role-specific data
timestamp = format_message_timestamp(history, role, i)
attachments = format_message_attachments(history, role, i)
# Create info button if timestamp exists
info_message = ""
if timestamp:
tooltip_text = get_message_tooltip(history, role, i)
info_message = info_button.replace('title="message"', f'title="{html.escape(tooltip_text)}"')
return (
f'<div class="{class_name}" '
f'data-raw="{html.escape(raw_content, quote=True)}"'
f'data-index={i}>'
f'<div class="text">'
f'<div class="message-body">{content}</div>'
f'{attachments}'
f'{actions_html(history, i, role, info_message)}'
f'</div>'
f'</div>'
)
# Determine range
start_idx = len(history['visible']) - 1 if last_message_only else 0
end_idx = len(history['visible'])
for i in range(start_idx, end_idx):
row_visible = history['visible'][i]
row_internal = history['internal'][i]
# Convert content
if last_message_only:
converted_visible = [None, convert_to_markdown_wrapped(row_visible[1], message_id=i, use_cache=i != len(history['visible']) - 1)]
else:
converted_visible = [convert_to_markdown_wrapped(entry, message_id=i, use_cache=i != len(history['visible']) - 1) for entry in row_visible]
# Generate messages
if not last_message_only and converted_visible[0]:
output += create_message("user", converted_visible[0], row_internal[0])
output += create_message("assistant", converted_visible[1], row_internal[1])
if not last_message_only:
output += "</div></div>"
return output
def get_character_image_with_cache_buster():
cache_path = shared.user_data_dir / "cache" / "pfp_character_thumb.png"
if cache_path.exists():
mtime = int(cache_path.stat().st_mtime)
return f'<img src="file/{shared.user_data_dir}/cache/pfp_character_thumb.png?{mtime}" class="pfp_character">'
return ''
def generate_cai_chat_html(history, name1, name2, style, character, reset_cache=False, last_message_only=False):
if not last_message_only:
output = f'<style>{chat_styles[style]}</style><div class="chat" id="chat"><div class="messages">'
else:
output = ""
img_bot = get_character_image_with_cache_buster()
def create_message(role, content, raw_content):
circle_class = "circle-you" if role == "user" else "circle-bot"
name = name1 if role == "user" else name2
# Get role-specific data
timestamp = format_message_timestamp(history, role, i, tooltip_include_timestamp=False)
attachments = format_message_attachments(history, role, i)
# Get appropriate image
if role == "user":
img = (f'<img src="file/{shared.user_data_dir}/cache/pfp_me.png?{time.time() if reset_cache else ""}">'
if (shared.user_data_dir / "cache" / "pfp_me.png").exists() else '')
else:
img = img_bot
return (
f'<div class="message" '
f'data-raw="{html.escape(raw_content, quote=True)}"'
f'data-index={i}>'
f'<div class="{circle_class}">{img}</div>'
f'<div class="text">'
f'<div class="username">{name}{timestamp}</div>'
f'<div class="message-body">{content}</div>'
f'{attachments}'
f'{actions_html(history, i, role)}'
f'</div>'
f'</div>'
)
# Determine range
start_idx = len(history['visible']) - 1 if last_message_only else 0
end_idx = len(history['visible'])
for i in range(start_idx, end_idx):
row_visible = history['visible'][i]
row_internal = history['internal'][i]
# Convert content
if last_message_only:
converted_visible = [None, convert_to_markdown_wrapped(row_visible[1], message_id=i, use_cache=i != len(history['visible']) - 1)]
else:
converted_visible = [convert_to_markdown_wrapped(entry, message_id=i, use_cache=i != len(history['visible']) - 1) for entry in row_visible]
# Generate messages
if not last_message_only and converted_visible[0]:
output += create_message("user", converted_visible[0], row_internal[0])
output += create_message("assistant", converted_visible[1], row_internal[1])
if not last_message_only:
output += "</div></div>"
return output
def time_greeting():
current_hour = datetime.datetime.now().hour
if 5 <= current_hour < 12:
return "Good morning!"
elif 12 <= current_hour < 18:
return "Good afternoon!"
else:
return "Good evening!"
def chat_html_wrapper(history, name1, name2, mode, style, character, reset_cache=False, last_message_only=False):
if len(history['visible']) == 0:
greeting = f"<div class=\"welcome-greeting\">{time_greeting()} How can I help you today?</div>"
result = f'<div class="chat" id="chat">{greeting}</div>'
elif mode == 'instruct':
result = generate_instruct_html(history, last_message_only=last_message_only)
else:
result = generate_cai_chat_html(history, name1, name2, style, character, reset_cache=reset_cache, last_message_only=last_message_only)
return {'html': result, 'last_message_only': last_message_only} | --- +++ @@ -110,11 +110,13 @@
def extract_thinking_block(string):
+ """Extract thinking blocks from the beginning of an HTML-escaped string."""
return extract_reasoning(string, html_escaped=True)
def build_tool_call_block(header, body, message_id, index):
+ """Build HTML for a tool call accordion block."""
block_id = f"tool-call-{message_id}-{index}"
if body == '...':
@@ -142,6 +144,7 @@
def build_thinking_block(thinking_content, message_id, has_remaining_content, thinking_index=0):
+ """Build HTML for a thinking block."""
if thinking_content is None:
return None
@@ -167,6 +170,7 @@
def build_main_content_block(content):
+ """Build HTML for the main content block."""
if not content:
return ""
@@ -174,6 +178,10 @@
def process_markdown_content(string):
+ """
+ Process a string through the markdown conversion pipeline.
+ Uses robust manual parsing to ensure correct LaTeX and Code Block rendering.
+ """
if not string:
return ""
@@ -182,6 +190,7 @@ LATEX_UNDERSCORE_PLACEHOLDER = "LATEXUNDERSCOREPLACEHOLDER"
def protect_asterisks_underscores_in_latex(match):
+ """A replacer function for re.sub to protect asterisks and underscores in multiple LaTeX formats."""
# Check which delimiter group was captured
if match.group(1) is not None: # Content from $$...$$
content = match.group(1)
@@ -316,6 +325,10 @@
@functools.lru_cache(maxsize=None)
def convert_to_markdown(string, message_id=None):
+ """
+ Convert a string to markdown HTML with support for multiple block types.
+ Blocks are assembled in order: thinking, main content, etc.
+ """
if not string:
return ""
@@ -351,6 +364,7 @@ think_idx = 0
def process_text_segment(text, is_last_segment):
+ """Process a text segment between tool_call blocks for thinking content."""
nonlocal think_idx
if not text.strip():
return
@@ -385,6 +399,9 @@
def convert_to_markdown_wrapped(string, message_id=None, use_cache=True):
+ '''
+ Used to avoid caching convert_to_markdown calls during streaming.
+ '''
if use_cache:
return convert_to_markdown(string, message_id=message_id)
@@ -449,6 +466,7 @@
def format_message_timestamp(history, role, index, tooltip_include_timestamp=True):
+ """Get a formatted timestamp HTML span for a message if available"""
key = f"{role}_{index}"
if 'metadata' in history and key in history['metadata'] and history['metadata'][key].get('timestamp'):
timestamp = history['metadata'][key]['timestamp']
@@ -460,6 +478,7 @@
def format_message_attachments(history, role, index):
+ """Get formatted HTML for message attachments if available"""
key = f"{role}_{index}"
if 'metadata' in history and key in history['metadata'] and 'attachments' in history['metadata'][key]:
attachments = history['metadata'][key]['attachments']
@@ -497,6 +516,7 @@
def get_message_tooltip(history, role, index, include_timestamp=True):
+ """Get tooltip text combining timestamp and model name for a message"""
key = f"{role}_{index}"
if 'metadata' not in history or key not in history['metadata']:
return ""
@@ -513,6 +533,7 @@
def get_version_navigation_html(history, i, role):
+ """Generate simple navigation arrows for message versions"""
key = f"{role}_{i}"
metadata = history.get('metadata', {})
@@ -573,6 +594,7 @@ output = ""
def create_message(role, content, raw_content):
+ """Inner function that captures variables from outer scope."""
class_name = "user-message" if role == "user" else "assistant-message"
# Get role-specific data
@@ -624,6 +646,7 @@
def get_character_image_with_cache_buster():
+ """Get character image URL with cache busting based on file modification time"""
cache_path = shared.user_data_dir / "cache" / "pfp_character_thumb.png"
if cache_path.exists():
mtime = int(cache_path.stat().st_mtime)
@@ -641,6 +664,7 @@ img_bot = get_character_image_with_cache_buster()
def create_message(role, content, raw_content):
+ """Inner function for CAI-style messages."""
circle_class = "circle-you" if role == "user" else "circle-bot"
name = name1 if role == "user" else name2
@@ -714,4 +738,4 @@ else:
result = generate_cai_chat_html(history, name1, name2, style, character, reset_cache=reset_cache, last_message_only=last_message_only)
- return {'html': result, 'last_message_only': last_message_only}+ return {'html': result, 'last_message_only': last_message_only}
| https://raw.githubusercontent.com/oobabooga/text-generation-webui/HEAD/modules/html_generator.py |
Generate consistent docstrings | import importlib.util
import json
from modules import shared
from modules.logging_colors import logger
from modules.utils import natural_keys, sanitize_filename
def get_available_tools():
tools_dir = shared.user_data_dir / 'tools'
tools_dir.mkdir(parents=True, exist_ok=True)
return sorted((p.stem for p in tools_dir.glob('*.py')), key=natural_keys)
def load_tools(selected_names):
tool_defs = []
executors = {}
for name in selected_names:
name = sanitize_filename(name)
if not name:
continue
path = shared.user_data_dir / 'tools' / f'{name}.py'
if not path.exists():
continue
try:
spec = importlib.util.spec_from_file_location(f"tool_{name}", str(path))
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
except Exception:
logger.exception(f'Failed to load tool script "{name}"')
continue
tool_def = getattr(module, 'tool', None)
execute_fn = getattr(module, 'execute', None)
if tool_def is None or execute_fn is None:
logger.warning(f'Tool "{name}" is missing a "tool" dict or "execute" function.')
continue
func_name = tool_def.get('function', {}).get('name', name)
if func_name in executors:
logger.warning(f'Tool "{name}" declares function name "{func_name}" which conflicts with an already loaded tool. Skipping.')
continue
tool_defs.append(tool_def)
executors[func_name] = execute_fn
return tool_defs, executors
def execute_tool(func_name, arguments, executors):
fn = executors.get(func_name)
if fn is None:
return json.dumps({"error": f"Unknown tool: {func_name}"})
try:
if isinstance(arguments, str):
arguments = json.loads(arguments)
result = fn(arguments)
return json.dumps(result) if not isinstance(result, str) else result
except Exception as e:
logger.exception(f'Tool "{func_name}" execution failed')
return json.dumps({"error": str(e)}) | --- +++ @@ -7,12 +7,19 @@
def get_available_tools():
+ """Return sorted list of tool script names from user_data/tools/*.py."""
tools_dir = shared.user_data_dir / 'tools'
tools_dir.mkdir(parents=True, exist_ok=True)
return sorted((p.stem for p in tools_dir.glob('*.py')), key=natural_keys)
def load_tools(selected_names):
+ """
+ Import selected tool scripts and return their definitions and executors.
+ Returns (tool_defs, executors) where:
+ - tool_defs: list of OpenAI-format tool dicts
+ - executors: dict mapping function_name -> execute callable
+ """
tool_defs = []
executors = {}
for name in selected_names:
@@ -49,6 +56,7 @@
def execute_tool(func_name, arguments, executors):
+ """Execute a tool by function name. Returns result as a JSON string."""
fn = executors.get(func_name)
if fn is None:
return json.dumps({"error": f"Unknown tool: {func_name}"})
@@ -60,4 +68,4 @@ return json.dumps(result) if not isinstance(result, str) else result
except Exception as e:
logger.exception(f'Tool "{func_name}" execution failed')
- return json.dumps({"error": str(e)})+ return json.dumps({"error": str(e)})
| https://raw.githubusercontent.com/oobabooga/text-generation-webui/HEAD/modules/tool_use.py |
Write clean docstrings for readability | import os
import re
from datetime import datetime
from pathlib import Path
from modules import shared
from modules.logging_colors import logger
# Helper function to get multiple values from shared.gradio
def gradio(*keys):
if len(keys) == 1 and type(keys[0]) in [list, tuple]:
keys = keys[0]
return [shared.gradio[k] for k in keys]
def sanitize_filename(name):
name = Path(name).name # drop all directory components
name = name.lstrip('.') # remove leading dots
return name
def _is_path_allowed(abs_path_str):
abs_path = Path(abs_path_str).resolve()
user_data_resolved = shared.user_data_dir.resolve()
try:
abs_path.relative_to(user_data_resolved)
return True
except ValueError:
return False
def save_file(fname, contents):
if fname == '':
logger.error('File name is empty!')
return
abs_path_str = os.path.abspath(fname)
if not _is_path_allowed(abs_path_str):
logger.error(f'Invalid file path: \"{fname}\"')
return
if Path(abs_path_str).suffix.lower() not in ('.yaml', '.yml', '.json', '.txt', '.gbnf'):
logger.error(f'Refusing to save file with disallowed extension: \"{fname}\"')
return
with open(abs_path_str, 'w', encoding='utf-8') as f:
f.write(contents)
logger.info(f'Saved \"{abs_path_str}\".')
def delete_file(fname):
if fname == '':
logger.error('File name is empty!')
return
abs_path_str = os.path.abspath(fname)
if not _is_path_allowed(abs_path_str):
logger.error(f'Invalid file path: \"{fname}\"')
return
p = Path(abs_path_str)
if p.exists():
p.unlink()
logger.info(f'Deleted \"{fname}\".')
def current_time():
return f"{datetime.now().strftime('%Y-%m-%d_%Hh%Mm%Ss')}"
def atoi(text):
return int(text) if text.isdigit() else text.lower()
# Replace multiple string pairs in a string
def replace_all(text, dic):
for i, j in dic.items():
text = text.replace(i, j)
return text
def natural_keys(text):
return [atoi(c) for c in re.split(r'(\d+)', text)]
def check_model_loaded():
if shared.model_name == 'None' or shared.model is None:
if len(get_available_models()) == 0:
error_msg = f"No model is loaded.\n\nTo get started:\n1) Place a GGUF file in your {shared.user_data_dir}/models folder\n2) Go to the Model tab and select it"
logger.error(error_msg)
return False, error_msg
else:
error_msg = "No model is loaded. Please select one in the Model tab."
logger.error(error_msg)
return False, error_msg
return True, None
def resolve_model_path(model_name_or_path, image_model=False):
path_candidate = Path(model_name_or_path)
if path_candidate.exists():
return path_candidate
elif image_model:
return Path(f'{shared.args.image_model_dir}/{model_name_or_path}')
else:
return Path(f'{shared.args.model_dir}/{model_name_or_path}')
def get_available_models():
# Get all GGUF files
gguf_files = get_available_ggufs()
# Filter out non-first parts of multipart GGUF files
filtered_gguf_files = []
for gguf_path in gguf_files:
filename = os.path.basename(gguf_path)
match = re.search(r'-(\d+)-of-\d+\.gguf$', filename)
if match:
part_number = match.group(1)
# Keep only if it's part 1
if part_number.lstrip("0") == "1":
filtered_gguf_files.append(gguf_path)
else:
# Not a multi-part file
filtered_gguf_files.append(gguf_path)
model_dir = Path(shared.args.model_dir)
# Find top-level directories containing GGUF files
dirs_with_gguf = set()
for gguf_path in gguf_files:
path = Path(gguf_path)
if len(path.parts) > 0:
dirs_with_gguf.add(path.parts[0])
# Find directories with safetensors files
dirs_with_safetensors = set()
for item in os.listdir(model_dir):
item_path = model_dir / item
if item_path.is_dir():
if any(file.lower().endswith(('.safetensors', '.pt')) for file in os.listdir(item_path) if (item_path / file).is_file()):
dirs_with_safetensors.add(item)
# Find valid model directories
model_dirs = []
for item in os.listdir(model_dir):
item_path = model_dir / item
if not item_path.is_dir():
continue
# Include directory if it either doesn't contain GGUF files
# or contains both GGUF and safetensors files
if item not in dirs_with_gguf or item in dirs_with_safetensors:
model_dirs.append(item)
model_dirs = sorted(model_dirs, key=natural_keys)
return filtered_gguf_files + model_dirs
def get_available_image_models():
model_dir = Path(shared.args.image_model_dir)
model_dir.mkdir(parents=True, exist_ok=True)
# Find valid model directories
model_dirs = []
for item in os.listdir(model_dir):
item_path = model_dir / item
if not item_path.is_dir():
continue
model_dirs.append(item)
model_dirs = sorted(model_dirs, key=natural_keys)
return model_dirs
def get_available_ggufs():
model_list = []
model_dir = Path(shared.args.model_dir)
for dirpath, _, files in os.walk(model_dir, followlinks=True):
for file in files:
if file.lower().endswith(".gguf"):
model_path = Path(dirpath) / file
rel_path = model_path.relative_to(model_dir)
model_list.append(str(rel_path))
return sorted(model_list, key=natural_keys)
def get_available_mmproj():
mmproj_dir = shared.user_data_dir / 'mmproj'
if not mmproj_dir.exists():
return ['None']
mmproj_files = []
for item in mmproj_dir.iterdir():
if item.is_file() and item.suffix.lower() in ('.gguf', '.bin'):
mmproj_files.append(item.name)
return ['None'] + sorted(mmproj_files, key=natural_keys)
def get_available_presets():
return sorted(set((k.stem for k in (shared.user_data_dir / 'presets').glob('*.yaml'))), key=natural_keys)
def get_available_prompts():
notebook_dir = shared.user_data_dir / 'logs' / 'notebook'
notebook_dir.mkdir(parents=True, exist_ok=True)
prompt_files = list(notebook_dir.glob('*.txt'))
if not prompt_files:
new_name = current_time()
new_path = notebook_dir / f"{new_name}.txt"
new_path.write_text("In this story,", encoding='utf-8')
prompt_files = [new_path]
sorted_files = sorted(prompt_files, key=lambda x: x.stat().st_mtime, reverse=True)
prompts = [file.stem for file in sorted_files]
return prompts
def get_available_characters():
paths = (x for x in (shared.user_data_dir / 'characters').iterdir() if x.suffix in ('.json', '.yaml', '.yml'))
return sorted(set((k.stem for k in paths)), key=natural_keys)
def get_available_users():
users_dir = shared.user_data_dir / 'users'
users_dir.mkdir(parents=True, exist_ok=True)
paths = (x for x in users_dir.iterdir() if x.suffix in ('.json', '.yaml', '.yml'))
return sorted(set((k.stem for k in paths)), key=natural_keys)
def get_available_instruction_templates():
path = str(shared.user_data_dir / "instruction-templates")
paths = []
if os.path.exists(path):
paths = (x for x in Path(path).iterdir() if x.suffix in ('.json', '.yaml', '.yml'))
return ['None'] + sorted(set((k.stem for k in paths)), key=natural_keys)
def get_available_extensions():
# User extensions (higher priority)
user_extensions = []
user_ext_path = shared.user_data_dir / 'extensions'
if user_ext_path.exists():
user_exts = map(lambda x: x.parent.name, user_ext_path.glob('*/script.py'))
user_extensions = sorted(set(user_exts), key=natural_keys)
# System extensions (excluding those overridden by user extensions)
system_exts = map(lambda x: x.parent.name, Path('extensions').glob('*/script.py'))
system_extensions = sorted(set(system_exts) - set(user_extensions), key=natural_keys)
return user_extensions + system_extensions
def get_available_loras():
return ['None'] + sorted([item.name for item in list(Path(shared.args.lora_dir).glob('*')) if not item.name.endswith(('.txt', '-np', '.pt', '.json'))], key=natural_keys)
def get_datasets(path: str, ext: str):
# include subdirectories for raw txt files to allow training from a subdirectory of txt files
if ext == "txt":
return ['None'] + sorted(set([k.stem for k in list(Path(path).glob('*.txt')) + list(Path(path).glob('*/')) if k.stem != 'put-trainer-datasets-here']), key=natural_keys)
return ['None'] + sorted(set([k.stem for k in Path(path).glob(f'*.{ext}') if k.stem != 'put-trainer-datasets-here']), key=natural_keys)
def get_chat_datasets(path: str):
return ['None'] + sorted(set([k.stem for k in Path(path).glob('*.json') if k.stem != 'put-trainer-datasets-here' and _is_chat_dataset(k)]), key=natural_keys)
def get_text_datasets(path: str):
return ['None'] + sorted(set([k.stem for k in Path(path).glob('*.json') if k.stem != 'put-trainer-datasets-here' and _is_text_dataset(k)]), key=natural_keys)
def _peek_json_keys(filepath):
import json
decoder = json.JSONDecoder()
WS = ' \t\n\r'
try:
with open(filepath, 'r', encoding='utf-8') as f:
buf = ''
obj_start = None
while len(buf) < 1 << 20: # Read up to 1MB
chunk = f.read(8192)
if not chunk:
break
buf += chunk
if obj_start is None:
idx = 0
while idx < len(buf) and buf[idx] in WS:
idx += 1
if idx >= len(buf):
continue
if buf[idx] != '[':
return set()
idx += 1
while idx < len(buf) and buf[idx] in WS:
idx += 1
if idx >= len(buf):
continue
obj_start = idx
try:
obj, _ = decoder.raw_decode(buf, obj_start)
if isinstance(obj, dict):
return set(obj.keys())
return set()
except json.JSONDecodeError:
continue
except Exception:
pass
return set()
def _is_chat_dataset(filepath):
keys = _peek_json_keys(filepath)
return bool(keys & {'messages', 'conversations'})
def _is_text_dataset(filepath):
keys = _peek_json_keys(filepath)
return 'text' in keys
def get_available_chat_styles():
return sorted(set(('-'.join(k.stem.split('-')[1:]) for k in Path('css').glob('chat_style*.css'))), key=natural_keys)
def get_available_grammars():
return ['None'] + sorted([item.name for item in list((shared.user_data_dir / 'grammars').glob('*.gbnf'))], key=natural_keys) | --- +++ @@ -16,12 +16,18 @@
def sanitize_filename(name):
+ """Strip path traversal components from a filename.
+
+ Returns only the final path component with leading dots removed,
+ preventing directory traversal via '../' or absolute paths.
+ """
name = Path(name).name # drop all directory components
name = name.lstrip('.') # remove leading dots
return name
def _is_path_allowed(abs_path_str):
+ """Check if a path is under the configured user_data directory."""
abs_path = Path(abs_path_str).resolve()
user_data_resolved = shared.user_data_dir.resolve()
try:
@@ -102,6 +108,10 @@
def resolve_model_path(model_name_or_path, image_model=False):
+ """
+ Resolves a model path, checking for a direct path
+ before the default models directory.
+ """
path_candidate = Path(model_name_or_path)
if path_candidate.exists():
@@ -280,14 +290,17 @@
def get_chat_datasets(path: str):
+ """List JSON datasets that contain chat conversations (messages or ShareGPT format)."""
return ['None'] + sorted(set([k.stem for k in Path(path).glob('*.json') if k.stem != 'put-trainer-datasets-here' and _is_chat_dataset(k)]), key=natural_keys)
def get_text_datasets(path: str):
+ """List JSON datasets that contain raw text ({"text": ...} format)."""
return ['None'] + sorted(set([k.stem for k in Path(path).glob('*.json') if k.stem != 'put-trainer-datasets-here' and _is_text_dataset(k)]), key=natural_keys)
def _peek_json_keys(filepath):
+ """Read the first object in a JSON array file and return its keys."""
import json
decoder = json.JSONDecoder()
WS = ' \t\n\r'
@@ -341,4 +354,4 @@
def get_available_grammars():
- return ['None'] + sorted([item.name for item in list((shared.user_data_dir / 'grammars').glob('*.gbnf'))], key=natural_keys)+ return ['None'] + sorted([item.name for item in list((shared.user_data_dir / 'grammars').glob('*.gbnf'))], key=natural_keys)
| https://raw.githubusercontent.com/oobabooga/text-generation-webui/HEAD/modules/utils.py |
Add return value explanations in docstrings |
import base64
import io
import time
from extensions.openai.errors import ServiceUnavailableError
from modules import shared
def generations(request):
from modules.ui_image_generation import generate
if shared.image_model is None:
raise ServiceUnavailableError("No image model loaded. Load a model via the UI first.")
width, height = request.get_width_height()
# Build state dict: GenerationOptions fields + image-specific keys
state = request.model_dump()
state.update({
'image_model_menu': shared.image_model_name,
'image_prompt': request.prompt,
'image_neg_prompt': request.negative_prompt,
'image_width': width,
'image_height': height,
'image_steps': request.steps,
'image_seed': request.image_seed,
'image_batch_size': request.batch_size,
'image_batch_count': request.batch_count,
'image_cfg_scale': request.cfg_scale,
'image_llm_variations': False,
})
# Exhaust generator, keep final result
images = []
for images, _ in generate(state, save_images=False):
pass
if not images:
raise ServiceUnavailableError("Image generation failed or produced no images.")
# Build response
resp = {'created': int(time.time()), 'data': []}
for img in images:
b64 = _image_to_base64(img)
image_obj = {'revised_prompt': request.prompt}
if request.response_format == 'b64_json':
image_obj['b64_json'] = b64
else:
image_obj['url'] = f'data:image/png;base64,{b64}'
resp['data'].append(image_obj)
return resp
def _image_to_base64(image) -> str:
buffered = io.BytesIO()
image.save(buffered, format="PNG")
return base64.b64encode(buffered.getvalue()).decode('utf-8') | --- +++ @@ -1,3 +1,6 @@+"""
+OpenAI-compatible image generation using local diffusion models.
+"""
import base64
import io
@@ -8,6 +11,10 @@
def generations(request):
+ """
+ Generate images using the loaded diffusion model.
+ Returns dict with 'created' timestamp and 'data' list of images.
+ """
from modules.ui_image_generation import generate
if shared.image_model is None:
@@ -59,4 +66,4 @@ def _image_to_base64(image) -> str:
buffered = io.BytesIO()
image.save(buffered, format="PNG")
- return base64.b64encode(buffered.getvalue()).decode('utf-8')+ return base64.b64encode(buffered.getvalue()).decode('utf-8')
| https://raw.githubusercontent.com/oobabooga/text-generation-webui/HEAD/extensions/openai/images.py |
Include argument descriptions in docstrings | import json
import os
import random
import time
import traceback
from datetime import datetime
from pathlib import Path
import gradio as gr
from PIL.PngImagePlugin import PngInfo
from modules import shared, ui, utils
from modules.image_models import (
get_pipeline_type,
load_image_model,
unload_image_model
)
from modules.image_utils import open_image_safely
from modules.logging_colors import logger
from modules.text_generation import stop_everything_event
from modules.utils import check_model_loaded, gradio
ASPECT_RATIOS = {
"1:1 Square": (1, 1),
"16:9 Cinema": (16, 9),
"9:16 Mobile": (9, 16),
"4:3 Photo": (4, 3),
"Custom": None,
}
STEP = 16
IMAGES_PER_PAGE = 32
# Settings keys to save in PNG metadata (Generate tab only)
METADATA_SETTINGS_KEYS = [
'image_prompt',
'image_neg_prompt',
'image_width',
'image_height',
'image_aspect_ratio',
'image_steps',
'image_seed',
'image_cfg_scale',
]
# Cache for all image paths
_image_cache = []
_cache_timestamp = 0
def round_to_step(value, step=STEP):
return round(value / step) * step
def clamp(value, min_val, max_val):
return max(min_val, min(max_val, value))
def apply_aspect_ratio(aspect_ratio, current_width, current_height):
if aspect_ratio == "Custom" or aspect_ratio not in ASPECT_RATIOS:
return current_width, current_height
w_ratio, h_ratio = ASPECT_RATIOS[aspect_ratio]
if w_ratio == h_ratio:
base = min(current_width, current_height)
new_width = base
new_height = base
elif w_ratio < h_ratio:
new_width = current_width
new_height = round_to_step(current_width * h_ratio / w_ratio)
else:
new_height = current_height
new_width = round_to_step(current_height * w_ratio / h_ratio)
new_width = clamp(new_width, 256, 2048)
new_height = clamp(new_height, 256, 2048)
return int(new_width), int(new_height)
def update_height_from_width(width, aspect_ratio):
if aspect_ratio == "Custom" or aspect_ratio not in ASPECT_RATIOS:
return gr.update()
w_ratio, h_ratio = ASPECT_RATIOS[aspect_ratio]
new_height = round_to_step(width * h_ratio / w_ratio)
new_height = clamp(new_height, 256, 2048)
return int(new_height)
def update_width_from_height(height, aspect_ratio):
if aspect_ratio == "Custom" or aspect_ratio not in ASPECT_RATIOS:
return gr.update()
w_ratio, h_ratio = ASPECT_RATIOS[aspect_ratio]
new_width = round_to_step(height * w_ratio / h_ratio)
new_width = clamp(new_width, 256, 2048)
return int(new_width)
def swap_dimensions_and_update_ratio(width, height, aspect_ratio):
new_width, new_height = height, width
new_ratio = "Custom"
for name, ratios in ASPECT_RATIOS.items():
if ratios is None:
continue
w_r, h_r = ratios
expected_height = new_width * h_r / w_r
if abs(expected_height - new_height) < STEP:
new_ratio = name
break
return new_width, new_height, new_ratio
def build_generation_metadata(state, actual_seed):
metadata = {}
for key in METADATA_SETTINGS_KEYS:
if key in state:
metadata[key] = state[key]
# Store the actual seed used (not -1)
metadata['image_seed'] = actual_seed
metadata['generated_at'] = datetime.now().isoformat()
metadata['model'] = shared.image_model_name
return metadata
def save_generated_images(images, state, actual_seed):
if shared.args.multi_user:
return []
date_str = datetime.now().strftime("%Y-%m-%d")
folder_path = str(shared.user_data_dir / "image_outputs" / date_str)
os.makedirs(folder_path, exist_ok=True)
metadata = build_generation_metadata(state, actual_seed)
metadata_json = json.dumps(metadata, ensure_ascii=False)
saved_paths = []
for idx, img in enumerate(images):
timestamp = datetime.now().strftime("%H-%M-%S")
filename = f"TGW_{timestamp}_{actual_seed:010d}_{idx:03d}.png"
filepath = os.path.join(folder_path, filename)
# Create PNG metadata
png_info = PngInfo()
png_info.add_text("image_gen_settings", metadata_json)
# Save with metadata
img.save(filepath, pnginfo=png_info)
saved_paths.append(filepath)
return saved_paths
def read_image_metadata(image_path):
try:
img = open_image_safely(image_path)
if img is None:
return None
try:
if hasattr(img, 'text') and 'image_gen_settings' in img.text:
return json.loads(img.text['image_gen_settings'])
finally:
img.close()
except Exception as e:
logger.debug(f"Could not read metadata from {image_path}: {e}")
return None
def format_metadata_for_display(metadata):
if not metadata:
return "No generation settings found in this image."
lines = []
# Display in a nice order
display_order = [
('image_prompt', 'Prompt'),
('image_neg_prompt', 'Negative Prompt'),
('image_width', 'Width'),
('image_height', 'Height'),
('image_aspect_ratio', 'Aspect Ratio'),
('image_steps', 'Steps'),
('image_cfg_scale', 'CFG Scale'),
('image_seed', 'Seed'),
('model', 'Model'),
('generated_at', 'Generated At'),
]
for key, label in display_order:
if key in metadata:
value = metadata[key]
if key in ['image_prompt', 'image_neg_prompt'] and value:
# Truncate long prompts for display
if len(str(value)) > 200:
value = str(value)[:200] + "..."
lines.append(f"**{label}:** {value}")
return "\n\n".join(lines)
def get_all_history_images(force_refresh=False):
global _image_cache, _cache_timestamp
output_dir = str(shared.user_data_dir / "image_outputs")
if not os.path.exists(output_dir):
return []
# Check if we need to refresh cache
current_time = time.time()
if not force_refresh and _image_cache and (current_time - _cache_timestamp) < 2:
return _image_cache
image_files = []
for root, _, files in os.walk(output_dir):
for file in files:
if file.endswith((".png", ".jpg", ".jpeg")):
full_path = os.path.join(root, file)
image_files.append((full_path, os.path.getmtime(full_path)))
image_files.sort(key=lambda x: x[1], reverse=True)
_image_cache = [x[0] for x in image_files]
_cache_timestamp = current_time
return _image_cache
def get_paginated_images(page=0, force_refresh=False):
all_images = get_all_history_images(force_refresh)
total_images = len(all_images)
total_pages = max(1, (total_images + IMAGES_PER_PAGE - 1) // IMAGES_PER_PAGE)
# Clamp page to valid range
page = max(0, min(page, total_pages - 1))
start_idx = page * IMAGES_PER_PAGE
end_idx = min(start_idx + IMAGES_PER_PAGE, total_images)
page_images = all_images[start_idx:end_idx]
return page_images, page, total_pages, total_images
def get_initial_page_info():
_, page, total_pages, total_images = get_paginated_images(0)
return f"Page {page + 1} of {total_pages} ({total_images} total images)"
def refresh_gallery(current_page=0):
images, page, total_pages, total_images = get_paginated_images(current_page, force_refresh=True)
page_info = f"Page {page + 1} of {total_pages} ({total_images} total images)"
return images, page, page_info
def go_to_page(page_num, current_page):
try:
page = int(page_num) - 1 # Convert to 0-indexed
except (ValueError, TypeError):
page = current_page
images, page, total_pages, total_images = get_paginated_images(page)
page_info = f"Page {page + 1} of {total_pages} ({total_images} total images)"
return images, page, page_info
def next_page(current_page):
images, page, total_pages, total_images = get_paginated_images(current_page + 1)
page_info = f"Page {page + 1} of {total_pages} ({total_images} total images)"
return images, page, page_info
def prev_page(current_page):
images, page, total_pages, total_images = get_paginated_images(current_page - 1)
page_info = f"Page {page + 1} of {total_pages} ({total_images} total images)"
return images, page, page_info
def on_gallery_select(evt: gr.SelectData, current_page):
if evt.index is None:
return "", "Select an image to view its settings"
if not _image_cache:
get_all_history_images()
all_images = _image_cache
total_images = len(all_images)
# Calculate the actual index in the full list
start_idx = current_page * IMAGES_PER_PAGE
actual_idx = start_idx + evt.index
if actual_idx >= total_images:
return "", "Image not found"
image_path = all_images[actual_idx]
metadata = read_image_metadata(image_path)
metadata_display = format_metadata_for_display(metadata)
return image_path, metadata_display
def send_to_generate(selected_image_path):
if not selected_image_path or not os.path.exists(selected_image_path):
return [gr.update()] * 8 + ["No image selected"]
metadata = read_image_metadata(selected_image_path)
if not metadata:
return [gr.update()] * 8 + ["No settings found in this image"]
# Return updates for each input element in order
updates = [
gr.update(value=metadata.get('image_prompt', '')),
gr.update(value=metadata.get('image_neg_prompt', '')),
gr.update(value=metadata.get('image_width', 1024)),
gr.update(value=metadata.get('image_height', 1024)),
gr.update(value=metadata.get('image_aspect_ratio', '1:1 Square')),
gr.update(value=metadata.get('image_steps', 9)),
gr.update(value=metadata.get('image_seed', -1)),
gr.update(value=metadata.get('image_cfg_scale', 0.0)),
]
status = f"✓ Settings loaded from image (seed: {metadata.get('image_seed', 'unknown')})"
return updates + [status]
def read_dropped_image_metadata(image_path):
if not image_path:
return "Drop an image to view its generation settings."
metadata = read_image_metadata(image_path)
return format_metadata_for_display(metadata)
def create_ui():
if shared.settings['image_model_menu'] != 'None':
shared.image_model_name = shared.settings['image_model_menu']
with gr.Tab("Image AI", elem_id="image-ai-tab"):
with gr.Tabs():
# TAB 1: GENERATE
with gr.TabItem("Generate"):
with gr.Row():
with gr.Column(scale=4, min_width=350):
shared.gradio['image_prompt'] = gr.Textbox(
label="Prompt",
placeholder="Describe your imagination...",
lines=3,
autofocus=True,
value=shared.settings['image_prompt']
)
shared.gradio['image_neg_prompt'] = gr.Textbox(
label="Negative Prompt",
placeholder="Low quality...",
lines=3,
value=shared.settings['image_neg_prompt']
)
shared.gradio['image_llm_variations'] = gr.Checkbox(
value=shared.settings['image_llm_variations'],
label='LLM Prompt Variations',
elem_id="llm-prompt-variations",
)
shared.gradio['image_llm_variations_prompt'] = gr.Textbox(
value=shared.settings['image_llm_variations_prompt'],
label='Variation Prompt',
lines=3,
placeholder='Instructions for generating prompt variations...',
visible=shared.settings['image_llm_variations'],
info='Use the loaded LLM to generate creative prompt variations for each sequential batch.'
)
shared.gradio['image_generate_btn'] = gr.Button("Generate", variant="primary", size="lg")
shared.gradio['image_stop_btn'] = gr.Button("Stop", size="lg", visible=False)
shared.gradio['image_progress'] = gr.HTML(
value=progress_bar_html(),
elem_id="image-progress"
)
gr.Markdown("### Dimensions")
with gr.Row():
with gr.Column():
shared.gradio['image_width'] = gr.Slider(256, 2048, value=shared.settings['image_width'], step=STEP, label="Width")
with gr.Column():
shared.gradio['image_height'] = gr.Slider(256, 2048, value=shared.settings['image_height'], step=STEP, label="Height")
shared.gradio['image_swap_btn'] = gr.Button("⇄ Swap", elem_classes='refresh-button', scale=0, min_width=80, elem_id="swap-height-width")
with gr.Row():
shared.gradio['image_aspect_ratio'] = gr.Radio(
choices=["1:1 Square", "16:9 Cinema", "9:16 Mobile", "4:3 Photo", "Custom"],
value=shared.settings['image_aspect_ratio'],
label="Aspect Ratio",
interactive=True
)
gr.Markdown("### Config")
with gr.Row():
with gr.Column():
shared.gradio['image_steps'] = gr.Slider(1, 100, value=shared.settings['image_steps'], step=1, label="Steps")
shared.gradio['image_cfg_scale'] = gr.Slider(
0.0, 10.0,
value=shared.settings['image_cfg_scale'],
step=0.1,
label="CFG Scale",
info="Z-Image Turbo: 0.0 | Qwen: 4.0"
)
shared.gradio['image_seed'] = gr.Number(label="Seed", value=shared.settings['image_seed'], precision=0, info="-1 = Random")
with gr.Column():
shared.gradio['image_batch_size'] = gr.Slider(1, 32, value=shared.settings['image_batch_size'], step=1, label="Batch Size (VRAM Heavy)", info="Generates N images at once.")
shared.gradio['image_batch_count'] = gr.Slider(1, 128, value=shared.settings['image_batch_count'], step=1, label="Sequential Count (Loop)", info="Repeats the generation N times.")
with gr.Column(scale=6, min_width=500):
with gr.Column(elem_classes=["viewport-container"]):
shared.gradio['image_output_gallery'] = gr.Gallery(label="Output", show_label=False, columns=2, rows=2, height="80vh", object_fit="contain", preview=True, elem_id="image-output-gallery")
# TAB 2: GALLERY (with pagination)
with gr.TabItem("Gallery"):
with gr.Row():
with gr.Column(scale=3):
# Pagination controls
with gr.Row():
shared.gradio['image_refresh_history'] = gr.Button("🔄 Refresh", elem_classes="refresh-button")
shared.gradio['image_prev_page'] = gr.Button("◀ Prev Page", elem_classes="refresh-button")
shared.gradio['image_page_info'] = gr.Markdown(value=get_initial_page_info, elem_id="image-page-info")
shared.gradio['image_next_page'] = gr.Button("Next Page ▶", elem_classes="refresh-button")
shared.gradio['image_page_input'] = gr.Number(value=1, label="Page", precision=0, minimum=1, scale=0, min_width=80)
shared.gradio['image_go_to_page'] = gr.Button("Go", elem_classes="refresh-button", scale=0, min_width=50)
# State for current page and selected image path
shared.gradio['image_current_page'] = gr.State(value=0)
shared.gradio['image_selected_path'] = gr.State(value="")
# Paginated gallery using gr.Gallery
shared.gradio['image_history_gallery'] = gr.Gallery(
value=lambda: get_paginated_images(0)[0],
label="Image History",
show_label=False,
columns=6,
object_fit="cover",
height="auto",
allow_preview=True,
elem_id="image-history-gallery"
)
with gr.Column(scale=1):
gr.Markdown("### Generation Settings")
shared.gradio['image_settings_display'] = gr.Markdown("Select an image to view its settings")
shared.gradio['image_send_to_generate'] = gr.Button("Send to Generate", variant="primary")
shared.gradio['image_gallery_status'] = gr.Markdown("")
gr.Markdown("### Import Image")
shared.gradio['image_drop_upload'] = gr.Image(
label="Drop image here to view settings",
type="filepath",
height=150
)
# TAB 3: MODEL
with gr.TabItem("Model"):
with gr.Row():
with gr.Column():
with gr.Row():
shared.gradio['image_model_menu'] = gr.Dropdown(
choices=utils.get_available_image_models(),
value=shared.settings['image_model_menu'],
label='Model',
elem_classes='slim-dropdown'
)
shared.gradio['image_refresh_models'] = gr.Button("🔄", elem_classes='refresh-button', scale=0, min_width=40)
shared.gradio['image_load_model'] = gr.Button("Load", variant='primary', elem_classes='refresh-button')
shared.gradio['image_unload_model'] = gr.Button("Unload", elem_classes='refresh-button')
gr.Markdown("## Settings")
with gr.Row():
with gr.Column():
shared.gradio['image_quant'] = gr.Dropdown(
label='Quantization',
choices=['none', 'bnb-8bit', 'bnb-4bit', 'torchao-int8wo', 'torchao-fp4', 'torchao-float8wo'],
value=shared.settings['image_quant'],
info='BnB: bitsandbytes quantization. torchao: int8wo, fp4, float8wo.'
)
shared.gradio['image_dtype'] = gr.Dropdown(
choices=['bfloat16', 'float16'],
value=shared.settings['image_dtype'],
label='Data Type',
info='bfloat16 recommended for modern GPUs'
)
shared.gradio['image_attn_backend'] = gr.Dropdown(
choices=['sdpa', 'flash_attention_2'],
value=shared.settings['image_attn_backend'],
label='Attention Backend',
info='SDPA is default. Flash Attention requires compatible GPU.'
)
with gr.Column():
shared.gradio['image_compile'] = gr.Checkbox(
value=shared.settings['image_compile'],
label='Compile Model',
info='Faster inference after first run. First run will be slow.'
)
shared.gradio['image_cpu_offload'] = gr.Checkbox(
value=shared.settings['image_cpu_offload'],
label='CPU Offload',
info='Enable for low VRAM GPUs. Slower but uses less memory.'
)
with gr.Column():
shared.gradio['image_download_path'] = gr.Textbox(
label="Download model",
placeholder="Tongyi-MAI/Z-Image-Turbo",
info="Enter HuggingFace path. Use : for branch, e.g. user/model:main"
)
shared.gradio['image_download_btn'] = gr.Button("Download", variant='primary')
shared.gradio['image_model_status'] = gr.Markdown(value="")
def create_event_handlers():
# Dimension controls
shared.gradio['image_aspect_ratio'].change(
apply_aspect_ratio,
gradio('image_aspect_ratio', 'image_width', 'image_height'),
gradio('image_width', 'image_height'),
show_progress=False
)
shared.gradio['image_width'].release(
update_height_from_width,
gradio('image_width', 'image_aspect_ratio'),
gradio('image_height'),
show_progress=False
)
shared.gradio['image_height'].release(
update_width_from_height,
gradio('image_height', 'image_aspect_ratio'),
gradio('image_width'),
show_progress=False
)
shared.gradio['image_swap_btn'].click(
swap_dimensions_and_update_ratio,
gradio('image_width', 'image_height', 'image_aspect_ratio'),
gradio('image_width', 'image_height', 'image_aspect_ratio'),
show_progress=False
)
# Generation
shared.gradio['image_generate_btn'].click(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
lambda: [gr.update(visible=True), gr.update(visible=False)], None, gradio('image_stop_btn', 'image_generate_btn')).then(
generate, gradio('interface_state'), gradio('image_output_gallery', 'image_progress'), show_progress=False).then(
lambda: [gr.update(visible=False), gr.update(visible=True)], None, gradio('image_stop_btn', 'image_generate_btn'))
shared.gradio['image_prompt'].submit(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
lambda: [gr.update(visible=True), gr.update(visible=False)], None, gradio('image_stop_btn', 'image_generate_btn')).then(
generate, gradio('interface_state'), gradio('image_output_gallery', 'image_progress'), show_progress=False).then(
lambda: [gr.update(visible=False), gr.update(visible=True)], None, gradio('image_stop_btn', 'image_generate_btn'))
shared.gradio['image_neg_prompt'].submit(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
lambda: [gr.update(visible=True), gr.update(visible=False)], None, gradio('image_stop_btn', 'image_generate_btn')).then(
generate, gradio('interface_state'), gradio('image_output_gallery', 'image_progress'), show_progress=False).then(
lambda: [gr.update(visible=False), gr.update(visible=True)], None, gradio('image_stop_btn', 'image_generate_btn'))
# Stop button
shared.gradio['image_stop_btn'].click(
stop_everything_event, None, None, show_progress=False
)
# Model management
shared.gradio['image_refresh_models'].click(
lambda: gr.update(choices=utils.get_available_image_models()),
None,
gradio('image_model_menu'),
show_progress=False
)
shared.gradio['image_load_model'].click(
load_image_model_wrapper,
gradio('image_model_menu', 'image_dtype', 'image_attn_backend', 'image_cpu_offload', 'image_compile', 'image_quant'),
gradio('image_model_status'),
show_progress=True
)
shared.gradio['image_unload_model'].click(
unload_image_model_wrapper,
None,
gradio('image_model_status'),
show_progress=False
)
shared.gradio['image_download_btn'].click(
download_image_model_wrapper,
gradio('image_download_path'),
gradio('image_model_status', 'image_model_menu'),
show_progress=True
)
# Gallery pagination handlers
shared.gradio['image_refresh_history'].click(
refresh_gallery,
gradio('image_current_page'),
gradio('image_history_gallery', 'image_current_page', 'image_page_info'),
show_progress=False
)
shared.gradio['image_next_page'].click(
next_page,
gradio('image_current_page'),
gradio('image_history_gallery', 'image_current_page', 'image_page_info'),
show_progress=False
)
shared.gradio['image_prev_page'].click(
prev_page,
gradio('image_current_page'),
gradio('image_history_gallery', 'image_current_page', 'image_page_info'),
show_progress=False
)
shared.gradio['image_go_to_page'].click(
go_to_page,
gradio('image_page_input', 'image_current_page'),
gradio('image_history_gallery', 'image_current_page', 'image_page_info'),
show_progress=False
)
# Image selection from gallery
shared.gradio['image_history_gallery'].select(
on_gallery_select,
gradio('image_current_page'),
gradio('image_selected_path', 'image_settings_display'),
show_progress=False
)
# Send to Generate
shared.gradio['image_send_to_generate'].click(
send_to_generate,
gradio('image_selected_path'),
gradio(
'image_prompt',
'image_neg_prompt',
'image_width',
'image_height',
'image_aspect_ratio',
'image_steps',
'image_seed',
'image_cfg_scale',
'image_gallery_status'
),
js=f'() => {{{ui.switch_tabs_js}; switch_to_image_ai_generate()}}',
show_progress=False
)
shared.gradio['image_drop_upload'].change(
read_dropped_image_metadata,
gradio('image_drop_upload'),
gradio('image_settings_display'),
show_progress=False
)
# LLM Variations visibility toggle
shared.gradio['image_llm_variations'].change(
lambda x: gr.update(visible=x),
gradio('image_llm_variations'),
gradio('image_llm_variations_prompt'),
show_progress=False
)
def generate_prompt_variation(state):
from modules.chat import generate_chat_prompt
from modules.text_generation import generate_reply
prompt = state['image_prompt']
# Check if LLM is loaded
model_loaded, _ = check_model_loaded()
if not model_loaded:
logger.warning("No LLM loaded for prompt variation. Using original prompt.")
return prompt
# Get the custom variation prompt or use default
variation_instruction = state.get('image_llm_variations_prompt', '')
if not variation_instruction:
variation_instruction = 'Write a variation of the image generation prompt above. Consider the intent of the user with that prompt and write something that will likely please them, with added details. Output only the new prompt. Do not add any explanations, prefixes, or additional text.'
augmented_message = f"{prompt}\n\n=====\n\n{variation_instruction}"
# Use minimal state for generation
var_state = state.copy()
var_state['history'] = {'internal': [], 'visible': [], 'metadata': {}}
var_state['auto_max_new_tokens'] = True
var_state['enable_thinking'] = False
var_state['reasoning_effort'] = 'low'
var_state['start_with'] = ""
formatted_prompt = generate_chat_prompt(augmented_message, var_state)
variation = ""
for reply in generate_reply(formatted_prompt, var_state, stopping_strings=[], is_chat=True):
variation = reply
# Strip thinking blocks if present
if "</think>" in variation:
variation = variation.rsplit("</think>", 1)[1]
elif "<|start|>assistant<|channel|>final<|message|>" in variation:
variation = variation.rsplit("<|start|>assistant<|channel|>final<|message|>", 1)[1]
elif "<|channel|>final<|message|>" in variation:
variation = variation.rsplit("<|channel|>final<|message|>", 1)[1]
elif "</seed:think>" in variation:
variation = variation.rsplit("</seed:think>", 1)[1]
variation = variation.strip()
if len(variation) >= 2 and variation.startswith('"') and variation.endswith('"'):
variation = variation[1:-1]
if variation:
logger.info("Prompt variation:")
print(variation)
return variation
return prompt
def progress_bar_html(progress=0, text=""):
if progress <= 0:
return '<div class="image-ai-separator"></div>'
return f'''<div class="image-ai-progress-wrapper">
<div class="image-ai-progress-track">
<div class="image-ai-progress-fill" style="width: {progress * 100:.1f}%;"></div>
</div>
<div class="image-ai-progress-text">{text}</div>
</div>'''
def generate(state, save_images=True):
import queue
import threading
import torch
from modules.torch_utils import clear_torch_cache, get_device
try:
model_name = state['image_model_menu']
if not model_name or model_name == 'None':
logger.error("No image model selected. Go to the Model tab and select a model.")
yield [], progress_bar_html()
return
if shared.image_model is None:
result = load_image_model(
model_name,
dtype=state['image_dtype'],
attn_backend=state['image_attn_backend'],
cpu_offload=state['image_cpu_offload'],
compile_model=state['image_compile'],
quant_method=state['image_quant']
)
if result is None:
logger.error(f"Failed to load model `{model_name}`.")
yield [], progress_bar_html()
return
shared.image_model_name = model_name
seed = state['image_seed']
if seed == -1:
seed = random.randint(0, 2**32 - 1)
device = get_device()
if device is None:
device = "cpu"
generator = torch.Generator(device)
all_images = []
# Get pipeline type for parameter adjustment
pipeline_type = getattr(shared, 'image_pipeline_type', None)
if pipeline_type is None:
pipeline_type = get_pipeline_type(shared.image_model)
prompt = state['image_prompt']
shared.stop_everything = False
batch_count = int(state['image_batch_count'])
steps_per_batch = int(state['image_steps'])
total_steps = steps_per_batch * batch_count
# Queue for progress updates from callback
progress_queue = queue.Queue()
def interrupt_callback(pipe, step_index, timestep, callback_kwargs):
if shared.stop_everything:
pipe._interrupt = True
progress_queue.put(step_index + 1)
return callback_kwargs
gen_kwargs = {
"prompt": prompt,
"negative_prompt": state['image_neg_prompt'],
"height": int(state['image_height']),
"width": int(state['image_width']),
"num_inference_steps": steps_per_batch,
"num_images_per_prompt": int(state['image_batch_size']),
"generator": generator,
"callback_on_step_end": interrupt_callback,
}
cfg_val = state.get('image_cfg_scale', 0.0)
if pipeline_type == 'qwenimage':
gen_kwargs["true_cfg_scale"] = cfg_val
else:
gen_kwargs["guidance_scale"] = cfg_val
t0 = time.time()
for batch_idx in range(batch_count):
if shared.stop_everything:
break
generator.manual_seed(int(seed + batch_idx))
# Generate prompt variation if enabled
if state['image_llm_variations']:
gen_kwargs["prompt"] = generate_prompt_variation(state)
# Run generation in thread so we can yield progress
result_holder = []
error_holder = []
def run_batch():
try:
# Apply magic suffix only at generation time for qwenimage
clean_prompt = gen_kwargs["prompt"]
if pipeline_type == 'qwenimage':
magic_suffix = ", Ultra HD, 4K, cinematic composition"
if magic_suffix.strip(", ") not in clean_prompt:
gen_kwargs["prompt"] = clean_prompt + magic_suffix
result_holder.extend(shared.image_model(**gen_kwargs).images)
gen_kwargs["prompt"] = clean_prompt # restore
except Exception as e:
error_holder.append(e)
thread = threading.Thread(target=run_batch)
thread.start()
# Yield progress updates while generation runs
while thread.is_alive():
try:
step = progress_queue.get(timeout=0.1)
absolute_step = batch_idx * steps_per_batch + step
pct = absolute_step / total_steps
text = f"Batch {batch_idx + 1}/{batch_count} — Step {step}/{steps_per_batch}"
yield all_images, progress_bar_html(pct, text)
except queue.Empty:
pass
thread.join()
if error_holder:
raise error_holder[0]
# Save this batch's images with the actual prompt and seed used
if save_images:
batch_seed = seed + batch_idx
original_prompt = state['image_prompt']
state['image_prompt'] = gen_kwargs["prompt"]
saved_paths = save_generated_images(result_holder, state, batch_seed)
state['image_prompt'] = original_prompt
# Use file paths so gallery serves actual PNGs with metadata
all_images.extend(saved_paths)
else:
# Fallback to PIL objects if not saving
all_images.extend(result_holder)
yield all_images, progress_bar_html((batch_idx + 1) / batch_count, f"Batch {batch_idx + 1}/{batch_count} complete")
t1 = time.time()
total_images = batch_count * int(state['image_batch_size'])
logger.info(f'Generated {total_images} {"image" if total_images == 1 else "images"} in {(t1 - t0):.2f} seconds ({total_steps / (t1 - t0):.2f} steps/s, seed {seed})')
yield all_images, progress_bar_html()
clear_torch_cache()
except Exception as e:
logger.error(f"Image generation failed: {e}")
traceback.print_exc()
yield [], progress_bar_html()
clear_torch_cache()
def load_image_model_wrapper(model_name, dtype, attn_backend, cpu_offload, compile_model, quant_method):
if not model_name or model_name == 'None':
yield "No model selected"
return
try:
yield f"Loading `{model_name}`..."
unload_image_model()
result = load_image_model(
model_name,
dtype=dtype,
attn_backend=attn_backend,
cpu_offload=cpu_offload,
compile_model=compile_model,
quant_method=quant_method
)
if result is not None:
shared.image_model_name = model_name
yield f"✓ Loaded **{model_name}** (quantization: {quant_method})"
else:
yield f"✗ Failed to load `{model_name}`"
except Exception:
yield f"Error:\n```\n{traceback.format_exc()}\n```"
def unload_image_model_wrapper():
previous_name = shared.image_model_name
unload_image_model()
if previous_name != 'None':
return f"Model: **{previous_name}** (unloaded)"
return "No model loaded"
def download_image_model_wrapper(model_path):
from huggingface_hub import snapshot_download
if not model_path:
yield "No model specified", gr.update()
return
try:
model_path = model_path.strip()
if model_path.startswith('https://huggingface.co/'):
model_path = model_path[len('https://huggingface.co/'):]
elif model_path.startswith('huggingface.co/'):
model_path = model_path[len('huggingface.co/'):]
if ':' in model_path:
model_id, branch = model_path.rsplit(':', 1)
else:
model_id, branch = model_path, 'main'
folder_name = model_id.replace('/', '_')
output_folder = Path(shared.args.image_model_dir) / folder_name
yield f"Downloading `{model_id}` (branch: {branch})...", gr.update()
snapshot_download(
repo_id=model_id,
revision=branch,
local_dir=output_folder,
local_dir_use_symlinks=False,
)
new_choices = utils.get_available_image_models()
yield f"✓ Downloaded to `{output_folder}`", gr.update(choices=new_choices, value=folder_name)
except Exception:
yield f"Error:\n```\n{traceback.format_exc()}\n```", gr.update() | --- +++ @@ -118,6 +118,7 @@
def build_generation_metadata(state, actual_seed):
+ """Build metadata dict from generation settings."""
metadata = {}
for key in METADATA_SETTINGS_KEYS:
if key in state:
@@ -132,6 +133,7 @@
def save_generated_images(images, state, actual_seed):
+ """Save images with generation metadata embedded in PNG. Returns list of saved file paths."""
if shared.args.multi_user:
return []
@@ -160,6 +162,7 @@
def read_image_metadata(image_path):
+ """Read generation metadata from PNG file."""
try:
img = open_image_safely(image_path)
if img is None:
@@ -175,6 +178,7 @@
def format_metadata_for_display(metadata):
+ """Format metadata as readable text."""
if not metadata:
return "No generation settings found in this image."
@@ -207,6 +211,7 @@
def get_all_history_images(force_refresh=False):
+ """Get all history images sorted by modification time (newest first). Uses caching."""
global _image_cache, _cache_timestamp
output_dir = str(shared.user_data_dir / "image_outputs")
@@ -233,6 +238,7 @@
def get_paginated_images(page=0, force_refresh=False):
+ """Get images for a specific page."""
all_images = get_all_history_images(force_refresh)
total_images = len(all_images)
total_pages = max(1, (total_images + IMAGES_PER_PAGE - 1) // IMAGES_PER_PAGE)
@@ -249,17 +255,20 @@
def get_initial_page_info():
+ """Get page info string for initial load."""
_, page, total_pages, total_images = get_paginated_images(0)
return f"Page {page + 1} of {total_pages} ({total_images} total images)"
def refresh_gallery(current_page=0):
+ """Refresh gallery with current page."""
images, page, total_pages, total_images = get_paginated_images(current_page, force_refresh=True)
page_info = f"Page {page + 1} of {total_pages} ({total_images} total images)"
return images, page, page_info
def go_to_page(page_num, current_page):
+ """Go to a specific page (1-indexed input)."""
try:
page = int(page_num) - 1 # Convert to 0-indexed
except (ValueError, TypeError):
@@ -271,18 +280,21 @@
def next_page(current_page):
+ """Go to next page."""
images, page, total_pages, total_images = get_paginated_images(current_page + 1)
page_info = f"Page {page + 1} of {total_pages} ({total_images} total images)"
return images, page, page_info
def prev_page(current_page):
+ """Go to previous page."""
images, page, total_pages, total_images = get_paginated_images(current_page - 1)
page_info = f"Page {page + 1} of {total_pages} ({total_images} total images)"
return images, page, page_info
def on_gallery_select(evt: gr.SelectData, current_page):
+ """Handle image selection from gallery."""
if evt.index is None:
return "", "Select an image to view its settings"
@@ -307,6 +319,7 @@
def send_to_generate(selected_image_path):
+ """Load settings from selected image and return updates for all Generate tab inputs."""
if not selected_image_path or not os.path.exists(selected_image_path):
return [gr.update()] * 8 + ["No image selected"]
@@ -331,6 +344,7 @@
def read_dropped_image_metadata(image_path):
+ """Read metadata from a dropped/uploaded image."""
if not image_path:
return "Drop an image to view its generation settings."
@@ -676,6 +690,7 @@
def generate_prompt_variation(state):
+ """Generate a creative variation of the image prompt using the LLM."""
from modules.chat import generate_chat_prompt
from modules.text_generation import generate_reply
@@ -731,6 +746,7 @@
def progress_bar_html(progress=0, text=""):
+ """Generate HTML for progress bar. Empty div when progress <= 0."""
if progress <= 0:
return '<div class="image-ai-separator"></div>'
@@ -743,6 +759,10 @@
def generate(state, save_images=True):
+ """
+ Generate images using the loaded model.
+ Automatically adjusts parameters based on pipeline type.
+ """
import queue
import threading
@@ -972,4 +992,4 @@ new_choices = utils.get_available_image_models()
yield f"✓ Downloaded to `{output_folder}`", gr.update(choices=new_choices, value=folder_name)
except Exception:
- yield f"Error:\n```\n{traceback.format_exc()}\n```", gr.update()+ yield f"Error:\n```\n{traceback.format_exc()}\n```", gr.update()
| https://raw.githubusercontent.com/oobabooga/text-generation-webui/HEAD/modules/ui_image_generation.py |
Insert docstrings into my code | import argparse
import copy
import os
import shlex
import sys
from collections import OrderedDict
from pathlib import Path
import yaml
from modules.logging_colors import logger
from modules.paths import resolve_user_data_dir
from modules.presets import default_preset, default_preset_values
# Resolve user_data directory early (before argparse defaults are set)
user_data_dir = resolve_user_data_dir()
# Text model variables
model = None
tokenizer = None
model_name = 'None'
is_seq2seq = False
is_multimodal = False
model_dirty_from_training = False
lora_names = []
bos_token = '<s>'
eos_token = '</s>'
# Image model variables
image_model = None
image_model_name = 'None'
image_pipeline_type = None
# Generation variables
stop_everything = False
generation_lock = None
processing_message = ''
# UI variables
gradio = {}
persistent_interface_state = {}
need_restart = False
# Parser copied from https://github.com/vladmandic/automatic
parser = argparse.ArgumentParser(description="Text Generation Web UI", conflict_handler='resolve', add_help=True, formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=55, indent_increment=2, width=200))
# Basic settings
group = parser.add_argument_group('Basic settings')
group.add_argument('--user-data-dir', type=str, default=str(user_data_dir), help='Path to the user data directory. Default: auto-detected.')
group.add_argument('--multi-user', action='store_true', help='Multi-user mode. Chat histories are not saved or automatically loaded. Best suited for small trusted teams.')
group.add_argument('--model', type=str, help='Name of the model to load by default.')
group.add_argument('--lora', type=str, nargs='+', help='The list of LoRAs to load. If you want to load more than one LoRA, write the names separated by spaces.')
group.add_argument('--model-dir', type=str, default=str(user_data_dir / 'models'), help='Path to directory with all the models.')
group.add_argument('--lora-dir', type=str, default=str(user_data_dir / 'loras'), help='Path to directory with all the loras.')
group.add_argument('--model-menu', action='store_true', help='Show a model menu in the terminal when the web UI is first launched.')
group.add_argument('--settings', type=str, help='Load the default interface settings from this yaml file. See user_data/settings-template.yaml for an example. If you create a file called user_data/settings.yaml, this file will be loaded by default without the need to use the --settings flag.')
group.add_argument('--extensions', type=str, nargs='+', help='The list of extensions to load. If you want to load more than one extension, write the names separated by spaces.')
group.add_argument('--verbose', action='store_true', help='Print the prompts to the terminal.')
group.add_argument('--idle-timeout', type=int, default=0, help='Unload model after this many minutes of inactivity. It will be automatically reloaded when you try to use it again.')
# Image generation
group = parser.add_argument_group('Image model')
group.add_argument('--image-model', type=str, help='Name of the image model to select on startup (overrides saved setting).')
group.add_argument('--image-model-dir', type=str, default=str(user_data_dir / 'image_models'), help='Path to directory with all the image models.')
group.add_argument('--image-dtype', type=str, default=None, choices=['bfloat16', 'float16'], help='Data type for image model.')
group.add_argument('--image-attn-backend', type=str, default=None, choices=['flash_attention_2', 'sdpa'], help='Attention backend for image model.')
group.add_argument('--image-cpu-offload', action='store_true', help='Enable CPU offloading for image model.')
group.add_argument('--image-compile', action='store_true', help='Compile the image model for faster inference.')
group.add_argument('--image-quant', type=str, default=None,
choices=['none', 'bnb-8bit', 'bnb-4bit', 'torchao-int8wo', 'torchao-fp4', 'torchao-float8wo'],
help='Quantization method for image model.')
# Model loader
group = parser.add_argument_group('Model loader')
group.add_argument('--loader', type=str, help='Choose the model loader manually, otherwise, it will get autodetected. Valid options: Transformers, llama.cpp, ExLlamav3_HF, ExLlamav3, TensorRT-LLM.')
# Cache
group = parser.add_argument_group('Context and cache')
group.add_argument('--ctx-size', '--n_ctx', '--max_seq_len', type=int, default=0, metavar='N', help='Context size in tokens. 0 = auto for llama.cpp (requires gpu-layers=-1), 8192 for other loaders.')
group.add_argument('--cache-type', '--cache_type', type=str, default='fp16', metavar='N', help='KV cache type; valid options: llama.cpp - fp16, q8_0, q4_0; ExLlamaV3 - fp16, q2 to q8 (can specify k_bits and v_bits separately, e.g. q4_q8).')
# Speculative decoding
group = parser.add_argument_group('Speculative decoding')
group.add_argument('--model-draft', type=str, default=None, help='Path to the draft model for speculative decoding.')
group.add_argument('--draft-max', type=int, default=4, help='Number of tokens to draft for speculative decoding.')
group.add_argument('--gpu-layers-draft', type=int, default=256, help='Number of layers to offload to the GPU for the draft model.')
group.add_argument('--device-draft', type=str, default=None, help='Comma-separated list of devices to use for offloading the draft model. Example: CUDA0,CUDA1')
group.add_argument('--ctx-size-draft', type=int, default=0, help='Size of the prompt context for the draft model. If 0, uses the same as the main model.')
group.add_argument('--spec-type', type=str, default='none', choices=['none', 'ngram-mod', 'ngram-simple', 'ngram-map-k', 'ngram-map-k4v', 'ngram-cache'], help='Draftless speculative decoding type. Recommended: ngram-mod.')
group.add_argument('--spec-ngram-size-n', type=int, default=24, help='N-gram lookup size for ngram speculative decoding.')
group.add_argument('--spec-ngram-size-m', type=int, default=48, help='Draft n-gram size for ngram speculative decoding.')
group.add_argument('--spec-ngram-min-hits', type=int, default=1, help='Minimum n-gram hits for ngram-map speculative decoding.')
# llama.cpp
group = parser.add_argument_group('llama.cpp')
group.add_argument('--gpu-layers', '--n-gpu-layers', type=int, default=-1, metavar='N', help='Number of layers to offload to the GPU. -1 = auto.')
group.add_argument('--cpu-moe', action='store_true', help='Move the experts to the CPU (for MoE models).')
group.add_argument('--mmproj', type=str, default=None, help='Path to the mmproj file for vision models.')
group.add_argument('--streaming-llm', action='store_true', help='Activate StreamingLLM to avoid re-evaluating the entire prompt when old messages are removed.')
group.add_argument('--tensor-split', type=str, default=None, help='Split the model across multiple GPUs. Comma-separated list of proportions. Example: 60,40.')
group.add_argument('--row-split', action='store_true', help='Split the model by rows across GPUs. This may improve multi-gpu performance.')
group.add_argument('--no-mmap', action='store_true', help='Prevent mmap from being used.')
group.add_argument('--mlock', action='store_true', help='Force the system to keep the model in RAM.')
group.add_argument('--no-kv-offload', action='store_true', help='Do not offload the K, Q, V to the GPU. This saves VRAM but reduces the performance.')
group.add_argument('--batch-size', type=int, default=1024, help='Maximum number of prompt tokens to batch together when calling llama-server. This is the application level batch size.')
group.add_argument('--ubatch-size', type=int, default=1024, help='Maximum number of prompt tokens to batch together when calling llama-server. This is the max physical batch size for computation (device level).')
group.add_argument('--threads', type=int, default=0, help='Number of threads to use.')
group.add_argument('--threads-batch', type=int, default=0, help='Number of threads to use for batches/prompt processing.')
group.add_argument('--numa', action='store_true', help='Activate NUMA task allocation for llama.cpp.')
group.add_argument('--parallel', type=int, default=1, help='Number of parallel request slots. The context size is divided equally among slots. For example, to have 4 slots with 8192 context each, set ctx_size to 32768.')
group.add_argument('--fit-target', type=str, default='512', help='Target VRAM margin per device for auto GPU layers, comma-separated list of values in MiB. A single value is broadcast across all devices.')
group.add_argument('--extra-flags', type=str, default=None, help='Extra flags to pass to llama-server. Format: "flag1=value1,flag2,flag3=value3". Example: "override-tensor=exps=CPU"')
# Transformers/Accelerate
group = parser.add_argument_group('Transformers/Accelerate')
group.add_argument('--cpu', action='store_true', help='Use the CPU to generate text. Warning: Training on CPU is extremely slow.')
group.add_argument('--cpu-memory', type=float, default=0, help='Maximum CPU memory in GiB. Use this for CPU offloading.')
group.add_argument('--disk', action='store_true', help='If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk.')
group.add_argument('--disk-cache-dir', type=str, default=str(user_data_dir / 'cache'), help='Directory to save the disk cache to.')
group.add_argument('--load-in-8bit', action='store_true', help='Load the model with 8-bit precision (using bitsandbytes).')
group.add_argument('--bf16', action='store_true', help='Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU.')
group.add_argument('--no-cache', action='store_true', help='Set use_cache to False while generating text. This reduces VRAM usage slightly, but it comes at a performance cost.')
group.add_argument('--trust-remote-code', action='store_true', help='Set trust_remote_code=True while loading the model. Necessary for some models.')
group.add_argument('--force-safetensors', action='store_true', help='Set use_safetensors=True while loading the model. This prevents arbitrary code execution.')
group.add_argument('--no_use_fast', action='store_true', help='Set use_fast=False while loading the tokenizer (it\'s True by default). Use this if you have any problems related to use_fast.')
group.add_argument('--attn-implementation', type=str, default='sdpa', metavar="IMPLEMENTATION", help='Attention implementation. Valid options: sdpa, eager, flash_attention_2.')
# bitsandbytes 4-bit
group = parser.add_argument_group('bitsandbytes 4-bit')
group.add_argument('--load-in-4bit', action='store_true', help='Load the model with 4-bit precision (using bitsandbytes).')
group.add_argument('--use_double_quant', action='store_true', help='use_double_quant for 4-bit.')
group.add_argument('--compute_dtype', type=str, default='float16', help='compute dtype for 4-bit. Valid options: bfloat16, float16, float32.')
group.add_argument('--quant_type', type=str, default='nf4', help='quant_type for 4-bit. Valid options: nf4, fp4.')
# ExLlamaV3
group = parser.add_argument_group('ExLlamaV3')
group.add_argument('--gpu-split', type=str, help='Comma-separated list of VRAM (in GB) to use per GPU device for model layers. Example: 20,7,7.')
group.add_argument('--enable-tp', '--enable_tp', action='store_true', help='Enable Tensor Parallelism (TP) to split the model across GPUs.')
group.add_argument('--tp-backend', type=str, default='native', help='The backend for tensor parallelism. Valid options: native, nccl. Default: native.')
group.add_argument('--cfg-cache', action='store_true', help='Create an additional cache for CFG negative prompts. Necessary to use CFG with that loader.')
# Gradio
group = parser.add_argument_group('Gradio')
group.add_argument('--listen', action='store_true', help='Make the web UI reachable from your local network.')
group.add_argument('--listen-port', type=int, help='The listening port that the server will use.')
group.add_argument('--listen-host', type=str, help='The hostname that the server will use.')
group.add_argument('--share', action='store_true', help='Create a public URL. This is useful for running the web UI on Google Colab or similar.')
group.add_argument('--auto-launch', action='store_true', default=False, help='Open the web UI in the default browser upon launch.')
group.add_argument('--gradio-auth', type=str, help='Set Gradio authentication password in the format "username:password". Multiple credentials can also be supplied with "u1:p1,u2:p2,u3:p3".', default=None)
group.add_argument('--gradio-auth-path', type=str, help='Set the Gradio authentication file path. The file should contain one or more user:password pairs in the same format as above.', default=None)
group.add_argument('--ssl-keyfile', type=str, help='The path to the SSL certificate key file.', default=None)
group.add_argument('--ssl-certfile', type=str, help='The path to the SSL certificate cert file.', default=None)
group.add_argument('--subpath', type=str, help='Customize the subpath for gradio, use with reverse proxy')
group.add_argument('--old-colors', action='store_true', help='Use the legacy Gradio colors, before the December/2024 update.')
group.add_argument('--portable', action='store_true', help='Hide features not available in portable mode like training.')
# API
group = parser.add_argument_group('API')
group.add_argument('--api', action='store_true', help='Enable the API extension.')
group.add_argument('--public-api', action='store_true', help='Create a public URL for the API using Cloudflare.')
group.add_argument('--public-api-id', type=str, help='Tunnel ID for named Cloudflare Tunnel. Use together with public-api option.', default=None)
group.add_argument('--api-port', type=int, default=5000, help='The listening port for the API.')
group.add_argument('--api-key', type=str, default='', help='API authentication key.')
group.add_argument('--admin-key', type=str, default='', help='API authentication key for admin tasks like loading and unloading models. If not set, will be the same as --api-key.')
group.add_argument('--api-enable-ipv6', action='store_true', help='Enable IPv6 for the API')
group.add_argument('--api-disable-ipv4', action='store_true', help='Disable IPv4 for the API')
group.add_argument('--nowebui', action='store_true', help='Do not launch the Gradio UI. Useful for launching the API in standalone mode.')
# API generation defaults
_d = default_preset_values
group = parser.add_argument_group('API generation defaults')
group.add_argument('--temperature', type=float, default=_d['temperature'], metavar='N', help='Temperature')
group.add_argument('--dynatemp-low', type=float, default=_d['dynatemp_low'], metavar='N', help='Dynamic temperature low')
group.add_argument('--dynatemp-high', type=float, default=_d['dynatemp_high'], metavar='N', help='Dynamic temperature high')
group.add_argument('--dynatemp-exponent', type=float, default=_d['dynatemp_exponent'], metavar='N', help='Dynamic temperature exponent')
group.add_argument('--smoothing-factor', type=float, default=_d['smoothing_factor'], metavar='N', help='Smoothing factor')
group.add_argument('--smoothing-curve', type=float, default=_d['smoothing_curve'], metavar='N', help='Smoothing curve')
group.add_argument('--top-p', type=float, default=_d['top_p'], metavar='N', help='Top P')
group.add_argument('--top-k', type=int, default=_d['top_k'], metavar='N', help='Top K')
group.add_argument('--min-p', type=float, default=_d['min_p'], metavar='N', help='Min P')
group.add_argument('--top-n-sigma', type=float, default=_d['top_n_sigma'], metavar='N', help='Top N Sigma')
group.add_argument('--typical-p', type=float, default=_d['typical_p'], metavar='N', help='Typical P')
group.add_argument('--xtc-threshold', type=float, default=_d['xtc_threshold'], metavar='N', help='XTC threshold')
group.add_argument('--xtc-probability', type=float, default=_d['xtc_probability'], metavar='N', help='XTC probability')
group.add_argument('--epsilon-cutoff', type=float, default=_d['epsilon_cutoff'], metavar='N', help='Epsilon cutoff')
group.add_argument('--eta-cutoff', type=float, default=_d['eta_cutoff'], metavar='N', help='Eta cutoff')
group.add_argument('--tfs', type=float, default=_d['tfs'], metavar='N', help='TFS')
group.add_argument('--top-a', type=float, default=_d['top_a'], metavar='N', help='Top A')
group.add_argument('--adaptive-target', type=float, default=_d['adaptive_target'], metavar='N', help='Adaptive target')
group.add_argument('--adaptive-decay', type=float, default=_d['adaptive_decay'], metavar='N', help='Adaptive decay')
group.add_argument('--dry-multiplier', type=float, default=_d['dry_multiplier'], metavar='N', help='DRY multiplier')
group.add_argument('--dry-allowed-length', type=int, default=_d['dry_allowed_length'], metavar='N', help='DRY allowed length')
group.add_argument('--dry-base', type=float, default=_d['dry_base'], metavar='N', help='DRY base')
group.add_argument('--repetition-penalty', type=float, default=_d['repetition_penalty'], metavar='N', help='Repetition penalty')
group.add_argument('--frequency-penalty', type=float, default=_d['frequency_penalty'], metavar='N', help='Frequency penalty')
group.add_argument('--presence-penalty', type=float, default=_d['presence_penalty'], metavar='N', help='Presence penalty')
group.add_argument('--encoder-repetition-penalty', type=float, default=_d['encoder_repetition_penalty'], metavar='N', help='Encoder repetition penalty')
group.add_argument('--no-repeat-ngram-size', type=int, default=_d['no_repeat_ngram_size'], metavar='N', help='No repeat ngram size')
group.add_argument('--repetition-penalty-range', type=int, default=_d['repetition_penalty_range'], metavar='N', help='Repetition penalty range')
group.add_argument('--penalty-alpha', type=float, default=_d['penalty_alpha'], metavar='N', help='Penalty alpha')
group.add_argument('--guidance-scale', type=float, default=_d['guidance_scale'], metavar='N', help='Guidance scale')
group.add_argument('--mirostat-mode', type=int, default=_d['mirostat_mode'], metavar='N', help='Mirostat mode')
group.add_argument('--mirostat-tau', type=float, default=_d['mirostat_tau'], metavar='N', help='Mirostat tau')
group.add_argument('--mirostat-eta', type=float, default=_d['mirostat_eta'], metavar='N', help='Mirostat eta')
group.add_argument('--do-sample', action=argparse.BooleanOptionalAction, default=_d['do_sample'], help='Do sample')
group.add_argument('--dynamic-temperature', action=argparse.BooleanOptionalAction, default=_d['dynamic_temperature'], help='Dynamic temperature')
group.add_argument('--temperature-last', action=argparse.BooleanOptionalAction, default=_d['temperature_last'], help='Temperature last')
group.add_argument('--sampler-priority', type=str, default=_d['sampler_priority'], metavar='N', help='Sampler priority')
group.add_argument('--dry-sequence-breakers', type=str, default=_d['dry_sequence_breakers'], metavar='N', help='DRY sequence breakers')
group.add_argument('--enable-thinking', action=argparse.BooleanOptionalAction, default=True, help='Enable thinking')
group.add_argument('--reasoning-effort', type=str, default='medium', metavar='N', help='Reasoning effort')
group.add_argument('--chat-template-file', type=str, default=None, help='Path to a chat template file (.jinja, .jinja2, or .yaml) to use as the default instruction template for API requests. Overrides the model\'s built-in template.')
# Handle CMD_FLAGS.txt
cmd_flags_path = user_data_dir / "CMD_FLAGS.txt"
if cmd_flags_path.exists():
with cmd_flags_path.open('r', encoding='utf-8') as f:
cmd_flags = ' '.join(
line.strip().rstrip('\\').strip()
for line in f
if line.strip().rstrip('\\').strip() and not line.strip().startswith('#')
)
if cmd_flags:
# Command-line takes precedence over CMD_FLAGS.txt
sys.argv = [sys.argv[0]] + shlex.split(cmd_flags) + sys.argv[1:]
args = parser.parse_args()
user_data_dir = Path(args.user_data_dir) # Update from parsed args (may differ from pre-parse)
original_args = copy.deepcopy(args)
args_defaults = parser.parse_args([])
# Create a mapping of all argument aliases to their canonical names
alias_to_dest = {}
for action in parser._actions:
for opt in action.option_strings:
alias_to_dest[opt.lstrip('-').replace('-', '_')] = action.dest
provided_arguments = []
for arg in sys.argv[1:]:
arg = arg.lstrip('-').replace('-', '_')
if arg in alias_to_dest:
provided_arguments.append(alias_to_dest[arg])
elif hasattr(args, arg):
provided_arguments.append(arg)
# Default generation parameters
neutral_samplers = default_preset()
# UI defaults
settings = {
'show_controls': True,
'start_with': '',
'mode': 'instruct',
'chat_style': 'cai-chat',
'chat-instruct_command': 'Continue the chat dialogue below. Write a single reply for the character "<|character|>". Reply directly, without starting the reply with the character name.\n\n<|prompt|>',
'enable_web_search': False,
'web_search_pages': 3,
'selected_tools': [],
'prompt-notebook': '',
'preset': 'Top-P' if (user_data_dir / 'presets/Top-P.yaml').exists() else None,
'max_new_tokens': 512,
'max_new_tokens_min': 1,
'max_new_tokens_max': 4096,
'prompt_lookup_num_tokens': 0,
'max_tokens_second': 0,
'auto_max_new_tokens': True,
'ban_eos_token': False,
'add_bos_token': True,
'enable_thinking': True,
'reasoning_effort': 'medium',
'skip_special_tokens': True,
'stream': True,
'static_cache': False,
'truncation_length': 8192,
'seed': -1,
'custom_stopping_strings': '',
'custom_token_bans': '',
'negative_prompt': '',
'dark_theme': True,
'show_two_notebook_columns': False,
'paste_to_attachment': False,
'include_past_attachments': True,
# Generation parameters - Curve shape
'temperature': neutral_samplers['temperature'],
'dynatemp_low': neutral_samplers['dynatemp_low'],
'dynatemp_high': neutral_samplers['dynatemp_high'],
'dynatemp_exponent': neutral_samplers['dynatemp_exponent'],
'smoothing_factor': neutral_samplers['smoothing_factor'],
'smoothing_curve': neutral_samplers['smoothing_curve'],
# Generation parameters - Curve cutoff
'top_p': 0.95,
'top_k': neutral_samplers['top_k'],
'min_p': neutral_samplers['min_p'],
'top_n_sigma': neutral_samplers['top_n_sigma'],
'typical_p': neutral_samplers['typical_p'],
'xtc_threshold': neutral_samplers['xtc_threshold'],
'xtc_probability': neutral_samplers['xtc_probability'],
'epsilon_cutoff': neutral_samplers['epsilon_cutoff'],
'eta_cutoff': neutral_samplers['eta_cutoff'],
'tfs': neutral_samplers['tfs'],
'top_a': neutral_samplers['top_a'],
'adaptive_target': neutral_samplers['adaptive_target'],
'adaptive_decay': neutral_samplers['adaptive_decay'],
# Generation parameters - Repetition suppression
'dry_multiplier': neutral_samplers['dry_multiplier'],
'dry_allowed_length': neutral_samplers['dry_allowed_length'],
'dry_base': neutral_samplers['dry_base'],
'repetition_penalty': neutral_samplers['repetition_penalty'],
'frequency_penalty': neutral_samplers['frequency_penalty'],
'presence_penalty': neutral_samplers['presence_penalty'],
'encoder_repetition_penalty': neutral_samplers['encoder_repetition_penalty'],
'no_repeat_ngram_size': neutral_samplers['no_repeat_ngram_size'],
'repetition_penalty_range': neutral_samplers['repetition_penalty_range'],
# Generation parameters - Alternative sampling methods
'penalty_alpha': neutral_samplers['penalty_alpha'],
'guidance_scale': neutral_samplers['guidance_scale'],
'mirostat_mode': neutral_samplers['mirostat_mode'],
'mirostat_tau': neutral_samplers['mirostat_tau'],
'mirostat_eta': neutral_samplers['mirostat_eta'],
# Generation parameters - Other options
'do_sample': neutral_samplers['do_sample'],
'dynamic_temperature': neutral_samplers['dynamic_temperature'],
'temperature_last': neutral_samplers['temperature_last'],
'sampler_priority': neutral_samplers['sampler_priority'],
'dry_sequence_breakers': neutral_samplers['dry_sequence_breakers'],
'grammar_string': '',
# Character settings
'character': 'Assistant',
'user': 'Default',
'name1': 'You',
'name2': 'AI',
'user_bio': '',
'context': 'The following is a conversation with an AI Large Language Model. The AI has been trained to answer questions, provide recommendations, and help with decision making. The AI follows user requests. The AI thinks outside the box.',
'greeting': 'How can I help you today?',
'custom_system_message': '',
'instruction_template_str': "{%- set ns = namespace(found=false) -%}\n{%- for message in messages -%}\n {%- if message['role'] == 'system' -%}\n {%- set ns.found = true -%}\n {%- endif -%}\n{%- endfor -%}\n{%- if not ns.found -%}\n {{- '' + 'Below is an instruction that describes a task. Write a response that appropriately completes the request.' + '\\n\\n' -}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' -%}\n {{- '' + message['content'] + '\\n\\n' -}}\n {%- else -%}\n {%- if message['role'] == 'user' -%}\n {{-'### Instruction:\\n' + message['content'] + '\\n\\n'-}}\n {%- else -%}\n {{-'### Response:\\n' + message['content'] + '\\n\\n' -}}\n {%- endif -%}\n {%- endif -%}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n {{-'### Response:\\n'-}}\n{%- endif -%}",
'chat_template_str': "{%- for message in messages %}\n {%- if message['role'] == 'system' -%}\n {%- if message['content'] -%}\n {{- message['content'] + '\\n\\n' -}}\n {%- endif -%}\n {%- if user_bio -%}\n {{- user_bio + '\\n\\n' -}}\n {%- endif -%}\n {%- elif message['role'] == 'tool' -%}\n {{- '[Tool result: ' + message['content'] + ']\\n' -}}\n {%- elif message['role'] == 'user' -%}\n {{- name1 + ': ' + message['content'] + '\\n'-}}\n {%- elif message['tool_calls'] is defined and message['tool_calls'] -%}\n {%- for tc in message['tool_calls'] -%}\n {{- '[Calling: ' + tc['function']['name'] + '(' + tc['function']['arguments'] + ')]\\n' -}}\n {%- endfor -%}\n {%- else -%}\n {{- name2 + ': ' + message['content'] + '\\n' -}}\n {%- endif -%}\n{%- endfor -%}\n{%- if add_generation_prompt %}\n {{- name2 + ':' -}}\n{%- endif %}",
# Extensions
'default_extensions': [],
# Image generation settings
'image_prompt': '',
'image_neg_prompt': '',
'image_width': 1024,
'image_height': 1024,
'image_aspect_ratio': '1:1 Square',
'image_steps': 9,
'image_cfg_scale': 0.0,
'image_seed': -1,
'image_batch_size': 1,
'image_batch_count': 1,
'image_llm_variations': False,
'image_llm_variations_prompt': 'Write a variation of the image generation prompt above. Consider the intent of the user with that prompt and write something that will likely please them, with added details. Output only the new prompt. Do not add any explanations, prefixes, or additional text.',
'image_model_menu': 'None',
'image_dtype': 'bfloat16',
'image_attn_backend': 'flash_attention_2',
'image_cpu_offload': False,
'image_compile': False,
'image_quant': 'none',
}
default_settings = copy.deepcopy(settings)
def do_cmd_flags_warnings():
# Validate --chat-template-file
if args.chat_template_file and not Path(args.chat_template_file).is_file():
logger.error(f"--chat-template-file: file not found: {args.chat_template_file}")
sys.exit(1)
# Security warnings
if args.trust_remote_code:
logger.warning(
"The `--trust-remote-code` flag is enabled.\n"
"This allows models to execute arbitrary code on your machine.\n\n"
"1. Only use with models from sources you fully trust.\n"
"2. Set an access password with `--gradio-auth`."
)
if 'COLAB_GPU' not in os.environ and not args.nowebui:
if args.share:
logger.warning("The gradio \"share link\" feature uses a proprietary executable to create a reverse tunnel. Use it with care.")
if any((args.listen, args.share)) and not any((args.gradio_auth, args.gradio_auth_path)):
logger.warning("You are potentially exposing the web UI to the entire internet without any access password.\nYou can create one with the \"--gradio-auth\" flag like this:\n\n--gradio-auth username:password\n\nMake sure to replace username:password with your own.")
if args.multi_user:
logger.warning(
'Multi-user mode is enabled. Known limitations:'
'\n- The Stop button stops generation for all users, not just you.'
'\n- Chat history is not saved and will be lost on page refresh.'
'\n- Only one user can generate at a time unless using a parallel-capable backend (e.g. llama.cpp with --parallel N for N > 1, or ExLlamaV3).'
'\n\nThis mode works best for small trusted teams.'
'\n\nDo not expose publicly. Grayed-out actions can easily be bypassed client-side.\n'
)
def apply_image_model_cli_overrides():
if args.image_model is not None:
settings['image_model_menu'] = args.image_model
if args.image_dtype is not None:
settings['image_dtype'] = args.image_dtype
if args.image_attn_backend is not None:
settings['image_attn_backend'] = args.image_attn_backend
if args.image_cpu_offload:
settings['image_cpu_offload'] = True
if args.image_compile:
settings['image_compile'] = True
if args.image_quant is not None:
settings['image_quant'] = args.image_quant
def fix_loader_name(name):
if not name:
return name
name = name.lower()
if name in ['llama.cpp', 'llamacpp', 'llama-cpp', 'llama cpp']:
return 'llama.cpp'
elif name in ['transformers', 'huggingface', 'hf', 'hugging_face', 'hugging face']:
return 'Transformers'
elif name in ['exllamav3-hf', 'exllamav3_hf', 'exllama-v3-hf', 'exllama_v3_hf', 'exllama-v3_hf', 'exllama3-hf', 'exllama3_hf', 'exllama-3-hf', 'exllama_3_hf', 'exllama-3_hf']:
return 'ExLlamav3_HF'
elif name in ['exllamav3']:
return 'ExLlamav3'
elif name in ['tensorrt', 'tensorrtllm', 'tensorrt_llm', 'tensorrt-llm', 'tensort', 'tensortllm']:
return 'TensorRT-LLM'
def add_extension(name, last=False):
if args.extensions is None:
args.extensions = [name]
elif last:
args.extensions = [x for x in args.extensions if x != name]
args.extensions.append(name)
elif name not in args.extensions:
args.extensions.append(name)
def is_chat():
return True
def load_user_config():
if Path(f'{args.model_dir}/config-user.yaml').exists():
file_content = open(f'{args.model_dir}/config-user.yaml', 'r').read().strip()
if file_content:
user_config = yaml.safe_load(file_content)
else:
user_config = {}
else:
user_config = {}
return user_config
args.loader = fix_loader_name(args.loader)
# Activate the API extension
if args.api or args.public_api:
add_extension('openai', last=True)
# Load model-specific settings
p = Path(f'{args.model_dir}/config.yaml')
if p.exists():
model_config = yaml.safe_load(open(p, 'r').read())
else:
model_config = {}
del p
# Load custom model-specific settings
user_config = load_user_config()
model_config = OrderedDict(model_config)
user_config = OrderedDict(user_config) | --- +++ @@ -403,6 +403,7 @@
def apply_image_model_cli_overrides():
+ """Apply command-line overrides for image model settings."""
if args.image_model is not None:
settings['image_model_menu'] = args.image_model
if args.image_dtype is not None:
@@ -449,6 +450,9 @@
def load_user_config():
+ '''
+ Loads custom model-specific settings
+ '''
if Path(f'{args.model_dir}/config-user.yaml').exists():
file_content = open(f'{args.model_dir}/config-user.yaml', 'r').read().strip()
@@ -481,4 +485,4 @@ user_config = load_user_config()
model_config = OrderedDict(model_config)
-user_config = OrderedDict(user_config)+user_config = OrderedDict(user_config)
| https://raw.githubusercontent.com/oobabooga/text-generation-webui/HEAD/modules/shared.py |
Document this script properly | import concurrent.futures
import html
import ipaddress
import random
import re
import socket
from concurrent.futures import as_completed
from datetime import datetime
from urllib.parse import parse_qs, quote_plus, urljoin, urlparse
import requests
from modules import shared
from modules.logging_colors import logger
def _validate_url(url):
parsed = urlparse(url)
if parsed.scheme not in ('http', 'https'):
raise ValueError(f"Unsupported URL scheme: {parsed.scheme}")
hostname = parsed.hostname
if not hostname:
raise ValueError("No hostname in URL")
# Resolve hostname and check all returned addresses
try:
for family, _, _, _, sockaddr in socket.getaddrinfo(hostname, None):
ip = ipaddress.ip_address(sockaddr[0])
if not ip.is_global:
raise ValueError(f"Access to non-public address {ip} is blocked")
except socket.gaierror:
raise ValueError(f"Could not resolve hostname: {hostname}")
def get_current_timestamp():
return datetime.now().strftime('%b %d, %Y %H:%M')
def download_web_page(url, timeout=10, include_links=False):
    """Fetch a web page and return its main content extracted as Markdown.

    Redirects are followed by hand (up to 5) so that every intermediate URL
    passes _validate_url() as well. Returns "" on any failure.
    """
    import trafilatura

    try:
        _validate_url(url)
        request_headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }

        # Manual redirect loop: validate each hop before requesting it
        redirects_left = 5
        while redirects_left > 0:
            response = requests.get(url, headers=request_headers, timeout=timeout, allow_redirects=False)
            if not (response.is_redirect and 'Location' in response.headers):
                break

            url = urljoin(url, response.headers['Location'])
            _validate_url(url)
            redirects_left -= 1

        response.raise_for_status()
        extracted = trafilatura.extract(
            response.text,
            include_links=include_links,
            output_format='markdown',
            url=url
        )
        return extracted if extracted else ""
    except requests.exceptions.RequestException as e:
        logger.error(f"Error downloading {url}: {e}")
        return ""
    except Exception as e:
        logger.error(f"An unexpected error occurred: {e}")
        return ""
def perform_web_search(query, num_pages=3, max_workers=5, timeout=10, fetch_content=True):
    """Search DuckDuckGo's HTML endpoint and return a list of result dicts.

    Each result is {'title', 'url', 'content'}; 'content' is the extracted
    page text (downloaded in parallel) or '' when fetch_content is False or
    the download failed. Returns [] on any search-level error.
    """
    try:
        search_url = f"https://html.duckduckgo.com/html/?q={quote_plus(query)}"
        agents = [
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36"
        ]

        response = requests.get(search_url, headers={'User-Agent': random.choice(agents)}, timeout=timeout)
        response.raise_for_status()
        response_text = response.text

        # Extract results - title and URL come from the same <a class="result__a"> element.
        # NOTE: both findall patterns match the same anchors, so the two lists stay
        # index-aligned; keep the patterns in lockstep if either is edited.
        result_links = re.findall(r'<a[^>]*class="[^"]*result__a[^"]*"[^>]*>(.*?)</a>', response_text, re.DOTALL)
        result_tags = re.findall(r'<a([^>]*class="[^"]*result__a[^"]*"[^>]*)>', response_text, re.DOTALL)

        # Prepare download tasks
        download_tasks = []
        for i, (tag_attrs, raw_title) in enumerate(zip(result_tags, result_links)):
            # num_pages caps how many results we keep (None = keep all)
            if num_pages is not None and i >= num_pages:
                break

            # Extract href and resolve the actual URL from DuckDuckGo's redirect link
            href_match = re.search(r'href="([^"]*)"', tag_attrs)
            if not href_match:
                continue

            # DuckDuckGo wraps the real URL in a redirect; it lives in the 'uddg' query param
            uddg = parse_qs(urlparse(html.unescape(href_match.group(1))).query).get('uddg', [''])[0]
            if not uddg:
                continue

            # Strip any HTML tags from the title and unescape entities
            title = html.unescape(re.sub(r'<[^>]+>', '', raw_title).strip())
            download_tasks.append((uddg, title, len(download_tasks)))

        search_results = [None] * len(download_tasks)  # Pre-allocate to maintain order
        if not fetch_content:
            for url, title, index in download_tasks:
                search_results[index] = {
                    'title': title,
                    'url': url,
                    'content': ''
                }
            return search_results

        # Download pages in parallel
        with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
            # Submit all download tasks
            future_to_task = {
                executor.submit(download_web_page, task[0]): task
                for task in download_tasks
            }

            # Collect results as they complete; the stored index (not completion
            # order) keeps search_results in the original ranking order
            for future in as_completed(future_to_task):
                url, title, index = future_to_task[future]
                try:
                    content = future.result()
                    search_results[index] = {
                        'title': title,
                        'url': url,
                        'content': content
                    }
                except Exception:
                    # A failed download still yields an entry, just with empty content
                    search_results[index] = {
                        'title': title,
                        'url': url,
                        'content': ''
                    }

        return search_results
    except Exception as e:
        logger.error(f"Error performing web search: {e}")
        return []
def truncate_content_by_tokens(content: str, max_tokens: int = 8192) -> str:
    """Truncate `content` so its tokenized length is at most `max_tokens`.

    Tokens do not map 1:1 to characters, so this binary-searches for the
    longest character prefix whose token count fits the budget. Requires a
    loaded `shared.tokenizer`.
    """
    # Fast path: already within budget
    if len(shared.tokenizer.encode(content)) <= max_tokens:
        return content

    # Invariant: content[:left] fits, content[:right+1] may not.
    # The +1 in mid rounds up so the loop always makes progress when left+1 == right.
    left, right = 0, len(content)
    while left < right:
        mid = (left + right + 1) // 2
        if len(shared.tokenizer.encode(content[:mid])) <= max_tokens:
            left = mid
        else:
            right = mid - 1

    return content[:left]
def add_web_search_attachments(history, row_idx, user_message, search_query, state):
    """Run a web search for `search_query` and attach the downloaded pages to
    the user message at `row_idx` in the chat history metadata.

    Results with empty content (failed downloads) are skipped. All failures
    are logged and swallowed so chat generation is never interrupted.
    """
    if not search_query:
        logger.warning("No search query provided")
        return

    try:
        logger.info(f"Using search query: {search_query}")

        # Run the search with the page count configured in the UI state
        num_pages = int(state.get('web_search_pages', 3))
        search_results = perform_web_search(search_query, num_pages)
        if not search_results:
            logger.warning("No search results found")
            return

        # Keep only results whose download actually produced content
        successful_results = [result for result in search_results if result['content'].strip()]
        if not successful_results:
            logger.warning("No successful downloads to add as attachments")
            return

        # Attach each page to the row's metadata entry, creating it if needed
        key = f"user_{row_idx}"
        metadata = history['metadata']
        if key not in metadata:
            metadata[key] = {"timestamp": get_current_timestamp()}
        attachments = metadata[key].setdefault("attachments", [])

        for result in successful_results:
            attachments.append({
                "name": result['title'],
                "type": "text/html",
                "url": result['url'],
                "content": truncate_content_by_tokens(result['content'])
            })

        logger.info(f"Added {len(successful_results)} successful web search results as attachments.")
    except Exception as e:
        logger.error(f"Error in web search: {e}")
def _validate_url(url):
+ """Validate that a URL is safe to fetch (not targeting private/internal networks)."""
parsed = urlparse(url)
if parsed.scheme not in ('http', 'https'):
raise ValueError(f"Unsupported URL scheme: {parsed.scheme}")
@@ -34,10 +35,14 @@
def get_current_timestamp():
+ """Returns the current time in 24-hour format"""
return datetime.now().strftime('%b %d, %Y %H:%M')
def download_web_page(url, timeout=10, include_links=False):
+ """
+ Download a web page and extract its main content as Markdown text.
+ """
import trafilatura
try:
@@ -72,6 +77,7 @@
def perform_web_search(query, num_pages=3, max_workers=5, timeout=10, fetch_content=True):
+ """Perform web search and return results, optionally with page content"""
try:
search_url = f"https://html.duckduckgo.com/html/?q={quote_plus(query)}"
@@ -148,6 +154,7 @@
def truncate_content_by_tokens(content, max_tokens=8192):
+ """Truncate content to fit within token limit using binary search"""
if len(shared.tokenizer.encode(content)) <= max_tokens:
return content
@@ -163,6 +170,7 @@
def add_web_search_attachments(history, row_idx, user_message, search_query, state):
+ """Perform web search and add results as attachments"""
if not search_query:
logger.warning("No search query provided")
return
@@ -204,4 +212,4 @@ logger.info(f"Added {len(successful_results)} successful web search results as attachments.")
except Exception as e:
- logger.error(f"Error in web search: {e}")+ logger.error(f"Error in web search: {e}")
| https://raw.githubusercontent.com/oobabooga/text-generation-webui/HEAD/modules/web_search.py |
Create documentation for each function signature | import os
os.environ["WANDB_MODE"] = "offline"
# os.environ["WANDB_DISABLED"] = "true"
import json
import math
import random
import shutil
import sys
import threading
import time
import traceback
from datetime import datetime
from pathlib import Path
import yaml
import gradio as gr
from modules import shared, ui, utils
from modules.evaluate import (
calculate_perplexity,
generate_markdown_table,
save_past_evaluations
)
from modules.logging_colors import logger
from modules.models import reload_model
PARAMETERS = ["lora_name", "always_override", "all_linear", "q_proj_en", "v_proj_en", "k_proj_en", "o_proj_en", "gate_proj_en", "down_proj_en", "up_proj_en", "save_steps", "micro_batch_size", "batch_size", "epochs", "learning_rate", "lr_scheduler_type", "lora_rank", "lora_alpha", "lora_dropout", "cutoff_len", "dataset", "eval_dataset", "format", "eval_steps", "text_dataset", "higher_rank_limit", "warmup_steps", "optimizer", "stride_length", "stop_at_loss", "add_eos_token", "excess_length", "report_to"]
WANT_INTERRUPT = False
train_log = {}
train_template = {}
def create_ui():
    """Build the 'Training' tab of the Gradio UI.

    Creates two sub-tabs — 'Train LoRA' (hyperparameter widgets, dataset
    pickers, start/stop buttons) and 'Perplexity evaluation' — then wires
    their events to do_train()/do_interrupt() and the evaluation helpers.
    NOTE(review): assumed to be called from within a gr.Blocks() context —
    confirm against the caller.
    """
    # In multi-user mode most controls are made non-interactive
    mu = shared.args.multi_user
    with gr.Tab("Training", elem_id="training-tab"):
        with gr.Tab('Train LoRA', elem_id='lora-train-tab'):
            # Hidden state used to route the "evaluate loaded model" click
            tmp = gr.State('')
            with gr.Row():
                with gr.Column():
                    gr.Markdown("[Tutorial](https://github.com/oobabooga/text-generation-webui/wiki/05-%E2%80%90-Training-Tab)")

                    with gr.Row():
                        copy_from = gr.Dropdown(label='Copy parameters from', value='None', choices=utils.get_available_loras(), elem_classes=['slim-dropdown'], interactive=not mu)
                        ui.create_refresh_button(copy_from, lambda: None, lambda: {'choices': utils.get_available_loras()}, 'refresh-button', interactive=not mu)

                    with gr.Row():
                        with gr.Column(scale=5):
                            lora_name = gr.Textbox(label='Name', info='The name of your new LoRA file')
                        with gr.Column():
                            always_override = gr.Checkbox(label='Override Existing Files', value=False, info='If the name is the same, checking will replace the existing file, and unchecking will load and continue from it (the rank must be the same).', elem_classes=['no-background'])

                    # LoRA target-module selection (all-linear overrides the per-module boxes)
                    with gr.Accordion(label='Target Modules', open=False, elem_classes='tgw-accordion'):
                        gr.Markdown("Selects which modules to target in training. Targeting more modules is closer to a full fine-tune at the cost of increased VRAM and adapter size.")
                        all_linear = gr.Checkbox(label='Target all linear layers', value=True, info='Targets every nn.Linear layer except lm_head. Works for any model architecture. When checked, the individual module checkboxes below are ignored.', elem_classes=['no-background'])
                        with gr.Row():
                            with gr.Column():
                                q_proj_en = gr.Checkbox(label='Enable q_proj', value=True)
                            with gr.Column():
                                v_proj_en = gr.Checkbox(label='Enable v_proj', value=True)
                            with gr.Column():
                                k_proj_en = gr.Checkbox(label='Enable k_proj', value=False)
                            with gr.Column():
                                o_proj_en = gr.Checkbox(label='Enable o_proj', value=False)
                            with gr.Column():
                                gate_proj_en = gr.Checkbox(label='Enable gate_proj', value=False)
                            with gr.Column():
                                down_proj_en = gr.Checkbox(label='Enable down_proj', value=False)
                            with gr.Column():
                                up_proj_en = gr.Checkbox(label='Enable up_proj', value=False)

                    # Main hyperparameters
                    with gr.Row():
                        with gr.Column():
                            lora_rank = gr.Slider(label='LoRA Rank', value=8, minimum=0, maximum=1024, step=4, info='Also called dimension count. Higher values = larger file, more content control. Smaller values = smaller file, less control. Use 4 or 8 for style, 128 or 256 to teach, 1024+ for fine-detail on big data. More VRAM is needed for higher ranks.')
                            lora_alpha = gr.Slider(label='LoRA Alpha', value=16, minimum=0, maximum=2048, step=4, info='This divided by the rank becomes the scaling of the LoRA. Higher means stronger. A good standard value is twice your Rank.')
                            batch_size = gr.Slider(label='Batch Size', value=32, minimum=0, maximum=1024, step=4, info='Global batch size. The two batch sizes together determine gradient accumulation (gradientAccum = batch / microBatch). Higher gradient accum values lead to better quality training.')
                            micro_batch_size = gr.Slider(label='Micro Batch Size', value=4, minimum=1, maximum=128, step=1, info='Per-device batch size (NOTE: multiple devices not yet implemented). Increasing this will increase VRAM usage.')
                            cutoff_len = gr.Slider(label='Cutoff Length', minimum=0, maximum=4096, value=512, step=32, info='Maximum sequence length in tokens. For instruction datasets, conversations longer than this are dropped. For text datasets, documents are split into chunks of this size. Higher values require more VRAM.')
                        with gr.Column():
                            save_steps = gr.Number(label='Save every n steps', value=0, info='If above 0, a full training checkpoint (adapter weights, optimizer, scheduler) will be saved every time this many steps pass. Training can be resumed from these checkpoints.')
                            epochs = gr.Number(label='Epochs', value=3, info='Number of times every entry in the dataset should be fed into training. So 1 means feed each item in once, 5 means feed it in five times, etc.')
                            learning_rate = gr.Textbox(label='Learning Rate', value='3e-4', info='In scientific notation. 3e-4 is a good starting base point. 1e-2 is extremely high, 1e-6 is extremely low.')
                            with gr.Row():
                                lr_scheduler_type = gr.Dropdown(label='LR Scheduler', value='cosine', choices=['linear', 'constant', 'constant_with_warmup', 'cosine', 'cosine_with_restarts', 'polynomial', 'inverse_sqrt'], info='Learning rate scheduler - defines how the learning rate changes over time. "Constant" means never change, "linear" means to go in a straight line from the learning rate down to 0, cosine follows a curve, etc.', elem_classes=['slim-dropdown'])

                    with gr.Accordion(label='Advanced Options', open=False, elem_classes='tgw-accordion'):
                        with gr.Row():
                            with gr.Column():
                                lora_dropout = gr.Slider(label='LoRA Dropout', minimum=0.0, maximum=1.0, step=0.025, value=0.0, info='Percentage probability for dropout of LoRA layers. This can help reduce overfitting. Most users should leave at default.')
                                stop_at_loss = gr.Slider(label='Stop at loss', minimum=0.0, maximum=3.0, step=0.1, value=0.00, info='The process will automatically stop once the desired loss value is reached. (reasonable numbers are 1.5-1.8)')
                                with gr.Row():
                                    optimizer = gr.Dropdown(label='Optimizer', value='adamw_torch', choices=['adamw_hf', 'adamw_torch', 'adamw_torch_fused', 'adamw_torch_xla', 'adamw_apex_fused', 'adafactor', 'adamw_bnb_8bit', 'adamw_anyprecision', 'sgd', 'adagrad'], info='Optimizer algorithm. adamw_torch is the standard choice. adamw_bnb_8bit uses less VRAM. adafactor is memory-efficient for large models.', elem_classes=['slim-dropdown'])
                            with gr.Column():
                                warmup_steps = gr.Number(label='Warmup Steps', value=100, info='For this many steps at the start, the learning rate is gradually ramped up from 0 to the target value. This prevents unstable updates early in training.')
                                add_eos_token = gr.Checkbox(label='Add EOS token', value=True, info="Adds EOS token for each document in text datasets.")
                                excess_length = gr.Dropdown(label='Excess length', value='drop', choices=['drop', 'truncate'], info='What to do with conversations that exceed the cutoff length. "Drop" removes them entirely (recommended). "Truncate" cuts from the right, which may produce incomplete responses.', elem_classes=['slim-dropdown'])

                        higher_rank_limit = gr.Checkbox(label='Enable higher ranks', value=False, info='If checked, changes Rank/Alpha slider above to go much higher. This will not work without a datacenter-class GPU.')
                        report_to = gr.Radio(label="Save detailed logs with", value="None", choices=["None", "wandb", "tensorboard"], interactive=True)

                # Right column: dataset selection (chat-style XOR raw-text)
                with gr.Column():
                    with gr.Tab(label='Chat Dataset'):
                        with gr.Row():
                            dataset = gr.Dropdown(choices=utils.get_chat_datasets(str(shared.user_data_dir / 'training/datasets')), value='None', label='Dataset File', info='A JSON file with chat conversations (messages or ShareGPT format). Each row is one conversation.', elem_classes=['slim-dropdown'], interactive=not mu)
                            ui.create_refresh_button(dataset, lambda: None, lambda: {'choices': utils.get_chat_datasets(str(shared.user_data_dir / 'training/datasets'))}, 'refresh-button', interactive=not mu)

                        with gr.Row():
                            # NOTE: 'format' shadows the builtin; kept for compatibility with do_train()
                            format = gr.Dropdown(choices=get_instruction_templates(), value='None', label='Instruction Template', info='Select an instruction template for formatting the dataset, or "Chat Template" to use the model\'s built-in chat template.', elem_classes=['slim-dropdown'], interactive=not mu)
                            ui.create_refresh_button(format, lambda: None, lambda: {'choices': get_instruction_templates()}, 'refresh-button', interactive=not mu)

                    with gr.Tab(label="Text Dataset"):
                        with gr.Row():
                            text_dataset = gr.Dropdown(choices=utils.get_text_datasets(str(shared.user_data_dir / 'training/datasets')), value='None', label='Dataset File', info='A JSON file with a "text" key per row, for pretraining-style training. Each row is one document.', elem_classes=['slim-dropdown'], interactive=not mu)
                            ui.create_refresh_button(text_dataset, lambda: None, lambda: {'choices': utils.get_text_datasets(str(shared.user_data_dir / 'training/datasets'))}, 'refresh-button', interactive=not mu)

                        stride_length = gr.Slider(label='Stride Length', minimum=0, maximum=2048, value=256, step=32, info='Overlap between chunks in tokens. 0 = no overlap. Values like 256 or 512 help preserve context across chunk boundaries.')

                    with gr.Row():
                        eval_dataset = gr.Dropdown(choices=utils.get_datasets(str(shared.user_data_dir / 'training/datasets'), 'json'), value='None', label='Evaluation Dataset', info='The (optional) dataset file used to evaluate the model after training.', elem_classes=['slim-dropdown'], interactive=not mu)
                        ui.create_refresh_button(eval_dataset, lambda: None, lambda: {'choices': utils.get_datasets(str(shared.user_data_dir / 'training/datasets'), 'json')}, 'refresh-button', interactive=not mu)

                    eval_steps = gr.Number(label='Evaluate every n steps', value=100, info='If an evaluation dataset is given, test it every time this many steps pass.')

                    with gr.Row():
                        start_button = gr.Button("Start LoRA Training", variant='primary', interactive=not mu)
                        stop_button = gr.Button("Interrupt", interactive=not mu)

                    output = gr.Markdown(value="Ready")

        with gr.Tab('Perplexity evaluation', elem_id='evaluate-tab'):
            with gr.Row():
                with gr.Column():
                    models = gr.Dropdown(utils.get_available_models(), label='Models', multiselect=True, interactive=not mu)
                    evaluate_text_file = gr.Dropdown(choices=['wikitext', 'ptb', 'ptb_new'] + utils.get_datasets(str(shared.user_data_dir / 'training/datasets'), 'txt')[1:], value='wikitext', label='Input dataset', info=f'The raw text file on which the model will be evaluated. The first options are automatically downloaded: wikitext, ptb, and ptb_new. The next options are your local text files under {shared.user_data_dir}/training/datasets.', interactive=not mu)
                    with gr.Row():
                        with gr.Column():
                            stride_length = gr.Slider(label='Stride', minimum=0, maximum=32768, value=512, step=256, info='Used to make the evaluation faster at the cost of accuracy. 1 = slowest but most accurate. 512 is a common value.')
                        with gr.Column():
                            max_length = gr.Number(label='max_length', precision=0, step=256, value=0, info='The context for each evaluation. If set to 0, the maximum context length for the model will be used.')

                    with gr.Row():
                        start_current_evaluation = gr.Button("Evaluate loaded model", interactive=not mu)
                        start_evaluation = gr.Button("Evaluate selected models", interactive=not mu)
                        stop_evaluation = gr.Button("Interrupt", interactive=not mu)

                with gr.Column():
                    evaluation_log = gr.Markdown(value='')

            evaluation_table = gr.Dataframe(value=generate_markdown_table(), interactive=True)
            with gr.Row():
                save_comments = gr.Button('Save comments', elem_classes="small-button", interactive=not mu)
                refresh_table = gr.Button('Refresh the table', elem_classes="small-button", interactive=not mu)

        # Training events
        # all_params must stay ordered exactly like the PARAMETERS constant,
        # since do_copy_params() zips them positionally
        all_params = [lora_name, always_override, all_linear, q_proj_en, v_proj_en, k_proj_en, o_proj_en, gate_proj_en, down_proj_en, up_proj_en, save_steps, micro_batch_size, batch_size, epochs, learning_rate, lr_scheduler_type, lora_rank, lora_alpha, lora_dropout, cutoff_len, dataset, eval_dataset, format, eval_steps, text_dataset, higher_rank_limit, warmup_steps, optimizer, stride_length, stop_at_loss, add_eos_token, excess_length, report_to]

        copy_from.change(do_copy_params, [copy_from] + all_params, all_params)
        start_button.click(do_train, all_params, output)
        stop_button.click(do_interrupt, None, None, queue=False)
        higher_rank_limit.change(change_rank_limit, [higher_rank_limit], [lora_rank, lora_alpha])

        # Evaluation events. For some reason, the interrupt event
        # doesn't work with the .then() syntax, so I write them one
        # by one in this ugly but functional way.
        ev = start_evaluation.click(calculate_perplexity, [models, evaluate_text_file, stride_length, max_length], evaluation_log, show_progress=False)
        ev.then(generate_markdown_table, None, evaluation_table, show_progress=False)

        ev_cur = start_current_evaluation.click(
            lambda: ['current model'], None, tmp).then(
            calculate_perplexity, [tmp, evaluate_text_file, stride_length, max_length], evaluation_log, show_progress=False)
        ev_cur.then(generate_markdown_table, None, evaluation_table, show_progress=False)

        stop_evaluation.click(None, None, None, cancels=[ev, ev_cur], queue=False)
        refresh_table.click(generate_markdown_table, None, evaluation_table, show_progress=True)
        save_comments.click(
            save_past_evaluations, evaluation_table, None).then(
            lambda: "Comments saved.", None, evaluation_log, show_progress=False)
def do_interrupt():
    """Signal the running training loop to stop at the next step boundary."""
    global WANT_INTERRUPT
    WANT_INTERRUPT = True
def do_copy_params(lora_name: str, *args):
    """Return the values of all training PARAMETERS for an existing LoRA.

    Values come from the LoRA's saved training_parameters.json when it
    exists; any parameter missing from that file falls back to the current
    UI value passed positionally in *args (same order as PARAMETERS).
    Returns a list ordered like PARAMETERS, suitable as Gradio outputs.
    """
    f_name = f"{shared.args.lora_dir}/{clean_path(None, lora_name)}/training_parameters.json"
    if Path(f_name).is_file():
        with open(f_name, 'r', encoding='utf-8') as format_file:
            params: dict[str, str] = json.load(format_file)
    else:
        params = {}

    # Prefer the saved value for each parameter; fall back to the live UI value
    return [params.get(key, arg) for key, arg in zip(PARAMETERS, args)]
def change_rank_limit(use_higher_ranks: bool):
    """Return Gradio updates doubling the Rank/Alpha slider maximums when
    higher ranks are enabled (restoring the defaults otherwise)."""
    if use_higher_ranks:
        rank_max, alpha_max = 2048, 4096
    else:
        rank_max, alpha_max = 1024, 2048

    return {"maximum": rank_max, "__type__": "update"}, {"maximum": alpha_max, "__type__": "update"}
def clean_path(base_path: str, path: str):
    """Sanitize a user-supplied path (normalize backslashes, neutralize '..'
    traversal) and, when base_path is given, anchor it under that directory."""
    sanitized = path.replace('\\', '/').replace('..', '_')
    if base_path is None:
        return sanitized

    return f'{Path(base_path).absolute()}/{sanitized}'
def get_instruction_templates():
    """List available instruction template names (deduplicated across yaml/
    jinja variants), prefixed with the special 'None' and 'Chat Template'
    entries and sorted naturally."""
    template_dir = shared.user_data_dir / 'instruction-templates'
    stems = {
        f.stem
        for ext in ['yaml', 'yml', 'jinja', 'jinja2']
        for f in template_dir.glob(f'*.{ext}')
    }
    return ['None', 'Chat Template'] + sorted(stems, key=utils.natural_keys)
def load_template(name):
    """Load an instruction template by name, checking jinja variants first,
    then YAML files (whose 'instruction_template' key holds the template).
    Returns '' when no matching file exists."""
    base = shared.user_data_dir / 'instruction-templates'
    for ext in ['jinja', 'jinja2', 'yaml', 'yml']:
        candidate = base / f'{name}.{ext}'
        if not candidate.exists():
            continue

        text = candidate.read_text(encoding='utf-8')
        if ext in ['jinja', 'jinja2']:
            return text

        return yaml.safe_load(text).get('instruction_template', '')

    return ''
def backup_adapter(input_folder):
    """Copy an existing LoRA adapter's files into a dated 'Backup-YYYY-MM-DD'
    subfolder of `input_folder` before they get overwritten by new training.

    No-op when no adapter file exists or a backup for that date is already
    present. Best-effort: all failures are logged (via logger, consistent
    with the rest of this module) and never propagate to the caller.
    """
    try:
        # Find the adapter file (safetensors preferred, legacy .bin fallback)
        adapter_file = Path(f"{input_folder}/adapter_model.safetensors")
        if not adapter_file.is_file():
            adapter_file = Path(f"{input_folder}/adapter_model.bin")

        if not adapter_file.is_file():
            return

        logger.info("Backing up existing LoRA adapter")

        # Name the backup folder after the adapter file's creation date
        creation_date = datetime.fromtimestamp(adapter_file.stat().st_ctime)
        creation_date_str = creation_date.strftime("Backup-%Y-%m-%d")

        # Create the new subfolder
        subfolder_path = Path(f"{input_folder}/{creation_date_str}")
        subfolder_path.mkdir(parents=True, exist_ok=True)

        # Check if the file already exists in the subfolder
        backup_adapter_file = subfolder_path / adapter_file.name
        if backup_adapter_file.is_file():
            logger.info(" - Backup already exists. Skipping backup process.")
            return

        # Copy existing files to the new subfolder
        for file in Path(input_folder).iterdir():
            if file.is_file():
                shutil.copy2(file, subfolder_path)
    except Exception as e:
        logger.error("An error occurred in backup_adapter: %s", str(e))
def calc_trainable_parameters(model):
    """Count a model's parameters.

    Returns (trainable_params, all_params). Handles DeepSpeed ZeRO-3
    empty-initialized weights, whose real size is exposed through the
    `ds_numel` attribute instead of `numel()`.
    """
    trainable = 0
    total = 0
    for _, param in model.named_parameters():
        count = param.numel()
        # if using DS Zero 3 and the weights are initialized empty
        if count == 0 and hasattr(param, "ds_numel"):
            count = param.ds_numel

        total += count
        if param.requires_grad:
            trainable += count

    return trainable, total
def do_train(lora_name: str, always_override: bool, all_linear: bool, q_proj_en: bool, v_proj_en: bool, k_proj_en: bool, o_proj_en: bool, gate_proj_en: bool, down_proj_en: bool, up_proj_en: bool, save_steps: int, micro_batch_size: int, batch_size: int, epochs: int, learning_rate: str, lr_scheduler_type: str, lora_rank: int, lora_alpha: int, lora_dropout: float, cutoff_len: int, dataset: str, eval_dataset: str, format: str, eval_steps: int, text_dataset: str, higher_rank_limit: bool, warmup_steps: int, optimizer: str, stride_length: int, stop_at_loss: float, add_eos_token: bool, excess_length: str, report_to: str):
import torch
import transformers
from datasets import Dataset, load_dataset
from peft import (
LoraConfig,
get_peft_model,
prepare_model_for_kbit_training,
set_peft_model_state_dict
)
global WANT_INTERRUPT
WANT_INTERRUPT = False
# == Input validation / processing ==
yield "Preparing the input..."
if shared.args.loader == 'llama.cpp':
yield "Error: LoRA training requires a model loaded with the Transformers loader. GGUF models are not supported for training."
return
lora_file_path = clean_path(None, lora_name)
if lora_file_path.strip() == '':
yield "Missing or invalid LoRA file name input."
return
lora_file_path = f"{Path(shared.args.lora_dir)}/{lora_file_path}"
actual_lr = float(learning_rate)
model_type = type(shared.model).__name__
if model_type == "PeftModelForCausalLM":
if len(shared.lora_names) > 0:
yield "You are trying to train a LoRA while you already have another LoRA loaded. This will work, but may have unexpected effects. *(Will continue anyway in 5 seconds, press `Interrupt` to stop.)*"
logger.warning("Training LoRA over top of another LoRA. May have unexpected effects.")
else:
yield "Model ID not matched due to LoRA loading. Consider reloading base model. *(Will continue anyway in 5 seconds, press `Interrupt` to stop.)*"
logger.warning("Model ID not matched due to LoRA loading. Consider reloading base model.")
time.sleep(5)
if cutoff_len <= 0 or micro_batch_size <= 0 or batch_size <= 0 or actual_lr <= 0 or lora_rank <= 0 or lora_alpha <= 0:
yield "Cannot input zeroes."
return
gradient_accumulation_steps = max(1, batch_size // micro_batch_size)
original_chat_template = getattr(shared.tokenizer, 'chat_template', None)
if shared.tokenizer.pad_token_id is None:
shared.tokenizer.pad_token_id = shared.tokenizer.eos_token_id
shared.tokenizer.padding_side = "right"
def list_target_modules():
if all_linear:
return "all-linear"
target_mods = [f"{name}_proj" for name, enabled in {
"q": q_proj_en, "k": k_proj_en, "v": v_proj_en, "o": o_proj_en,
"gate": gate_proj_en, "down": down_proj_en, "up": up_proj_en,
}.items() if enabled]
return target_mods
def normalize_messages(data_point):
if "messages" in data_point:
return data_point["messages"]
if "conversations" in data_point:
role_map = {"human": "user", "gpt": "assistant"}
return [
{"role": role_map.get(turn.get("from", ""), turn.get("from", "")), "content": turn["value"]}
for turn in data_point["conversations"]
]
raise RuntimeError(
f'Dataset row must contain "messages" or "conversations" key. '
f'Found: {list(data_point.keys())}'
)
def tokenize_conversation(data_point):
messages = normalize_messages(data_point)
full_ids = list(shared.tokenizer.apply_chat_template(messages, tokenize=True, return_dict=False))
# Build labels: -100 for everything, then unmask assistant turns.
# This assumes apply_chat_template(messages[:i]) is a token-for-token
# prefix of apply_chat_template(messages[:i+1]), which holds for all
# standard chat templates (Llama, ChatML, Mistral, etc.).
labels = [-100] * len(full_ids)
for i, msg in enumerate(messages):
if msg["role"] == "assistant":
# Tokens up to where this assistant turn starts
header_ids = shared.tokenizer.apply_chat_template(
messages[:i], tokenize=True, return_dict=False, add_generation_prompt=True
)
# Tokens through end of this assistant turn
through_ids = shared.tokenizer.apply_chat_template(
messages[:i + 1], tokenize=True, return_dict=False
)
# Unmask assistant tokens
start = len(header_ids)
end = min(len(through_ids), len(full_ids))
labels[start:end] = full_ids[start:end]
if len(full_ids) > cutoff_len:
if excess_length == 'truncate':
full_ids = full_ids[:cutoff_len]
labels = labels[:cutoff_len]
else:
return {"input_ids": [], "labels": [], "attention_mask": []}
return {
"input_ids": full_ids,
"labels": labels,
"attention_mask": [1] * len(full_ids),
}
train_template.clear()
# == Prep the dataset, format, etc ==
has_text_dataset = text_dataset not in ['None', '']
has_chat_dataset = dataset not in ['None', '']
if has_text_dataset and has_chat_dataset:
yield "Error: select either a Chat Dataset or a Text Dataset, not both."
return
def tokenize_text_data(data):
all_tokens = []
for row in data:
tokens = shared.tokenizer.encode(row['text'])
if add_eos_token:
tokens.append(shared.tokenizer.eos_token_id)
all_tokens.extend(tokens)
stride = int(stride_length)
step = cutoff_len - stride if stride > 0 else cutoff_len
if step <= 0:
return None, "Error: stride length must be smaller than cutoff length."
if len(all_tokens) < cutoff_len:
return None, "Error: dataset is too short to fill even one chunk of the given cutoff length."
chunks = []
for start in range(0, len(all_tokens), step):
chunk = all_tokens[start:start + cutoff_len]
if len(chunk) == 0:
break
if len(chunk) < cutoff_len:
pad_len = cutoff_len - len(chunk)
chunks.append({
"input_ids": chunk + [shared.tokenizer.pad_token_id] * pad_len,
"labels": list(chunk) + [-100] * pad_len,
"attention_mask": [1] * len(chunk) + [0] * pad_len,
})
else:
chunks.append({
"input_ids": chunk,
"labels": list(chunk),
"attention_mask": [1] * cutoff_len,
})
return Dataset.from_list(chunks), None
if has_text_dataset:
train_template["template_type"] = "text_dataset"
logger.info("Loading text dataset")
data = load_dataset("json", data_files=clean_path(str(shared.user_data_dir / 'training/datasets'), f'{text_dataset}.json'))
if "text" not in data['train'].column_names:
yield "Error: text dataset must have a \"text\" key per row."
return
train_data, err = tokenize_text_data(data['train'])
if err:
yield err
return
if eval_dataset == 'None':
eval_data = None
else:
eval_raw = load_dataset("json", data_files=clean_path(str(shared.user_data_dir / 'training/datasets'), f'{eval_dataset}.json'))
if "text" not in eval_raw['train'].column_names:
yield "Error: evaluation dataset must have a \"text\" key per row."
return
eval_data, err = tokenize_text_data(eval_raw['train'])
if err:
yield err
return
elif has_chat_dataset:
if format in ['None', '']:
yield "Missing format choice input, cannot continue."
return
if format == 'Chat Template':
if not getattr(shared.tokenizer, 'chat_template', None):
yield "Error: this model's tokenizer does not have a chat template. Select an instruction template instead, or load an instruct/chat model."
return
else:
# Load custom instruction template and set on tokenizer
template_str = load_template(format)
if not template_str:
yield f"Error: could not load instruction template '{format}'."
return
shared.tokenizer.chat_template = template_str
# Unified path — both cases use tokenize_conversation()
train_template["template_type"] = "chat_template"
logger.info("Loading JSON dataset with chat template format")
data = load_dataset("json", data_files=clean_path(str(shared.user_data_dir / 'training/datasets'), f'{dataset}.json'))
# Validate the first row
try:
normalize_messages(data['train'][0])
except (RuntimeError, KeyError, IndexError) as e:
yield f"Error: {e}"
return
total = len(data['train'])
train_data = data['train'].map(
tokenize_conversation,
remove_columns=data['train'].column_names,
new_fingerprint='%030x' % random.randrange(16**30)
)
train_data = train_data.filter(lambda x: len(x['input_ids']) > 0)
dropped = total - len(train_data)
if dropped > 0:
logger.warning(f"Dropped {dropped}/{total} conversations exceeding cutoff length of {cutoff_len} tokens.")
if len(train_data) == 0:
yield f"Error: all {total} conversations exceed the cutoff length of {cutoff_len} tokens. Increase the cutoff length or shorten your data."
return
if eval_dataset == 'None':
eval_data = None
else:
eval_data = load_dataset("json", data_files=clean_path(str(shared.user_data_dir / 'training/datasets'), f'{eval_dataset}.json'))
eval_data = eval_data['train'].map(
tokenize_conversation,
remove_columns=eval_data['train'].column_names,
new_fingerprint='%030x' % random.randrange(16**30)
)
eval_data = eval_data.filter(lambda x: len(x['input_ids']) > 0)
else:
yield "No dataset selected. Choose a Chat Dataset or a Text Dataset."
return
# == We MUST reload model if it went through any previous training, even failed one ==
if shared.model_dirty_from_training:
selected_model = shared.model_name
if selected_model:
print("\033[1;31;1m(Model has been modified by previous training, it needs to be reloaded...)\033[0;37;0m")
try:
yield f"Reloading {selected_model}..."
reload_model()
if shared.model is not None:
print("Model reloaded OK, continue with training.")
else:
yield f"Failed to load {selected_model}."
return
except Exception:
exc = traceback.format_exc()
logger.error('Failed to reload the model.')
print(exc)
yield exc.replace('\n', '\n\n')
return
# == Start prepping the model itself ==
if not hasattr(shared.model, 'lm_head') or hasattr(shared.model.lm_head, 'weight'):
logger.info("Getting model ready")
if 'quantization_config' in shared.model.config.to_dict():
prepare_model_for_kbit_training(shared.model)
# base model is now frozen and should not be reused for any other LoRA training than this one
shared.model_dirty_from_training = True
logger.info("Preparing for training")
target_modules = list_target_modules()
if not target_modules:
yield "No target modules selected. Enable at least one module or check 'Target all linear layers'."
return
config = LoraConfig(
r=lora_rank,
lora_alpha=lora_alpha,
target_modules=target_modules,
lora_dropout=lora_dropout,
bias="none",
task_type="CAUSAL_LM"
)
# == Backup the existing adapter ==
if not always_override:
backup_adapter(lora_file_path)
# == get model trainable params
model_trainable_params, model_all_params = calc_trainable_parameters(shared.model)
# == Determine if we can resume from a checkpoint ==
resume_checkpoint = None
try:
logger.info("Creating LoRA model")
lora_model = get_peft_model(shared.model, config)
if not always_override and Path(lora_file_path).exists():
# Look for HF Trainer checkpoint dirs (full resumption)
checkpoints = sorted(Path(lora_file_path).glob("checkpoint-*"), key=os.path.getmtime)
if checkpoints:
resume_checkpoint = str(checkpoints[-1])
logger.info(f"Will resume from checkpoint: {resume_checkpoint}")
else:
# Legacy fallback: load bare adapter weights only
safetensors_path = Path(f"{lora_file_path}/adapter_model.safetensors")
bin_path = Path(f"{lora_file_path}/adapter_model.bin")
if safetensors_path.is_file():
logger.info("Loading existing LoRA data (safetensors)")
from safetensors.torch import load_file
state_dict_peft = load_file(str(safetensors_path))
set_peft_model_state_dict(lora_model, state_dict_peft)
elif bin_path.is_file():
logger.info("Loading existing LoRA data (bin)")
state_dict_peft = torch.load(str(bin_path), weights_only=True)
set_peft_model_state_dict(lora_model, state_dict_peft)
except Exception:
yield traceback.format_exc().replace('\n', '\n\n')
return
class Tracked():
def __init__(self):
self.current_steps = 0
self.max_steps = 0
self.did_save = False
tracked = Tracked()
actual_save_steps = math.ceil(save_steps / gradient_accumulation_steps)
class Callbacks(transformers.TrainerCallback):
def on_step_begin(self, args: transformers.TrainingArguments, state: transformers.TrainerState, control: transformers.TrainerControl, **kwargs):
tracked.current_steps = state.global_step * gradient_accumulation_steps
tracked.max_steps = state.max_steps * gradient_accumulation_steps
if WANT_INTERRUPT:
control.should_epoch_stop = True
control.should_training_stop = True
def on_substep_end(self, args: transformers.TrainingArguments, state: transformers.TrainerState, control: transformers.TrainerControl, **kwargs):
tracked.current_steps += 1
if WANT_INTERRUPT:
control.should_epoch_stop = True
control.should_training_stop = True
def on_log(self, args: transformers.TrainingArguments, state: transformers.TrainerState, control: transformers.TrainerControl, logs, **kwargs):
train_log.update(logs)
train_log.update({"current_steps": tracked.current_steps})
if WANT_INTERRUPT:
print("\033[1;31;1mInterrupted by user\033[0;37;0m")
print(f"\033[1;30;40mStep: {tracked.current_steps} \033[0;37;0m", end='')
if 'loss' in logs:
loss = float(logs['loss'])
if stop_at_loss > 0 and loss <= stop_at_loss:
control.should_epoch_stop = True
control.should_training_stop = True
print(f"\033[1;31;1mStop Loss {stop_at_loss} reached.\033[0;37;0m")
def on_save(self, args: transformers.TrainingArguments, state: transformers.TrainerState, control: transformers.TrainerControl, **kwargs):
checkpoint_dir = Path(args.output_dir) / f"checkpoint-{state.global_step}"
if checkpoint_dir.exists():
with open(checkpoint_dir / "training_log.json", 'w', encoding='utf-8') as file:
json.dump(train_log, file, indent=2)
with open(checkpoint_dir / "training_prompt.json", 'w', encoding='utf-8') as file:
json.dump(train_template, file, indent=2)
# Fix training for mixed precision models
for param in shared.model.parameters():
if param.requires_grad:
param.data = param.data.float()
lora_model.config.use_cache = False
def collate_fn(batch):
max_len = max(len(item['input_ids']) for item in batch)
input_ids, labels, attention_mask = [], [], []
for item in batch:
pad_len = max_len - len(item['input_ids'])
input_ids.append(item['input_ids'] + [shared.tokenizer.pad_token_id] * pad_len)
labels.append(item['labels'] + [-100] * pad_len)
attention_mask.append(item['attention_mask'] + [0] * pad_len)
return {
'input_ids': torch.tensor(input_ids),
'labels': torch.tensor(labels),
'attention_mask': torch.tensor(attention_mask),
}
trainer = transformers.Trainer(
model=lora_model,
train_dataset=train_data,
eval_dataset=eval_data,
args=transformers.TrainingArguments(
report_to=report_to if report_to != "None" else "none",
per_device_train_batch_size=micro_batch_size,
gradient_accumulation_steps=gradient_accumulation_steps,
warmup_steps=math.ceil(warmup_steps / gradient_accumulation_steps),
num_train_epochs=epochs,
learning_rate=actual_lr,
fp16=False if shared.args.cpu or shared.args.bf16 else True,
bf16=shared.args.bf16,
optim=optimizer,
logging_steps=1,
eval_strategy="steps" if eval_data is not None else "no",
eval_steps=math.ceil(eval_steps / gradient_accumulation_steps) if eval_data is not None else None,
save_strategy="steps" if save_steps > 0 or eval_data is not None else "no",
save_steps=actual_save_steps if save_steps > 0 else None,
output_dir=lora_file_path,
lr_scheduler_type=lr_scheduler_type,
load_best_model_at_end=eval_data is not None,
# TODO: Enable multi-device support
ddp_find_unused_parameters=None,
use_cpu=shared.args.cpu,
remove_unused_columns=False,
),
data_collator=collate_fn,
callbacks=[Callbacks()]
)
# == Save parameters for reuse ==
with open(f"{lora_file_path}/training_parameters.json", 'w', encoding='utf-8') as file:
local_vars = locals()
json.dump({x: local_vars[x] for x in PARAMETERS}, file, indent=2)
# == Save training prompt ==
with open(f"{lora_file_path}/training_prompt.json", 'w', encoding='utf-8') as file:
json.dump(train_template, file, indent=2)
# == Main run and monitor loop ==
logger.info("Starting training")
yield "Starting..."
lora_trainable_param, lora_all_param = calc_trainable_parameters(lora_model)
if target_modules == "all-linear":
projections_string = "all-linear"
else:
projections_string = ", ".join([projection.replace("_proj", "") for projection in target_modules])
print(f"Training '{model_type}' model using ({projections_string}) projections")
if lora_all_param > 0:
print(f"Trainable params: {lora_trainable_param:,d} ({100 * lora_trainable_param / lora_all_param:.4f} %), All params: {lora_all_param:,d} (Model: {model_all_params:,d})")
train_log.update({"base_model_name": shared.model_name})
train_log.update({"base_model_class": shared.model.__class__.__name__})
train_log.update({"base_loaded_in_4bit": getattr(lora_model, "is_loaded_in_4bit", False)})
train_log.update({"base_loaded_in_8bit": getattr(lora_model, "is_loaded_in_8bit", False)})
train_log.update({"projections": projections_string})
if stop_at_loss > 0:
print(f"Monitoring loss \033[1;31;1m(Auto-Stop at: {stop_at_loss})\033[0;37;0m")
if WANT_INTERRUPT:
yield "Interrupted before start."
return
def log_train_dataset(trainer):
decoded_entries = []
# Try to decode the entries and write the log file
try:
# Iterate over the first 10 elements in the dataset (or fewer if there are less than 10)
for i in range(min(10, len(trainer.train_dataset))):
decoded_text = shared.tokenizer.decode(trainer.train_dataset[i]['input_ids'])
decoded_entries.append({"value": decoded_text})
# Write the log file
(shared.user_data_dir / 'logs').mkdir(exist_ok=True)
with open(shared.user_data_dir / 'logs' / 'train_dataset_sample.json', 'w') as json_file:
json.dump(decoded_entries, json_file, indent=4)
logger.info(f"Log file 'train_dataset_sample.json' created in the '{shared.user_data_dir}/logs' directory.")
except Exception as e:
logger.error(f"Failed to create log file due to error: {e}")
thread_error = None
def threaded_run():
nonlocal thread_error
try:
log_train_dataset(trainer)
trainer.train(resume_from_checkpoint=resume_checkpoint)
# Note: save in the thread in case the gradio thread breaks (eg browser closed)
lora_model.save_pretrained(lora_file_path)
tracked.did_save = True
logger.info("LoRA training run is completed and saved.")
# Save log
with open(f"{lora_file_path}/training_log.json", 'w', encoding='utf-8') as file:
json.dump(train_log, file, indent=2)
except Exception as e:
thread_error = e
logger.error(f"Training error: {e}")
thread = threading.Thread(target=threaded_run)
thread.start()
last_step = 0
start_time = time.perf_counter()
while thread.is_alive():
time.sleep(0.5)
if WANT_INTERRUPT:
yield "Interrupting, please wait... *(Run will stop after the current training step completes.)*"
elif tracked.current_steps != last_step:
last_step = tracked.current_steps
time_elapsed = time.perf_counter() - start_time
if time_elapsed <= 0:
timer_info = ""
total_time_estimate = 999
else:
its = tracked.current_steps / time_elapsed
if its > 1:
timer_info = f"`{its:.2f}` it/s"
else:
timer_info = f"`{1.0/its:.2f}` s/it"
total_time_estimate = (1.0 / its) * (tracked.max_steps)
yield f"Running... **{tracked.current_steps}** / **{tracked.max_steps}** ... {timer_info}, {format_time(time_elapsed)} / {format_time(total_time_estimate)} ... {format_time(total_time_estimate - time_elapsed)} remaining"
# Check for errors from the training thread
if thread_error is not None:
yield f"Training failed: {thread_error}"
return
# Saving in the train thread might fail if an error occurs, so save here if so.
if not tracked.did_save:
logger.info("Training complete, saving")
lora_model.save_pretrained(lora_file_path)
# Restore the original chat_template if we changed it for training
if shared.tokenizer is not None and hasattr(shared.tokenizer, 'chat_template'):
shared.tokenizer.chat_template = original_chat_template
if WANT_INTERRUPT:
logger.info("Training interrupted.")
yield f"Interrupted. Incomplete LoRA saved to `{lora_file_path}`."
else:
logger.info("Training complete!")
yield f"Done! LoRA saved to `{lora_file_path}`.\n\nBefore testing your new LoRA, make sure to first reload the model, as it is currently dirty from training."
def format_time(seconds: float):
if seconds < 120:
return f"`{seconds:.0f}` seconds"
minutes = seconds / 60
if minutes < 120:
return f"`{minutes:.0f}` minutes"
hours = minutes / 60
return f"`{hours:.0f}` hours" | --- +++ @@ -215,6 +215,7 @@
def clean_path(base_path: str, path: str):
+ """Strips unusual symbols and forcibly builds a path as relative to the intended directory."""
path = path.replace('\\', '/').replace('..', '_')
if base_path is None:
return path
@@ -232,6 +233,7 @@
def load_template(name):
+ """Load a Jinja2 template string from {user_data_dir}/instruction-templates/."""
path = shared.user_data_dir / 'instruction-templates'
for ext in ['jinja', 'jinja2', 'yaml', 'yml']:
filepath = path / f'{name}.{ext}'
@@ -353,6 +355,7 @@ return target_mods
def normalize_messages(data_point):
+ """Convert a dataset row to OpenAI messages format for apply_chat_template()."""
if "messages" in data_point:
return data_point["messages"]
@@ -369,6 +372,7 @@ )
def tokenize_conversation(data_point):
+ """Tokenize using apply_chat_template() with assistant-only label masking."""
messages = normalize_messages(data_point)
full_ids = list(shared.tokenizer.apply_chat_template(messages, tokenize=True, return_dict=False))
@@ -415,6 +419,7 @@ return
def tokenize_text_data(data):
+ """Tokenize text dataset rows, concatenate, and split into chunks."""
all_tokens = []
for row in data:
tokens = shared.tokenizer.encode(row['text'])
@@ -842,4 +847,4 @@ return f"`{minutes:.0f}` minutes"
hours = minutes / 60
- return f"`{hours:.0f}` hours"+ return f"`{hours:.0f}` hours"
| https://raw.githubusercontent.com/oobabooga/text-generation-webui/HEAD/modules/training.py |
Include argument descriptions in docstrings | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/api/main.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
import asyncio
import os
import subprocess
import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from fastapi.responses import FileResponse
from .routers import crawler_router, data_router, websocket_router
app = FastAPI(
title="MediaCrawler WebUI API",
description="API for controlling MediaCrawler from WebUI",
version="1.0.0"
)
# Get webui static files directory
WEBUI_DIR = os.path.join(os.path.dirname(__file__), "webui")
# CORS configuration - allow frontend dev server access
app.add_middleware(
CORSMiddleware,
allow_origins=[
"http://localhost:5173", # Vite dev server
"http://localhost:3000", # Backup port
"http://127.0.0.1:5173",
"http://127.0.0.1:3000",
],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Register routers
app.include_router(crawler_router, prefix="/api")
app.include_router(data_router, prefix="/api")
app.include_router(websocket_router, prefix="/api")
@app.get("/")
async def serve_frontend():
index_path = os.path.join(WEBUI_DIR, "index.html")
if os.path.exists(index_path):
return FileResponse(index_path)
return {
"message": "MediaCrawler WebUI API",
"version": "1.0.0",
"docs": "/docs",
"note": "WebUI not found, please build it first: cd webui && npm run build"
}
@app.get("/api/health")
async def health_check():
return {"status": "ok"}
@app.get("/api/env/check")
async def check_environment():
try:
# Run uv run main.py --help command to check environment
process = await asyncio.create_subprocess_exec(
"uv", "run", "main.py", "--help",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd="." # Project root directory
)
stdout, stderr = await asyncio.wait_for(
process.communicate(),
timeout=30.0 # 30 seconds timeout
)
if process.returncode == 0:
return {
"success": True,
"message": "MediaCrawler environment configured correctly",
"output": stdout.decode("utf-8", errors="ignore")[:500] # Truncate to first 500 characters
}
else:
error_msg = stderr.decode("utf-8", errors="ignore") or stdout.decode("utf-8", errors="ignore")
return {
"success": False,
"message": "Environment check failed",
"error": error_msg[:500]
}
except asyncio.TimeoutError:
return {
"success": False,
"message": "Environment check timeout",
"error": "Command execution exceeded 30 seconds"
}
except FileNotFoundError:
return {
"success": False,
"message": "uv command not found",
"error": "Please ensure uv is installed and configured in system PATH"
}
except Exception as e:
return {
"success": False,
"message": "Environment check error",
"error": str(e)
}
@app.get("/api/config/platforms")
async def get_platforms():
return {
"platforms": [
{"value": "xhs", "label": "Xiaohongshu", "icon": "book-open"},
{"value": "dy", "label": "Douyin", "icon": "music"},
{"value": "ks", "label": "Kuaishou", "icon": "video"},
{"value": "bili", "label": "Bilibili", "icon": "tv"},
{"value": "wb", "label": "Weibo", "icon": "message-circle"},
{"value": "tieba", "label": "Baidu Tieba", "icon": "messages-square"},
{"value": "zhihu", "label": "Zhihu", "icon": "help-circle"},
]
}
@app.get("/api/config/options")
async def get_config_options():
return {
"login_types": [
{"value": "qrcode", "label": "QR Code Login"},
{"value": "cookie", "label": "Cookie Login"},
],
"crawler_types": [
{"value": "search", "label": "Search Mode"},
{"value": "detail", "label": "Detail Mode"},
{"value": "creator", "label": "Creator Mode"},
],
"save_options": [
{"value": "jsonl", "label": "JSONL File"},
{"value": "json", "label": "JSON File"},
{"value": "csv", "label": "CSV File"},
{"value": "excel", "label": "Excel File"},
{"value": "sqlite", "label": "SQLite Database"},
{"value": "db", "label": "MySQL Database"},
{"value": "mongodb", "label": "MongoDB Database"},
],
}
# Mount static resources - must be placed after all routes
if os.path.exists(WEBUI_DIR):
assets_dir = os.path.join(WEBUI_DIR, "assets")
if os.path.exists(assets_dir):
app.mount("/assets", StaticFiles(directory=assets_dir), name="assets")
# Mount logos directory
logos_dir = os.path.join(WEBUI_DIR, "logos")
if os.path.exists(logos_dir):
app.mount("/logos", StaticFiles(directory=logos_dir), name="logos")
# Mount other static files (e.g., vite.svg)
app.mount("/static", StaticFiles(directory=WEBUI_DIR), name="webui-static")
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=8080) | --- +++ @@ -16,6 +16,11 @@ # 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
+"""
+MediaCrawler WebUI API Server
+Start command: uvicorn api.main:app --port 8080 --reload
+Or: python -m api.main
+"""
import asyncio
import os
import subprocess
@@ -58,6 +63,7 @@
@app.get("/")
async def serve_frontend():
+ """Return frontend page"""
index_path = os.path.join(WEBUI_DIR, "index.html")
if os.path.exists(index_path):
return FileResponse(index_path)
@@ -76,6 +82,7 @@
@app.get("/api/env/check")
async def check_environment():
+ """Check if MediaCrawler environment is configured correctly"""
try:
# Run uv run main.py --help command to check environment
process = await asyncio.create_subprocess_exec(
@@ -124,6 +131,7 @@
@app.get("/api/config/platforms")
async def get_platforms():
+ """Get list of supported platforms"""
return {
"platforms": [
{"value": "xhs", "label": "Xiaohongshu", "icon": "book-open"},
@@ -139,6 +147,7 @@
@app.get("/api/config/options")
async def get_config_options():
+ """Get all configuration options"""
return {
"login_types": [
{"value": "qrcode", "label": "QR Code Login"},
@@ -175,4 +184,4 @@
if __name__ == "__main__":
- uvicorn.run(app, host="0.0.0.0", port=8080)+ uvicorn.run(app, host="0.0.0.0", port=8080)
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/api/main.py |
Add docstrings for production code | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/database/db.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# persist-1<persist1@126.com>
# Reason: Refactored db.py into a module, removed direct execution entry point, fixed relative import issues.
# Side effects: None
# Rollback strategy: Restore this file.
import asyncio
import sys
from pathlib import Path
# Add project root to sys.path
project_root = Path(__file__).resolve().parents[1]
if str(project_root) not in sys.path:
sys.path.append(str(project_root))
from tools import utils
from database.db_session import create_tables
async def init_table_schema(db_type: str):
utils.logger.info(f"[init_table_schema] begin init {db_type} table schema ...")
await create_tables(db_type)
utils.logger.info(f"[init_table_schema] {db_type} table schema init successful")
async def init_db(db_type: str = None):
await init_table_schema(db_type)
async def close():
pass | --- +++ @@ -33,6 +33,12 @@ from database.db_session import create_tables
async def init_table_schema(db_type: str):
+ """
+ Initializes the database table schema.
+ This will create tables based on the ORM models.
+ Args:
+ db_type: The type of database, 'sqlite' or 'mysql'.
+ """
utils.logger.info(f"[init_table_schema] begin init {db_type} table schema ...")
await create_tables(db_type)
utils.logger.info(f"[init_table_schema] {db_type} table schema init successful")
@@ -41,4 +47,7 @@ await init_table_schema(db_type)
async def close():
- pass+ """
+ Placeholder for closing database connections if needed in the future.
+ """
+ pass
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/database/db.py |
Add docstrings including usage examples | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/xhs/xhs_sign.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# Xiaohongshu signature algorithm core functions
# Used for generating signatures via playwright injection
import ctypes
import random
from urllib.parse import quote
# Custom Base64 character table
# Standard Base64: ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/
# Xiaohongshu shuffled order for obfuscation
BASE64_CHARS = list("ZmserbBoHQtNP+wOcza/LpngG8yJq42KWYj0DSfdikx3VT16IlUAFM97hECvuRX5")
# CRC32 lookup table
CRC32_TABLE = [
0, 1996959894, 3993919788, 2567524794, 124634137, 1886057615, 3915621685,
2657392035, 249268274, 2044508324, 3772115230, 2547177864, 162941995,
2125561021, 3887607047, 2428444049, 498536548, 1789927666, 4089016648,
2227061214, 450548861, 1843258603, 4107580753, 2211677639, 325883990,
1684777152, 4251122042, 2321926636, 335633487, 1661365465, 4195302755,
2366115317, 997073096, 1281953886, 3579855332, 2724688242, 1006888145,
1258607687, 3524101629, 2768942443, 901097722, 1119000684, 3686517206,
2898065728, 853044451, 1172266101, 3705015759, 2882616665, 651767980,
1373503546, 3369554304, 3218104598, 565507253, 1454621731, 3485111705,
3099436303, 671266974, 1594198024, 3322730930, 2970347812, 795835527,
1483230225, 3244367275, 3060149565, 1994146192, 31158534, 2563907772,
4023717930, 1907459465, 112637215, 2680153253, 3904427059, 2013776290,
251722036, 2517215374, 3775830040, 2137656763, 141376813, 2439277719,
3865271297, 1802195444, 476864866, 2238001368, 4066508878, 1812370925,
453092731, 2181625025, 4111451223, 1706088902, 314042704, 2344532202,
4240017532, 1658658271, 366619977, 2362670323, 4224994405, 1303535960,
984961486, 2747007092, 3569037538, 1256170817, 1037604311, 2765210733,
3554079995, 1131014506, 879679996, 2909243462, 3663771856, 1141124467,
855842277, 2852801631, 3708648649, 1342533948, 654459306, 3188396048,
3373015174, 1466479909, 544179635, 3110523913, 3462522015, 1591671054,
702138776, 2966460450, 3352799412, 1504918807, 783551873, 3082640443,
3233442989, 3988292384, 2596254646, 62317068, 1957810842, 3939845945,
2647816111, 81470997, 1943803523, 3814918930, 2489596804, 225274430,
2053790376, 3826175755, 2466906013, 167816743, 2097651377, 4027552580,
2265490386, 503444072, 1762050814, 4150417245, 2154129355, 426522225,
1852507879, 4275313526, 2312317920, 282753626, 1742555852, 4189708143,
2394877945, 397917763, 1622183637, 3604390888, 2714866558, 953729732,
1340076626, 3518719985, 2797360999, 1068828381, 1219638859, 3624741850,
2936675148, 906185462, 1090812512, 3747672003, 2825379669, 829329135,
1181335161, 3412177804, 3160834842, 628085408, 1382605366, 3423369109,
3138078467, 570562233, 1426400815, 3317316542, 2998733608, 733239954,
1555261956, 3268935591, 3050360625, 752459403, 1541320221, 2607071920,
3965973030, 1969922972, 40735498, 2617837225, 3943577151, 1913087877,
83908371, 2512341634, 3803740692, 2075208622, 213261112, 2463272603,
3855990285, 2094854071, 198958881, 2262029012, 4057260610, 1759359992,
534414190, 2176718541, 4139329115, 1873836001, 414664567, 2282248934,
4279200368, 1711684554, 285281116, 2405801727, 4167216745, 1634467795,
376229701, 2685067896, 3608007406, 1308918612, 956543938, 2808555105,
3495958263, 1231636301, 1047427035, 2932959818, 3654703836, 1088359270,
936918000, 2847714899, 3736837829, 1202900863, 817233897, 3183342108,
3401237130, 1404277552, 615818150, 3134207493, 3453421203, 1423857449,
601450431, 3009837614, 3294710456, 1567103746, 711928724, 3020668471,
3272380065, 1510334235, 755167117,
]
def _right_shift_unsigned(num: int, bit: int = 0) -> int:
val = ctypes.c_uint32(num).value >> bit
MAX32INT = 4294967295
return (val + (MAX32INT + 1)) % (2 * (MAX32INT + 1)) - MAX32INT - 1
def mrc(e: str) -> int:
o = -1
for n in range(min(57, len(e))):
o = CRC32_TABLE[(o & 255) ^ ord(e[n])] ^ _right_shift_unsigned(o, 8)
return o ^ -1 ^ 3988292384
def _triplet_to_base64(e: int) -> str:
return (
BASE64_CHARS[(e >> 18) & 63]
+ BASE64_CHARS[(e >> 12) & 63]
+ BASE64_CHARS[(e >> 6) & 63]
+ BASE64_CHARS[e & 63]
)
def _encode_chunk(data: list, start: int, end: int) -> str:
result = []
for i in range(start, end, 3):
c = ((data[i] << 16) & 0xFF0000) + ((data[i + 1] << 8) & 0xFF00) + (data[i + 2] & 0xFF)
result.append(_triplet_to_base64(c))
return "".join(result)
def encode_utf8(s: str) -> list:
encoded = quote(s, safe="~()*!.'")
result = []
i = 0
while i < len(encoded):
if encoded[i] == "%":
result.append(int(encoded[i + 1: i + 3], 16))
i += 3
else:
result.append(ord(encoded[i]))
i += 1
return result
def b64_encode(data: list) -> str:
length = len(data)
remainder = length % 3
chunks = []
main_length = length - remainder
for i in range(0, main_length, 16383):
chunks.append(_encode_chunk(data, i, min(i + 16383, main_length)))
if remainder == 1:
a = data[length - 1]
chunks.append(BASE64_CHARS[a >> 2] + BASE64_CHARS[(a << 4) & 63] + "==")
elif remainder == 2:
a = (data[length - 2] << 8) + data[length - 1]
chunks.append(
BASE64_CHARS[a >> 10] + BASE64_CHARS[(a >> 4) & 63] + BASE64_CHARS[(a << 2) & 63] + "="
)
return "".join(chunks)
def get_trace_id() -> str:
return "".join(random.choice("abcdef0123456789") for _ in range(16)) | --- +++ @@ -77,12 +77,14 @@
def _right_shift_unsigned(num: int, bit: int = 0) -> int:
+ """Python implementation of JavaScript unsigned right shift (>>>)"""
val = ctypes.c_uint32(num).value >> bit
MAX32INT = 4294967295
return (val + (MAX32INT + 1)) % (2 * (MAX32INT + 1)) - MAX32INT - 1
def mrc(e: str) -> int:
+ """CRC32 variant, used for x9 field in x-s-common"""
o = -1
for n in range(min(57, len(e))):
o = CRC32_TABLE[(o & 255) ^ ord(e[n])] ^ _right_shift_unsigned(o, 8)
@@ -90,6 +92,7 @@
def _triplet_to_base64(e: int) -> str:
+ """Convert 24-bit integer to 4 Base64 characters"""
return (
BASE64_CHARS[(e >> 18) & 63]
+ BASE64_CHARS[(e >> 12) & 63]
@@ -99,6 +102,7 @@
def _encode_chunk(data: list, start: int, end: int) -> str:
+ """Encode data chunk"""
result = []
for i in range(start, end, 3):
c = ((data[i] << 16) & 0xFF0000) + ((data[i + 1] << 8) & 0xFF00) + (data[i + 2] & 0xFF)
@@ -107,6 +111,7 @@
def encode_utf8(s: str) -> list:
+ """Encode string to UTF-8 byte list"""
encoded = quote(s, safe="~()*!.'")
result = []
i = 0
@@ -121,6 +126,7 @@
def b64_encode(data: list) -> str:
+ """Custom Base64 encoding"""
length = len(data)
remainder = length % 3
chunks = []
@@ -142,4 +148,5 @@
def get_trace_id() -> str:
- return "".join(random.choice("abcdef0123456789") for _ in range(16))+ """Generate trace id for link tracing"""
+ return "".join(random.choice("abcdef0123456789") for _ in range(16))
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/xhs/xhs_sign.py |
Add verbose docstrings with examples | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/api/services/crawler_manager.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
import asyncio
import subprocess
import signal
import os
from typing import Optional, List
from datetime import datetime
from pathlib import Path
from ..schemas import CrawlerStartRequest, LogEntry
class CrawlerManager:
def __init__(self):
self._lock = asyncio.Lock()
self.process: Optional[subprocess.Popen] = None
self.status = "idle"
self.started_at: Optional[datetime] = None
self.current_config: Optional[CrawlerStartRequest] = None
self._log_id = 0
self._logs: List[LogEntry] = []
self._read_task: Optional[asyncio.Task] = None
# Project root directory
self._project_root = Path(__file__).parent.parent.parent
# Log queue - for pushing to WebSocket
self._log_queue: Optional[asyncio.Queue] = None
@property
def logs(self) -> List[LogEntry]:
return self._logs
def get_log_queue(self) -> asyncio.Queue:
if self._log_queue is None:
self._log_queue = asyncio.Queue()
return self._log_queue
def _create_log_entry(self, message: str, level: str = "info") -> LogEntry:
self._log_id += 1
entry = LogEntry(
id=self._log_id,
timestamp=datetime.now().strftime("%H:%M:%S"),
level=level,
message=message
)
self._logs.append(entry)
# Keep last 500 logs
if len(self._logs) > 500:
self._logs = self._logs[-500:]
return entry
async def _push_log(self, entry: LogEntry):
if self._log_queue is not None:
try:
self._log_queue.put_nowait(entry)
except asyncio.QueueFull:
pass
def _parse_log_level(self, line: str) -> str:
line_upper = line.upper()
if "ERROR" in line_upper or "FAILED" in line_upper:
return "error"
elif "WARNING" in line_upper or "WARN" in line_upper:
return "warning"
elif "SUCCESS" in line_upper or "完成" in line or "成功" in line:
return "success"
elif "DEBUG" in line_upper:
return "debug"
return "info"
async def start(self, config: CrawlerStartRequest) -> bool:
async with self._lock:
if self.process and self.process.poll() is None:
return False
# Clear old logs
self._logs = []
self._log_id = 0
# Clear pending queue (don't replace object to avoid WebSocket broadcast coroutine holding old queue reference)
if self._log_queue is None:
self._log_queue = asyncio.Queue()
else:
try:
while True:
self._log_queue.get_nowait()
except asyncio.QueueEmpty:
pass
# Build command line arguments
cmd = self._build_command(config)
# Log start information
entry = self._create_log_entry(f"Starting crawler: {' '.join(cmd)}", "info")
await self._push_log(entry)
try:
# Start subprocess
self.process = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True,
encoding='utf-8',
bufsize=1,
cwd=str(self._project_root),
env={**os.environ, "PYTHONUNBUFFERED": "1"}
)
self.status = "running"
self.started_at = datetime.now()
self.current_config = config
entry = self._create_log_entry(
f"Crawler started on platform: {config.platform.value}, type: {config.crawler_type.value}",
"success"
)
await self._push_log(entry)
# Start log reading task
self._read_task = asyncio.create_task(self._read_output())
return True
except Exception as e:
self.status = "error"
entry = self._create_log_entry(f"Failed to start crawler: {str(e)}", "error")
await self._push_log(entry)
return False
async def stop(self) -> bool:
async with self._lock:
if not self.process or self.process.poll() is not None:
return False
self.status = "stopping"
entry = self._create_log_entry("Sending SIGTERM to crawler process...", "warning")
await self._push_log(entry)
try:
self.process.send_signal(signal.SIGTERM)
# Wait for graceful exit (up to 15 seconds)
for _ in range(30):
if self.process.poll() is not None:
break
await asyncio.sleep(0.5)
# If still not exited, force kill
if self.process.poll() is None:
entry = self._create_log_entry("Process not responding, sending SIGKILL...", "warning")
await self._push_log(entry)
self.process.kill()
entry = self._create_log_entry("Crawler process terminated", "info")
await self._push_log(entry)
except Exception as e:
entry = self._create_log_entry(f"Error stopping crawler: {str(e)}", "error")
await self._push_log(entry)
self.status = "idle"
self.current_config = None
# Cancel log reading task
if self._read_task:
self._read_task.cancel()
self._read_task = None
return True
def get_status(self) -> dict:
return {
"status": self.status,
"platform": self.current_config.platform.value if self.current_config else None,
"crawler_type": self.current_config.crawler_type.value if self.current_config else None,
"started_at": self.started_at.isoformat() if self.started_at else None,
"error_message": None
}
def _build_command(self, config: CrawlerStartRequest) -> list:
cmd = ["uv", "run", "python", "main.py"]
cmd.extend(["--platform", config.platform.value])
cmd.extend(["--lt", config.login_type.value])
cmd.extend(["--type", config.crawler_type.value])
cmd.extend(["--save_data_option", config.save_option.value])
# Pass different arguments based on crawler type
if config.crawler_type.value == "search" and config.keywords:
cmd.extend(["--keywords", config.keywords])
elif config.crawler_type.value == "detail" and config.specified_ids:
cmd.extend(["--specified_id", config.specified_ids])
elif config.crawler_type.value == "creator" and config.creator_ids:
cmd.extend(["--creator_id", config.creator_ids])
if config.start_page != 1:
cmd.extend(["--start", str(config.start_page)])
cmd.extend(["--get_comment", "true" if config.enable_comments else "false"])
cmd.extend(["--get_sub_comment", "true" if config.enable_sub_comments else "false"])
if config.cookies:
cmd.extend(["--cookies", config.cookies])
cmd.extend(["--headless", "true" if config.headless else "false"])
return cmd
async def _read_output(self):
loop = asyncio.get_event_loop()
try:
while self.process and self.process.poll() is None:
# Read a line in thread pool
line = await loop.run_in_executor(
None, self.process.stdout.readline
)
if line:
line = line.strip()
if line:
level = self._parse_log_level(line)
entry = self._create_log_entry(line, level)
await self._push_log(entry)
# Read remaining output
if self.process and self.process.stdout:
remaining = await loop.run_in_executor(
None, self.process.stdout.read
)
if remaining:
for line in remaining.strip().split('\n'):
if line.strip():
level = self._parse_log_level(line)
entry = self._create_log_entry(line.strip(), level)
await self._push_log(entry)
# Process ended
if self.status == "running":
exit_code = self.process.returncode if self.process else -1
if exit_code == 0:
entry = self._create_log_entry("Crawler completed successfully", "success")
else:
entry = self._create_log_entry(f"Crawler exited with code: {exit_code}", "warning")
await self._push_log(entry)
self.status = "idle"
except asyncio.CancelledError:
pass
except Exception as e:
entry = self._create_log_entry(f"Error reading output: {str(e)}", "error")
await self._push_log(entry)
# Global singleton
crawler_manager = CrawlerManager() | --- +++ @@ -28,6 +28,7 @@
class CrawlerManager:
+ """Crawler process manager"""
def __init__(self):
self._lock = asyncio.Lock()
@@ -48,11 +49,13 @@ return self._logs
def get_log_queue(self) -> asyncio.Queue:
+ """Get or create log queue"""
if self._log_queue is None:
self._log_queue = asyncio.Queue()
return self._log_queue
def _create_log_entry(self, message: str, level: str = "info") -> LogEntry:
+ """Create log entry"""
self._log_id += 1
entry = LogEntry(
id=self._log_id,
@@ -67,6 +70,7 @@ return entry
async def _push_log(self, entry: LogEntry):
+ """Push log to queue"""
if self._log_queue is not None:
try:
self._log_queue.put_nowait(entry)
@@ -74,6 +78,7 @@ pass
def _parse_log_level(self, line: str) -> str:
+ """Parse log level"""
line_upper = line.upper()
if "ERROR" in line_upper or "FAILED" in line_upper:
return "error"
@@ -86,6 +91,7 @@ return "info"
async def start(self, config: CrawlerStartRequest) -> bool:
+ """Start crawler process"""
async with self._lock:
if self.process and self.process.poll() is None:
return False
@@ -145,6 +151,7 @@ return False
async def stop(self) -> bool:
+ """Stop crawler process"""
async with self._lock:
if not self.process or self.process.poll() is not None:
return False
@@ -186,6 +193,7 @@ return True
def get_status(self) -> dict:
+ """Get current status"""
return {
"status": self.status,
"platform": self.current_config.platform.value if self.current_config else None,
@@ -195,6 +203,7 @@ }
def _build_command(self, config: CrawlerStartRequest) -> list:
+ """Build main.py command line arguments"""
cmd = ["uv", "run", "python", "main.py"]
cmd.extend(["--platform", config.platform.value])
@@ -224,6 +233,7 @@ return cmd
async def _read_output(self):
+ """Asynchronously read process output"""
loop = asyncio.get_event_loop()
try:
@@ -269,4 +279,4 @@
# Global singleton
-crawler_manager = CrawlerManager()+crawler_manager = CrawlerManager()
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/api/services/crawler_manager.py |
Generate docstrings with examples | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/base/base_crawler.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
from abc import ABC, abstractmethod
from typing import Dict, Optional
from playwright.async_api import BrowserContext, BrowserType, Playwright
class AbstractCrawler(ABC):
@abstractmethod
async def start(self):
pass
@abstractmethod
async def search(self):
pass
@abstractmethod
async def launch_browser(self, chromium: BrowserType, playwright_proxy: Optional[Dict], user_agent: Optional[str], headless: bool = True) -> BrowserContext:
pass
async def launch_browser_with_cdp(self, playwright: Playwright, playwright_proxy: Optional[Dict], user_agent: Optional[str], headless: bool = True) -> BrowserContext:
# Default implementation: fallback to standard mode
return await self.launch_browser(playwright.chromium, playwright_proxy, user_agent, headless)
class AbstractLogin(ABC):
@abstractmethod
async def begin(self):
pass
@abstractmethod
async def login_by_qrcode(self):
pass
@abstractmethod
async def login_by_mobile(self):
pass
@abstractmethod
async def login_by_cookies(self):
pass
class AbstractStore(ABC):
@abstractmethod
async def store_content(self, content_item: Dict):
pass
@abstractmethod
async def store_comment(self, comment_item: Dict):
pass
# TODO support all platform
# only xhs is supported, so @abstractmethod is commented
@abstractmethod
async def store_creator(self, creator: Dict):
pass
class AbstractStoreImage(ABC):
# TODO: support all platform
# only weibo is supported
# @abstractmethod
async def store_image(self, image_content_item: Dict):
pass
class AbstractStoreVideo(ABC):
# TODO: support all platform
# only weibo is supported
# @abstractmethod
async def store_video(self, video_content_item: Dict):
pass
class AbstractApiClient(ABC):
@abstractmethod
async def request(self, method, url, **kwargs):
pass
@abstractmethod
async def update_cookies(self, browser_context: BrowserContext):
pass | --- +++ @@ -27,17 +27,39 @@
@abstractmethod
async def start(self):
+ """
+ start crawler
+ """
pass
@abstractmethod
async def search(self):
+ """
+ search
+ """
pass
@abstractmethod
async def launch_browser(self, chromium: BrowserType, playwright_proxy: Optional[Dict], user_agent: Optional[str], headless: bool = True) -> BrowserContext:
+ """
+ launch browser
+ :param chromium: chromium browser
+ :param playwright_proxy: playwright proxy
+ :param user_agent: user agent
+ :param headless: headless mode
+ :return: browser context
+ """
pass
async def launch_browser_with_cdp(self, playwright: Playwright, playwright_proxy: Optional[Dict], user_agent: Optional[str], headless: bool = True) -> BrowserContext:
+ """
+ Launch browser using CDP mode (optional implementation)
+ :param playwright: playwright instance
+ :param playwright_proxy: playwright proxy configuration
+ :param user_agent: user agent
+ :param headless: headless mode
+ :return: browser context
+ """
# Default implementation: fallback to standard mode
return await self.launch_browser(playwright.chromium, playwright_proxy, user_agent, headless)
@@ -102,4 +124,4 @@
@abstractmethod
async def update_cookies(self, browser_context: BrowserContext):
- pass+ pass
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/base/base_crawler.py |
Add docstrings to my Python code | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/tieba/client.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
import asyncio
import json
from typing import Any, Callable, Dict, List, Optional, Union
from urllib.parse import urlencode, quote
import requests
from playwright.async_api import BrowserContext, Page
from tenacity import RetryError, retry, stop_after_attempt, wait_fixed
import config
from base.base_crawler import AbstractApiClient
from model.m_baidu_tieba import TiebaComment, TiebaCreator, TiebaNote
from proxy.proxy_ip_pool import ProxyIpPool
from tools import utils
from .field import SearchNoteType, SearchSortType
from .help import TieBaExtractor
class BaiduTieBaClient(AbstractApiClient):
def __init__(
self,
timeout=10,
ip_pool=None,
default_ip_proxy=None,
headers: Dict[str, str] = None,
playwright_page: Optional[Page] = None,
):
self.ip_pool: Optional[ProxyIpPool] = ip_pool
self.timeout = timeout
# Use provided headers (including real browser UA) or default headers
self.headers = headers or {
"User-Agent": utils.get_user_agent(),
"Cookie": "",
}
self._host = "https://tieba.baidu.com"
self._page_extractor = TieBaExtractor()
self.default_ip_proxy = default_ip_proxy
self.playwright_page = playwright_page # Playwright page object
def _sync_request(self, method, url, proxy=None, **kwargs):
# Construct proxy dictionary
proxies = None
if proxy:
proxies = {
"http": proxy,
"https": proxy,
}
# Send request
response = requests.request(
method=method,
url=url,
headers=self.headers,
proxies=proxies,
timeout=self.timeout,
**kwargs
)
return response
async def _refresh_proxy_if_expired(self) -> None:
if self.ip_pool is None:
return
if self.ip_pool.is_current_proxy_expired():
utils.logger.info(
"[BaiduTieBaClient._refresh_proxy_if_expired] Proxy expired, refreshing..."
)
new_proxy = await self.ip_pool.get_or_refresh_proxy()
# Update proxy URL
_, self.default_ip_proxy = utils.format_proxy_info(new_proxy)
utils.logger.info(
f"[BaiduTieBaClient._refresh_proxy_if_expired] New proxy: {new_proxy.ip}:{new_proxy.port}"
)
@retry(stop=stop_after_attempt(3), wait=wait_fixed(1))
async def request(self, method, url, return_ori_content=False, proxy=None, **kwargs) -> Union[str, Any]:
# Check if proxy is expired before each request
await self._refresh_proxy_if_expired()
actual_proxy = proxy if proxy else self.default_ip_proxy
# Execute synchronous requests in thread pool
response = await asyncio.to_thread(
self._sync_request,
method,
url,
actual_proxy,
**kwargs
)
if response.status_code != 200:
utils.logger.error(f"Request failed, method: {method}, url: {url}, status code: {response.status_code}")
utils.logger.error(f"Request failed, response: {response.text}")
raise Exception(f"Request failed, method: {method}, url: {url}, status code: {response.status_code}")
if response.text == "" or response.text == "blocked":
utils.logger.error(f"request params incorrect, response.text: {response.text}")
raise Exception("account blocked")
if return_ori_content:
return response.text
return response.json()
async def get(self, uri: str, params=None, return_ori_content=False, **kwargs) -> Any:
final_uri = uri
if isinstance(params, dict):
final_uri = (f"{uri}?"
f"{urlencode(params)}")
try:
res = await self.request(method="GET", url=f"{self._host}{final_uri}", return_ori_content=return_ori_content, **kwargs)
return res
except RetryError as e:
if self.ip_pool:
proxie_model = await self.ip_pool.get_proxy()
_, proxy = utils.format_proxy_info(proxie_model)
res = await self.request(method="GET", url=f"{self._host}{final_uri}", return_ori_content=return_ori_content, proxy=proxy, **kwargs)
self.default_ip_proxy = proxy
return res
utils.logger.error(f"[BaiduTieBaClient.get] Reached maximum retry attempts, IP is blocked, please try a new IP proxy: {e}")
raise Exception(f"[BaiduTieBaClient.get] Reached maximum retry attempts, IP is blocked, please try a new IP proxy: {e}")
async def post(self, uri: str, data: dict, **kwargs) -> Dict:
json_str = json.dumps(data, separators=(',', ':'), ensure_ascii=False)
return await self.request(method="POST", url=f"{self._host}{uri}", data=json_str, **kwargs)
async def pong(self, browser_context: BrowserContext = None) -> bool:
utils.logger.info("[BaiduTieBaClient.pong] Begin to check tieba login state by cookies...")
if not browser_context:
utils.logger.warning("[BaiduTieBaClient.pong] browser_context is None, assume not logged in")
return False
try:
# Get cookies from browser and check key login cookies
_, cookie_dict = utils.convert_cookies(await browser_context.cookies())
# Baidu Tieba login identifiers: STOKEN or PTOKEN
stoken = cookie_dict.get("STOKEN")
ptoken = cookie_dict.get("PTOKEN")
bduss = cookie_dict.get("BDUSS") # Baidu universal login cookie
if stoken or ptoken or bduss:
utils.logger.info(f"[BaiduTieBaClient.pong] Login state verified by cookies (STOKEN: {bool(stoken)}, PTOKEN: {bool(ptoken)}, BDUSS: {bool(bduss)})")
return True
else:
utils.logger.info("[BaiduTieBaClient.pong] No valid login cookies found, need to login")
return False
except Exception as e:
utils.logger.error(f"[BaiduTieBaClient.pong] Check login state failed: {e}, assume not logged in")
return False
async def update_cookies(self, browser_context: BrowserContext):
cookie_str, cookie_dict = utils.convert_cookies(await browser_context.cookies())
self.headers["Cookie"] = cookie_str
utils.logger.info("[BaiduTieBaClient.update_cookies] Cookie has been updated")
async def get_notes_by_keyword(
self,
keyword: str,
page: int = 1,
page_size: int = 10,
sort: SearchSortType = SearchSortType.TIME_DESC,
note_type: SearchNoteType = SearchNoteType.FIXED_THREAD,
) -> List[TiebaNote]:
if not self.playwright_page:
utils.logger.error("[BaiduTieBaClient.get_notes_by_keyword] playwright_page is None, cannot use browser mode")
raise Exception("playwright_page is required for browser-based search")
# Construct search URL
# Example: https://tieba.baidu.com/f/search/res?ie=utf-8&qw=keyword
search_url = f"{self._host}/f/search/res"
params = {
"ie": "utf-8",
"qw": keyword,
"rn": page_size,
"pn": page,
"sm": sort.value,
"only_thread": note_type.value,
}
# Concatenate full URL
full_url = f"{search_url}?{urlencode(params)}"
utils.logger.info(f"[BaiduTieBaClient.get_notes_by_keyword] Accessing search page: {full_url}")
try:
# Use Playwright to access search page
await self.playwright_page.goto(full_url, wait_until="domcontentloaded")
# Wait for page loading, using delay setting from config file
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
# Get page HTML content
page_content = await self.playwright_page.content()
utils.logger.info(f"[BaiduTieBaClient.get_notes_by_keyword] Successfully retrieved search page HTML, length: {len(page_content)}")
# Extract search results
notes = self._page_extractor.extract_search_note_list(page_content)
utils.logger.info(f"[BaiduTieBaClient.get_notes_by_keyword] Extracted {len(notes)} posts")
return notes
except Exception as e:
utils.logger.error(f"[BaiduTieBaClient.get_notes_by_keyword] Search failed: {e}")
raise
async def get_note_by_id(self, note_id: str) -> TiebaNote:
if not self.playwright_page:
utils.logger.error("[BaiduTieBaClient.get_note_by_id] playwright_page is None, cannot use browser mode")
raise Exception("playwright_page is required for browser-based note detail fetching")
# Construct post detail URL
note_url = f"{self._host}/p/{note_id}"
utils.logger.info(f"[BaiduTieBaClient.get_note_by_id] Accessing post detail page: {note_url}")
try:
# Use Playwright to access post detail page
await self.playwright_page.goto(note_url, wait_until="domcontentloaded")
# Wait for page loading, using delay setting from config file
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
# Get page HTML content
page_content = await self.playwright_page.content()
utils.logger.info(f"[BaiduTieBaClient.get_note_by_id] Successfully retrieved post detail HTML, length: {len(page_content)}")
# Extract post details
note_detail = self._page_extractor.extract_note_detail(page_content)
return note_detail
except Exception as e:
utils.logger.error(f"[BaiduTieBaClient.get_note_by_id] Failed to get post details: {e}")
raise
async def get_note_all_comments(
self,
note_detail: TiebaNote,
crawl_interval: float = 1.0,
callback: Optional[Callable] = None,
max_count: int = 10,
) -> List[TiebaComment]:
if not self.playwright_page:
utils.logger.error("[BaiduTieBaClient.get_note_all_comments] playwright_page is None, cannot use browser mode")
raise Exception("playwright_page is required for browser-based comment fetching")
result: List[TiebaComment] = []
current_page = 1
while note_detail.total_replay_page >= current_page and len(result) < max_count:
# Construct comment page URL
comment_url = f"{self._host}/p/{note_detail.note_id}?pn={current_page}"
utils.logger.info(f"[BaiduTieBaClient.get_note_all_comments] Accessing comment page: {comment_url}")
try:
# Use Playwright to access comment page
await self.playwright_page.goto(comment_url, wait_until="domcontentloaded")
# Wait for page loading, using delay setting from config file
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
# Get page HTML content
page_content = await self.playwright_page.content()
# Extract comments
comments = self._page_extractor.extract_tieba_note_parment_comments(
page_content, note_id=note_detail.note_id
)
if not comments:
utils.logger.info(f"[BaiduTieBaClient.get_note_all_comments] Page {current_page} has no comments, stopping crawl")
break
# Limit comment count
if len(result) + len(comments) > max_count:
comments = comments[:max_count - len(result)]
if callback:
await callback(note_detail.note_id, comments)
result.extend(comments)
# Get all sub-comments
await self.get_comments_all_sub_comments(
comments, crawl_interval=crawl_interval, callback=callback
)
await asyncio.sleep(crawl_interval)
current_page += 1
except Exception as e:
utils.logger.error(f"[BaiduTieBaClient.get_note_all_comments] Failed to get page {current_page} comments: {e}")
break
utils.logger.info(f"[BaiduTieBaClient.get_note_all_comments] Total retrieved {len(result)} first-level comments")
return result
async def get_comments_all_sub_comments(
self,
comments: List[TiebaComment],
crawl_interval: float = 1.0,
callback: Optional[Callable] = None,
) -> List[TiebaComment]:
if not config.ENABLE_GET_SUB_COMMENTS:
return []
if not self.playwright_page:
utils.logger.error("[BaiduTieBaClient.get_comments_all_sub_comments] playwright_page is None, cannot use browser mode")
raise Exception("playwright_page is required for browser-based sub-comment fetching")
all_sub_comments: List[TiebaComment] = []
for parment_comment in comments:
if parment_comment.sub_comment_count == 0:
continue
current_page = 1
max_sub_page_num = parment_comment.sub_comment_count // 10 + 1
while max_sub_page_num >= current_page:
# Construct sub-comment URL
sub_comment_url = (
f"{self._host}/p/comment?"
f"tid={parment_comment.note_id}&"
f"pid={parment_comment.comment_id}&"
f"fid={parment_comment.tieba_id}&"
f"pn={current_page}"
)
utils.logger.info(f"[BaiduTieBaClient.get_comments_all_sub_comments] Accessing sub-comment page: {sub_comment_url}")
try:
# Use Playwright to access sub-comment page
await self.playwright_page.goto(sub_comment_url, wait_until="domcontentloaded")
# Wait for page loading, using delay setting from config file
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
# Get page HTML content
page_content = await self.playwright_page.content()
# Extract sub-comments
sub_comments = self._page_extractor.extract_tieba_note_sub_comments(
page_content, parent_comment=parment_comment
)
if not sub_comments:
utils.logger.info(
f"[BaiduTieBaClient.get_comments_all_sub_comments] "
f"Comment {parment_comment.comment_id} page {current_page} has no sub-comments, stopping crawl"
)
break
if callback:
await callback(parment_comment.note_id, sub_comments)
all_sub_comments.extend(sub_comments)
await asyncio.sleep(crawl_interval)
current_page += 1
except Exception as e:
utils.logger.error(
f"[BaiduTieBaClient.get_comments_all_sub_comments] "
f"Failed to get comment {parment_comment.comment_id} page {current_page} sub-comments: {e}"
)
break
utils.logger.info(f"[BaiduTieBaClient.get_comments_all_sub_comments] Total retrieved {len(all_sub_comments)} sub-comments")
return all_sub_comments
async def get_notes_by_tieba_name(self, tieba_name: str, page_num: int) -> List[TiebaNote]:
if not self.playwright_page:
utils.logger.error("[BaiduTieBaClient.get_notes_by_tieba_name] playwright_page is None, cannot use browser mode")
raise Exception("playwright_page is required for browser-based tieba note fetching")
# Construct Tieba post list URL
tieba_url = f"{self._host}/f?kw={quote(tieba_name)}&pn={page_num}"
utils.logger.info(f"[BaiduTieBaClient.get_notes_by_tieba_name] Accessing Tieba page: {tieba_url}")
try:
# Use Playwright to access Tieba page
await self.playwright_page.goto(tieba_url, wait_until="domcontentloaded")
# Wait for page loading, using delay setting from config file
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
# Get page HTML content
page_content = await self.playwright_page.content()
utils.logger.info(f"[BaiduTieBaClient.get_notes_by_tieba_name] Successfully retrieved Tieba page HTML, length: {len(page_content)}")
# Extract post list
notes = self._page_extractor.extract_tieba_note_list(page_content)
utils.logger.info(f"[BaiduTieBaClient.get_notes_by_tieba_name] Extracted {len(notes)} posts")
return notes
except Exception as e:
utils.logger.error(f"[BaiduTieBaClient.get_notes_by_tieba_name] Failed to get Tieba post list: {e}")
raise
async def get_creator_info_by_url(self, creator_url: str) -> str:
if not self.playwright_page:
utils.logger.error("[BaiduTieBaClient.get_creator_info_by_url] playwright_page is None, cannot use browser mode")
raise Exception("playwright_page is required for browser-based creator info fetching")
utils.logger.info(f"[BaiduTieBaClient.get_creator_info_by_url] Accessing creator homepage: {creator_url}")
try:
# Use Playwright to access creator homepage
await self.playwright_page.goto(creator_url, wait_until="domcontentloaded")
# Wait for page loading, using delay setting from config file
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
# Get page HTML content
page_content = await self.playwright_page.content()
utils.logger.info(f"[BaiduTieBaClient.get_creator_info_by_url] Successfully retrieved creator homepage HTML, length: {len(page_content)}")
return page_content
except Exception as e:
utils.logger.error(f"[BaiduTieBaClient.get_creator_info_by_url] Failed to get creator homepage: {e}")
raise
async def get_notes_by_creator(self, user_name: str, page_number: int) -> Dict:
if not self.playwright_page:
utils.logger.error("[BaiduTieBaClient.get_notes_by_creator] playwright_page is None, cannot use browser mode")
raise Exception("playwright_page is required for browser-based creator notes fetching")
# Construct creator post list URL
creator_url = f"{self._host}/home/get/getthread?un={quote(user_name)}&pn={page_number}&id=utf-8&_={utils.get_current_timestamp()}"
utils.logger.info(f"[BaiduTieBaClient.get_notes_by_creator] Accessing creator post list: {creator_url}")
try:
# Use Playwright to access creator post list page
await self.playwright_page.goto(creator_url, wait_until="domcontentloaded")
# Wait for page loading, using delay setting from config file
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
# Get page content (this API returns JSON)
page_content = await self.playwright_page.content()
# Extract JSON data (page will contain <pre> tag or is directly JSON)
try:
# Try to extract JSON from page
json_text = await self.playwright_page.evaluate("() => document.body.innerText")
result = json.loads(json_text)
utils.logger.info(f"[BaiduTieBaClient.get_notes_by_creator] Successfully retrieved creator post data")
return result
except json.JSONDecodeError as e:
utils.logger.error(f"[BaiduTieBaClient.get_notes_by_creator] JSON parsing failed: {e}")
utils.logger.error(f"[BaiduTieBaClient.get_notes_by_creator] Page content: {page_content[:500]}")
raise Exception(f"Failed to parse JSON from creator notes page: {e}")
except Exception as e:
utils.logger.error(f"[BaiduTieBaClient.get_notes_by_creator] Failed to get creator post list: {e}")
raise
async def get_all_notes_by_creator_user_name(
self,
user_name: str,
crawl_interval: float = 1.0,
callback: Optional[Callable] = None,
max_note_count: int = 0,
creator_page_html_content: str = None,
) -> List[TiebaNote]:
# Baidu Tieba is special, the first 10 posts are directly displayed on the homepage and need special handling, cannot be obtained through API
result: List[TiebaNote] = []
if creator_page_html_content:
thread_id_list = (self._page_extractor.extract_tieba_thread_id_list_from_creator_page(creator_page_html_content))
utils.logger.info(f"[BaiduTieBaClient.get_all_notes_by_creator] got user_name:{user_name} thread_id_list len : {len(thread_id_list)}")
note_detail_task = [self.get_note_by_id(thread_id) for thread_id in thread_id_list]
notes = await asyncio.gather(*note_detail_task)
if callback:
await callback(notes)
result.extend(notes)
notes_has_more = 1
page_number = 1
page_per_count = 20
total_get_count = 0
while notes_has_more == 1 and (max_note_count == 0 or total_get_count < max_note_count):
notes_res = await self.get_notes_by_creator(user_name, page_number)
if not notes_res or notes_res.get("no") != 0:
utils.logger.error(f"[WeiboClient.get_notes_by_creator] got user_name:{user_name} notes failed, notes_res: {notes_res}")
break
notes_data = notes_res.get("data")
notes_has_more = notes_data.get("has_more")
notes = notes_data["thread_list"]
utils.logger.info(f"[WeiboClient.get_all_notes_by_creator] got user_name:{user_name} notes len : {len(notes)}")
note_detail_task = [self.get_note_by_id(note['thread_id']) for note in notes]
notes = await asyncio.gather(*note_detail_task)
if callback:
await callback(notes)
await asyncio.sleep(crawl_interval)
result.extend(notes)
page_number += 1
total_get_count += page_per_count
return result | --- +++ @@ -59,6 +59,17 @@ self.playwright_page = playwright_page # Playwright page object
def _sync_request(self, method, url, proxy=None, **kwargs):
+ """
+ Synchronous requests method
+ Args:
+ method: Request method
+ url: Request URL
+ proxy: Proxy IP
+ **kwargs: Other request parameters
+
+ Returns:
+ Response object
+ """
# Construct proxy dictionary
proxies = None
if proxy:
@@ -79,6 +90,9 @@ return response
async def _refresh_proxy_if_expired(self) -> None:
+ """
+ Check if proxy is expired and automatically refresh if necessary
+ """
if self.ip_pool is None:
return
@@ -95,6 +109,18 @@
@retry(stop=stop_after_attempt(3), wait=wait_fixed(1))
async def request(self, method, url, return_ori_content=False, proxy=None, **kwargs) -> Union[str, Any]:
+ """
+ Common request method wrapper for requests, handles request responses
+ Args:
+ method: Request method
+ url: Request URL
+ return_ori_content: Whether to return original content
+ proxy: Proxy IP
+ **kwargs: Other request parameters, such as headers, request body, etc.
+
+ Returns:
+
+ """
# Check if proxy is expired before each request
await self._refresh_proxy_if_expired()
@@ -124,6 +150,16 @@ return response.json()
async def get(self, uri: str, params=None, return_ori_content=False, **kwargs) -> Any:
+ """
+ GET request with header signing
+ Args:
+ uri: Request route
+ params: Request parameters
+ return_ori_content: Whether to return original content
+
+ Returns:
+
+ """
final_uri = uri
if isinstance(params, dict):
final_uri = (f"{uri}?"
@@ -143,10 +179,28 @@ raise Exception(f"[BaiduTieBaClient.get] Reached maximum retry attempts, IP is blocked, please try a new IP proxy: {e}")
async def post(self, uri: str, data: dict, **kwargs) -> Dict:
+ """
+ POST request with header signing
+ Args:
+ uri: Request route
+ data: Request body parameters
+
+ Returns:
+
+ """
json_str = json.dumps(data, separators=(',', ':'), ensure_ascii=False)
return await self.request(method="POST", url=f"{self._host}{uri}", data=json_str, **kwargs)
async def pong(self, browser_context: BrowserContext = None) -> bool:
+ """
+ Check if login state is still valid
+ Uses Cookie detection instead of API calls to avoid detection
+ Args:
+ browser_context: Browser context object
+
+ Returns:
+ bool: True if logged in, False if not logged in
+ """
utils.logger.info("[BaiduTieBaClient.pong] Begin to check tieba login state by cookies...")
if not browser_context:
@@ -174,6 +228,14 @@ return False
async def update_cookies(self, browser_context: BrowserContext):
+ """
+ Update cookies method provided by API client, usually called after successful login
+ Args:
+ browser_context: Browser context object
+
+ Returns:
+
+ """
cookie_str, cookie_dict = utils.convert_cookies(await browser_context.cookies())
self.headers["Cookie"] = cookie_str
utils.logger.info("[BaiduTieBaClient.update_cookies] Cookie has been updated")
@@ -186,6 +248,17 @@ sort: SearchSortType = SearchSortType.TIME_DESC,
note_type: SearchNoteType = SearchNoteType.FIXED_THREAD,
) -> List[TiebaNote]:
+ """
+ Search Tieba posts by keyword (uses Playwright to access page, avoiding API detection)
+ Args:
+ keyword: Keyword
+ page: Page number
+ page_size: Page size
+ sort: Result sort method
+ note_type: Post type (main thread | main thread + reply mixed mode)
+ Returns:
+
+ """
if not self.playwright_page:
utils.logger.error("[BaiduTieBaClient.get_notes_by_keyword] playwright_page is None, cannot use browser mode")
raise Exception("playwright_page is required for browser-based search")
@@ -227,6 +300,14 @@ raise
async def get_note_by_id(self, note_id: str) -> TiebaNote:
+ """
+ Get post details by post ID (uses Playwright to access page, avoiding API detection)
+ Args:
+ note_id: Post ID
+
+ Returns:
+ TiebaNote: Post detail object
+ """
if not self.playwright_page:
utils.logger.error("[BaiduTieBaClient.get_note_by_id] playwright_page is None, cannot use browser mode")
raise Exception("playwright_page is required for browser-based note detail fetching")
@@ -261,6 +342,16 @@ callback: Optional[Callable] = None,
max_count: int = 10,
) -> List[TiebaComment]:
+ """
+ Get all first-level comments for specified post (uses Playwright to access page, avoiding API detection)
+ Args:
+ note_detail: Post detail object
+ crawl_interval: Crawl delay interval in seconds
+ callback: Callback function after one post crawl completes
+ max_count: Maximum number of comments to crawl per post
+ Returns:
+ List[TiebaComment]: Comment list
+ """
if not self.playwright_page:
utils.logger.error("[BaiduTieBaClient.get_note_all_comments] playwright_page is None, cannot use browser mode")
raise Exception("playwright_page is required for browser-based comment fetching")
@@ -322,6 +413,16 @@ crawl_interval: float = 1.0,
callback: Optional[Callable] = None,
) -> List[TiebaComment]:
+ """
+ Get all sub-comments for specified comments (uses Playwright to access page, avoiding API detection)
+ Args:
+ comments: Comment list
+ crawl_interval: Crawl delay interval in seconds
+ callback: Callback function after one post crawl completes
+
+ Returns:
+ List[TiebaComment]: Sub-comment list
+ """
if not config.ENABLE_GET_SUB_COMMENTS:
return []
@@ -389,6 +490,15 @@ return all_sub_comments
async def get_notes_by_tieba_name(self, tieba_name: str, page_num: int) -> List[TiebaNote]:
+ """
+ Get post list by Tieba name (uses Playwright to access page, avoiding API detection)
+ Args:
+ tieba_name: Tieba name
+ page_num: Page number
+
+ Returns:
+ List[TiebaNote]: Post list
+ """
if not self.playwright_page:
utils.logger.error("[BaiduTieBaClient.get_notes_by_tieba_name] playwright_page is None, cannot use browser mode")
raise Exception("playwright_page is required for browser-based tieba note fetching")
@@ -418,6 +528,14 @@ raise
async def get_creator_info_by_url(self, creator_url: str) -> str:
+ """
+ Get creator information by creator URL (uses Playwright to access page, avoiding API detection)
+ Args:
+ creator_url: Creator homepage URL
+
+ Returns:
+ str: Page HTML content
+ """
if not self.playwright_page:
utils.logger.error("[BaiduTieBaClient.get_creator_info_by_url] playwright_page is None, cannot use browser mode")
raise Exception("playwright_page is required for browser-based creator info fetching")
@@ -442,6 +560,15 @@ raise
async def get_notes_by_creator(self, user_name: str, page_number: int) -> Dict:
+ """
+ Get creator's posts by creator (uses Playwright to access page, avoiding API detection)
+ Args:
+ user_name: Creator username
+ page_number: Page number
+
+ Returns:
+ Dict: Dictionary containing post data
+ """
if not self.playwright_page:
utils.logger.error("[BaiduTieBaClient.get_notes_by_creator] playwright_page is None, cannot use browser mode")
raise Exception("playwright_page is required for browser-based creator notes fetching")
@@ -484,6 +611,18 @@ max_note_count: int = 0,
creator_page_html_content: str = None,
) -> List[TiebaNote]:
+ """
+ Get all creator posts by creator username
+ Args:
+ user_name: Creator username
+ crawl_interval: Crawl delay interval in seconds
+ callback: Callback function after one post crawl completes, an awaitable function
+ max_note_count: Maximum number of posts to retrieve, if 0 then get all
+ creator_page_html_content: Creator homepage HTML content
+
+ Returns:
+
+ """
# Baidu Tieba is special, the first 10 posts are directly displayed on the homepage and need special handling, cannot be obtained through API
result: List[TiebaNote] = []
if creator_page_html_content:
@@ -517,4 +656,4 @@ result.extend(notes)
page_number += 1
total_get_count += page_per_count
- return result+ return result
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/tieba/client.py |
Add docstrings to my Python code | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/douyin/login.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
import asyncio
import functools
import sys
from typing import Optional
from playwright.async_api import BrowserContext, Page
from playwright.async_api import TimeoutError as PlaywrightTimeoutError
from tenacity import (RetryError, retry, retry_if_result, stop_after_attempt,
wait_fixed)
import config
from base.base_crawler import AbstractLogin
from cache.cache_factory import CacheFactory
from tools import utils
class DouYinLogin(AbstractLogin):
def __init__(self,
login_type: str,
browser_context: BrowserContext, # type: ignore
context_page: Page, # type: ignore
login_phone: Optional[str] = "",
cookie_str: Optional[str] = ""
):
config.LOGIN_TYPE = login_type
self.browser_context = browser_context
self.context_page = context_page
self.login_phone = login_phone
self.scan_qrcode_time = 60
self.cookie_str = cookie_str
async def begin(self):
# popup login dialog
await self.popup_login_dialog()
# select login type
if config.LOGIN_TYPE == "qrcode":
await self.login_by_qrcode()
elif config.LOGIN_TYPE == "phone":
await self.login_by_mobile()
elif config.LOGIN_TYPE == "cookie":
await self.login_by_cookies()
else:
raise ValueError("[DouYinLogin.begin] Invalid Login Type Currently only supported qrcode or phone or cookie ...")
# If the page redirects to the slider verification page, need to slide again
await asyncio.sleep(6)
current_page_title = await self.context_page.title()
if "验证码中间页" in current_page_title:
await self.check_page_display_slider(move_step=3, slider_level="hard")
# check login state
utils.logger.info(f"[DouYinLogin.begin] login finished then check login state ...")
try:
await self.check_login_state()
except RetryError:
utils.logger.info("[DouYinLogin.begin] login failed please confirm ...")
sys.exit()
# wait for redirect
wait_redirect_seconds = 5
utils.logger.info(f"[DouYinLogin.begin] Login successful then wait for {wait_redirect_seconds} seconds redirect ...")
await asyncio.sleep(wait_redirect_seconds)
@retry(stop=stop_after_attempt(600), wait=wait_fixed(1), retry=retry_if_result(lambda value: value is False))
async def check_login_state(self):
current_cookie = await self.browser_context.cookies()
_, cookie_dict = utils.convert_cookies(current_cookie)
for page in self.browser_context.pages:
try:
local_storage = await page.evaluate("() => window.localStorage")
if local_storage.get("HasUserLogin", "") == "1":
return True
except Exception as e:
# utils.logger.warn(f"[DouYinLogin] check_login_state waring: {e}")
await asyncio.sleep(0.1)
if cookie_dict.get("LOGIN_STATUS") == "1":
return True
return False
async def popup_login_dialog(self):
dialog_selector = "xpath=//div[@id='login-panel-new']"
try:
# check dialog box is auto popup and wait for 10 seconds
await self.context_page.wait_for_selector(dialog_selector, timeout=1000 * 10)
except Exception as e:
utils.logger.error(f"[DouYinLogin.popup_login_dialog] login dialog box does not pop up automatically, error: {e}")
utils.logger.info("[DouYinLogin.popup_login_dialog] login dialog box does not pop up automatically, we will manually click the login button")
login_button_ele = self.context_page.locator("xpath=//p[text() = '登录']")
await login_button_ele.click()
await asyncio.sleep(0.5)
async def login_by_qrcode(self):
utils.logger.info("[DouYinLogin.login_by_qrcode] Begin login douyin by qrcode...")
qrcode_img_selector = "xpath=//div[@id='animate_qrcode_container']//img"
base64_qrcode_img = await utils.find_login_qrcode(
self.context_page,
selector=qrcode_img_selector
)
if not base64_qrcode_img:
utils.logger.info("[DouYinLogin.login_by_qrcode] login qrcode not found please confirm ...")
sys.exit()
partial_show_qrcode = functools.partial(utils.show_qrcode, base64_qrcode_img)
asyncio.get_running_loop().run_in_executor(executor=None, func=partial_show_qrcode)
await asyncio.sleep(2)
async def login_by_mobile(self):
utils.logger.info("[DouYinLogin.login_by_mobile] Begin login douyin by mobile ...")
mobile_tap_ele = self.context_page.locator("xpath=//li[text() = '验证码登录']")
await mobile_tap_ele.click()
await self.context_page.wait_for_selector("xpath=//article[@class='web-login-mobile-code']")
mobile_input_ele = self.context_page.locator("xpath=//input[@placeholder='手机号']")
await mobile_input_ele.fill(self.login_phone)
await asyncio.sleep(0.5)
send_sms_code_btn = self.context_page.locator("xpath=//span[text() = '获取验证码']")
await send_sms_code_btn.click()
# Check if there is slider verification
await self.check_page_display_slider(move_step=10, slider_level="easy")
cache_client = CacheFactory.create_cache(config.CACHE_TYPE_MEMORY)
max_get_sms_code_time = 60 * 2 # Maximum time to get verification code is 2 minutes
while max_get_sms_code_time > 0:
utils.logger.info(f"[DouYinLogin.login_by_mobile] get douyin sms code from redis remaining time {max_get_sms_code_time}s ...")
await asyncio.sleep(1)
sms_code_key = f"dy_{self.login_phone}"
sms_code_value = cache_client.get(sms_code_key)
if not sms_code_value:
max_get_sms_code_time -= 1
continue
sms_code_input_ele = self.context_page.locator("xpath=//input[@placeholder='请输入验证码']")
await sms_code_input_ele.fill(value=sms_code_value.decode())
await asyncio.sleep(0.5)
submit_btn_ele = self.context_page.locator("xpath=//button[@class='web-login-button']")
await submit_btn_ele.click() # Click login
# todo ... should also check the correctness of the verification code, it may be incorrect
break
async def check_page_display_slider(self, move_step: int = 10, slider_level: str = "easy"):
# Wait for slider verification to appear
back_selector = "#captcha-verify-image"
try:
await self.context_page.wait_for_selector(selector=back_selector, state="visible", timeout=30 * 1000)
except PlaywrightTimeoutError: # No slider verification, return directly
return
gap_selector = 'xpath=//*[@id="captcha_container"]/div/div[2]/img[2]'
max_slider_try_times = 20
slider_verify_success = False
while not slider_verify_success:
if max_slider_try_times <= 0:
utils.logger.error("[DouYinLogin.check_page_display_slider] slider verify failed ...")
sys.exit()
try:
await self.move_slider(back_selector, gap_selector, move_step, slider_level)
await asyncio.sleep(1)
# If the slider is too slow or verification failed, it will prompt "The operation is too slow", click the refresh button here
page_content = await self.context_page.content()
if "操作过慢" in page_content or "提示重新操作" in page_content:
utils.logger.info("[DouYinLogin.check_page_display_slider] slider verify failed, retry ...")
await self.context_page.click(selector="//a[contains(@class, 'secsdk_captcha_refresh')]")
continue
# After successful sliding, wait for the slider to disappear
await self.context_page.wait_for_selector(selector=back_selector, state="hidden", timeout=1000)
# If the slider disappears, it means the verification is successful, break the loop. If not, it means the verification failed, the above line will throw an exception and be caught to continue the loop
utils.logger.info("[DouYinLogin.check_page_display_slider] slider verify success ...")
slider_verify_success = True
except Exception as e:
utils.logger.error(f"[DouYinLogin.check_page_display_slider] slider verify failed, error: {e}")
await asyncio.sleep(1)
max_slider_try_times -= 1
utils.logger.info(f"[DouYinLogin.check_page_display_slider] remaining slider try times: {max_slider_try_times}")
continue
async def move_slider(self, back_selector: str, gap_selector: str, move_step: int = 10, slider_level="easy"):
# get slider background image
slider_back_elements = await self.context_page.wait_for_selector(
selector=back_selector,
timeout=1000 * 10, # wait 10 seconds
)
slide_back = str(await slider_back_elements.get_property("src")) # type: ignore
# get slider gap image
gap_elements = await self.context_page.wait_for_selector(
selector=gap_selector,
timeout=1000 * 10, # wait 10 seconds
)
gap_src = str(await gap_elements.get_property("src")) # type: ignore
# Identify slider position
slide_app = utils.Slide(gap=gap_src, bg=slide_back)
distance = slide_app.discern()
# Get movement trajectory
tracks = utils.get_tracks(distance, slider_level)
new_1 = tracks[-1] - (sum(tracks) - distance)
tracks.pop()
tracks.append(new_1)
# Drag slider to specified position according to trajectory
element = await self.context_page.query_selector(gap_selector)
bounding_box = await element.bounding_box() # type: ignore
await self.context_page.mouse.move(bounding_box["x"] + bounding_box["width"] / 2, # type: ignore
bounding_box["y"] + bounding_box["height"] / 2) # type: ignore
# Get x coordinate center position
x = bounding_box["x"] + bounding_box["width"] / 2 # type: ignore
# Simulate sliding operation
await element.hover() # type: ignore
await self.context_page.mouse.down()
for track in tracks:
# Loop mouse movement according to trajectory
# steps controls the ratio of single movement speed, default is 1, meaning the distance moves in 0.1 seconds no matter how far, larger value means slower
await self.context_page.mouse.move(x + track, 0, steps=move_step)
x += track
await self.context_page.mouse.up()
async def login_by_cookies(self):
utils.logger.info("[DouYinLogin.login_by_cookies] Begin login douyin by cookie ...")
for key, value in utils.convert_str_cookie_to_dict(self.cookie_str).items():
await self.browser_context.add_cookies([{
'name': key,
'value': value,
'domain': ".douyin.com",
'path': "/"
}]) | --- +++ @@ -51,6 +51,10 @@ self.cookie_str = cookie_str
async def begin(self):
+ """
+ Start login douyin website
+ The verification accuracy of the slider verification is not very good... If there are no special requirements, it is recommended not to use Douyin login, or use cookie login
+ """
# popup login dialog
await self.popup_login_dialog()
@@ -86,6 +90,7 @@
@retry(stop=stop_after_attempt(600), wait=wait_fixed(1), retry=retry_if_result(lambda value: value is False))
async def check_login_state(self):
+ """Check if the current login status is successful and return True otherwise return False"""
current_cookie = await self.browser_context.cookies()
_, cookie_dict = utils.convert_cookies(current_cookie)
@@ -104,6 +109,7 @@ return False
async def popup_login_dialog(self):
+ """If the login dialog box does not pop up automatically, we will manually click the login button"""
dialog_selector = "xpath=//div[@id='login-panel-new']"
try:
# check dialog box is auto popup and wait for 10 seconds
@@ -163,6 +169,10 @@ break
async def check_page_display_slider(self, move_step: int = 10, slider_level: str = "easy"):
+ """
+ Check if slider verification appears on the page
+ :return:
+ """
# Wait for slider verification to appear
back_selector = "#captcha-verify-image"
try:
@@ -201,6 +211,14 @@ continue
async def move_slider(self, back_selector: str, gap_selector: str, move_step: int = 10, slider_level="easy"):
+ """
+ Move the slider to the right to complete the verification
+ :param back_selector: Selector for the slider verification background image
+ :param gap_selector: Selector for the slider verification slider
+ :param move_step: Controls the ratio of single movement speed, default is 1, meaning the distance moves in 0.1 seconds no matter how far, larger value means slower
+ :param slider_level: Slider difficulty easy hard, corresponding to the slider for mobile verification code and the slider in the middle of verification code
+ :return:
+ """
# get slider background image
slider_back_elements = await self.context_page.wait_for_selector(
@@ -253,4 +271,4 @@ 'value': value,
'domain': ".douyin.com",
'path': "/"
- }])+ }])
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/douyin/login.py |
Write reusable docstrings | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/xhs/core.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
import asyncio
import os
import random
from asyncio import Task
from typing import Dict, List, Optional
from playwright.async_api import (
BrowserContext,
BrowserType,
Page,
Playwright,
async_playwright,
)
from tenacity import RetryError
import config
from base.base_crawler import AbstractCrawler
from model.m_xiaohongshu import NoteUrlInfo, CreatorUrlInfo
from proxy.proxy_ip_pool import IpInfoModel, create_ip_pool
from store import xhs as xhs_store
from tools import utils
from tools.cdp_browser import CDPBrowserManager
from var import crawler_type_var, source_keyword_var
from .client import XiaoHongShuClient
from .exception import DataFetchError, NoteNotFoundError
from .field import SearchSortType
from .help import parse_note_info_from_note_url, parse_creator_info_from_url, get_search_id
from .login import XiaoHongShuLogin
class XiaoHongShuCrawler(AbstractCrawler):
context_page: Page
xhs_client: XiaoHongShuClient
browser_context: BrowserContext
cdp_manager: Optional[CDPBrowserManager]
def __init__(self) -> None:
self.index_url = "https://www.xiaohongshu.com"
# self.user_agent = utils.get_user_agent()
self.user_agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
self.cdp_manager = None
self.ip_proxy_pool = None # Proxy IP pool for automatic proxy refresh
async def start(self) -> None:
playwright_proxy_format, httpx_proxy_format = None, None
if config.ENABLE_IP_PROXY:
self.ip_proxy_pool = await create_ip_pool(config.IP_PROXY_POOL_COUNT, enable_validate_ip=True)
ip_proxy_info: IpInfoModel = await self.ip_proxy_pool.get_proxy()
playwright_proxy_format, httpx_proxy_format = utils.format_proxy_info(ip_proxy_info)
async with async_playwright() as playwright:
# Choose launch mode based on configuration
if config.ENABLE_CDP_MODE:
utils.logger.info("[XiaoHongShuCrawler] Launching browser using CDP mode")
self.browser_context = await self.launch_browser_with_cdp(
playwright,
playwright_proxy_format,
self.user_agent,
headless=config.CDP_HEADLESS,
)
else:
utils.logger.info("[XiaoHongShuCrawler] Launching browser using standard mode")
# Launch a browser context.
chromium = playwright.chromium
self.browser_context = await self.launch_browser(
chromium,
playwright_proxy_format,
self.user_agent,
headless=config.HEADLESS,
)
# stealth.min.js is a js script to prevent the website from detecting the crawler.
await self.browser_context.add_init_script(path="libs/stealth.min.js")
self.context_page = await self.browser_context.new_page()
await self.context_page.goto(self.index_url)
# Create a client to interact with the Xiaohongshu website.
self.xhs_client = await self.create_xhs_client(httpx_proxy_format)
if not await self.xhs_client.pong():
login_obj = XiaoHongShuLogin(
login_type=config.LOGIN_TYPE,
login_phone="", # input your phone number
browser_context=self.browser_context,
context_page=self.context_page,
cookie_str=config.COOKIES,
)
await login_obj.begin()
await self.xhs_client.update_cookies(browser_context=self.browser_context)
crawler_type_var.set(config.CRAWLER_TYPE)
if config.CRAWLER_TYPE == "search":
# Search for notes and retrieve their comment information.
await self.search()
elif config.CRAWLER_TYPE == "detail":
# Get the information and comments of the specified post
await self.get_specified_notes()
elif config.CRAWLER_TYPE == "creator":
# Get creator's information and their notes and comments
await self.get_creators_and_notes()
else:
pass
utils.logger.info("[XiaoHongShuCrawler.start] Xhs Crawler finished ...")
async def search(self) -> None:
utils.logger.info("[XiaoHongShuCrawler.search] Begin search Xiaohongshu keywords")
xhs_limit_count = 20 # Xiaohongshu limit page fixed value
if config.CRAWLER_MAX_NOTES_COUNT < xhs_limit_count:
config.CRAWLER_MAX_NOTES_COUNT = xhs_limit_count
start_page = config.START_PAGE
for keyword in config.KEYWORDS.split(","):
source_keyword_var.set(keyword)
utils.logger.info(f"[XiaoHongShuCrawler.search] Current search keyword: {keyword}")
page = 1
search_id = get_search_id()
while (page - start_page + 1) * xhs_limit_count <= config.CRAWLER_MAX_NOTES_COUNT:
if page < start_page:
utils.logger.info(f"[XiaoHongShuCrawler.search] Skip page {page}")
page += 1
continue
try:
utils.logger.info(f"[XiaoHongShuCrawler.search] search Xiaohongshu keyword: {keyword}, page: {page}")
note_ids: List[str] = []
xsec_tokens: List[str] = []
notes_res = await self.xhs_client.get_note_by_keyword(
keyword=keyword,
search_id=search_id,
page=page,
sort=(SearchSortType(config.SORT_TYPE) if config.SORT_TYPE != "" else SearchSortType.GENERAL),
)
utils.logger.info(f"[XiaoHongShuCrawler.search] Search notes response: {notes_res}")
if not notes_res or not notes_res.get("has_more", False):
utils.logger.info("[XiaoHongShuCrawler.search] No more content!")
break
semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
task_list = [
self.get_note_detail_async_task(
note_id=post_item.get("id"),
xsec_source=post_item.get("xsec_source"),
xsec_token=post_item.get("xsec_token"),
semaphore=semaphore,
) for post_item in notes_res.get("items", {}) if post_item.get("model_type") not in ("rec_query", "hot_query")
]
note_details = await asyncio.gather(*task_list)
for note_detail in note_details:
if note_detail:
await xhs_store.update_xhs_note(note_detail)
await self.get_notice_media(note_detail)
note_ids.append(note_detail.get("note_id"))
xsec_tokens.append(note_detail.get("xsec_token"))
page += 1
utils.logger.info(f"[XiaoHongShuCrawler.search] Note details: {note_details}")
await self.batch_get_note_comments(note_ids, xsec_tokens)
# Sleep after each page navigation
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[XiaoHongShuCrawler.search] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after page {page-1}")
except DataFetchError:
utils.logger.error("[XiaoHongShuCrawler.search] Get note detail error")
break
async def get_creators_and_notes(self) -> None:
utils.logger.info("[XiaoHongShuCrawler.get_creators_and_notes] Begin get Xiaohongshu creators")
for creator_url in config.XHS_CREATOR_ID_LIST:
try:
# Parse creator URL to get user_id and security tokens
creator_info: CreatorUrlInfo = parse_creator_info_from_url(creator_url)
utils.logger.info(f"[XiaoHongShuCrawler.get_creators_and_notes] Parse creator URL info: {creator_info}")
user_id = creator_info.user_id
# get creator detail info from web html content
createor_info: Dict = await self.xhs_client.get_creator_info(
user_id=user_id,
xsec_token=creator_info.xsec_token,
xsec_source=creator_info.xsec_source
)
if createor_info:
await xhs_store.save_creator(user_id, creator=createor_info)
except ValueError as e:
utils.logger.error(f"[XiaoHongShuCrawler.get_creators_and_notes] Failed to parse creator URL: {e}")
continue
# Use fixed crawling interval
crawl_interval = config.CRAWLER_MAX_SLEEP_SEC
# Get all note information of the creator
all_notes_list = await self.xhs_client.get_all_notes_by_creator(
user_id=user_id,
crawl_interval=crawl_interval,
callback=self.fetch_creator_notes_detail,
xsec_token=creator_info.xsec_token,
xsec_source=creator_info.xsec_source,
)
note_ids = []
xsec_tokens = []
for note_item in all_notes_list:
note_ids.append(note_item.get("note_id"))
xsec_tokens.append(note_item.get("xsec_token"))
await self.batch_get_note_comments(note_ids, xsec_tokens)
async def fetch_creator_notes_detail(self, note_list: List[Dict]):
semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
task_list = [
self.get_note_detail_async_task(
note_id=post_item.get("note_id"),
xsec_source=post_item.get("xsec_source"),
xsec_token=post_item.get("xsec_token"),
semaphore=semaphore,
) for post_item in note_list
]
note_details = await asyncio.gather(*task_list)
for note_detail in note_details:
if note_detail:
await xhs_store.update_xhs_note(note_detail)
await self.get_notice_media(note_detail)
async def get_specified_notes(self):
get_note_detail_task_list = []
for full_note_url in config.XHS_SPECIFIED_NOTE_URL_LIST:
note_url_info: NoteUrlInfo = parse_note_info_from_note_url(full_note_url)
utils.logger.info(f"[XiaoHongShuCrawler.get_specified_notes] Parse note url info: {note_url_info}")
crawler_task = self.get_note_detail_async_task(
note_id=note_url_info.note_id,
xsec_source=note_url_info.xsec_source,
xsec_token=note_url_info.xsec_token,
semaphore=asyncio.Semaphore(config.MAX_CONCURRENCY_NUM),
)
get_note_detail_task_list.append(crawler_task)
need_get_comment_note_ids = []
xsec_tokens = []
note_details = await asyncio.gather(*get_note_detail_task_list)
for note_detail in note_details:
if note_detail:
need_get_comment_note_ids.append(note_detail.get("note_id", ""))
xsec_tokens.append(note_detail.get("xsec_token", ""))
await xhs_store.update_xhs_note(note_detail)
await self.get_notice_media(note_detail)
await self.batch_get_note_comments(need_get_comment_note_ids, xsec_tokens)
async def get_note_detail_async_task(
self,
note_id: str,
xsec_source: str,
xsec_token: str,
semaphore: asyncio.Semaphore,
) -> Optional[Dict]:
note_detail = None
utils.logger.info(f"[get_note_detail_async_task] Begin get note detail, note_id: {note_id}")
async with semaphore:
try:
try:
note_detail = await self.xhs_client.get_note_by_id(note_id, xsec_source, xsec_token)
except RetryError:
pass
if not note_detail:
note_detail = await self.xhs_client.get_note_by_id_from_html(note_id, xsec_source, xsec_token,
enable_cookie=True)
if not note_detail:
raise Exception(f"[get_note_detail_async_task] Failed to get note detail, Id: {note_id}")
note_detail.update({"xsec_token": xsec_token, "xsec_source": xsec_source})
# Sleep after fetching note detail
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[get_note_detail_async_task] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after fetching note {note_id}")
return note_detail
except NoteNotFoundError as ex:
utils.logger.warning(f"[XiaoHongShuCrawler.get_note_detail_async_task] Note not found: {note_id}, {ex}")
return None
except DataFetchError as ex:
utils.logger.error(f"[XiaoHongShuCrawler.get_note_detail_async_task] Get note detail error: {ex}")
return None
except KeyError as ex:
utils.logger.error(f"[XiaoHongShuCrawler.get_note_detail_async_task] have not fund note detail note_id:{note_id}, err: {ex}")
return None
async def batch_get_note_comments(self, note_list: List[str], xsec_tokens: List[str]):
if not config.ENABLE_GET_COMMENTS:
utils.logger.info(f"[XiaoHongShuCrawler.batch_get_note_comments] Crawling comment mode is not enabled")
return
utils.logger.info(f"[XiaoHongShuCrawler.batch_get_note_comments] Begin batch get note comments, note list: {note_list}")
semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
task_list: List[Task] = []
for index, note_id in enumerate(note_list):
task = asyncio.create_task(
self.get_comments(note_id=note_id, xsec_token=xsec_tokens[index], semaphore=semaphore),
name=note_id,
)
task_list.append(task)
await asyncio.gather(*task_list)
async def get_comments(self, note_id: str, xsec_token: str, semaphore: asyncio.Semaphore):
async with semaphore:
utils.logger.info(f"[XiaoHongShuCrawler.get_comments] Begin get note id comments {note_id}")
# Use fixed crawling interval
crawl_interval = config.CRAWLER_MAX_SLEEP_SEC
await self.xhs_client.get_note_all_comments(
note_id=note_id,
xsec_token=xsec_token,
crawl_interval=crawl_interval,
callback=xhs_store.batch_update_xhs_note_comments,
max_count=config.CRAWLER_MAX_COMMENTS_COUNT_SINGLENOTES,
)
# Sleep after fetching comments
await asyncio.sleep(crawl_interval)
utils.logger.info(f"[XiaoHongShuCrawler.get_comments] Sleeping for {crawl_interval} seconds after fetching comments for note {note_id}")
async def create_xhs_client(self, httpx_proxy: Optional[str]) -> XiaoHongShuClient:
utils.logger.info("[XiaoHongShuCrawler.create_xhs_client] Begin create Xiaohongshu API client ...")
cookie_str, cookie_dict = utils.convert_cookies(await self.browser_context.cookies())
xhs_client_obj = XiaoHongShuClient(
proxy=httpx_proxy,
headers={
"accept": "application/json, text/plain, */*",
"accept-language": "zh-CN,zh;q=0.9",
"cache-control": "no-cache",
"content-type": "application/json;charset=UTF-8",
"origin": "https://www.xiaohongshu.com",
"pragma": "no-cache",
"priority": "u=1, i",
"referer": "https://www.xiaohongshu.com/",
"sec-ch-ua": '"Chromium";v="136", "Google Chrome";v="136", "Not.A/Brand";v="99"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Windows"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36",
"Cookie": cookie_str,
},
playwright_page=self.context_page,
cookie_dict=cookie_dict,
proxy_ip_pool=self.ip_proxy_pool, # Pass proxy pool for automatic refresh
)
return xhs_client_obj
async def launch_browser(
self,
chromium: BrowserType,
playwright_proxy: Optional[Dict],
user_agent: Optional[str],
headless: bool = True,
) -> BrowserContext:
utils.logger.info("[XiaoHongShuCrawler.launch_browser] Begin create browser context ...")
if config.SAVE_LOGIN_STATE:
# feat issue #14
# we will save login state to avoid login every time
user_data_dir = os.path.join(os.getcwd(), "browser_data", config.USER_DATA_DIR % config.PLATFORM) # type: ignore
browser_context = await chromium.launch_persistent_context(
user_data_dir=user_data_dir,
accept_downloads=True,
headless=headless,
proxy=playwright_proxy, # type: ignore
viewport={
"width": 1920,
"height": 1080
},
user_agent=user_agent,
)
return browser_context
else:
browser = await chromium.launch(headless=headless, proxy=playwright_proxy) # type: ignore
browser_context = await browser.new_context(viewport={"width": 1920, "height": 1080}, user_agent=user_agent)
return browser_context
async def launch_browser_with_cdp(
self,
playwright: Playwright,
playwright_proxy: Optional[Dict],
user_agent: Optional[str],
headless: bool = True,
) -> BrowserContext:
try:
self.cdp_manager = CDPBrowserManager()
browser_context = await self.cdp_manager.launch_and_connect(
playwright=playwright,
playwright_proxy=playwright_proxy,
user_agent=user_agent,
headless=headless,
)
# Display browser information
browser_info = await self.cdp_manager.get_browser_info()
utils.logger.info(f"[XiaoHongShuCrawler] CDP browser info: {browser_info}")
return browser_context
except Exception as e:
utils.logger.error(f"[XiaoHongShuCrawler] CDP mode launch failed, falling back to standard mode: {e}")
# Fall back to standard mode
chromium = playwright.chromium
return await self.launch_browser(chromium, playwright_proxy, user_agent, headless)
async def close(self):
# Special handling if using CDP mode
if self.cdp_manager:
await self.cdp_manager.cleanup()
self.cdp_manager = None
else:
await self.browser_context.close()
utils.logger.info("[XiaoHongShuCrawler.close] Browser context closed ...")
async def get_notice_media(self, note_detail: Dict):
if not config.ENABLE_GET_MEIDAS:
utils.logger.info(f"[XiaoHongShuCrawler.get_notice_media] Crawling image mode is not enabled")
return
await self.get_note_images(note_detail)
await self.get_notice_video(note_detail)
async def get_note_images(self, note_item: Dict):
if not config.ENABLE_GET_MEIDAS:
return
note_id = note_item.get("note_id")
image_list: List[Dict] = note_item.get("image_list", [])
for img in image_list:
if img.get("url_default") != "":
img.update({"url": img.get("url_default")})
if not image_list:
return
picNum = 0
for pic in image_list:
url = pic.get("url")
if not url:
continue
content = await self.xhs_client.get_note_media(url)
await asyncio.sleep(random.random())
if content is None:
continue
extension_file_name = f"{picNum}.jpg"
picNum += 1
await xhs_store.update_xhs_note_image(note_id, content, extension_file_name)
async def get_notice_video(self, note_item: Dict):
if not config.ENABLE_GET_MEIDAS:
return
note_id = note_item.get("note_id")
videos = xhs_store.get_video_url_arr(note_item)
if not videos:
return
videoNum = 0
for url in videos:
content = await self.xhs_client.get_note_media(url)
await asyncio.sleep(random.random())
if content is None:
continue
extension_file_name = f"{videoNum}.mp4"
videoNum += 1
await xhs_store.update_xhs_note_video(note_id, content, extension_file_name) | --- +++ @@ -123,6 +123,7 @@ utils.logger.info("[XiaoHongShuCrawler.start] Xhs Crawler finished ...")
async def search(self) -> None:
+ """Search for notes and retrieve their comment information."""
utils.logger.info("[XiaoHongShuCrawler.search] Begin search Xiaohongshu keywords")
xhs_limit_count = 20 # Xiaohongshu limit page fixed value
if config.CRAWLER_MAX_NOTES_COUNT < xhs_limit_count:
@@ -181,6 +182,7 @@ break
async def get_creators_and_notes(self) -> None:
+ """Get creator's notes and retrieve their comment information."""
utils.logger.info("[XiaoHongShuCrawler.get_creators_and_notes] Begin get Xiaohongshu creators")
for creator_url in config.XHS_CREATOR_ID_LIST:
try:
@@ -220,6 +222,7 @@ await self.batch_get_note_comments(note_ids, xsec_tokens)
async def fetch_creator_notes_detail(self, note_list: List[Dict]):
+ """Concurrently obtain the specified post list and save the data"""
semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
task_list = [
self.get_note_detail_async_task(
@@ -237,6 +240,10 @@ await self.get_notice_media(note_detail)
async def get_specified_notes(self):
+ """Get the information and comments of the specified post
+
+ Note: Must specify note_id, xsec_source, xsec_token
+ """
get_note_detail_task_list = []
for full_note_url in config.XHS_SPECIFIED_NOTE_URL_LIST:
note_url_info: NoteUrlInfo = parse_note_info_from_note_url(full_note_url)
@@ -267,6 +274,17 @@ xsec_token: str,
semaphore: asyncio.Semaphore,
) -> Optional[Dict]:
+ """Get note detail
+
+ Args:
+ note_id:
+ xsec_source:
+ xsec_token:
+ semaphore:
+
+ Returns:
+ Dict: note detail
+ """
note_detail = None
utils.logger.info(f"[get_note_detail_async_task] Begin get note detail, note_id: {note_id}")
async with semaphore:
@@ -301,6 +319,7 @@ return None
async def batch_get_note_comments(self, note_list: List[str], xsec_tokens: List[str]):
+ """Batch get note comments"""
if not config.ENABLE_GET_COMMENTS:
utils.logger.info(f"[XiaoHongShuCrawler.batch_get_note_comments] Crawling comment mode is not enabled")
return
@@ -317,6 +336,7 @@ await asyncio.gather(*task_list)
async def get_comments(self, note_id: str, xsec_token: str, semaphore: asyncio.Semaphore):
+ """Get note comments with keyword filtering and quantity limitation"""
async with semaphore:
utils.logger.info(f"[XiaoHongShuCrawler.get_comments] Begin get note id comments {note_id}")
# Use fixed crawling interval
@@ -334,6 +354,7 @@ utils.logger.info(f"[XiaoHongShuCrawler.get_comments] Sleeping for {crawl_interval} seconds after fetching comments for note {note_id}")
async def create_xhs_client(self, httpx_proxy: Optional[str]) -> XiaoHongShuClient:
+ """Create Xiaohongshu client"""
utils.logger.info("[XiaoHongShuCrawler.create_xhs_client] Begin create Xiaohongshu API client ...")
cookie_str, cookie_dict = utils.convert_cookies(await self.browser_context.cookies())
xhs_client_obj = XiaoHongShuClient(
@@ -369,6 +390,7 @@ user_agent: Optional[str],
headless: bool = True,
) -> BrowserContext:
+ """Launch browser and create browser context"""
utils.logger.info("[XiaoHongShuCrawler.launch_browser] Begin create browser context ...")
if config.SAVE_LOGIN_STATE:
# feat issue #14
@@ -398,6 +420,7 @@ user_agent: Optional[str],
headless: bool = True,
) -> BrowserContext:
+ """Launch browser using CDP mode"""
try:
self.cdp_manager = CDPBrowserManager()
browser_context = await self.cdp_manager.launch_and_connect(
@@ -420,6 +443,7 @@ return await self.launch_browser(chromium, playwright_proxy, user_agent, headless)
async def close(self):
+ """Close browser context"""
# Special handling if using CDP mode
if self.cdp_manager:
await self.cdp_manager.cleanup()
@@ -436,6 +460,11 @@ await self.get_notice_video(note_detail)
async def get_note_images(self, note_item: Dict):
+ """Get note images. Please use get_notice_media
+
+ Args:
+ note_item: Note item dictionary
+ """
if not config.ENABLE_GET_MEIDAS:
return
note_id = note_item.get("note_id")
@@ -461,6 +490,11 @@ await xhs_store.update_xhs_note_image(note_id, content, extension_file_name)
async def get_notice_video(self, note_item: Dict):
+ """Get note videos. Please use get_notice_media
+
+ Args:
+ note_item: Note item dictionary
+ """
if not config.ENABLE_GET_MEIDAS:
return
note_id = note_item.get("note_id")
@@ -477,4 +511,4 @@ continue
extension_file_name = f"{videoNum}.mp4"
videoNum += 1
- await xhs_store.update_xhs_note_video(note_id, content, extension_file_name)+ await xhs_store.update_xhs_note_video(note_id, content, extension_file_name)
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/xhs/core.py |
Add detailed docstrings explaining each function | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/weibo/core.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : relakkes@gmail.com
# @Time : 2023/12/23 15:41
# @Desc : Weibo crawler main workflow code
import asyncio
import os
# import random # Removed as we now use fixed config.CRAWLER_MAX_SLEEP_SEC intervals
from asyncio import Task
from typing import Dict, List, Optional, Tuple
from playwright.async_api import (
BrowserContext,
BrowserType,
Page,
Playwright,
async_playwright,
)
import config
from base.base_crawler import AbstractCrawler
from proxy.proxy_ip_pool import IpInfoModel, create_ip_pool
from store import weibo as weibo_store
from tools import utils
from tools.cdp_browser import CDPBrowserManager
from var import crawler_type_var, source_keyword_var
from .client import WeiboClient
from .exception import DataFetchError
from .field import SearchType
from .help import filter_search_result_card
from .login import WeiboLogin
class WeiboCrawler(AbstractCrawler):
context_page: Page
wb_client: WeiboClient
browser_context: BrowserContext
cdp_manager: Optional[CDPBrowserManager]
def __init__(self):
self.index_url = "https://www.weibo.com"
self.mobile_index_url = "https://m.weibo.cn"
self.user_agent = utils.get_user_agent()
self.mobile_user_agent = utils.get_mobile_user_agent()
self.cdp_manager = None
self.ip_proxy_pool = None # Proxy IP pool for automatic proxy refresh
async def start(self):
playwright_proxy_format, httpx_proxy_format = None, None
if config.ENABLE_IP_PROXY:
self.ip_proxy_pool = await create_ip_pool(config.IP_PROXY_POOL_COUNT, enable_validate_ip=True)
ip_proxy_info: IpInfoModel = await self.ip_proxy_pool.get_proxy()
playwright_proxy_format, httpx_proxy_format = utils.format_proxy_info(ip_proxy_info)
async with async_playwright() as playwright:
# Select launch mode based on configuration
if config.ENABLE_CDP_MODE:
utils.logger.info("[WeiboCrawler] Launching browser with CDP mode")
self.browser_context = await self.launch_browser_with_cdp(
playwright,
playwright_proxy_format,
self.mobile_user_agent,
headless=config.CDP_HEADLESS,
)
else:
utils.logger.info("[WeiboCrawler] Launching browser with standard mode")
# Launch a browser context.
chromium = playwright.chromium
self.browser_context = await self.launch_browser(chromium, None, self.mobile_user_agent, headless=config.HEADLESS)
# stealth.min.js is a js script to prevent the website from detecting the crawler.
await self.browser_context.add_init_script(path="libs/stealth.min.js")
self.context_page = await self.browser_context.new_page()
await self.context_page.goto(self.index_url)
await asyncio.sleep(2)
# Create a client to interact with the xiaohongshu website.
self.wb_client = await self.create_weibo_client(httpx_proxy_format)
if not await self.wb_client.pong():
login_obj = WeiboLogin(
login_type=config.LOGIN_TYPE,
login_phone="", # your phone number
browser_context=self.browser_context,
context_page=self.context_page,
cookie_str=config.COOKIES,
)
await login_obj.begin()
# After successful login, redirect to mobile website and update mobile cookies
utils.logger.info("[WeiboCrawler.start] redirect weibo mobile homepage and update cookies on mobile platform")
await self.context_page.goto(self.mobile_index_url)
await asyncio.sleep(3)
# Only get mobile cookies to avoid confusion between PC and mobile cookies
await self.wb_client.update_cookies(
browser_context=self.browser_context,
urls=[self.mobile_index_url]
)
crawler_type_var.set(config.CRAWLER_TYPE)
if config.CRAWLER_TYPE == "search":
# Search for video and retrieve their comment information.
await self.search()
elif config.CRAWLER_TYPE == "detail":
# Get the information and comments of the specified post
await self.get_specified_notes()
elif config.CRAWLER_TYPE == "creator":
# Get creator's information and their notes and comments
await self.get_creators_and_notes()
else:
pass
utils.logger.info("[WeiboCrawler.start] Weibo Crawler finished ...")
async def search(self):
utils.logger.info("[WeiboCrawler.search] Begin search weibo keywords")
weibo_limit_count = 10 # weibo limit page fixed value
if config.CRAWLER_MAX_NOTES_COUNT < weibo_limit_count:
config.CRAWLER_MAX_NOTES_COUNT = weibo_limit_count
start_page = config.START_PAGE
# Set the search type based on the configuration for weibo
if config.WEIBO_SEARCH_TYPE == "default":
search_type = SearchType.DEFAULT
elif config.WEIBO_SEARCH_TYPE == "real_time":
search_type = SearchType.REAL_TIME
elif config.WEIBO_SEARCH_TYPE == "popular":
search_type = SearchType.POPULAR
elif config.WEIBO_SEARCH_TYPE == "video":
search_type = SearchType.VIDEO
else:
utils.logger.error(f"[WeiboCrawler.search] Invalid WEIBO_SEARCH_TYPE: {config.WEIBO_SEARCH_TYPE}")
return
for keyword in config.KEYWORDS.split(","):
source_keyword_var.set(keyword)
utils.logger.info(f"[WeiboCrawler.search] Current search keyword: {keyword}")
page = 1
while (page - start_page + 1) * weibo_limit_count <= config.CRAWLER_MAX_NOTES_COUNT:
if page < start_page:
utils.logger.info(f"[WeiboCrawler.search] Skip page: {page}")
page += 1
continue
utils.logger.info(f"[WeiboCrawler.search] search weibo keyword: {keyword}, page: {page}")
search_res = await self.wb_client.get_note_by_keyword(keyword=keyword, page=page, search_type=search_type)
note_id_list: List[str] = []
note_list = filter_search_result_card(search_res.get("cards"))
# If full text fetching is enabled, batch get full text of posts
note_list = await self.batch_get_notes_full_text(note_list)
for note_item in note_list:
if note_item:
mblog: Dict = note_item.get("mblog")
if mblog:
note_id_list.append(mblog.get("id"))
await weibo_store.update_weibo_note(note_item)
await self.get_note_images(mblog)
page += 1
# Sleep after page navigation
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[WeiboCrawler.search] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after page {page-1}")
await self.batch_get_notes_comments(note_id_list)
async def get_specified_notes(self):
semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
task_list = [self.get_note_info_task(note_id=note_id, semaphore=semaphore) for note_id in config.WEIBO_SPECIFIED_ID_LIST]
video_details = await asyncio.gather(*task_list)
for note_item in video_details:
if note_item:
await weibo_store.update_weibo_note(note_item)
await self.batch_get_notes_comments(config.WEIBO_SPECIFIED_ID_LIST)
async def get_note_info_task(self, note_id: str, semaphore: asyncio.Semaphore) -> Optional[Dict]:
async with semaphore:
try:
result = await self.wb_client.get_note_info_by_id(note_id)
# Sleep after fetching note details
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[WeiboCrawler.get_note_info_task] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after fetching note details {note_id}")
return result
except DataFetchError as ex:
utils.logger.error(f"[WeiboCrawler.get_note_info_task] Get note detail error: {ex}")
return None
except KeyError as ex:
utils.logger.error(f"[WeiboCrawler.get_note_info_task] have not fund note detail note_id:{note_id}, err: {ex}")
return None
async def batch_get_notes_comments(self, note_id_list: List[str]):
if not config.ENABLE_GET_COMMENTS:
utils.logger.info(f"[WeiboCrawler.batch_get_note_comments] Crawling comment mode is not enabled")
return
utils.logger.info(f"[WeiboCrawler.batch_get_notes_comments] note ids:{note_id_list}")
semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
task_list: List[Task] = []
for note_id in note_id_list:
task = asyncio.create_task(self.get_note_comments(note_id, semaphore), name=note_id)
task_list.append(task)
await asyncio.gather(*task_list)
async def get_note_comments(self, note_id: str, semaphore: asyncio.Semaphore):
async with semaphore:
try:
utils.logger.info(f"[WeiboCrawler.get_note_comments] begin get note_id: {note_id} comments ...")
# Sleep before fetching comments
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[WeiboCrawler.get_note_comments] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds before fetching comments for note {note_id}")
await self.wb_client.get_note_all_comments(
note_id=note_id,
crawl_interval=config.CRAWLER_MAX_SLEEP_SEC, # Use fixed interval instead of random
callback=weibo_store.batch_update_weibo_note_comments,
max_count=config.CRAWLER_MAX_COMMENTS_COUNT_SINGLENOTES,
)
except DataFetchError as ex:
utils.logger.error(f"[WeiboCrawler.get_note_comments] get note_id: {note_id} comment error: {ex}")
except Exception as e:
utils.logger.error(f"[WeiboCrawler.get_note_comments] may be been blocked, err:{e}")
async def get_note_images(self, mblog: Dict):
if not config.ENABLE_GET_MEIDAS:
utils.logger.info(f"[WeiboCrawler.get_note_images] Crawling image mode is not enabled")
return
pics: List = mblog.get("pics")
if not pics:
return
for pic in pics:
if isinstance(pic, str):
url = pic
pid = url.split("/")[-1].split(".")[0]
elif isinstance(pic, dict):
url = pic.get("url")
pid = pic.get("pid", "")
else:
continue
if not url:
continue
content = await self.wb_client.get_note_image(url)
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[WeiboCrawler.get_note_images] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after fetching image")
if content != None:
extension_file_name = url.split(".")[-1]
await weibo_store.update_weibo_note_image(pid, content, extension_file_name)
async def get_creators_and_notes(self) -> None:
utils.logger.info("[WeiboCrawler.get_creators_and_notes] Begin get weibo creators")
for user_id in config.WEIBO_CREATOR_ID_LIST:
createor_info_res: Dict = await self.wb_client.get_creator_info_by_id(creator_id=user_id)
if createor_info_res:
createor_info: Dict = createor_info_res.get("userInfo", {})
utils.logger.info(f"[WeiboCrawler.get_creators_and_notes] creator info: {createor_info}")
if not createor_info:
raise DataFetchError("Get creator info error")
await weibo_store.save_creator(user_id, user_info=createor_info)
# Create a wrapper callback to get full text before saving data
async def save_notes_with_full_text(note_list: List[Dict]):
# If full text fetching is enabled, batch get full text first
updated_note_list = await self.batch_get_notes_full_text(note_list)
await weibo_store.batch_update_weibo_notes(updated_note_list)
# Get all note information of the creator
all_notes_list = await self.wb_client.get_all_notes_by_creator_id(
creator_id=user_id,
container_id=f"107603{user_id}",
crawl_interval=0,
callback=save_notes_with_full_text,
)
note_ids = [note_item.get("mblog", {}).get("id") for note_item in all_notes_list if note_item.get("mblog", {}).get("id")]
await self.batch_get_notes_comments(note_ids)
else:
utils.logger.error(f"[WeiboCrawler.get_creators_and_notes] get creator info error, creator_id:{user_id}")
async def create_weibo_client(self, httpx_proxy: Optional[str]) -> WeiboClient:
utils.logger.info("[WeiboCrawler.create_weibo_client] Begin create weibo API client ...")
cookie_str, cookie_dict = utils.convert_cookies(await self.browser_context.cookies(urls=[self.mobile_index_url]))
weibo_client_obj = WeiboClient(
proxy=httpx_proxy,
headers={
"User-Agent": utils.get_mobile_user_agent(),
"Cookie": cookie_str,
"Origin": "https://m.weibo.cn",
"Referer": "https://m.weibo.cn",
"Content-Type": "application/json;charset=UTF-8",
},
playwright_page=self.context_page,
cookie_dict=cookie_dict,
proxy_ip_pool=self.ip_proxy_pool, # Pass proxy pool for automatic refresh
)
return weibo_client_obj
async def launch_browser(
self,
chromium: BrowserType,
playwright_proxy: Optional[Dict],
user_agent: Optional[str],
headless: bool = True,
) -> BrowserContext:
utils.logger.info("[WeiboCrawler.launch_browser] Begin create browser context ...")
if config.SAVE_LOGIN_STATE:
user_data_dir = os.path.join(os.getcwd(), "browser_data", config.USER_DATA_DIR % config.PLATFORM) # type: ignore
browser_context = await chromium.launch_persistent_context(
user_data_dir=user_data_dir,
accept_downloads=True,
headless=headless,
proxy=playwright_proxy, # type: ignore
viewport={
"width": 1920,
"height": 1080
},
user_agent=user_agent,
channel="chrome", # Use system's Chrome stable version
)
return browser_context
else:
browser = await chromium.launch(headless=headless, proxy=playwright_proxy, channel="chrome") # type: ignore
browser_context = await browser.new_context(viewport={"width": 1920, "height": 1080}, user_agent=user_agent)
return browser_context
async def launch_browser_with_cdp(
self,
playwright: Playwright,
playwright_proxy: Optional[Dict],
user_agent: Optional[str],
headless: bool = True,
) -> BrowserContext:
try:
self.cdp_manager = CDPBrowserManager()
browser_context = await self.cdp_manager.launch_and_connect(
playwright=playwright,
playwright_proxy=playwright_proxy,
user_agent=user_agent,
headless=headless,
)
# Display browser information
browser_info = await self.cdp_manager.get_browser_info()
utils.logger.info(f"[WeiboCrawler] CDP browser info: {browser_info}")
return browser_context
except Exception as e:
utils.logger.error(f"[WeiboCrawler] CDP mode startup failed, falling back to standard mode: {e}")
# Fallback to standard mode
chromium = playwright.chromium
return await self.launch_browser(chromium, playwright_proxy, user_agent, headless)
async def get_note_full_text(self, note_item: Dict) -> Dict:
if not config.ENABLE_WEIBO_FULL_TEXT:
return note_item
mblog = note_item.get("mblog", {})
if not mblog:
return note_item
# Check if it's a long text
is_long_text = mblog.get("isLongText", False)
if not is_long_text:
return note_item
note_id = mblog.get("id")
if not note_id:
return note_item
try:
utils.logger.info(f"[WeiboCrawler.get_note_full_text] Fetching full text for note: {note_id}")
full_note = await self.wb_client.get_note_info_by_id(note_id)
if full_note and full_note.get("mblog"):
# Replace original content with complete content
note_item["mblog"] = full_note["mblog"]
utils.logger.info(f"[WeiboCrawler.get_note_full_text] Successfully fetched full text for note: {note_id}")
# Sleep after request to avoid rate limiting
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
except DataFetchError as ex:
utils.logger.error(f"[WeiboCrawler.get_note_full_text] Failed to fetch full text for note {note_id}: {ex}")
except Exception as ex:
utils.logger.error(f"[WeiboCrawler.get_note_full_text] Unexpected error for note {note_id}: {ex}")
return note_item
async def batch_get_notes_full_text(self, note_list: List[Dict]) -> List[Dict]:
if not config.ENABLE_WEIBO_FULL_TEXT:
return note_list
result = []
for note_item in note_list:
updated_note = await self.get_note_full_text(note_item)
result.append(updated_note)
return result
async def close(self):
# Special handling if using CDP mode
if self.cdp_manager:
await self.cdp_manager.cleanup()
self.cdp_manager = None
else:
await self.browser_context.close()
utils.logger.info("[WeiboCrawler.close] Browser context closed ...") | --- +++ @@ -134,6 +134,10 @@ utils.logger.info("[WeiboCrawler.start] Weibo Crawler finished ...")
async def search(self):
+ """
+ search weibo note with keywords
+ :return:
+ """
utils.logger.info("[WeiboCrawler.search] Begin search weibo keywords")
weibo_limit_count = 10 # weibo limit page fixed value
if config.CRAWLER_MAX_NOTES_COUNT < weibo_limit_count:
@@ -185,6 +189,10 @@ await self.batch_get_notes_comments(note_id_list)
async def get_specified_notes(self):
+ """
+ get specified notes info
+ :return:
+ """
semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
task_list = [self.get_note_info_task(note_id=note_id, semaphore=semaphore) for note_id in config.WEIBO_SPECIFIED_ID_LIST]
video_details = await asyncio.gather(*task_list)
@@ -194,6 +202,12 @@ await self.batch_get_notes_comments(config.WEIBO_SPECIFIED_ID_LIST)
async def get_note_info_task(self, note_id: str, semaphore: asyncio.Semaphore) -> Optional[Dict]:
+ """
+ Get note detail task
+ :param note_id:
+ :param semaphore:
+ :return:
+ """
async with semaphore:
try:
result = await self.wb_client.get_note_info_by_id(note_id)
@@ -211,6 +225,11 @@ return None
async def batch_get_notes_comments(self, note_id_list: List[str]):
+ """
+ batch get notes comments
+ :param note_id_list:
+ :return:
+ """
if not config.ENABLE_GET_COMMENTS:
utils.logger.info(f"[WeiboCrawler.batch_get_note_comments] Crawling comment mode is not enabled")
return
@@ -224,6 +243,12 @@ await asyncio.gather(*task_list)
async def get_note_comments(self, note_id: str, semaphore: asyncio.Semaphore):
+ """
+ get comment for note id
+ :param note_id:
+ :param semaphore:
+ :return:
+ """
async with semaphore:
try:
utils.logger.info(f"[WeiboCrawler.get_note_comments] begin get note_id: {note_id} comments ...")
@@ -244,6 +269,11 @@ utils.logger.error(f"[WeiboCrawler.get_note_comments] may be been blocked, err:{e}")
async def get_note_images(self, mblog: Dict):
+ """
+ get note images
+ :param mblog:
+ :return:
+ """
if not config.ENABLE_GET_MEIDAS:
utils.logger.info(f"[WeiboCrawler.get_note_images] Crawling image mode is not enabled")
return
@@ -270,6 +300,11 @@ await weibo_store.update_weibo_note_image(pid, content, extension_file_name)
async def get_creators_and_notes(self) -> None:
+ """
+ Get creator's information and their notes and comments
+ Returns:
+
+ """
utils.logger.info("[WeiboCrawler.get_creators_and_notes] Begin get weibo creators")
for user_id in config.WEIBO_CREATOR_ID_LIST:
createor_info_res: Dict = await self.wb_client.get_creator_info_by_id(creator_id=user_id)
@@ -301,6 +336,7 @@ utils.logger.error(f"[WeiboCrawler.get_creators_and_notes] get creator info error, creator_id:{user_id}")
async def create_weibo_client(self, httpx_proxy: Optional[str]) -> WeiboClient:
+ """Create xhs client"""
utils.logger.info("[WeiboCrawler.create_weibo_client] Begin create weibo API client ...")
cookie_str, cookie_dict = utils.convert_cookies(await self.browser_context.cookies(urls=[self.mobile_index_url]))
weibo_client_obj = WeiboClient(
@@ -325,6 +361,7 @@ user_agent: Optional[str],
headless: bool = True,
) -> BrowserContext:
+ """Launch browser and create browser context"""
utils.logger.info("[WeiboCrawler.launch_browser] Begin create browser context ...")
if config.SAVE_LOGIN_STATE:
user_data_dir = os.path.join(os.getcwd(), "browser_data", config.USER_DATA_DIR % config.PLATFORM) # type: ignore
@@ -353,6 +390,9 @@ user_agent: Optional[str],
headless: bool = True,
) -> BrowserContext:
+ """
+ Launch browser with CDP mode
+ """
try:
self.cdp_manager = CDPBrowserManager()
browser_context = await self.cdp_manager.launch_and_connect(
@@ -375,6 +415,12 @@ return await self.launch_browser(chromium, playwright_proxy, user_agent, headless)
async def get_note_full_text(self, note_item: Dict) -> Dict:
+ """
+ Get full text content of a post
+ If the post content is truncated (isLongText=True), request the detail API to get complete content
+ :param note_item: Post data, contains mblog field
+ :return: Updated post data
+ """
if not config.ENABLE_WEIBO_FULL_TEXT:
return note_item
@@ -409,6 +455,11 @@ return note_item
async def batch_get_notes_full_text(self, note_list: List[Dict]) -> List[Dict]:
+ """
+ Batch get full text content of posts
+ :param note_list: List of posts
+ :return: Updated list of posts
+ """
if not config.ENABLE_WEIBO_FULL_TEXT:
return note_list
@@ -419,10 +470,11 @@ return result
async def close(self):
+ """Close browser context"""
# Special handling if using CDP mode
if self.cdp_manager:
await self.cdp_manager.cleanup()
self.cdp_manager = None
else:
await self.browser_context.close()
- utils.logger.info("[WeiboCrawler.close] Browser context closed ...")+ utils.logger.info("[WeiboCrawler.close] Browser context closed ...")
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/weibo/core.py |
Create documentation strings for testing functions | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/douyin/client.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
import asyncio
import copy
import json
import urllib.parse
from typing import TYPE_CHECKING, Any, Callable, Dict, Union, Optional
import httpx
from playwright.async_api import BrowserContext
from base.base_crawler import AbstractApiClient
from proxy.proxy_mixin import ProxyRefreshMixin
from tools import utils
from var import request_keyword_var
if TYPE_CHECKING:
from proxy.proxy_ip_pool import ProxyIpPool
from .exception import *
from .field import *
from .help import *
class DouYinClient(AbstractApiClient, ProxyRefreshMixin):
def __init__(
self,
timeout=60, # If the crawl media option is turned on, Douyin’s short videos will require a longer timeout.
proxy=None,
*,
headers: Dict,
playwright_page: Optional[Page],
cookie_dict: Dict,
proxy_ip_pool: Optional["ProxyIpPool"] = None,
):
self.proxy = proxy
self.timeout = timeout
self.headers = headers
self._host = "https://www.douyin.com"
self.playwright_page = playwright_page
self.cookie_dict = cookie_dict
# Initialize proxy pool (from ProxyRefreshMixin)
self.init_proxy_pool(proxy_ip_pool)
async def __process_req_params(
self,
uri: str,
params: Optional[Dict] = None,
headers: Optional[Dict] = None,
request_method="GET",
):
if not params:
return
headers = headers or self.headers
local_storage: Dict = await self.playwright_page.evaluate("() => window.localStorage") # type: ignore
common_params = {
"device_platform": "webapp",
"aid": "6383",
"channel": "channel_pc_web",
"version_code": "190600",
"version_name": "19.6.0",
"update_version_code": "170400",
"pc_client_type": "1",
"cookie_enabled": "true",
"browser_language": "zh-CN",
"browser_platform": "MacIntel",
"browser_name": "Chrome",
"browser_version": "125.0.0.0",
"browser_online": "true",
"engine_name": "Blink",
"os_name": "Mac OS",
"os_version": "10.15.7",
"cpu_core_num": "8",
"device_memory": "8",
"engine_version": "109.0",
"platform": "PC",
"screen_width": "2560",
"screen_height": "1440",
'effective_type': '4g',
"round_trip_time": "50",
"webid": get_web_id(),
"msToken": local_storage.get("xmst"),
}
params.update(common_params)
query_string = urllib.parse.urlencode(params)
# 20240927 a-bogus update (JS version)
post_data = {}
if request_method == "POST":
post_data = params
if "/v1/web/general/search" not in uri:
a_bogus = await get_a_bogus(uri, query_string, post_data, headers["User-Agent"], self.playwright_page)
params["a_bogus"] = a_bogus
async def request(self, method, url, **kwargs):
# Check whether the proxy has expired before each request
await self._refresh_proxy_if_expired()
async with httpx.AsyncClient(proxy=self.proxy) as client:
response = await client.request(method, url, timeout=self.timeout, **kwargs)
try:
if response.text == "" or response.text == "blocked":
utils.logger.error(f"request params incrr, response.text: {response.text}")
raise Exception("account blocked")
return response.json()
except Exception as e:
raise DataFetchError(f"{e}, {response.text}")
async def get(self, uri: str, params: Optional[Dict] = None, headers: Optional[Dict] = None):
await self.__process_req_params(uri, params, headers)
headers = headers or self.headers
return await self.request(method="GET", url=f"{self._host}{uri}", params=params, headers=headers)
async def post(self, uri: str, data: dict, headers: Optional[Dict] = None):
await self.__process_req_params(uri, data, headers)
headers = headers or self.headers
return await self.request(method="POST", url=f"{self._host}{uri}", data=data, headers=headers)
async def pong(self, browser_context: BrowserContext) -> bool:
local_storage = await self.playwright_page.evaluate("() => window.localStorage")
if local_storage.get("HasUserLogin", "") == "1":
return True
_, cookie_dict = utils.convert_cookies(await browser_context.cookies())
return cookie_dict.get("LOGIN_STATUS") == "1"
async def update_cookies(self, browser_context: BrowserContext):
cookie_str, cookie_dict = utils.convert_cookies(await browser_context.cookies())
self.headers["Cookie"] = cookie_str
self.cookie_dict = cookie_dict
async def search_info_by_keyword(
self,
keyword: str,
offset: int = 0,
search_channel: SearchChannelType = SearchChannelType.GENERAL,
sort_type: SearchSortType = SearchSortType.GENERAL,
publish_time: PublishTimeType = PublishTimeType.UNLIMITED,
search_id: str = "",
):
query_params = {
'search_channel': search_channel.value,
'enable_history': '1',
'keyword': keyword,
'search_source': 'tab_search',
'query_correct_type': '1',
'is_filter_search': '0',
'from_group_id': '7378810571505847586',
'offset': offset,
'count': '15',
'need_filter_settings': '1',
'list_type': 'multi',
'search_id': search_id,
}
if sort_type.value != SearchSortType.GENERAL.value or publish_time.value != PublishTimeType.UNLIMITED.value:
query_params["filter_selected"] = json.dumps({"sort_type": str(sort_type.value), "publish_time": str(publish_time.value)})
query_params["is_filter_search"] = 1
query_params["search_source"] = "tab_search"
referer_url = f"https://www.douyin.com/search/{keyword}?aid=f594bbd9-a0e2-4651-9319-ebe3cb6298c1&type=general"
headers = copy.copy(self.headers)
headers["Referer"] = urllib.parse.quote(referer_url, safe=':/')
return await self.get("/aweme/v1/web/general/search/single/", query_params, headers=headers)
async def get_video_by_id(self, aweme_id: str) -> Any:
params = {"aweme_id": aweme_id}
headers = copy.copy(self.headers)
del headers["Origin"]
res = await self.get("/aweme/v1/web/aweme/detail/", params, headers)
return res.get("aweme_detail", {})
async def get_aweme_comments(self, aweme_id: str, cursor: int = 0):
uri = "/aweme/v1/web/comment/list/"
params = {"aweme_id": aweme_id, "cursor": cursor, "count": 20, "item_type": 0}
keywords = request_keyword_var.get()
referer_url = "https://www.douyin.com/search/" + keywords + '?aid=3a3cec5a-9e27-4040-b6aa-ef548c2c1138&publish_time=0&sort_type=0&source=search_history&type=general'
headers = copy.copy(self.headers)
headers["Referer"] = urllib.parse.quote(referer_url, safe=':/')
return await self.get(uri, params)
async def get_sub_comments(self, aweme_id: str, comment_id: str, cursor: int = 0):
uri = "/aweme/v1/web/comment/list/reply/"
params = {
'comment_id': comment_id,
"cursor": cursor,
"count": 20,
"item_type": 0,
"item_id": aweme_id,
}
keywords = request_keyword_var.get()
referer_url = "https://www.douyin.com/search/" + keywords + '?aid=3a3cec5a-9e27-4040-b6aa-ef548c2c1138&publish_time=0&sort_type=0&source=search_history&type=general'
headers = copy.copy(self.headers)
headers["Referer"] = urllib.parse.quote(referer_url, safe=':/')
return await self.get(uri, params)
async def get_aweme_all_comments(
self,
aweme_id: str,
crawl_interval: float = 1.0,
is_fetch_sub_comments=False,
callback: Optional[Callable] = None,
max_count: int = 10,
):
result = []
comments_has_more = 1
comments_cursor = 0
while comments_has_more and len(result) < max_count:
comments_res = await self.get_aweme_comments(aweme_id, comments_cursor)
comments_has_more = comments_res.get("has_more", 0)
comments_cursor = comments_res.get("cursor", 0)
comments = comments_res.get("comments", [])
if not comments:
continue
if len(result) + len(comments) > max_count:
comments = comments[:max_count - len(result)]
result.extend(comments)
if callback: # If there is a callback function, execute the callback function
await callback(aweme_id, comments)
await asyncio.sleep(crawl_interval)
if not is_fetch_sub_comments:
continue
# Get secondary reviews
for comment in comments:
reply_comment_total = comment.get("reply_comment_total")
if reply_comment_total > 0:
comment_id = comment.get("cid")
sub_comments_has_more = 1
sub_comments_cursor = 0
while sub_comments_has_more:
sub_comments_res = await self.get_sub_comments(aweme_id, comment_id, sub_comments_cursor)
sub_comments_has_more = sub_comments_res.get("has_more", 0)
sub_comments_cursor = sub_comments_res.get("cursor", 0)
sub_comments = sub_comments_res.get("comments", [])
if not sub_comments:
continue
result.extend(sub_comments)
if callback: # If there is a callback function, execute the callback function
await callback(aweme_id, sub_comments)
await asyncio.sleep(crawl_interval)
return result
async def get_user_info(self, sec_user_id: str):
uri = "/aweme/v1/web/user/profile/other/"
params = {
"sec_user_id": sec_user_id,
"publish_video_strategy_type": 2,
"personal_center_strategy": 1,
}
return await self.get(uri, params)
async def get_user_aweme_posts(self, sec_user_id: str, max_cursor: str = "") -> Dict:
uri = "/aweme/v1/web/aweme/post/"
params = {
"sec_user_id": sec_user_id,
"count": 18,
"max_cursor": max_cursor,
"locate_query": "false",
"publish_video_strategy_type": 2,
'verifyFp': 'verify_ma3hrt8n_q2q2HyYA_uLyO_4N6D_BLvX_E2LgoGmkA1BU',
'fp': 'verify_ma3hrt8n_q2q2HyYA_uLyO_4N6D_BLvX_E2LgoGmkA1BU'
}
return await self.get(uri, params)
async def get_all_user_aweme_posts(self, sec_user_id: str, callback: Optional[Callable] = None):
posts_has_more = 1
max_cursor = ""
result = []
while posts_has_more == 1:
aweme_post_res = await self.get_user_aweme_posts(sec_user_id, max_cursor)
posts_has_more = aweme_post_res.get("has_more", 0)
max_cursor = aweme_post_res.get("max_cursor")
aweme_list = aweme_post_res.get("aweme_list") if aweme_post_res.get("aweme_list") else []
utils.logger.info(f"[DouYinClient.get_all_user_aweme_posts] get sec_user_id:{sec_user_id} video len : {len(aweme_list)}")
if callback:
await callback(aweme_list)
result.extend(aweme_list)
return result
async def get_aweme_media(self, url: str) -> Union[bytes, None]:
async with httpx.AsyncClient(proxy=self.proxy) as client:
try:
response = await client.request("GET", url, timeout=self.timeout, follow_redirects=True)
response.raise_for_status()
if not response.reason_phrase == "OK":
utils.logger.error(f"[DouYinClient.get_aweme_media] request {url} err, res:{response.text}")
return None
else:
return response.content
except httpx.HTTPError as exc: # some wrong when call httpx.request method, such as connection error, client error, server error or response status code is not 2xx
utils.logger.error(f"[DouYinClient.get_aweme_media] {exc.__class__.__name__} for {exc.request.url} - {exc}") # Keep the original exception type name for developers to debug
return None
async def resolve_short_url(self, short_url: str) -> str:
async with httpx.AsyncClient(proxy=self.proxy, follow_redirects=False) as client:
try:
utils.logger.info(f"[DouYinClient.resolve_short_url] Resolving short URL: {short_url}")
response = await client.get(short_url, timeout=10)
# Short links usually return a 302 redirect
if response.status_code in [301, 302, 303, 307, 308]:
redirect_url = response.headers.get("Location", "")
utils.logger.info(f"[DouYinClient.resolve_short_url] Resolved to: {redirect_url}")
return redirect_url
else:
utils.logger.warning(f"[DouYinClient.resolve_short_url] Unexpected status code: {response.status_code}")
return ""
except Exception as e:
utils.logger.error(f"[DouYinClient.resolve_short_url] Failed to resolve short URL: {e}")
return "" | --- +++ @@ -127,6 +127,9 @@ raise DataFetchError(f"{e}, {response.text}")
async def get(self, uri: str, params: Optional[Dict] = None, headers: Optional[Dict] = None):
+ """
+ GET请求
+ """
await self.__process_req_params(uri, params, headers)
headers = headers or self.headers
return await self.request(method="GET", url=f"{self._host}{uri}", params=params, headers=headers)
@@ -158,6 +161,16 @@ publish_time: PublishTimeType = PublishTimeType.UNLIMITED,
search_id: str = "",
):
+ """
+ DouYin Web Search API
+ :param keyword:
+ :param offset:
+ :param search_channel:
+ :param sort_type:
+ :param publish_time: ·
+ :param search_id: ·
+ :return:
+ """
query_params = {
'search_channel': search_channel.value,
'enable_history': '1',
@@ -182,6 +195,11 @@ return await self.get("/aweme/v1/web/general/search/single/", query_params, headers=headers)
async def get_video_by_id(self, aweme_id: str) -> Any:
+ """
+ DouYin Video Detail API
+ :param aweme_id:
+ :return:
+ """
params = {"aweme_id": aweme_id}
headers = copy.copy(self.headers)
del headers["Origin"]
@@ -189,6 +207,9 @@ return res.get("aweme_detail", {})
async def get_aweme_comments(self, aweme_id: str, cursor: int = 0):
+ """get note comments
+
+ """
uri = "/aweme/v1/web/comment/list/"
params = {"aweme_id": aweme_id, "cursor": cursor, "count": 20, "item_type": 0}
keywords = request_keyword_var.get()
@@ -198,6 +219,9 @@ return await self.get(uri, params)
async def get_sub_comments(self, aweme_id: str, comment_id: str, cursor: int = 0):
+ """
+ 获取子评论
+ """
uri = "/aweme/v1/web/comment/list/reply/"
params = {
'comment_id': comment_id,
@@ -220,6 +244,15 @@ callback: Optional[Callable] = None,
max_count: int = 10,
):
+ """
+ 获取帖子的所有评论,包括子评论
+ :param aweme_id: 帖子ID
+ :param crawl_interval: 抓取间隔
+ :param is_fetch_sub_comments: 是否抓取子评论
+ :param callback: 回调函数,用于处理抓取到的评论
+ :param max_count: 一次帖子爬取的最大评论数量
+ :return: 评论列表
+ """
result = []
comments_has_more = 1
comments_cursor = 0
@@ -314,6 +347,13 @@ return None
async def resolve_short_url(self, short_url: str) -> str:
+ """
+ 解析抖音短链接,获取重定向后的真实URL
+ Args:
+ short_url: 短链接,如 https://v.douyin.com/iF12345ABC/
+ Returns:
+ 重定向后的完整URL
+ """
async with httpx.AsyncClient(proxy=self.proxy, follow_redirects=False) as client:
try:
utils.logger.info(f"[DouYinClient.resolve_short_url] Resolving short URL: {short_url}")
@@ -329,4 +369,4 @@ return ""
except Exception as e:
utils.logger.error(f"[DouYinClient.resolve_short_url] Failed to resolve short URL: {e}")
- return ""+ return ""
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/douyin/client.py |
Add docstrings that explain inputs and outputs | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/kuaishou/help.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
import re
from model.m_kuaishou import VideoUrlInfo, CreatorUrlInfo
def parse_video_info_from_url(url: str) -> VideoUrlInfo:
# If it doesn't contain http and doesn't contain kuaishou.com, consider it as pure ID
if not url.startswith("http") and "kuaishou.com" not in url:
return VideoUrlInfo(video_id=url, url_type="normal")
# Extract ID from standard video URL: /short-video/video_ID
video_pattern = r'/short-video/([a-zA-Z0-9_-]+)'
match = re.search(video_pattern, url)
if match:
video_id = match.group(1)
return VideoUrlInfo(video_id=video_id, url_type="normal")
raise ValueError(f"Unable to parse video ID from URL: {url}")
def parse_creator_info_from_url(url: str) -> CreatorUrlInfo:
# If it doesn't contain http and doesn't contain kuaishou.com, consider it as pure ID
if not url.startswith("http") and "kuaishou.com" not in url:
return CreatorUrlInfo(user_id=url)
# Extract user_id from creator homepage URL: /profile/xxx
user_pattern = r'/profile/([a-zA-Z0-9_-]+)'
match = re.search(user_pattern, url)
if match:
user_id = match.group(1)
return CreatorUrlInfo(user_id=user_id)
raise ValueError(f"Unable to parse creator ID from URL: {url}")
if __name__ == '__main__':
# Test video URL parsing
print("=== Video URL Parsing Test ===")
test_video_urls = [
"https://www.kuaishou.com/short-video/3x3zxz4mjrsc8ke?authorId=3x84qugg4ch9zhs&streamSource=search&area=searchxxnull&searchKey=python",
"3xf8enb8dbj6uig",
]
for url in test_video_urls:
try:
result = parse_video_info_from_url(url)
print(f"✓ URL: {url[:80]}...")
print(f" Result: {result}\n")
except Exception as e:
print(f"✗ URL: {url}")
print(f" Error: {e}\n")
# Test creator URL parsing
print("=== Creator URL Parsing Test ===")
test_creator_urls = [
"https://www.kuaishou.com/profile/3x84qugg4ch9zhs",
"3x4sm73aye7jq7i",
]
for url in test_creator_urls:
try:
result = parse_creator_info_from_url(url)
print(f"✓ URL: {url[:80]}...")
print(f" Result: {result}\n")
except Exception as e:
print(f"✗ URL: {url}")
print(f" Error: {e}\n") | --- +++ @@ -25,6 +25,17 @@
def parse_video_info_from_url(url: str) -> VideoUrlInfo:
+ """
+ Parse video ID from Kuaishou video URL
+ Supports the following formats:
+ 1. Full video URL: "https://www.kuaishou.com/short-video/3x3zxz4mjrsc8ke?authorId=3x84qugg4ch9zhs&streamSource=search"
+ 2. Pure video ID: "3x3zxz4mjrsc8ke"
+
+ Args:
+ url: Kuaishou video link or video ID
+ Returns:
+ VideoUrlInfo: Object containing video ID
+ """
# If it doesn't contain http and doesn't contain kuaishou.com, consider it as pure ID
if not url.startswith("http") and "kuaishou.com" not in url:
return VideoUrlInfo(video_id=url, url_type="normal")
@@ -40,6 +51,17 @@
def parse_creator_info_from_url(url: str) -> CreatorUrlInfo:
+ """
+ Parse creator ID from Kuaishou creator homepage URL
+ Supports the following formats:
+ 1. Creator homepage: "https://www.kuaishou.com/profile/3x84qugg4ch9zhs"
+ 2. Pure ID: "3x4sm73aye7jq7i"
+
+ Args:
+ url: Kuaishou creator homepage link or user_id
+ Returns:
+ CreatorUrlInfo: Object containing creator ID
+ """
# If it doesn't contain http and doesn't contain kuaishou.com, consider it as pure ID
if not url.startswith("http") and "kuaishou.com" not in url:
return CreatorUrlInfo(user_id=url)
@@ -83,4 +105,4 @@ print(f" Result: {result}\n")
except Exception as e:
print(f"✗ URL: {url}")
- print(f" Error: {e}\n")+ print(f" Error: {e}\n")
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/kuaishou/help.py |
Add docstrings to meet PEP guidelines | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/bilibili/core.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : relakkes@gmail.com
# @Time : 2023/12/2 18:44
# @Desc : Bilibili Crawler
import asyncio
import os
# import random # Removed as we now use fixed config.CRAWLER_MAX_SLEEP_SEC intervals
from asyncio import Task
from typing import Dict, List, Optional, Tuple, Union
from datetime import datetime, timedelta
import pandas as pd
from playwright.async_api import (
BrowserContext,
BrowserType,
Page,
Playwright,
async_playwright,
)
from playwright._impl._errors import TargetClosedError
import config
from base.base_crawler import AbstractCrawler
from proxy.proxy_ip_pool import IpInfoModel, create_ip_pool
from store import bilibili as bilibili_store
from tools import utils
from tools.cdp_browser import CDPBrowserManager
from var import crawler_type_var, source_keyword_var
from .client import BilibiliClient
from .exception import DataFetchError
from .field import SearchOrderType
from .help import parse_video_info_from_url, parse_creator_info_from_url
from .login import BilibiliLogin
class BilibiliCrawler(AbstractCrawler):
context_page: Page
bili_client: BilibiliClient
browser_context: BrowserContext
cdp_manager: Optional[CDPBrowserManager]
def __init__(self):
self.index_url = "https://www.bilibili.com"
self.user_agent = utils.get_user_agent()
self.cdp_manager = None
self.ip_proxy_pool = None # Proxy IP pool for automatic proxy refresh
async def start(self):
playwright_proxy_format, httpx_proxy_format = None, None
if config.ENABLE_IP_PROXY:
self.ip_proxy_pool = await create_ip_pool(config.IP_PROXY_POOL_COUNT, enable_validate_ip=True)
ip_proxy_info: IpInfoModel = await self.ip_proxy_pool.get_proxy()
playwright_proxy_format, httpx_proxy_format = utils.format_proxy_info(ip_proxy_info)
async with async_playwright() as playwright:
# Choose launch mode based on configuration
if config.ENABLE_CDP_MODE:
utils.logger.info("[BilibiliCrawler] Launching browser using CDP mode")
self.browser_context = await self.launch_browser_with_cdp(
playwright,
playwright_proxy_format,
self.user_agent,
headless=config.CDP_HEADLESS,
)
else:
utils.logger.info("[BilibiliCrawler] Launching browser using standard mode")
# Launch a browser context.
chromium = playwright.chromium
self.browser_context = await self.launch_browser(chromium, None, self.user_agent, headless=config.HEADLESS)
# stealth.min.js is a js script to prevent the website from detecting the crawler.
await self.browser_context.add_init_script(path="libs/stealth.min.js")
self.context_page = await self.browser_context.new_page()
await self.context_page.goto(self.index_url)
# Create a client to interact with the xiaohongshu website.
self.bili_client = await self.create_bilibili_client(httpx_proxy_format)
if not await self.bili_client.pong():
login_obj = BilibiliLogin(
login_type=config.LOGIN_TYPE,
login_phone="", # your phone number
browser_context=self.browser_context,
context_page=self.context_page,
cookie_str=config.COOKIES,
)
await login_obj.begin()
await self.bili_client.update_cookies(browser_context=self.browser_context)
crawler_type_var.set(config.CRAWLER_TYPE)
if config.CRAWLER_TYPE == "search":
await self.search()
elif config.CRAWLER_TYPE == "detail":
# Get the information and comments of the specified post
await self.get_specified_videos(config.BILI_SPECIFIED_ID_LIST)
elif config.CRAWLER_TYPE == "creator":
if config.CREATOR_MODE:
for creator_url in config.BILI_CREATOR_ID_LIST:
try:
creator_info = parse_creator_info_from_url(creator_url)
utils.logger.info(f"[BilibiliCrawler.start] Parsed creator ID: {creator_info.creator_id} from {creator_url}")
await self.get_creator_videos(int(creator_info.creator_id))
except ValueError as e:
utils.logger.error(f"[BilibiliCrawler.start] Failed to parse creator URL: {e}")
continue
else:
await self.get_all_creator_details(config.BILI_CREATOR_ID_LIST)
else:
pass
utils.logger.info("[BilibiliCrawler.start] Bilibili Crawler finished ...")
async def search(self):
# Search for video and retrieve their comment information.
if config.BILI_SEARCH_MODE == "normal":
await self.search_by_keywords()
elif config.BILI_SEARCH_MODE == "all_in_time_range":
await self.search_by_keywords_in_time_range(daily_limit=False)
elif config.BILI_SEARCH_MODE == "daily_limit_in_time_range":
await self.search_by_keywords_in_time_range(daily_limit=True)
else:
utils.logger.warning(f"Unknown BILI_SEARCH_MODE: {config.BILI_SEARCH_MODE}")
@staticmethod
async def get_pubtime_datetime(
start: str = config.START_DAY,
end: str = config.END_DAY,
) -> Tuple[str, str]:
# Convert start and end to datetime objects
start_day: datetime = datetime.strptime(start, "%Y-%m-%d")
end_day: datetime = datetime.strptime(end, "%Y-%m-%d")
if start_day > end_day:
raise ValueError("Wrong time range, please check your start and end argument, to ensure that the start cannot exceed end")
elif start_day == end_day: # Searching content from the same day
end_day = (start_day + timedelta(days=1) - timedelta(seconds=1)) # Set end_day to start_day + 1 day - 1 second
else: # Searching from start to end
end_day = (end_day + timedelta(days=1) - timedelta(seconds=1)) # Set end_day to end_day + 1 day - 1 second
# Convert back to timestamps
return str(int(start_day.timestamp())), str(int(end_day.timestamp()))
async def search_by_keywords(self):
utils.logger.info("[BilibiliCrawler.search_by_keywords] Begin search bilibli keywords")
bili_limit_count = 20 # bilibili limit page fixed value
if config.CRAWLER_MAX_NOTES_COUNT < bili_limit_count:
config.CRAWLER_MAX_NOTES_COUNT = bili_limit_count
start_page = config.START_PAGE # start page number
for keyword in config.KEYWORDS.split(","):
source_keyword_var.set(keyword)
utils.logger.info(f"[BilibiliCrawler.search_by_keywords] Current search keyword: {keyword}")
page = 1
while (page - start_page + 1) * bili_limit_count <= config.CRAWLER_MAX_NOTES_COUNT:
if page < start_page:
utils.logger.info(f"[BilibiliCrawler.search_by_keywords] Skip page: {page}")
page += 1
continue
utils.logger.info(f"[BilibiliCrawler.search_by_keywords] search bilibili keyword: {keyword}, page: {page}")
video_id_list: List[str] = []
videos_res = await self.bili_client.search_video_by_keyword(
keyword=keyword,
page=page,
page_size=bili_limit_count,
order=SearchOrderType.DEFAULT,
pubtime_begin_s=0, # Publish date start timestamp
pubtime_end_s=0, # Publish date end timestamp
)
video_list: List[Dict] = videos_res.get("result")
if not video_list:
utils.logger.info(f"[BilibiliCrawler.search_by_keywords] No more videos for '{keyword}', moving to next keyword.")
break
semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
task_list = []
try:
task_list = [self.get_video_info_task(aid=video_item.get("aid"), bvid="", semaphore=semaphore) for video_item in video_list]
except Exception as e:
utils.logger.warning(f"[BilibiliCrawler.search_by_keywords] error in the task list. The video for this page will not be included. {e}")
video_items = await asyncio.gather(*task_list)
for video_item in video_items:
if video_item:
video_id_list.append(video_item.get("View").get("aid"))
await bilibili_store.update_bilibili_video(video_item)
await bilibili_store.update_up_info(video_item)
await self.get_bilibili_video(video_item, semaphore)
page += 1
# Sleep after page navigation
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[BilibiliCrawler.search_by_keywords] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after page {page-1}")
await self.batch_get_video_comments(video_id_list)
async def search_by_keywords_in_time_range(self, daily_limit: bool):
utils.logger.info(f"[BilibiliCrawler.search_by_keywords_in_time_range] Begin search with daily_limit={daily_limit}")
bili_limit_count = 20
start_page = config.START_PAGE
for keyword in config.KEYWORDS.split(","):
source_keyword_var.set(keyword)
utils.logger.info(f"[BilibiliCrawler.search_by_keywords_in_time_range] Current search keyword: {keyword}")
total_notes_crawled_for_keyword = 0
for day in pd.date_range(start=config.START_DAY, end=config.END_DAY, freq="D"):
if (daily_limit and total_notes_crawled_for_keyword >= config.CRAWLER_MAX_NOTES_COUNT):
utils.logger.info(f"[BilibiliCrawler.search] Reached CRAWLER_MAX_NOTES_COUNT limit for keyword '{keyword}', skipping remaining days.")
break
if (not daily_limit and total_notes_crawled_for_keyword >= config.CRAWLER_MAX_NOTES_COUNT):
utils.logger.info(f"[BilibiliCrawler.search] Reached CRAWLER_MAX_NOTES_COUNT limit for keyword '{keyword}', skipping remaining days.")
break
pubtime_begin_s, pubtime_end_s = await self.get_pubtime_datetime(start=day.strftime("%Y-%m-%d"), end=day.strftime("%Y-%m-%d"))
page = 1
notes_count_this_day = 0
while True:
if notes_count_this_day >= config.MAX_NOTES_PER_DAY:
utils.logger.info(f"[BilibiliCrawler.search] Reached MAX_NOTES_PER_DAY limit for {day.ctime()}.")
break
if (daily_limit and total_notes_crawled_for_keyword >= config.CRAWLER_MAX_NOTES_COUNT):
utils.logger.info(f"[BilibiliCrawler.search] Reached CRAWLER_MAX_NOTES_COUNT limit for keyword '{keyword}'.")
break
if (not daily_limit and total_notes_crawled_for_keyword >= config.CRAWLER_MAX_NOTES_COUNT):
break
try:
utils.logger.info(f"[BilibiliCrawler.search] search bilibili keyword: {keyword}, date: {day.ctime()}, page: {page}")
video_id_list: List[str] = []
videos_res = await self.bili_client.search_video_by_keyword(
keyword=keyword,
page=page,
page_size=bili_limit_count,
order=SearchOrderType.DEFAULT,
pubtime_begin_s=pubtime_begin_s,
pubtime_end_s=pubtime_end_s,
)
video_list: List[Dict] = videos_res.get("result")
if not video_list:
utils.logger.info(f"[BilibiliCrawler.search] No more videos for '{keyword}' on {day.ctime()}, moving to next day.")
break
semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
task_list = [self.get_video_info_task(aid=video_item.get("aid"), bvid="", semaphore=semaphore) for video_item in video_list]
video_items = await asyncio.gather(*task_list)
for video_item in video_items:
if video_item:
if (daily_limit and total_notes_crawled_for_keyword >= config.CRAWLER_MAX_NOTES_COUNT):
break
if (not daily_limit and total_notes_crawled_for_keyword >= config.CRAWLER_MAX_NOTES_COUNT):
break
if notes_count_this_day >= config.MAX_NOTES_PER_DAY:
break
notes_count_this_day += 1
total_notes_crawled_for_keyword += 1
video_id_list.append(video_item.get("View").get("aid"))
await bilibili_store.update_bilibili_video(video_item)
await bilibili_store.update_up_info(video_item)
await self.get_bilibili_video(video_item, semaphore)
page += 1
# Sleep after page navigation
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[BilibiliCrawler.search_by_keywords_in_time_range] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after page {page-1}")
await self.batch_get_video_comments(video_id_list)
except Exception as e:
utils.logger.error(f"[BilibiliCrawler.search] Error searching on {day.ctime()}: {e}")
break
async def batch_get_video_comments(self, video_id_list: List[str]):
if not config.ENABLE_GET_COMMENTS:
utils.logger.info(f"[BilibiliCrawler.batch_get_note_comments] Crawling comment mode is not enabled")
return
utils.logger.info(f"[BilibiliCrawler.batch_get_video_comments] video ids:{video_id_list}")
semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
task_list: List[Task] = []
for video_id in video_id_list:
task = asyncio.create_task(self.get_comments(video_id, semaphore), name=video_id)
task_list.append(task)
await asyncio.gather(*task_list)
async def get_comments(self, video_id: str, semaphore: asyncio.Semaphore):
async with semaphore:
try:
utils.logger.info(f"[BilibiliCrawler.get_comments] begin get video_id: {video_id} comments ...")
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[BilibiliCrawler.get_comments] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after fetching comments for video {video_id}")
await self.bili_client.get_video_all_comments(
video_id=video_id,
crawl_interval=config.CRAWLER_MAX_SLEEP_SEC,
is_fetch_sub_comments=config.ENABLE_GET_SUB_COMMENTS,
callback=bilibili_store.batch_update_bilibili_video_comments,
max_count=config.CRAWLER_MAX_COMMENTS_COUNT_SINGLENOTES,
)
except DataFetchError as ex:
utils.logger.error(f"[BilibiliCrawler.get_comments] get video_id: {video_id} comment error: {ex}")
except Exception as e:
utils.logger.error(f"[BilibiliCrawler.get_comments] may be been blocked, err:{e}")
# Propagate the exception to be caught by the main loop
raise
async def get_creator_videos(self, creator_id: int):
ps = 30
pn = 1
while True:
result = await self.bili_client.get_creator_videos(creator_id, pn, ps)
video_bvids_list = [video["bvid"] for video in result["list"]["vlist"]]
await self.get_specified_videos(video_bvids_list)
if int(result["page"]["count"]) <= pn * ps:
break
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[BilibiliCrawler.get_creator_videos] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after page {pn}")
pn += 1
async def get_specified_videos(self, video_url_list: List[str]):
utils.logger.info("[BilibiliCrawler.get_specified_videos] Parsing video URLs...")
bvids_list = []
for video_url in video_url_list:
try:
video_info = parse_video_info_from_url(video_url)
bvids_list.append(video_info.video_id)
utils.logger.info(f"[BilibiliCrawler.get_specified_videos] Parsed video ID: {video_info.video_id} from {video_url}")
except ValueError as e:
utils.logger.error(f"[BilibiliCrawler.get_specified_videos] Failed to parse video URL: {e}")
continue
semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
task_list = [self.get_video_info_task(aid=0, bvid=video_id, semaphore=semaphore) for video_id in bvids_list]
video_details = await asyncio.gather(*task_list)
video_aids_list = []
for video_detail in video_details:
if video_detail is not None:
video_item_view: Dict = video_detail.get("View")
video_aid: str = video_item_view.get("aid")
if video_aid:
video_aids_list.append(video_aid)
await bilibili_store.update_bilibili_video(video_detail)
await bilibili_store.update_up_info(video_detail)
await self.get_bilibili_video(video_detail, semaphore)
await self.batch_get_video_comments(video_aids_list)
async def get_video_info_task(self, aid: int, bvid: str, semaphore: asyncio.Semaphore) -> Optional[Dict]:
async with semaphore:
try:
result = await self.bili_client.get_video_info(aid=aid, bvid=bvid)
# Sleep after fetching video details
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[BilibiliCrawler.get_video_info_task] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after fetching video details {bvid or aid}")
return result
except DataFetchError as ex:
utils.logger.error(f"[BilibiliCrawler.get_video_info_task] Get video detail error: {ex}")
return None
except KeyError as ex:
utils.logger.error(f"[BilibiliCrawler.get_video_info_task] have not fund note detail video_id:{bvid}, err: {ex}")
return None
async def get_video_play_url_task(self, aid: int, cid: int, semaphore: asyncio.Semaphore) -> Union[Dict, None]:
async with semaphore:
try:
result = await self.bili_client.get_video_play_url(aid=aid, cid=cid)
return result
except DataFetchError as ex:
utils.logger.error(f"[BilibiliCrawler.get_video_play_url_task] Get video play url error: {ex}")
return None
except KeyError as ex:
utils.logger.error(f"[BilibiliCrawler.get_video_play_url_task] have not fund play url from :{aid}|{cid}, err: {ex}")
return None
async def create_bilibili_client(self, httpx_proxy: Optional[str]) -> BilibiliClient:
utils.logger.info("[BilibiliCrawler.create_bilibili_client] Begin create bilibili API client ...")
cookie_str, cookie_dict = utils.convert_cookies(await self.browser_context.cookies())
bilibili_client_obj = BilibiliClient(
proxy=httpx_proxy,
headers={
"User-Agent": self.user_agent,
"Cookie": cookie_str,
"Origin": "https://www.bilibili.com",
"Referer": "https://www.bilibili.com",
"Content-Type": "application/json;charset=UTF-8",
},
playwright_page=self.context_page,
cookie_dict=cookie_dict,
proxy_ip_pool=self.ip_proxy_pool, # Pass proxy pool for automatic refresh
)
return bilibili_client_obj
async def launch_browser(
self,
chromium: BrowserType,
playwright_proxy: Optional[Dict],
user_agent: Optional[str],
headless: bool = True,
) -> BrowserContext:
utils.logger.info("[BilibiliCrawler.launch_browser] Begin create browser context ...")
if config.SAVE_LOGIN_STATE:
# feat issue #14
# we will save login state to avoid login every time
user_data_dir = os.path.join(os.getcwd(), "browser_data", config.USER_DATA_DIR % config.PLATFORM) # type: ignore
browser_context = await chromium.launch_persistent_context(
user_data_dir=user_data_dir,
accept_downloads=True,
headless=headless,
proxy=playwright_proxy, # type: ignore
viewport={
"width": 1920,
"height": 1080
},
user_agent=user_agent,
channel="chrome", # Use system's stable Chrome version
)
return browser_context
else:
# type: ignore
browser = await chromium.launch(headless=headless, proxy=playwright_proxy, channel="chrome")
browser_context = await browser.new_context(viewport={"width": 1920, "height": 1080}, user_agent=user_agent)
return browser_context
async def launch_browser_with_cdp(
self,
playwright: Playwright,
playwright_proxy: Optional[Dict],
user_agent: Optional[str],
headless: bool = True,
) -> BrowserContext:
try:
self.cdp_manager = CDPBrowserManager()
browser_context = await self.cdp_manager.launch_and_connect(
playwright=playwright,
playwright_proxy=playwright_proxy,
user_agent=user_agent,
headless=headless,
)
# Display browser information
browser_info = await self.cdp_manager.get_browser_info()
utils.logger.info(f"[BilibiliCrawler] CDP browser info: {browser_info}")
return browser_context
except Exception as e:
utils.logger.error(f"[BilibiliCrawler] CDP mode launch failed, fallback to standard mode: {e}")
# Fallback to standard mode
chromium = playwright.chromium
return await self.launch_browser(chromium, playwright_proxy, user_agent, headless)
async def close(self):
try:
# If using CDP mode, special handling is required
if self.cdp_manager:
await self.cdp_manager.cleanup()
self.cdp_manager = None
elif self.browser_context:
await self.browser_context.close()
utils.logger.info("[BilibiliCrawler.close] Browser context closed ...")
except TargetClosedError:
utils.logger.warning("[BilibiliCrawler.close] Browser context was already closed.")
except Exception as e:
utils.logger.error(f"[BilibiliCrawler.close] An error occurred during close: {e}")
async def get_bilibili_video(self, video_item: Dict, semaphore: asyncio.Semaphore):
if not config.ENABLE_GET_MEIDAS:
utils.logger.info(f"[BilibiliCrawler.get_bilibili_video] Crawling image mode is not enabled")
return
video_item_view: Dict = video_item.get("View")
aid = video_item_view.get("aid")
cid = video_item_view.get("cid")
result = await self.get_video_play_url_task(aid, cid, semaphore)
if result is None:
utils.logger.info("[BilibiliCrawler.get_bilibili_video] get video play url failed")
return
durl_list = result.get("durl")
max_size = -1
video_url = ""
for durl in durl_list:
size = durl.get("size")
if size > max_size:
max_size = size
video_url = durl.get("url")
if video_url == "":
utils.logger.info("[BilibiliCrawler.get_bilibili_video] get video url failed")
return
content = await self.bili_client.get_video_media(video_url)
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[BilibiliCrawler.get_bilibili_video] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after fetching video {aid}")
if content is None:
return
extension_file_name = f"video.mp4"
await bilibili_store.store_video(aid, content, extension_file_name)
async def get_all_creator_details(self, creator_url_list: List[str]):
utils.logger.info(f"[BilibiliCrawler.get_all_creator_details] Crawling the details of creators")
utils.logger.info(f"[BilibiliCrawler.get_all_creator_details] Parsing creator URLs...")
creator_id_list = []
for creator_url in creator_url_list:
try:
creator_info = parse_creator_info_from_url(creator_url)
creator_id_list.append(int(creator_info.creator_id))
utils.logger.info(f"[BilibiliCrawler.get_all_creator_details] Parsed creator ID: {creator_info.creator_id} from {creator_url}")
except ValueError as e:
utils.logger.error(f"[BilibiliCrawler.get_all_creator_details] Failed to parse creator URL: {e}")
continue
utils.logger.info(f"[BilibiliCrawler.get_all_creator_details] creator ids:{creator_id_list}")
semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
task_list: List[Task] = []
try:
for creator_id in creator_id_list:
task = asyncio.create_task(self.get_creator_details(creator_id, semaphore), name=str(creator_id))
task_list.append(task)
except Exception as e:
utils.logger.warning(f"[BilibiliCrawler.get_all_creator_details] error in the task list. The creator will not be included. {e}")
await asyncio.gather(*task_list)
async def get_creator_details(self, creator_id: int, semaphore: asyncio.Semaphore):
async with semaphore:
creator_unhandled_info: Dict = await self.bili_client.get_creator_info(creator_id)
creator_info: Dict = {
"id": creator_id,
"name": creator_unhandled_info.get("name"),
"sign": creator_unhandled_info.get("sign"),
"avatar": creator_unhandled_info.get("face"),
}
await self.get_fans(creator_info, semaphore)
await self.get_followings(creator_info, semaphore)
await self.get_dynamics(creator_info, semaphore)
async def get_fans(self, creator_info: Dict, semaphore: asyncio.Semaphore):
creator_id = creator_info["id"]
async with semaphore:
try:
utils.logger.info(f"[BilibiliCrawler.get_fans] begin get creator_id: {creator_id} fans ...")
await self.bili_client.get_creator_all_fans(
creator_info=creator_info,
crawl_interval=config.CRAWLER_MAX_SLEEP_SEC,
callback=bilibili_store.batch_update_bilibili_creator_fans,
max_count=config.CRAWLER_MAX_CONTACTS_COUNT_SINGLENOTES,
)
except DataFetchError as ex:
utils.logger.error(f"[BilibiliCrawler.get_fans] get creator_id: {creator_id} fans error: {ex}")
except Exception as e:
utils.logger.error(f"[BilibiliCrawler.get_fans] may be been blocked, err:{e}")
async def get_followings(self, creator_info: Dict, semaphore: asyncio.Semaphore):
creator_id = creator_info["id"]
async with semaphore:
try:
utils.logger.info(f"[BilibiliCrawler.get_followings] begin get creator_id: {creator_id} followings ...")
await self.bili_client.get_creator_all_followings(
creator_info=creator_info,
crawl_interval=config.CRAWLER_MAX_SLEEP_SEC,
callback=bilibili_store.batch_update_bilibili_creator_followings,
max_count=config.CRAWLER_MAX_CONTACTS_COUNT_SINGLENOTES,
)
except DataFetchError as ex:
utils.logger.error(f"[BilibiliCrawler.get_followings] get creator_id: {creator_id} followings error: {ex}")
except Exception as e:
utils.logger.error(f"[BilibiliCrawler.get_followings] may be been blocked, err:{e}")
async def get_dynamics(self, creator_info: Dict, semaphore: asyncio.Semaphore):
creator_id = creator_info["id"]
async with semaphore:
try:
utils.logger.info(f"[BilibiliCrawler.get_dynamics] begin get creator_id: {creator_id} dynamics ...")
await self.bili_client.get_creator_all_dynamics(
creator_info=creator_info,
crawl_interval=config.CRAWLER_MAX_SLEEP_SEC,
callback=bilibili_store.batch_update_bilibili_creator_dynamics,
max_count=config.CRAWLER_MAX_DYNAMICS_COUNT_SINGLENOTES,
)
except DataFetchError as ex:
utils.logger.error(f"[BilibiliCrawler.get_dynamics] get creator_id: {creator_id} dynamics error: {ex}")
except Exception as e:
utils.logger.error(f"[BilibiliCrawler.get_dynamics] may be been blocked, err:{e}") | --- +++ @@ -130,6 +130,9 @@ utils.logger.info("[BilibiliCrawler.start] Bilibili Crawler finished ...")
async def search(self):
+ """
+ search bilibili video
+ """
# Search for video and retrieve their comment information.
if config.BILI_SEARCH_MODE == "normal":
await self.search_by_keywords()
@@ -145,6 +148,22 @@ start: str = config.START_DAY,
end: str = config.END_DAY,
) -> Tuple[str, str]:
+ """
+ Get bilibili publish start timestamp pubtime_begin_s and publish end timestamp pubtime_end_s
+ ---
+ :param start: Publish date start time, YYYY-MM-DD
+ :param end: Publish date end time, YYYY-MM-DD
+
+ Note
+ ---
+ - Search time range is from start to end, including both start and end
+ - To search content from the same day, to include search content from that day, pubtime_end_s should be pubtime_begin_s plus one day minus one second, i.e., the last second of start day
+ - For example, searching only 2024-01-05 content, pubtime_begin_s = 1704384000, pubtime_end_s = 1704470399
+ Converted to readable datetime objects: pubtime_begin_s = datetime.datetime(2024, 1, 5, 0, 0), pubtime_end_s = datetime.datetime(2024, 1, 5, 23, 59, 59)
+ - To search content from start to end, to include search content from end day, pubtime_end_s should be pubtime_end_s plus one day minus one second, i.e., the last second of end day
+ - For example, searching 2024-01-05 - 2024-01-06 content, pubtime_begin_s = 1704384000, pubtime_end_s = 1704556799
+ Converted to readable datetime objects: pubtime_begin_s = datetime.datetime(2024, 1, 5, 0, 0), pubtime_end_s = datetime.datetime(2024, 1, 6, 23, 59, 59)
+ """
# Convert start and end to datetime objects
start_day: datetime = datetime.strptime(start, "%Y-%m-%d")
end_day: datetime = datetime.strptime(end, "%Y-%m-%d")
@@ -158,6 +177,10 @@ return str(int(start_day.timestamp())), str(int(end_day.timestamp()))
async def search_by_keywords(self):
+ """
+ search bilibili video with keywords in normal mode
+ :return:
+ """
utils.logger.info("[BilibiliCrawler.search_by_keywords] Begin search bilibli keywords")
bili_limit_count = 20 # bilibili limit page fixed value
if config.CRAWLER_MAX_NOTES_COUNT < bili_limit_count:
@@ -211,6 +234,10 @@ await self.batch_get_video_comments(video_id_list)
async def search_by_keywords_in_time_range(self, daily_limit: bool):
+ """
+ Search bilibili video with keywords in a given time range.
+ :param daily_limit: if True, strictly limit the number of notes per day and total.
+ """
utils.logger.info(f"[BilibiliCrawler.search_by_keywords_in_time_range] Begin search with daily_limit={daily_limit}")
bili_limit_count = 20
start_page = config.START_PAGE
@@ -292,6 +319,11 @@ break
async def batch_get_video_comments(self, video_id_list: List[str]):
+ """
+ batch get video comments
+ :param video_id_list:
+ :return:
+ """
if not config.ENABLE_GET_COMMENTS:
utils.logger.info(f"[BilibiliCrawler.batch_get_note_comments] Crawling comment mode is not enabled")
return
@@ -305,6 +337,12 @@ await asyncio.gather(*task_list)
async def get_comments(self, video_id: str, semaphore: asyncio.Semaphore):
+ """
+ get comment for video id
+ :param video_id:
+ :param semaphore:
+ :return:
+ """
async with semaphore:
try:
utils.logger.info(f"[BilibiliCrawler.get_comments] begin get video_id: {video_id} comments ...")
@@ -326,6 +364,10 @@ raise
async def get_creator_videos(self, creator_id: int):
+ """
+ get videos for a creator
+ :return:
+ """
ps = 30
pn = 1
while True:
@@ -339,6 +381,11 @@ pn += 1
async def get_specified_videos(self, video_url_list: List[str]):
+ """
+ get specified videos info from URLs or BV IDs
+ :param video_url_list: List of video URLs or BV IDs
+ :return:
+ """
utils.logger.info("[BilibiliCrawler.get_specified_videos] Parsing video URLs...")
bvids_list = []
for video_url in video_url_list:
@@ -366,6 +413,13 @@ await self.batch_get_video_comments(video_aids_list)
async def get_video_info_task(self, aid: int, bvid: str, semaphore: asyncio.Semaphore) -> Optional[Dict]:
+ """
+ Get video detail task
+ :param aid:
+ :param bvid:
+ :param semaphore:
+ :return:
+ """
async with semaphore:
try:
result = await self.bili_client.get_video_info(aid=aid, bvid=bvid)
@@ -383,6 +437,13 @@ return None
async def get_video_play_url_task(self, aid: int, cid: int, semaphore: asyncio.Semaphore) -> Union[Dict, None]:
+ """
+ Get video play url
+ :param aid:
+ :param cid:
+ :param semaphore:
+ :return:
+ """
async with semaphore:
try:
result = await self.bili_client.get_video_play_url(aid=aid, cid=cid)
@@ -395,6 +456,11 @@ return None
async def create_bilibili_client(self, httpx_proxy: Optional[str]) -> BilibiliClient:
+ """
+ create bilibili client
+ :param httpx_proxy: httpx proxy
+ :return: bilibili client
+ """
utils.logger.info("[BilibiliCrawler.create_bilibili_client] Begin create bilibili API client ...")
cookie_str, cookie_dict = utils.convert_cookies(await self.browser_context.cookies())
bilibili_client_obj = BilibiliClient(
@@ -419,6 +485,14 @@ user_agent: Optional[str],
headless: bool = True,
) -> BrowserContext:
+ """
+ launch browser and create browser context
+ :param chromium: chromium browser
+ :param playwright_proxy: playwright proxy
+ :param user_agent: user agent
+ :param headless: headless mode
+ :return: browser context
+ """
utils.logger.info("[BilibiliCrawler.launch_browser] Begin create browser context ...")
if config.SAVE_LOGIN_STATE:
# feat issue #14
@@ -450,6 +524,9 @@ user_agent: Optional[str],
headless: bool = True,
) -> BrowserContext:
+ """
+ Launch browser using CDP mode
+ """
try:
self.cdp_manager = CDPBrowserManager()
browser_context = await self.cdp_manager.launch_and_connect(
@@ -472,6 +549,7 @@ return await self.launch_browser(chromium, playwright_proxy, user_agent, headless)
async def close(self):
+ """Close browser context"""
try:
# If using CDP mode, special handling is required
if self.cdp_manager:
@@ -486,6 +564,12 @@ utils.logger.error(f"[BilibiliCrawler.close] An error occurred during close: {e}")
async def get_bilibili_video(self, video_item: Dict, semaphore: asyncio.Semaphore):
+ """
+ download bilibili video
+ :param video_item:
+ :param semaphore:
+ :return:
+ """
if not config.ENABLE_GET_MEIDAS:
utils.logger.info(f"[BilibiliCrawler.get_bilibili_video] Crawling image mode is not enabled")
return
@@ -517,6 +601,9 @@ await bilibili_store.store_video(aid, content, extension_file_name)
async def get_all_creator_details(self, creator_url_list: List[str]):
+ """
+ creator_url_list: get details for creator from creator URL list
+ """
utils.logger.info(f"[BilibiliCrawler.get_all_creator_details] Crawling the details of creators")
utils.logger.info(f"[BilibiliCrawler.get_all_creator_details] Parsing creator URLs...")
@@ -544,6 +631,12 @@ await asyncio.gather(*task_list)
async def get_creator_details(self, creator_id: int, semaphore: asyncio.Semaphore):
+ """
+ get details for creator id
+ :param creator_id:
+ :param semaphore:
+ :return:
+ """
async with semaphore:
creator_unhandled_info: Dict = await self.bili_client.get_creator_info(creator_id)
creator_info: Dict = {
@@ -557,6 +650,12 @@ await self.get_dynamics(creator_info, semaphore)
async def get_fans(self, creator_info: Dict, semaphore: asyncio.Semaphore):
+ """
+ get fans for creator id
+ :param creator_info:
+ :param semaphore:
+ :return:
+ """
creator_id = creator_info["id"]
async with semaphore:
try:
@@ -574,6 +673,12 @@ utils.logger.error(f"[BilibiliCrawler.get_fans] may be been blocked, err:{e}")
async def get_followings(self, creator_info: Dict, semaphore: asyncio.Semaphore):
+ """
+ get followings for creator id
+ :param creator_info:
+ :param semaphore:
+ :return:
+ """
creator_id = creator_info["id"]
async with semaphore:
try:
@@ -591,6 +696,12 @@ utils.logger.error(f"[BilibiliCrawler.get_followings] may be been blocked, err:{e}")
async def get_dynamics(self, creator_info: Dict, semaphore: asyncio.Semaphore):
+ """
+ get dynamics for creator id
+ :param creator_info:
+ :param semaphore:
+ :return:
+ """
creator_id = creator_info["id"]
async with semaphore:
try:
@@ -605,4 +716,4 @@ except DataFetchError as ex:
utils.logger.error(f"[BilibiliCrawler.get_dynamics] get creator_id: {creator_id} dynamics error: {ex}")
except Exception as e:
- utils.logger.error(f"[BilibiliCrawler.get_dynamics] may be been blocked, err:{e}")+ utils.logger.error(f"[BilibiliCrawler.get_dynamics] may be been blocked, err:{e}")
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/bilibili/core.py |
Please document this code using docstrings | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/tieba/core.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
import asyncio
import os
from asyncio import Task
from typing import Dict, List, Optional, Tuple
from playwright.async_api import (
BrowserContext,
BrowserType,
Page,
Playwright,
async_playwright,
)
import config
from base.base_crawler import AbstractCrawler
from model.m_baidu_tieba import TiebaCreator, TiebaNote
from proxy.proxy_ip_pool import IpInfoModel, ProxyIpPool, create_ip_pool
from store import tieba as tieba_store
from tools import utils
from tools.cdp_browser import CDPBrowserManager
from var import crawler_type_var, source_keyword_var
from .client import BaiduTieBaClient
from .field import SearchNoteType, SearchSortType
from .help import TieBaExtractor
from .login import BaiduTieBaLogin
class TieBaCrawler(AbstractCrawler):
context_page: Page
tieba_client: BaiduTieBaClient
browser_context: BrowserContext
cdp_manager: Optional[CDPBrowserManager]
def __init__(self) -> None:
self.index_url = "https://tieba.baidu.com"
self.user_agent = utils.get_user_agent()
self._page_extractor = TieBaExtractor()
self.cdp_manager = None
async def start(self) -> None:
playwright_proxy_format, httpx_proxy_format = None, None
if config.ENABLE_IP_PROXY:
utils.logger.info(
"[BaiduTieBaCrawler.start] Begin create ip proxy pool ..."
)
ip_proxy_pool = await create_ip_pool(
config.IP_PROXY_POOL_COUNT, enable_validate_ip=True
)
ip_proxy_info: IpInfoModel = await ip_proxy_pool.get_proxy()
playwright_proxy_format, httpx_proxy_format = utils.format_proxy_info(ip_proxy_info)
utils.logger.info(
f"[BaiduTieBaCrawler.start] Init default ip proxy, value: {httpx_proxy_format}"
)
async with async_playwright() as playwright:
# Choose startup mode based on configuration
if config.ENABLE_CDP_MODE:
utils.logger.info("[BaiduTieBaCrawler] Launching browser in CDP mode")
self.browser_context = await self.launch_browser_with_cdp(
playwright,
playwright_proxy_format,
self.user_agent,
headless=config.CDP_HEADLESS,
)
else:
utils.logger.info("[BaiduTieBaCrawler] Launching browser in standard mode")
# Launch a browser context.
chromium = playwright.chromium
self.browser_context = await self.launch_browser(
chromium,
playwright_proxy_format,
self.user_agent,
headless=config.HEADLESS,
)
# Inject anti-detection scripts - for Baidu's special detection
await self._inject_anti_detection_scripts()
self.context_page = await self.browser_context.new_page()
# First visit Baidu homepage, then click Tieba link to avoid triggering security verification
await self._navigate_to_tieba_via_baidu()
# Create a client to interact with the baidutieba website.
self.tieba_client = await self.create_tieba_client(
httpx_proxy_format,
ip_proxy_pool if config.ENABLE_IP_PROXY else None
)
# Check login status and perform login if necessary
if not await self.tieba_client.pong(browser_context=self.browser_context):
login_obj = BaiduTieBaLogin(
login_type=config.LOGIN_TYPE,
login_phone="", # your phone number
browser_context=self.browser_context,
context_page=self.context_page,
cookie_str=config.COOKIES,
)
await login_obj.begin()
await self.tieba_client.update_cookies(browser_context=self.browser_context)
crawler_type_var.set(config.CRAWLER_TYPE)
if config.CRAWLER_TYPE == "search":
# Search for notes and retrieve their comment information.
await self.search()
await self.get_specified_tieba_notes()
elif config.CRAWLER_TYPE == "detail":
# Get the information and comments of the specified post
await self.get_specified_notes()
elif config.CRAWLER_TYPE == "creator":
# Get creator's information and their notes and comments
await self.get_creators_and_notes()
else:
pass
utils.logger.info("[BaiduTieBaCrawler.start] Tieba Crawler finished ...")
async def search(self) -> None:
utils.logger.info(
"[BaiduTieBaCrawler.search] Begin search baidu tieba keywords"
)
tieba_limit_count = 10 # tieba limit page fixed value
if config.CRAWLER_MAX_NOTES_COUNT < tieba_limit_count:
config.CRAWLER_MAX_NOTES_COUNT = tieba_limit_count
start_page = config.START_PAGE
for keyword in config.KEYWORDS.split(","):
source_keyword_var.set(keyword)
utils.logger.info(
f"[BaiduTieBaCrawler.search] Current search keyword: {keyword}"
)
page = 1
while (
page - start_page + 1
) * tieba_limit_count <= config.CRAWLER_MAX_NOTES_COUNT:
if page < start_page:
utils.logger.info(f"[BaiduTieBaCrawler.search] Skip page {page}")
page += 1
continue
try:
utils.logger.info(
f"[BaiduTieBaCrawler.search] search tieba keyword: {keyword}, page: {page}"
)
notes_list: List[TiebaNote] = (
await self.tieba_client.get_notes_by_keyword(
keyword=keyword,
page=page,
page_size=tieba_limit_count,
sort=SearchSortType.TIME_DESC,
note_type=SearchNoteType.FIXED_THREAD,
)
)
if not notes_list:
utils.logger.info(
f"[BaiduTieBaCrawler.search] Search note list is empty"
)
break
utils.logger.info(
f"[BaiduTieBaCrawler.search] Note list len: {len(notes_list)}"
)
await self.get_specified_notes(
note_id_list=[note_detail.note_id for note_detail in notes_list]
)
# Sleep after page navigation
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[TieBaCrawler.search] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after page {page}")
page += 1
except Exception as ex:
utils.logger.error(
f"[BaiduTieBaCrawler.search] Search keywords error, current page: {page}, current keyword: {keyword}, err: {ex}"
)
break
async def get_specified_tieba_notes(self):
tieba_limit_count = 50
if config.CRAWLER_MAX_NOTES_COUNT < tieba_limit_count:
config.CRAWLER_MAX_NOTES_COUNT = tieba_limit_count
for tieba_name in config.TIEBA_NAME_LIST:
utils.logger.info(
f"[BaiduTieBaCrawler.get_specified_tieba_notes] Begin get tieba name: {tieba_name}"
)
page_number = 0
while page_number <= config.CRAWLER_MAX_NOTES_COUNT:
note_list: List[TiebaNote] = (
await self.tieba_client.get_notes_by_tieba_name(
tieba_name=tieba_name, page_num=page_number
)
)
if not note_list:
utils.logger.info(
f"[BaiduTieBaCrawler.get_specified_tieba_notes] Get note list is empty"
)
break
utils.logger.info(
f"[BaiduTieBaCrawler.get_specified_tieba_notes] tieba name: {tieba_name} note list len: {len(note_list)}"
)
await self.get_specified_notes([note.note_id for note in note_list])
# Sleep after processing notes
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[TieBaCrawler.get_specified_tieba_notes] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after processing notes from page {page_number}")
page_number += tieba_limit_count
async def get_specified_notes(
self, note_id_list: List[str] = config.TIEBA_SPECIFIED_ID_LIST
):
semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
task_list = [
self.get_note_detail_async_task(note_id=note_id, semaphore=semaphore)
for note_id in note_id_list
]
note_details = await asyncio.gather(*task_list)
note_details_model: List[TiebaNote] = []
for note_detail in note_details:
if note_detail is not None:
note_details_model.append(note_detail)
await tieba_store.update_tieba_note(note_detail)
await self.batch_get_note_comments(note_details_model)
async def get_note_detail_async_task(
self, note_id: str, semaphore: asyncio.Semaphore
) -> Optional[TiebaNote]:
async with semaphore:
try:
utils.logger.info(
f"[BaiduTieBaCrawler.get_note_detail] Begin get note detail, note_id: {note_id}"
)
note_detail: TiebaNote = await self.tieba_client.get_note_by_id(note_id)
# Sleep after fetching note details
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[TieBaCrawler.get_note_detail_async_task] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after fetching note details {note_id}")
if not note_detail:
utils.logger.error(
f"[BaiduTieBaCrawler.get_note_detail] Get note detail error, note_id: {note_id}"
)
return None
return note_detail
except Exception as ex:
utils.logger.error(
f"[BaiduTieBaCrawler.get_note_detail] Get note detail error: {ex}"
)
return None
except KeyError as ex:
utils.logger.error(
f"[BaiduTieBaCrawler.get_note_detail] have not fund note detail note_id:{note_id}, err: {ex}"
)
return None
async def batch_get_note_comments(self, note_detail_list: List[TiebaNote]):
if not config.ENABLE_GET_COMMENTS:
return
semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
task_list: List[Task] = []
for note_detail in note_detail_list:
task = asyncio.create_task(
self.get_comments_async_task(note_detail, semaphore),
name=note_detail.note_id,
)
task_list.append(task)
await asyncio.gather(*task_list)
async def get_comments_async_task(
self, note_detail: TiebaNote, semaphore: asyncio.Semaphore
):
async with semaphore:
utils.logger.info(
f"[BaiduTieBaCrawler.get_comments] Begin get note id comments {note_detail.note_id}"
)
# Sleep before fetching comments
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[TieBaCrawler.get_comments_async_task] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds before fetching comments for note {note_detail.note_id}")
await self.tieba_client.get_note_all_comments(
note_detail=note_detail,
crawl_interval=config.CRAWLER_MAX_SLEEP_SEC,
callback=tieba_store.batch_update_tieba_note_comments,
max_count=config.CRAWLER_MAX_COMMENTS_COUNT_SINGLENOTES,
)
async def get_creators_and_notes(self) -> None:
utils.logger.info(
"[WeiboCrawler.get_creators_and_notes] Begin get weibo creators"
)
for creator_url in config.TIEBA_CREATOR_URL_LIST:
creator_page_html_content = await self.tieba_client.get_creator_info_by_url(
creator_url=creator_url
)
creator_info: TiebaCreator = self._page_extractor.extract_creator_info(
creator_page_html_content
)
if creator_info:
utils.logger.info(
f"[WeiboCrawler.get_creators_and_notes] creator info: {creator_info}"
)
if not creator_info:
raise Exception("Get creator info error")
await tieba_store.save_creator(user_info=creator_info)
# Get all note information of the creator
all_notes_list = (
await self.tieba_client.get_all_notes_by_creator_user_name(
user_name=creator_info.user_name,
crawl_interval=0,
callback=tieba_store.batch_update_tieba_notes,
max_note_count=config.CRAWLER_MAX_NOTES_COUNT,
creator_page_html_content=creator_page_html_content,
)
)
await self.batch_get_note_comments(all_notes_list)
else:
utils.logger.error(
f"[WeiboCrawler.get_creators_and_notes] get creator info error, creator_url:{creator_url}"
)
async def _navigate_to_tieba_via_baidu(self):
utils.logger.info("[TieBaCrawler] Simulating real user access path...")
try:
# Step 1: Visit Baidu homepage
utils.logger.info("[TieBaCrawler] Step 1: Visiting Baidu homepage https://www.baidu.com/")
await self.context_page.goto("https://www.baidu.com/", wait_until="domcontentloaded")
# Step 2: Wait for page loading, using delay setting from config file
utils.logger.info(f"[TieBaCrawler] Step 2: Waiting {config.CRAWLER_MAX_SLEEP_SEC} seconds to simulate user browsing...")
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
# Step 3: Find and click "Tieba" link
utils.logger.info("[TieBaCrawler] Step 3: Finding and clicking 'Tieba' link...")
# Try multiple selectors to ensure finding the Tieba link
tieba_selectors = [
'a[href="http://tieba.baidu.com/"]',
'a[href="https://tieba.baidu.com/"]',
'a.mnav:has-text("贴吧")',
'text=贴吧',
]
tieba_link = None
for selector in tieba_selectors:
try:
tieba_link = await self.context_page.wait_for_selector(selector, timeout=5000)
if tieba_link:
utils.logger.info(f"[TieBaCrawler] Found Tieba link (selector: {selector})")
break
except Exception:
continue
if not tieba_link:
utils.logger.warning("[TieBaCrawler] Tieba link not found, directly accessing Tieba homepage")
await self.context_page.goto(self.index_url, wait_until="domcontentloaded")
return
# Step 4: Click Tieba link (check if it will open in a new tab)
utils.logger.info("[TieBaCrawler] Step 4: Clicking Tieba link...")
# Check link's target attribute
target_attr = await tieba_link.get_attribute("target")
utils.logger.info(f"[TieBaCrawler] Link target attribute: {target_attr}")
if target_attr == "_blank":
# If it's a new tab, need to wait for new page and switch
utils.logger.info("[TieBaCrawler] Link will open in new tab, waiting for new page...")
async with self.browser_context.expect_page() as new_page_info:
await tieba_link.click()
# Get newly opened page
new_page = await new_page_info.value
await new_page.wait_for_load_state("domcontentloaded")
# Close old Baidu homepage
await self.context_page.close()
# Switch to new Tieba page
self.context_page = new_page
utils.logger.info("[TieBaCrawler] Successfully switched to new tab (Tieba page)")
else:
# If it's same tab navigation, wait for navigation normally
utils.logger.info("[TieBaCrawler] Link navigates in current tab...")
async with self.context_page.expect_navigation(wait_until="domcontentloaded"):
await tieba_link.click()
# Step 5: Wait for page to stabilize, using delay setting from config file
utils.logger.info(f"[TieBaCrawler] Step 5: Page loaded, waiting {config.CRAWLER_MAX_SLEEP_SEC} seconds...")
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
current_url = self.context_page.url
utils.logger.info(f"[TieBaCrawler] Successfully entered Tieba via Baidu homepage! Current URL: {current_url}")
except Exception as e:
utils.logger.error(f"[TieBaCrawler] Failed to access Tieba via Baidu homepage: {e}")
utils.logger.info("[TieBaCrawler] Fallback: directly accessing Tieba homepage")
await self.context_page.goto(self.index_url, wait_until="domcontentloaded")
async def _inject_anti_detection_scripts(self):
utils.logger.info("[TieBaCrawler] Injecting anti-detection scripts...")
# Lightweight anti-detection script, only covering key detection points
anti_detection_js = """
// Override navigator.webdriver
Object.defineProperty(navigator, 'webdriver', {
get: () => undefined,
configurable: true
});
// Override window.navigator.chrome
if (!window.navigator.chrome) {
window.navigator.chrome = {
runtime: {},
loadTimes: function() {},
csi: function() {},
app: {}
};
}
// Override Permissions API
const originalQuery = window.navigator.permissions.query;
window.navigator.permissions.query = (parameters) => (
parameters.name === 'notifications' ?
Promise.resolve({ state: Notification.permission }) :
originalQuery(parameters)
);
// Override plugins length (make it look like there are plugins)
Object.defineProperty(navigator, 'plugins', {
get: () => [1, 2, 3, 4, 5],
configurable: true
});
// Override languages
Object.defineProperty(navigator, 'languages', {
get: () => ['zh-CN', 'zh', 'en'],
configurable: true
});
// Remove window.cdc_ and other ChromeDriver remnants
delete window.cdc_adoQpoasnfa76pfcZLmcfl_Array;
delete window.cdc_adoQpoasnfa76pfcZLmcfl_Promise;
delete window.cdc_adoQpoasnfa76pfcZLmcfl_Symbol;
console.log('[Anti-Detection] Scripts injected successfully');
"""
await self.browser_context.add_init_script(anti_detection_js)
utils.logger.info("[TieBaCrawler] Anti-detection scripts injected")
async def create_tieba_client(
self, httpx_proxy: Optional[str], ip_pool: Optional[ProxyIpPool] = None
) -> BaiduTieBaClient:
utils.logger.info("[TieBaCrawler.create_tieba_client] Begin create tieba API client...")
# Extract User-Agent from real browser to avoid detection
user_agent = await self.context_page.evaluate("() => navigator.userAgent")
utils.logger.info(f"[TieBaCrawler.create_tieba_client] Extracted User-Agent from browser: {user_agent}")
cookie_str, cookie_dict = utils.convert_cookies(await self.browser_context.cookies())
# Build complete browser request headers, simulating real browser behavior
tieba_client = BaiduTieBaClient(
timeout=10,
ip_pool=ip_pool,
default_ip_proxy=httpx_proxy,
headers={
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
"Accept-Language": "zh-CN,zh;q=0.9",
"Accept-Encoding": "gzip, deflate, br",
"Connection": "keep-alive",
"User-Agent": user_agent, # Use real browser UA
"Cookie": cookie_str,
"Host": "tieba.baidu.com",
"Referer": "https://tieba.baidu.com/",
"Sec-Fetch-Dest": "document",
"Sec-Fetch-Mode": "navigate",
"Sec-Fetch-Site": "same-origin",
"Sec-Fetch-User": "?1",
"Upgrade-Insecure-Requests": "1",
"sec-ch-ua": '"Google Chrome";v="141", "Not?A_Brand";v="8", "Chromium";v="141"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"macOS"',
},
playwright_page=self.context_page, # Pass in playwright page object
)
return tieba_client
async def launch_browser(
self,
chromium: BrowserType,
playwright_proxy: Optional[Dict],
user_agent: Optional[str],
headless: bool = True,
) -> BrowserContext:
utils.logger.info(
"[BaiduTieBaCrawler.launch_browser] Begin create browser context ..."
)
if config.SAVE_LOGIN_STATE:
# feat issue #14
# we will save login state to avoid login every time
user_data_dir = os.path.join(
os.getcwd(), "browser_data", config.USER_DATA_DIR % config.PLATFORM
) # type: ignore
browser_context = await chromium.launch_persistent_context(
user_data_dir=user_data_dir,
accept_downloads=True,
headless=headless,
proxy=playwright_proxy, # type: ignore
viewport={"width": 1920, "height": 1080},
user_agent=user_agent,
channel="chrome", # Use system's stable Chrome version
)
return browser_context
else:
browser = await chromium.launch(headless=headless, proxy=playwright_proxy, channel="chrome") # type: ignore
browser_context = await browser.new_context(
viewport={"width": 1920, "height": 1080}, user_agent=user_agent
)
return browser_context
async def launch_browser_with_cdp(
self,
playwright: Playwright,
playwright_proxy: Optional[Dict],
user_agent: Optional[str],
headless: bool = True,
) -> BrowserContext:
try:
self.cdp_manager = CDPBrowserManager()
browser_context = await self.cdp_manager.launch_and_connect(
playwright=playwright,
playwright_proxy=playwright_proxy,
user_agent=user_agent,
headless=headless,
)
# Display browser information
browser_info = await self.cdp_manager.get_browser_info()
utils.logger.info(f"[TieBaCrawler] CDP browser info: {browser_info}")
return browser_context
except Exception as e:
utils.logger.error(f"[TieBaCrawler] CDP mode launch failed, falling back to standard mode: {e}")
# Fall back to standard mode
chromium = playwright.chromium
return await self.launch_browser(
chromium, playwright_proxy, user_agent, headless
)
async def close(self):
# If using CDP mode, need special handling
if self.cdp_manager:
await self.cdp_manager.cleanup()
self.cdp_manager = None
else:
await self.browser_context.close()
utils.logger.info("[BaiduTieBaCrawler.close] Browser context closed ...") | --- +++ @@ -59,6 +59,11 @@ self.cdp_manager = None
async def start(self) -> None:
+ """
+ Start the crawler
+ Returns:
+
+ """
playwright_proxy_format, httpx_proxy_format = None, None
if config.ENABLE_IP_PROXY:
utils.logger.info(
@@ -137,6 +142,11 @@ utils.logger.info("[BaiduTieBaCrawler.start] Tieba Crawler finished ...")
async def search(self) -> None:
+ """
+ Search for notes and retrieve their comment information.
+ Returns:
+
+ """
utils.logger.info(
"[BaiduTieBaCrawler.search] Begin search baidu tieba keywords"
)
@@ -194,6 +204,11 @@ break
async def get_specified_tieba_notes(self):
+ """
+ Get the information and comments of the specified post by tieba name
+ Returns:
+
+ """
tieba_limit_count = 50
if config.CRAWLER_MAX_NOTES_COUNT < tieba_limit_count:
config.CRAWLER_MAX_NOTES_COUNT = tieba_limit_count
@@ -228,6 +243,14 @@ async def get_specified_notes(
self, note_id_list: List[str] = config.TIEBA_SPECIFIED_ID_LIST
):
+ """
+ Get the information and comments of the specified post
+ Args:
+ note_id_list:
+
+ Returns:
+
+ """
semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
task_list = [
self.get_note_detail_async_task(note_id=note_id, semaphore=semaphore)
@@ -244,6 +267,15 @@ async def get_note_detail_async_task(
self, note_id: str, semaphore: asyncio.Semaphore
) -> Optional[TiebaNote]:
+ """
+ Get note detail
+ Args:
+ note_id: baidu tieba note id
+ semaphore: asyncio semaphore
+
+ Returns:
+
+ """
async with semaphore:
try:
utils.logger.info(
@@ -273,6 +305,14 @@ return None
async def batch_get_note_comments(self, note_detail_list: List[TiebaNote]):
+ """
+ Batch get note comments
+ Args:
+ note_detail_list:
+
+ Returns:
+
+ """
if not config.ENABLE_GET_COMMENTS:
return
@@ -289,6 +329,15 @@ async def get_comments_async_task(
self, note_detail: TiebaNote, semaphore: asyncio.Semaphore
):
+ """
+ Get comments async task
+ Args:
+ note_detail:
+ semaphore:
+
+ Returns:
+
+ """
async with semaphore:
utils.logger.info(
f"[BaiduTieBaCrawler.get_comments] Begin get note id comments {note_detail.note_id}"
@@ -306,6 +355,11 @@ )
async def get_creators_and_notes(self) -> None:
+ """
+ Get creator's information and their notes and comments
+ Returns:
+
+ """
utils.logger.info(
"[WeiboCrawler.get_creators_and_notes] Begin get weibo creators"
)
@@ -344,6 +398,15 @@ )
async def _navigate_to_tieba_via_baidu(self):
+ """
+ Simulate real user access path:
+ 1. First visit Baidu homepage (https://www.baidu.com/)
+ 2. Wait for page to load
+ 3. Click "Tieba" link in top navigation bar
+ 4. Jump to Tieba homepage
+
+ This avoids triggering Baidu's security verification
+ """
utils.logger.info("[TieBaCrawler] Simulating real user access path...")
try:
@@ -424,6 +487,10 @@ await self.context_page.goto(self.index_url, wait_until="domcontentloaded")
async def _inject_anti_detection_scripts(self):
+ """
+ Inject anti-detection JavaScript scripts
+ For Baidu Tieba's special detection mechanism
+ """
utils.logger.info("[TieBaCrawler] Injecting anti-detection scripts...")
# Lightweight anti-detection script, only covering key detection points
@@ -478,6 +545,15 @@ async def create_tieba_client(
self, httpx_proxy: Optional[str], ip_pool: Optional[ProxyIpPool] = None
) -> BaiduTieBaClient:
+ """
+ Create tieba client with real browser User-Agent and complete headers
+ Args:
+ httpx_proxy: HTTP proxy
+ ip_pool: IP proxy pool
+
+ Returns:
+ BaiduTieBaClient instance
+ """
utils.logger.info("[TieBaCrawler.create_tieba_client] Begin create tieba API client...")
# Extract User-Agent from real browser to avoid detection
@@ -520,6 +596,17 @@ user_agent: Optional[str],
headless: bool = True,
) -> BrowserContext:
+ """
+ Launch browser and create browser
+ Args:
+ chromium:
+ playwright_proxy:
+ user_agent:
+ headless:
+
+ Returns:
+
+ """
utils.logger.info(
"[BaiduTieBaCrawler.launch_browser] Begin create browser context ..."
)
@@ -553,6 +640,9 @@ user_agent: Optional[str],
headless: bool = True,
) -> BrowserContext:
+ """
+ Launch browser using CDP mode
+ """
try:
self.cdp_manager = CDPBrowserManager()
browser_context = await self.cdp_manager.launch_and_connect(
@@ -577,10 +667,15 @@ )
async def close(self):
+ """
+ Close browser context
+ Returns:
+
+ """
# If using CDP mode, need special handling
if self.cdp_manager:
await self.cdp_manager.cleanup()
self.cdp_manager = None
else:
await self.browser_context.close()
- utils.logger.info("[BaiduTieBaCrawler.close] Browser context closed ...")+ utils.logger.info("[BaiduTieBaCrawler.close] Browser context closed ...")
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/tieba/core.py |
Add missing documentation to my Python functions | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/database/mongodb_store_base.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
import asyncio
from typing import Dict, List, Optional
from motor.motor_asyncio import AsyncIOMotorClient, AsyncIOMotorDatabase, AsyncIOMotorCollection
from config import db_config
from tools import utils
class MongoDBConnection:
_instance = None
_client: Optional[AsyncIOMotorClient] = None
_db: Optional[AsyncIOMotorDatabase] = None
_lock = asyncio.Lock()
def __new__(cls):
if cls._instance is None:
cls._instance = super(MongoDBConnection, cls).__new__(cls)
return cls._instance
async def get_client(self) -> AsyncIOMotorClient:
if self._client is None:
async with self._lock:
if self._client is None:
await self._connect()
return self._client
async def get_db(self) -> AsyncIOMotorDatabase:
if self._db is None:
async with self._lock:
if self._db is None:
await self._connect()
return self._db
async def _connect(self):
try:
mongo_config = db_config.mongodb_config
host = mongo_config["host"]
port = mongo_config["port"]
user = mongo_config["user"]
password = mongo_config["password"]
db_name = mongo_config["db_name"]
# Build connection URL (with/without authentication)
if user and password:
connection_url = f"mongodb://{user}:{password}@{host}:{port}/"
else:
connection_url = f"mongodb://{host}:{port}/"
self._client = AsyncIOMotorClient(connection_url, serverSelectionTimeoutMS=5000)
await self._client.server_info() # Test connection
self._db = self._client[db_name]
utils.logger.info(f"[MongoDBConnection] Connected to {host}:{port}/{db_name}")
except Exception as e:
utils.logger.error(f"[MongoDBConnection] Connection failed: {e}")
raise
async def close(self):
if self._client is not None:
self._client.close()
self._client = None
self._db = None
utils.logger.info("[MongoDBConnection] Connection closed")
class MongoDBStoreBase:
def __init__(self, collection_prefix: str):
self.collection_prefix = collection_prefix
self._connection = MongoDBConnection()
async def get_collection(self, collection_suffix: str) -> AsyncIOMotorCollection:
db = await self._connection.get_db()
collection_name = f"{self.collection_prefix}_{collection_suffix}"
return db[collection_name]
async def save_or_update(self, collection_suffix: str, query: Dict, data: Dict) -> bool:
try:
collection = await self.get_collection(collection_suffix)
await collection.update_one(query, {"$set": data}, upsert=True)
return True
except Exception as e:
utils.logger.error(f"[MongoDBStoreBase] Save failed ({self.collection_prefix}_{collection_suffix}): {e}")
return False
async def find_one(self, collection_suffix: str, query: Dict) -> Optional[Dict]:
try:
collection = await self.get_collection(collection_suffix)
return await collection.find_one(query)
except Exception as e:
utils.logger.error(f"[MongoDBStoreBase] Find one failed ({self.collection_prefix}_{collection_suffix}): {e}")
return None
async def find_many(self, collection_suffix: str, query: Dict, limit: int = 0) -> List[Dict]:
try:
collection = await self.get_collection(collection_suffix)
cursor = collection.find(query)
if limit > 0:
cursor = cursor.limit(limit)
return await cursor.to_list(length=None)
except Exception as e:
utils.logger.error(f"[MongoDBStoreBase] Find many failed ({self.collection_prefix}_{collection_suffix}): {e}")
return []
async def create_index(self, collection_suffix: str, keys: List[tuple], unique: bool = False):
try:
collection = await self.get_collection(collection_suffix)
await collection.create_index(keys, unique=unique)
utils.logger.info(f"[MongoDBStoreBase] Index created on {self.collection_prefix}_{collection_suffix}")
except Exception as e:
utils.logger.error(f"[MongoDBStoreBase] Create index failed: {e}") | --- +++ @@ -16,6 +16,7 @@ # 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
+"""MongoDB storage base class: Provides connection management and common storage methods"""
import asyncio
from typing import Dict, List, Optional
from motor.motor_asyncio import AsyncIOMotorClient, AsyncIOMotorDatabase, AsyncIOMotorCollection
@@ -24,6 +25,7 @@
class MongoDBConnection:
+ """MongoDB connection management (singleton pattern)"""
_instance = None
_client: Optional[AsyncIOMotorClient] = None
_db: Optional[AsyncIOMotorDatabase] = None
@@ -35,6 +37,7 @@ return cls._instance
async def get_client(self) -> AsyncIOMotorClient:
+ """Get client"""
if self._client is None:
async with self._lock:
if self._client is None:
@@ -42,6 +45,7 @@ return self._client
async def get_db(self) -> AsyncIOMotorDatabase:
+ """Get database"""
if self._db is None:
async with self._lock:
if self._db is None:
@@ -49,6 +53,7 @@ return self._db
async def _connect(self):
+ """Establish connection"""
try:
mongo_config = db_config.mongodb_config
host = mongo_config["host"]
@@ -72,6 +77,7 @@ raise
async def close(self):
+ """Close connection"""
if self._client is not None:
self._client.close()
self._client = None
@@ -80,17 +86,24 @@
class MongoDBStoreBase:
+ """MongoDB storage base class: Provides common CRUD operations"""
def __init__(self, collection_prefix: str):
+ """Initialize storage base class
+ Args:
+ collection_prefix: Platform prefix (xhs/douyin/bilibili, etc.)
+ """
self.collection_prefix = collection_prefix
self._connection = MongoDBConnection()
async def get_collection(self, collection_suffix: str) -> AsyncIOMotorCollection:
+ """Get collection: {prefix}_{suffix}"""
db = await self._connection.get_db()
collection_name = f"{self.collection_prefix}_{collection_suffix}"
return db[collection_name]
async def save_or_update(self, collection_suffix: str, query: Dict, data: Dict) -> bool:
+ """Save or update data (upsert)"""
try:
collection = await self.get_collection(collection_suffix)
await collection.update_one(query, {"$set": data}, upsert=True)
@@ -100,6 +113,7 @@ return False
async def find_one(self, collection_suffix: str, query: Dict) -> Optional[Dict]:
+ """Query a single record"""
try:
collection = await self.get_collection(collection_suffix)
return await collection.find_one(query)
@@ -108,6 +122,7 @@ return None
async def find_many(self, collection_suffix: str, query: Dict, limit: int = 0) -> List[Dict]:
+ """Query multiple records (limit=0 means no limit)"""
try:
collection = await self.get_collection(collection_suffix)
cursor = collection.find(query)
@@ -119,9 +134,10 @@ return []
async def create_index(self, collection_suffix: str, keys: List[tuple], unique: bool = False):
+ """Create index: keys=[("field", 1)]"""
try:
collection = await self.get_collection(collection_suffix)
await collection.create_index(keys, unique=unique)
utils.logger.info(f"[MongoDBStoreBase] Index created on {self.collection_prefix}_{collection_suffix}")
except Exception as e:
- utils.logger.error(f"[MongoDBStoreBase] Create index failed: {e}")+ utils.logger.error(f"[MongoDBStoreBase] Create index failed: {e}")
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/database/mongodb_store_base.py |
Write docstrings for backend logic | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/bilibili/client.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : relakkes@gmail.com
# @Time : 2023/12/2 18:44
# @Desc : bilibili request client
import asyncio
import json
import random
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
from urllib.parse import urlencode
import httpx
from playwright.async_api import BrowserContext, Page
import config
from base.base_crawler import AbstractApiClient
from proxy.proxy_mixin import ProxyRefreshMixin
from tools import utils
if TYPE_CHECKING:
from proxy.proxy_ip_pool import ProxyIpPool
from .exception import DataFetchError
from .field import CommentOrderType, SearchOrderType
from .help import BilibiliSign
class BilibiliClient(AbstractApiClient, ProxyRefreshMixin):
def __init__(
self,
timeout=60, # For media crawling, Bilibili long videos need a longer timeout
proxy=None,
*,
headers: Dict[str, str],
playwright_page: Page,
cookie_dict: Dict[str, str],
proxy_ip_pool: Optional["ProxyIpPool"] = None,
):
self.proxy = proxy
self.timeout = timeout
self.headers = headers
self._host = "https://api.bilibili.com"
self.playwright_page = playwright_page
self.cookie_dict = cookie_dict
# Initialize proxy pool (from ProxyRefreshMixin)
self.init_proxy_pool(proxy_ip_pool)
async def request(self, method, url, **kwargs) -> Any:
# Check if proxy has expired before each request
await self._refresh_proxy_if_expired()
async with httpx.AsyncClient(proxy=self.proxy) as client:
response = await client.request(method, url, timeout=self.timeout, **kwargs)
try:
data: Dict = response.json()
except json.JSONDecodeError:
utils.logger.error(f"[BilibiliClient.request] Failed to decode JSON from response. status_code: {response.status_code}, response_text: {response.text}")
raise DataFetchError(f"Failed to decode JSON, content: {response.text}")
if data.get("code") != 0:
raise DataFetchError(data.get("message", "unkonw error"))
else:
return data.get("data", {})
async def pre_request_data(self, req_data: Dict) -> Dict:
if not req_data:
return {}
img_key, sub_key = await self.get_wbi_keys()
return BilibiliSign(img_key, sub_key).sign(req_data)
async def get_wbi_keys(self) -> Tuple[str, str]:
local_storage = await self.playwright_page.evaluate("() => window.localStorage")
wbi_img_urls = local_storage.get("wbi_img_urls", "")
if not wbi_img_urls:
img_url_from_storage = local_storage.get("wbi_img_url")
sub_url_from_storage = local_storage.get("wbi_sub_url")
if img_url_from_storage and sub_url_from_storage:
wbi_img_urls = f"{img_url_from_storage}-{sub_url_from_storage}"
if wbi_img_urls and "-" in wbi_img_urls:
img_url, sub_url = wbi_img_urls.split("-")
else:
resp = await self.request(method="GET", url=self._host + "/x/web-interface/nav")
img_url: str = resp['wbi_img']['img_url']
sub_url: str = resp['wbi_img']['sub_url']
img_key = img_url.rsplit('/', 1)[1].split('.')[0]
sub_key = sub_url.rsplit('/', 1)[1].split('.')[0]
return img_key, sub_key
async def get(self, uri: str, params=None, enable_params_sign: bool = True) -> Dict:
final_uri = uri
if enable_params_sign:
params = await self.pre_request_data(params)
if isinstance(params, dict):
final_uri = (f"{uri}?"
f"{urlencode(params)}")
return await self.request(method="GET", url=f"{self._host}{final_uri}", headers=self.headers)
async def post(self, uri: str, data: dict) -> Dict:
data = await self.pre_request_data(data)
json_str = json.dumps(data, separators=(',', ':'), ensure_ascii=False)
return await self.request(method="POST", url=f"{self._host}{uri}", data=json_str, headers=self.headers)
async def pong(self) -> bool:
utils.logger.info("[BilibiliClient.pong] Begin pong bilibili...")
ping_flag = False
try:
check_login_uri = "/x/web-interface/nav"
response = await self.get(check_login_uri)
if response.get("isLogin"):
utils.logger.info("[BilibiliClient.pong] Use cache login state get web interface successfull!")
ping_flag = True
except Exception as e:
utils.logger.error(f"[BilibiliClient.pong] Pong bilibili failed: {e}, and try to login again...")
ping_flag = False
return ping_flag
async def update_cookies(self, browser_context: BrowserContext):
cookie_str, cookie_dict = utils.convert_cookies(await browser_context.cookies())
self.headers["Cookie"] = cookie_str
self.cookie_dict = cookie_dict
async def search_video_by_keyword(
self,
keyword: str,
page: int = 1,
page_size: int = 20,
order: SearchOrderType = SearchOrderType.DEFAULT,
pubtime_begin_s: int = 0,
pubtime_end_s: int = 0,
) -> Dict:
uri = "/x/web-interface/wbi/search/type"
post_data = {
"search_type": "video",
"keyword": keyword,
"page": page,
"page_size": page_size,
"order": order.value,
"pubtime_begin_s": pubtime_begin_s,
"pubtime_end_s": pubtime_end_s
}
return await self.get(uri, post_data)
async def get_video_info(self, aid: Union[int, None] = None, bvid: Union[str, None] = None) -> Dict:
if not aid and not bvid:
raise ValueError("Please provide at least one parameter: aid or bvid")
uri = "/x/web-interface/view/detail"
params = dict()
if aid:
params.update({"aid": aid})
else:
params.update({"bvid": bvid})
return await self.get(uri, params, enable_params_sign=False)
async def get_video_play_url(self, aid: int, cid: int) -> Dict:
if not aid or not cid or aid <= 0 or cid <= 0:
raise ValueError("aid and cid must exist")
uri = "/x/player/wbi/playurl"
qn_value = getattr(config, "BILI_QN", 80)
params = {
"avid": aid,
"cid": cid,
"qn": qn_value,
"fourk": 1,
"fnval": 1,
"platform": "pc",
}
return await self.get(uri, params, enable_params_sign=True)
async def get_video_media(self, url: str) -> Union[bytes, None]:
# Follow CDN 302 redirects and treat any 2xx as success (some endpoints return 206)
async with httpx.AsyncClient(proxy=self.proxy, follow_redirects=True) as client:
try:
response = await client.request("GET", url, timeout=self.timeout, headers=self.headers)
response.raise_for_status()
if 200 <= response.status_code < 300:
return response.content
utils.logger.error(
f"[BilibiliClient.get_video_media] Unexpected status {response.status_code} for {url}"
)
return None
except httpx.HTTPError as exc: # some wrong when call httpx.request method, such as connection error, client error, server error or response status code is not 2xx
utils.logger.error(f"[BilibiliClient.get_video_media] {exc.__class__.__name__} for {exc.request.url} - {exc}") # Keep original exception type name for developer debugging
return None
async def get_video_comments(
self,
video_id: str,
order_mode: CommentOrderType = CommentOrderType.DEFAULT,
next: int = 0,
) -> Dict:
uri = "/x/v2/reply/wbi/main"
post_data = {"oid": video_id, "mode": order_mode.value, "type": 1, "ps": 20, "next": next}
return await self.get(uri, post_data)
async def get_video_all_comments(
self,
video_id: str,
crawl_interval: float = 1.0,
is_fetch_sub_comments=False,
callback: Optional[Callable] = None,
max_count: int = 10,
):
result = []
is_end = False
next_page = 0
max_retries = 3
while not is_end and len(result) < max_count:
comments_res = None
for attempt in range(max_retries):
try:
comments_res = await self.get_video_comments(video_id, CommentOrderType.DEFAULT, next_page)
break # Success
except DataFetchError as e:
if attempt < max_retries - 1:
delay = 5 * (2**attempt) + random.uniform(0, 1)
utils.logger.warning(f"[BilibiliClient.get_video_all_comments] Retrying video_id {video_id} in {delay:.2f}s... (Attempt {attempt + 1}/{max_retries})")
await asyncio.sleep(delay)
else:
utils.logger.error(f"[BilibiliClient.get_video_all_comments] Max retries reached for video_id: {video_id}. Skipping comments. Error: {e}")
is_end = True
break
if not comments_res:
break
cursor_info: Dict = comments_res.get("cursor")
if not cursor_info:
utils.logger.warning(f"[BilibiliClient.get_video_all_comments] Could not find 'cursor' in response for video_id: {video_id}. Skipping.")
break
comment_list: List[Dict] = comments_res.get("replies", [])
# Check if is_end and next exist
if "is_end" not in cursor_info or "next" not in cursor_info:
utils.logger.warning(f"[BilibiliClient.get_video_all_comments] 'is_end' or 'next' not in cursor for video_id: {video_id}. Assuming end of comments.")
is_end = True
else:
is_end = cursor_info.get("is_end")
next_page = cursor_info.get("next")
if not isinstance(is_end, bool):
utils.logger.warning(f"[BilibiliClient.get_video_all_comments] 'is_end' is not a boolean for video_id: {video_id}. Assuming end of comments.")
is_end = True
if is_fetch_sub_comments:
for comment in comment_list:
comment_id = comment['rpid']
if (comment.get("rcount", 0) > 0):
{await self.get_video_all_level_two_comments(video_id, comment_id, CommentOrderType.DEFAULT, 10, crawl_interval, callback)}
if len(result) + len(comment_list) > max_count:
comment_list = comment_list[:max_count - len(result)]
if callback: # If there is a callback function, execute it
await callback(video_id, comment_list)
await asyncio.sleep(crawl_interval)
if not is_fetch_sub_comments:
result.extend(comment_list)
continue
return result
async def get_video_all_level_two_comments(
self,
video_id: str,
level_one_comment_id: int,
order_mode: CommentOrderType,
ps: int = 10,
crawl_interval: float = 1.0,
callback: Optional[Callable] = None,
) -> Dict:
pn = 1
while True:
result = await self.get_video_level_two_comments(video_id, level_one_comment_id, pn, ps, order_mode)
comment_list: List[Dict] = result.get("replies", [])
if callback: # If there is a callback function, execute it
await callback(video_id, comment_list)
await asyncio.sleep(crawl_interval)
if (int(result["page"]["count"]) <= pn * ps):
break
pn += 1
async def get_video_level_two_comments(
self,
video_id: str,
level_one_comment_id: int,
pn: int,
ps: int,
order_mode: CommentOrderType,
) -> Dict:
uri = "/x/v2/reply/reply"
post_data = {
"oid": video_id,
"mode": order_mode.value,
"type": 1,
"ps": ps,
"pn": pn,
"root": level_one_comment_id,
}
result = await self.get(uri, post_data)
return result
async def get_creator_videos(self, creator_id: str, pn: int, ps: int = 30, order_mode: SearchOrderType = SearchOrderType.LAST_PUBLISH) -> Dict:
uri = "/x/space/wbi/arc/search"
post_data = {
"mid": creator_id,
"pn": pn,
"ps": ps,
"order": order_mode,
}
return await self.get(uri, post_data)
async def get_creator_info(self, creator_id: int) -> Dict:
uri = "/x/space/wbi/acc/info"
post_data = {
"mid": creator_id,
}
return await self.get(uri, post_data)
async def get_creator_fans(
self,
creator_id: int,
pn: int,
ps: int = 24,
) -> Dict:
uri = "/x/relation/fans"
post_data = {
'vmid': creator_id,
"pn": pn,
"ps": ps,
"gaia_source": "main_web",
}
return await self.get(uri, post_data)
async def get_creator_followings(
self,
creator_id: int,
pn: int,
ps: int = 24,
) -> Dict:
uri = "/x/relation/followings"
post_data = {
"vmid": creator_id,
"pn": pn,
"ps": ps,
"gaia_source": "main_web",
}
return await self.get(uri, post_data)
async def get_creator_dynamics(self, creator_id: int, offset: str = ""):
uri = "/x/polymer/web-dynamic/v1/feed/space"
post_data = {
"offset": offset,
"host_mid": creator_id,
"platform": "web",
}
return await self.get(uri, post_data)
async def get_creator_all_fans(
self,
creator_info: Dict,
crawl_interval: float = 1.0,
callback: Optional[Callable] = None,
max_count: int = 100,
) -> List:
creator_id = creator_info["id"]
result = []
pn = config.START_CONTACTS_PAGE
while len(result) < max_count:
fans_res: Dict = await self.get_creator_fans(creator_id, pn=pn)
fans_list: List[Dict] = fans_res.get("list", [])
pn += 1
if len(result) + len(fans_list) > max_count:
fans_list = fans_list[:max_count - len(result)]
if callback: # If there is a callback function, execute it
await callback(creator_info, fans_list)
await asyncio.sleep(crawl_interval)
if not fans_list:
break
result.extend(fans_list)
return result
async def get_creator_all_followings(
self,
creator_info: Dict,
crawl_interval: float = 1.0,
callback: Optional[Callable] = None,
max_count: int = 100,
) -> List:
creator_id = creator_info["id"]
result = []
pn = config.START_CONTACTS_PAGE
while len(result) < max_count:
followings_res: Dict = await self.get_creator_followings(creator_id, pn=pn)
followings_list: List[Dict] = followings_res.get("list", [])
pn += 1
if len(result) + len(followings_list) > max_count:
followings_list = followings_list[:max_count - len(result)]
if callback: # If there is a callback function, execute it
await callback(creator_info, followings_list)
await asyncio.sleep(crawl_interval)
if not followings_list:
break
result.extend(followings_list)
return result
async def get_creator_all_dynamics(
self,
creator_info: Dict,
crawl_interval: float = 1.0,
callback: Optional[Callable] = None,
max_count: int = 20,
) -> List:
creator_id = creator_info["id"]
result = []
offset = ""
has_more = True
while has_more and len(result) < max_count:
dynamics_res = await self.get_creator_dynamics(creator_id, offset)
dynamics_list: List[Dict] = dynamics_res["items"]
has_more = dynamics_res["has_more"]
offset = dynamics_res["offset"]
if len(result) + len(dynamics_list) > max_count:
dynamics_list = dynamics_list[:max_count - len(result)]
if callback:
await callback(creator_info, dynamics_list)
await asyncio.sleep(crawl_interval)
result.extend(dynamics_list)
return result | --- +++ @@ -81,12 +81,23 @@ return data.get("data", {})
async def pre_request_data(self, req_data: Dict) -> Dict:
+ """
+ Send request to sign request parameters
+ Need to get wbi_img_urls parameter from localStorage, value as follows:
+ https://i0.hdslb.com/bfs/wbi/7cd084941338484aae1ad9425b84077c.png-https://i0.hdslb.com/bfs/wbi/4932caff0ff746eab6f01bf08b70ac45.png
+ :param req_data:
+ :return:
+ """
if not req_data:
return {}
img_key, sub_key = await self.get_wbi_keys()
return BilibiliSign(img_key, sub_key).sign(req_data)
async def get_wbi_keys(self) -> Tuple[str, str]:
+ """
+ Get the latest img_key and sub_key
+ :return:
+ """
local_storage = await self.playwright_page.evaluate("() => window.localStorage")
wbi_img_urls = local_storage.get("wbi_img_urls", "")
if not wbi_img_urls:
@@ -119,6 +130,7 @@ return await self.request(method="POST", url=f"{self._host}{uri}", data=json_str, headers=self.headers)
async def pong(self) -> bool:
+ """get a note to check if login state is ok"""
utils.logger.info("[BilibiliClient.pong] Begin pong bilibili...")
ping_flag = False
try:
@@ -146,6 +158,16 @@ pubtime_begin_s: int = 0,
pubtime_end_s: int = 0,
) -> Dict:
+ """
+ KuaiShou web search api
+ :param keyword: Search keyword
+ :param page: Page number for pagination
+ :param page_size: Number of items per page
+ :param order: Sort order for search results, default is comprehensive sorting
+ :param pubtime_begin_s: Publish time start timestamp
+ :param pubtime_end_s: Publish time end timestamp
+ :return:
+ """
uri = "/x/web-interface/wbi/search/type"
post_data = {
"search_type": "video",
@@ -159,6 +181,12 @@ return await self.get(uri, post_data)
async def get_video_info(self, aid: Union[int, None] = None, bvid: Union[str, None] = None) -> Dict:
+ """
+ Bilibli web video detail api, choose one parameter between aid and bvid
+ :param aid: Video aid
+ :param bvid: Video bvid
+ :return:
+ """
if not aid and not bvid:
raise ValueError("Please provide at least one parameter: aid or bvid")
@@ -171,6 +199,12 @@ return await self.get(uri, params, enable_params_sign=False)
async def get_video_play_url(self, aid: int, cid: int) -> Dict:
+ """
+ Bilibli web video play url api
+ :param aid: Video aid
+ :param cid: cid
+ :return:
+ """
if not aid or not cid or aid <= 0 or cid <= 0:
raise ValueError("aid and cid must exist")
uri = "/x/player/wbi/playurl"
@@ -208,6 +242,12 @@ order_mode: CommentOrderType = CommentOrderType.DEFAULT,
next: int = 0,
) -> Dict:
+ """get video comments
+ :param video_id: Video ID
+ :param order_mode: Sort order
+ :param next: Comment page selection
+ :return:
+ """
uri = "/x/v2/reply/wbi/main"
post_data = {"oid": video_id, "mode": order_mode.value, "type": 1, "ps": 20, "next": next}
return await self.get(uri, post_data)
@@ -220,6 +260,16 @@ callback: Optional[Callable] = None,
max_count: int = 10,
):
+ """
+ get video all comments include sub comments
+ :param video_id:
+ :param crawl_interval:
+ :param is_fetch_sub_comments:
+ :param callback:
+ max_count: Maximum number of comments to crawl per note
+
+ :return:
+ """
result = []
is_end = False
next_page = 0
@@ -284,6 +334,16 @@ crawl_interval: float = 1.0,
callback: Optional[Callable] = None,
) -> Dict:
+ """
+ get video all level two comments for a level one comment
+ :param video_id: Video ID
+ :param level_one_comment_id: Level one comment ID
+ :param order_mode:
+ :param ps: Number of comments per page
+ :param crawl_interval:
+ :param callback:
+ :return:
+ """
pn = 1
while True:
@@ -305,6 +365,13 @@ ps: int,
order_mode: CommentOrderType,
) -> Dict:
+ """get video level two comments
+ :param video_id: Video ID
+ :param level_one_comment_id: Level one comment ID
+ :param order_mode: Sort order
+
+ :return:
+ """
uri = "/x/v2/reply/reply"
post_data = {
"oid": video_id,
@@ -318,6 +385,14 @@ return result
async def get_creator_videos(self, creator_id: str, pn: int, ps: int = 30, order_mode: SearchOrderType = SearchOrderType.LAST_PUBLISH) -> Dict:
+ """get all videos for a creator
+ :param creator_id: Creator ID
+ :param pn: Page number
+ :param ps: Number of videos per page
+ :param order_mode: Sort order
+
+ :return:
+ """
uri = "/x/space/wbi/arc/search"
post_data = {
"mid": creator_id,
@@ -328,6 +403,10 @@ return await self.get(uri, post_data)
async def get_creator_info(self, creator_id: int) -> Dict:
+ """
+ get creator info
+ :param creator_id: Creator ID
+ """
uri = "/x/space/wbi/acc/info"
post_data = {
"mid": creator_id,
@@ -340,6 +419,13 @@ pn: int,
ps: int = 24,
) -> Dict:
+ """
+ get creator fans
+ :param creator_id: Creator ID
+ :param pn: Start page number
+ :param ps: Number of items per page
+ :return:
+ """
uri = "/x/relation/fans"
post_data = {
'vmid': creator_id,
@@ -355,6 +441,13 @@ pn: int,
ps: int = 24,
) -> Dict:
+ """
+ get creator followings
+ :param creator_id: Creator ID
+ :param pn: Start page number
+ :param ps: Number of items per page
+ :return:
+ """
uri = "/x/relation/followings"
post_data = {
"vmid": creator_id,
@@ -365,6 +458,12 @@ return await self.get(uri, post_data)
async def get_creator_dynamics(self, creator_id: int, offset: str = ""):
+ """
+ get creator comments
+ :param creator_id: Creator ID
+ :param offset: Parameter required for sending request
+ :return:
+ """
uri = "/x/polymer/web-dynamic/v1/feed/space"
post_data = {
"offset": offset,
@@ -381,6 +480,15 @@ callback: Optional[Callable] = None,
max_count: int = 100,
) -> List:
+ """
+ get creator all fans
+ :param creator_info:
+ :param crawl_interval:
+ :param callback:
+ :param max_count: Maximum number of fans to crawl for a creator
+
+ :return: List of creator fans
+ """
creator_id = creator_info["id"]
result = []
pn = config.START_CONTACTS_PAGE
@@ -406,6 +514,15 @@ callback: Optional[Callable] = None,
max_count: int = 100,
) -> List:
+ """
+ get creator all followings
+ :param creator_info:
+ :param crawl_interval:
+ :param callback:
+ :param max_count: Maximum number of followings to crawl for a creator
+
+ :return: List of creator followings
+ """
creator_id = creator_info["id"]
result = []
pn = config.START_CONTACTS_PAGE
@@ -431,6 +548,15 @@ callback: Optional[Callable] = None,
max_count: int = 20,
) -> List:
+ """
+ get creator all followings
+ :param creator_info:
+ :param crawl_interval:
+ :param callback:
+ :param max_count: Maximum number of dynamics to crawl for a creator
+
+ :return: List of creator dynamics
+ """
creator_id = creator_info["id"]
result = []
offset = ""
@@ -446,4 +572,4 @@ await callback(creator_info, dynamics_list)
await asyncio.sleep(crawl_interval)
result.extend(dynamics_list)
- return result+ return result
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/bilibili/client.py |
Create docstrings for reusable components | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/zhihu/core.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
import asyncio
import os
# import random # Removed as we now use fixed config.CRAWLER_MAX_SLEEP_SEC intervals
from asyncio import Task
from typing import Dict, List, Optional, Tuple, cast
from playwright.async_api import (
BrowserContext,
BrowserType,
Page,
Playwright,
async_playwright,
)
import config
from constant import zhihu as constant
from base.base_crawler import AbstractCrawler
from model.m_zhihu import ZhihuContent, ZhihuCreator
from proxy.proxy_ip_pool import IpInfoModel, create_ip_pool
from store import zhihu as zhihu_store
from tools import utils
from tools.cdp_browser import CDPBrowserManager
from var import crawler_type_var, source_keyword_var
from .client import ZhiHuClient
from .exception import DataFetchError
from .help import ZhihuExtractor, judge_zhihu_url
from .login import ZhiHuLogin
class ZhihuCrawler(AbstractCrawler):
context_page: Page
zhihu_client: ZhiHuClient
browser_context: BrowserContext
cdp_manager: Optional[CDPBrowserManager]
def __init__(self) -> None:
self.index_url = "https://www.zhihu.com"
# self.user_agent = utils.get_user_agent()
self.user_agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36"
self._extractor = ZhihuExtractor()
self.cdp_manager = None
self.ip_proxy_pool = None # Proxy IP pool for automatic proxy refresh
async def start(self) -> None:
playwright_proxy_format, httpx_proxy_format = None, None
if config.ENABLE_IP_PROXY:
self.ip_proxy_pool = await create_ip_pool(
config.IP_PROXY_POOL_COUNT, enable_validate_ip=True
)
ip_proxy_info: IpInfoModel = await self.ip_proxy_pool.get_proxy()
playwright_proxy_format, httpx_proxy_format = utils.format_proxy_info(
ip_proxy_info
)
async with async_playwright() as playwright:
# Choose launch mode based on configuration
if config.ENABLE_CDP_MODE:
utils.logger.info("[ZhihuCrawler] Launching browser in CDP mode")
self.browser_context = await self.launch_browser_with_cdp(
playwright,
playwright_proxy_format,
self.user_agent,
headless=config.CDP_HEADLESS,
)
else:
utils.logger.info("[ZhihuCrawler] Launching browser in standard mode")
# Launch a browser context.
chromium = playwright.chromium
self.browser_context = await self.launch_browser(
chromium, None, self.user_agent, headless=config.HEADLESS
)
# stealth.min.js is a js script to prevent the website from detecting the crawler.
await self.browser_context.add_init_script(path="libs/stealth.min.js")
self.context_page = await self.browser_context.new_page()
await self.context_page.goto(self.index_url, wait_until="domcontentloaded")
# Create a client to interact with the zhihu website.
self.zhihu_client = await self.create_zhihu_client(httpx_proxy_format)
if not await self.zhihu_client.pong():
login_obj = ZhiHuLogin(
login_type=config.LOGIN_TYPE,
login_phone="", # input your phone number
browser_context=self.browser_context,
context_page=self.context_page,
cookie_str=config.COOKIES,
)
await login_obj.begin()
await self.zhihu_client.update_cookies(
browser_context=self.browser_context
)
# Zhihu's search API requires opening the search page first to access cookies, homepage alone won't work
utils.logger.info(
"[ZhihuCrawler.start] Zhihu navigating to search page to get search page cookies, this process takes about 5 seconds"
)
await self.context_page.goto(
f"{self.index_url}/search?q=python&search_source=Guess&utm_content=search_hot&type=content"
)
await asyncio.sleep(5)
await self.zhihu_client.update_cookies(browser_context=self.browser_context)
crawler_type_var.set(config.CRAWLER_TYPE)
if config.CRAWLER_TYPE == "search":
# Search for notes and retrieve their comment information.
await self.search()
elif config.CRAWLER_TYPE == "detail":
# Get the information and comments of the specified post
await self.get_specified_notes()
elif config.CRAWLER_TYPE == "creator":
# Get creator's information and their notes and comments
await self.get_creators_and_notes()
else:
pass
utils.logger.info("[ZhihuCrawler.start] Zhihu Crawler finished ...")
async def search(self) -> None:
utils.logger.info("[ZhihuCrawler.search] Begin search zhihu keywords")
zhihu_limit_count = 20 # zhihu limit page fixed value
if config.CRAWLER_MAX_NOTES_COUNT < zhihu_limit_count:
config.CRAWLER_MAX_NOTES_COUNT = zhihu_limit_count
start_page = config.START_PAGE
for keyword in config.KEYWORDS.split(","):
source_keyword_var.set(keyword)
utils.logger.info(
f"[ZhihuCrawler.search] Current search keyword: {keyword}"
)
page = 1
while (
page - start_page + 1
) * zhihu_limit_count <= config.CRAWLER_MAX_NOTES_COUNT:
if page < start_page:
utils.logger.info(f"[ZhihuCrawler.search] Skip page {page}")
page += 1
continue
try:
utils.logger.info(
f"[ZhihuCrawler.search] search zhihu keyword: {keyword}, page: {page}"
)
content_list: List[ZhihuContent] = (
await self.zhihu_client.get_note_by_keyword(
keyword=keyword,
page=page,
)
)
utils.logger.info(
f"[ZhihuCrawler.search] Search contents :{content_list}"
)
if not content_list:
utils.logger.info("No more content!")
break
# Sleep after page navigation
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[ZhihuCrawler.search] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after page {page-1}")
page += 1
for content in content_list:
await zhihu_store.update_zhihu_content(content)
await self.batch_get_content_comments(content_list)
except DataFetchError:
utils.logger.error("[ZhihuCrawler.search] Search content error")
return
async def batch_get_content_comments(self, content_list: List[ZhihuContent]):
if not config.ENABLE_GET_COMMENTS:
utils.logger.info(
f"[ZhihuCrawler.batch_get_content_comments] Crawling comment mode is not enabled"
)
return
semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
task_list: List[Task] = []
for content_item in content_list:
task = asyncio.create_task(
self.get_comments(content_item, semaphore), name=content_item.content_id
)
task_list.append(task)
await asyncio.gather(*task_list)
async def get_comments(
self, content_item: ZhihuContent, semaphore: asyncio.Semaphore
):
async with semaphore:
utils.logger.info(
f"[ZhihuCrawler.get_comments] Begin get note id comments {content_item.content_id}"
)
# Sleep before fetching comments
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[ZhihuCrawler.get_comments] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds before fetching comments for content {content_item.content_id}")
await self.zhihu_client.get_note_all_comments(
content=content_item,
crawl_interval=config.CRAWLER_MAX_SLEEP_SEC,
callback=zhihu_store.batch_update_zhihu_note_comments,
)
async def get_creators_and_notes(self) -> None:
utils.logger.info(
"[ZhihuCrawler.get_creators_and_notes] Begin get xiaohongshu creators"
)
for user_link in config.ZHIHU_CREATOR_URL_LIST:
utils.logger.info(
f"[ZhihuCrawler.get_creators_and_notes] Begin get creator {user_link}"
)
user_url_token = user_link.split("/")[-1]
# get creator detail info from web html content
createor_info: ZhihuCreator = await self.zhihu_client.get_creator_info(
url_token=user_url_token
)
if not createor_info:
utils.logger.info(
f"[ZhihuCrawler.get_creators_and_notes] Creator {user_url_token} not found"
)
continue
utils.logger.info(
f"[ZhihuCrawler.get_creators_and_notes] Creator info: {createor_info}"
)
await zhihu_store.save_creator(creator=createor_info)
# By default, only answer information is extracted, uncomment below if articles and videos are needed
# Get all anwser information of the creator
all_content_list = await self.zhihu_client.get_all_anwser_by_creator(
creator=createor_info,
crawl_interval=config.CRAWLER_MAX_SLEEP_SEC,
callback=zhihu_store.batch_update_zhihu_contents,
)
# Get all articles of the creator's contents
# all_content_list = await self.zhihu_client.get_all_articles_by_creator(
# creator=createor_info,
# crawl_interval=config.CRAWLER_MAX_SLEEP_SEC,
# callback=zhihu_store.batch_update_zhihu_contents
# )
# Get all videos of the creator's contents
# all_content_list = await self.zhihu_client.get_all_videos_by_creator(
# creator=createor_info,
# crawl_interval=config.CRAWLER_MAX_SLEEP_SEC,
# callback=zhihu_store.batch_update_zhihu_contents
# )
# Get all comments of the creator's contents
await self.batch_get_content_comments(all_content_list)
async def get_note_detail(
self, full_note_url: str, semaphore: asyncio.Semaphore
) -> Optional[ZhihuContent]:
async with semaphore:
utils.logger.info(
f"[ZhihuCrawler.get_specified_notes] Begin get specified note {full_note_url}"
)
# Judge note type
note_type: str = judge_zhihu_url(full_note_url)
if note_type == constant.ANSWER_NAME:
question_id = full_note_url.split("/")[-3]
answer_id = full_note_url.split("/")[-1]
utils.logger.info(
f"[ZhihuCrawler.get_specified_notes] Get answer info, question_id: {question_id}, answer_id: {answer_id}"
)
result = await self.zhihu_client.get_answer_info(question_id, answer_id)
# Sleep after fetching answer details
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[ZhihuCrawler.get_note_detail] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after fetching answer details {answer_id}")
return result
elif note_type == constant.ARTICLE_NAME:
article_id = full_note_url.split("/")[-1]
utils.logger.info(
f"[ZhihuCrawler.get_specified_notes] Get article info, article_id: {article_id}"
)
result = await self.zhihu_client.get_article_info(article_id)
# Sleep after fetching article details
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[ZhihuCrawler.get_note_detail] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after fetching article details {article_id}")
return result
elif note_type == constant.VIDEO_NAME:
video_id = full_note_url.split("/")[-1]
utils.logger.info(
f"[ZhihuCrawler.get_specified_notes] Get video info, video_id: {video_id}"
)
result = await self.zhihu_client.get_video_info(video_id)
# Sleep after fetching video details
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[ZhihuCrawler.get_note_detail] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after fetching video details {video_id}")
return result
async def get_specified_notes(self):
get_note_detail_task_list = []
for full_note_url in config.ZHIHU_SPECIFIED_ID_LIST:
# remove query params
full_note_url = full_note_url.split("?")[0]
crawler_task = self.get_note_detail(
full_note_url=full_note_url,
semaphore=asyncio.Semaphore(config.MAX_CONCURRENCY_NUM),
)
get_note_detail_task_list.append(crawler_task)
need_get_comment_notes: List[ZhihuContent] = []
note_details = await asyncio.gather(*get_note_detail_task_list)
for index, note_detail in enumerate(note_details):
if not note_detail:
utils.logger.info(
f"[ZhihuCrawler.get_specified_notes] Note {config.ZHIHU_SPECIFIED_ID_LIST[index]} not found"
)
continue
note_detail = cast(ZhihuContent, note_detail) # only for type check
need_get_comment_notes.append(note_detail)
await zhihu_store.update_zhihu_content(note_detail)
await self.batch_get_content_comments(need_get_comment_notes)
async def create_zhihu_client(self, httpx_proxy: Optional[str]) -> ZhiHuClient:
utils.logger.info(
"[ZhihuCrawler.create_zhihu_client] Begin create zhihu API client ..."
)
cookie_str, cookie_dict = utils.convert_cookies(
await self.browser_context.cookies()
)
zhihu_client_obj = ZhiHuClient(
proxy=httpx_proxy,
headers={
"accept": "*/*",
"accept-language": "zh-CN,zh;q=0.9",
"cookie": cookie_str,
"priority": "u=1, i",
"referer": "https://www.zhihu.com/search?q=python&time_interval=a_year&type=content",
"user-agent": self.user_agent,
"x-api-version": "3.0.91",
"x-app-za": "OS=Web",
"x-requested-with": "fetch",
"x-zse-93": "101_3_3.0",
},
playwright_page=self.context_page,
cookie_dict=cookie_dict,
proxy_ip_pool=self.ip_proxy_pool, # Pass proxy pool for automatic refresh
)
return zhihu_client_obj
async def launch_browser(
self,
chromium: BrowserType,
playwright_proxy: Optional[Dict],
user_agent: Optional[str],
headless: bool = True,
) -> BrowserContext:
utils.logger.info(
"[ZhihuCrawler.launch_browser] Begin create browser context ..."
)
if config.SAVE_LOGIN_STATE:
# feat issue #14
# we will save login state to avoid login every time
user_data_dir = os.path.join(
os.getcwd(), "browser_data", config.USER_DATA_DIR % config.PLATFORM
) # type: ignore
browser_context = await chromium.launch_persistent_context(
user_data_dir=user_data_dir,
accept_downloads=True,
headless=headless,
proxy=playwright_proxy, # type: ignore
viewport={"width": 1920, "height": 1080},
user_agent=user_agent,
channel="chrome", # Use system Chrome stable version
)
return browser_context
else:
browser = await chromium.launch(headless=headless, proxy=playwright_proxy, channel="chrome") # type: ignore
browser_context = await browser.new_context(
viewport={"width": 1920, "height": 1080}, user_agent=user_agent
)
return browser_context
async def launch_browser_with_cdp(
self,
playwright: Playwright,
playwright_proxy: Optional[Dict],
user_agent: Optional[str],
headless: bool = True,
) -> BrowserContext:
try:
self.cdp_manager = CDPBrowserManager()
browser_context = await self.cdp_manager.launch_and_connect(
playwright=playwright,
playwright_proxy=playwright_proxy,
user_agent=user_agent,
headless=headless,
)
# Display browser information
browser_info = await self.cdp_manager.get_browser_info()
utils.logger.info(f"[ZhihuCrawler] CDP browser info: {browser_info}")
return browser_context
except Exception as e:
utils.logger.error(f"[ZhihuCrawler] CDP mode launch failed, falling back to standard mode: {e}")
# Fall back to standard mode
chromium = playwright.chromium
return await self.launch_browser(
chromium, playwright_proxy, user_agent, headless
)
async def close(self):
# Special handling if using CDP mode
if self.cdp_manager:
await self.cdp_manager.cleanup()
self.cdp_manager = None
else:
await self.browser_context.close()
utils.logger.info("[ZhihuCrawler.close] Browser context closed ...") | --- +++ @@ -64,6 +64,11 @@ self.ip_proxy_pool = None # Proxy IP pool for automatic proxy refresh
async def start(self) -> None:
+ """
+ Start the crawler
+ Returns:
+
+ """
playwright_proxy_format, httpx_proxy_format = None, None
if config.ENABLE_IP_PROXY:
self.ip_proxy_pool = await create_ip_pool(
@@ -138,6 +143,7 @@ utils.logger.info("[ZhihuCrawler.start] Zhihu Crawler finished ...")
async def search(self) -> None:
+ """Search for notes and retrieve their comment information."""
utils.logger.info("[ZhihuCrawler.search] Begin search zhihu keywords")
zhihu_limit_count = 20 # zhihu limit page fixed value
if config.CRAWLER_MAX_NOTES_COUNT < zhihu_limit_count:
@@ -188,6 +194,14 @@ return
async def batch_get_content_comments(self, content_list: List[ZhihuContent]):
+ """
+ Batch get content comments
+ Args:
+ content_list:
+
+ Returns:
+
+ """
if not config.ENABLE_GET_COMMENTS:
utils.logger.info(
f"[ZhihuCrawler.batch_get_content_comments] Crawling comment mode is not enabled"
@@ -206,6 +220,15 @@ async def get_comments(
self, content_item: ZhihuContent, semaphore: asyncio.Semaphore
):
+ """
+ Get note comments with keyword filtering and quantity limitation
+ Args:
+ content_item:
+ semaphore:
+
+ Returns:
+
+ """
async with semaphore:
utils.logger.info(
f"[ZhihuCrawler.get_comments] Begin get note id comments {content_item.content_id}"
@@ -222,6 +245,11 @@ )
async def get_creators_and_notes(self) -> None:
+ """
+ Get creator's information and their notes and comments
+ Returns:
+
+ """
utils.logger.info(
"[ZhihuCrawler.get_creators_and_notes] Begin get xiaohongshu creators"
)
@@ -274,6 +302,15 @@ async def get_note_detail(
self, full_note_url: str, semaphore: asyncio.Semaphore
) -> Optional[ZhihuContent]:
+ """
+ Get note detail
+ Args:
+ full_note_url: str
+ semaphore:
+
+ Returns:
+
+ """
async with semaphore:
utils.logger.info(
f"[ZhihuCrawler.get_specified_notes] Begin get specified note {full_note_url}"
@@ -321,6 +358,11 @@ return result
async def get_specified_notes(self):
+ """
+ Get the information and comments of the specified post
+ Returns:
+
+ """
get_note_detail_task_list = []
for full_note_url in config.ZHIHU_SPECIFIED_ID_LIST:
# remove query params
@@ -347,6 +389,7 @@ await self.batch_get_content_comments(need_get_comment_notes)
async def create_zhihu_client(self, httpx_proxy: Optional[str]) -> ZhiHuClient:
+ """Create zhihu client"""
utils.logger.info(
"[ZhihuCrawler.create_zhihu_client] Begin create zhihu API client ..."
)
@@ -380,6 +423,7 @@ user_agent: Optional[str],
headless: bool = True,
) -> BrowserContext:
+ """Launch browser and create browser context"""
utils.logger.info(
"[ZhihuCrawler.launch_browser] Begin create browser context ..."
)
@@ -413,6 +457,9 @@ user_agent: Optional[str],
headless: bool = True,
) -> BrowserContext:
+ """
+ Launch browser using CDP mode
+ """
try:
self.cdp_manager = CDPBrowserManager()
browser_context = await self.cdp_manager.launch_and_connect(
@@ -437,10 +484,11 @@ )
async def close(self):
+ """Close browser context"""
# Special handling if using CDP mode
if self.cdp_manager:
await self.cdp_manager.cleanup()
self.cdp_manager = None
else:
await self.browser_context.close()
- utils.logger.info("[ZhihuCrawler.close] Browser context closed ...")+ utils.logger.info("[ZhihuCrawler.close] Browser context closed ...")
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/zhihu/core.py |
Add concise docstrings to each method | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/kuaishou/client.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
import asyncio
import json
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional
from urllib.parse import urlencode
import httpx
from playwright.async_api import BrowserContext, Page
import config
from base.base_crawler import AbstractApiClient
from proxy.proxy_mixin import ProxyRefreshMixin
from tools import utils
if TYPE_CHECKING:
from proxy.proxy_ip_pool import ProxyIpPool
from .exception import DataFetchError
from .graphql import KuaiShouGraphQL
class KuaiShouClient(AbstractApiClient, ProxyRefreshMixin):
def __init__(
self,
timeout=10,
proxy=None,
*,
headers: Dict[str, str],
playwright_page: Page,
cookie_dict: Dict[str, str],
proxy_ip_pool: Optional["ProxyIpPool"] = None,
):
self.proxy = proxy
self.timeout = timeout
self.headers = headers
self._host = "https://www.kuaishou.com/graphql"
self._rest_host = "https://www.kuaishou.com"
self.playwright_page = playwright_page
self.cookie_dict = cookie_dict
self.graphql = KuaiShouGraphQL()
# Initialize proxy pool (from ProxyRefreshMixin)
self.init_proxy_pool(proxy_ip_pool)
async def request(self, method, url, **kwargs) -> Any:
# Check if proxy is expired before each request
await self._refresh_proxy_if_expired()
async with httpx.AsyncClient(proxy=self.proxy) as client:
response = await client.request(method, url, timeout=self.timeout, **kwargs)
data: Dict = response.json()
if data.get("errors"):
raise DataFetchError(data.get("errors", "unkonw error"))
else:
return data.get("data", {})
async def get(self, uri: str, params=None) -> Dict:
final_uri = uri
if isinstance(params, dict):
final_uri = f"{uri}?" f"{urlencode(params)}"
return await self.request(
method="GET", url=f"{self._host}{final_uri}", headers=self.headers
)
async def post(self, uri: str, data: dict) -> Dict:
json_str = json.dumps(data, separators=(",", ":"), ensure_ascii=False)
return await self.request(
method="POST", url=f"{self._host}{uri}", data=json_str, headers=self.headers
)
async def request_rest_v2(self, uri: str, data: dict) -> Dict:
await self._refresh_proxy_if_expired()
json_str = json.dumps(data, separators=(",", ":"), ensure_ascii=False)
async with httpx.AsyncClient(proxy=self.proxy) as client:
response = await client.request(
method="POST",
url=f"{self._rest_host}{uri}",
data=json_str,
timeout=self.timeout,
headers=self.headers,
)
result: Dict = response.json()
if result.get("result") != 1:
raise DataFetchError(f"REST API V2 error: {result}")
return result
async def pong(self) -> bool:
utils.logger.info("[KuaiShouClient.pong] Begin pong kuaishou...")
ping_flag = False
try:
post_data = {
"operationName": "visionProfileUserList",
"variables": {
"ftype": 1,
},
"query": self.graphql.get("vision_profile_user_list"),
}
res = await self.post("", post_data)
if res.get("visionProfileUserList", {}).get("result") == 1:
ping_flag = True
except Exception as e:
utils.logger.error(
f"[KuaiShouClient.pong] Pong kuaishou failed: {e}, and try to login again..."
)
ping_flag = False
return ping_flag
async def update_cookies(self, browser_context: BrowserContext):
cookie_str, cookie_dict = utils.convert_cookies(await browser_context.cookies())
self.headers["Cookie"] = cookie_str
self.cookie_dict = cookie_dict
async def search_info_by_keyword(
self, keyword: str, pcursor: str, search_session_id: str = ""
):
post_data = {
"operationName": "visionSearchPhoto",
"variables": {
"keyword": keyword,
"pcursor": pcursor,
"page": "search",
"searchSessionId": search_session_id,
},
"query": self.graphql.get("search_query"),
}
return await self.post("", post_data)
async def get_video_info(self, photo_id: str) -> Dict:
post_data = {
"operationName": "visionVideoDetail",
"variables": {"photoId": photo_id, "page": "search"},
"query": self.graphql.get("video_detail"),
}
return await self.post("", post_data)
async def get_video_comments(self, photo_id: str, pcursor: str = "") -> Dict:
post_data = {
"photoId": photo_id,
"pcursor": pcursor,
}
return await self.request_rest_v2("/rest/v/photo/comment/list", post_data)
async def get_video_sub_comments(
self, photo_id: str, root_comment_id: int, pcursor: str = ""
) -> Dict:
post_data = {
"photoId": photo_id,
"pcursor": pcursor,
"rootCommentId": root_comment_id, # Must be int type for V2 API
}
return await self.request_rest_v2("/rest/v/photo/comment/sublist", post_data)
async def get_creator_profile(self, userId: str) -> Dict:
post_data = {
"operationName": "visionProfile",
"variables": {"userId": userId},
"query": self.graphql.get("vision_profile"),
}
return await self.post("", post_data)
async def get_video_by_creater(self, userId: str, pcursor: str = "") -> Dict:
post_data = {
"operationName": "visionProfilePhotoList",
"variables": {"page": "profile", "pcursor": pcursor, "userId": userId},
"query": self.graphql.get("vision_profile_photo_list"),
}
return await self.post("", post_data)
async def get_video_all_comments(
self,
photo_id: str,
crawl_interval: float = 1.0,
callback: Optional[Callable] = None,
max_count: int = 10,
):
result = []
pcursor = ""
while pcursor != "no_more" and len(result) < max_count:
comments_res = await self.get_video_comments(photo_id, pcursor)
# V2 API returns data at top level, not nested in visionCommentList
pcursor = comments_res.get("pcursorV2", "no_more")
comments = comments_res.get("rootCommentsV2", [])
if len(result) + len(comments) > max_count:
comments = comments[: max_count - len(result)]
if callback: # If there is a callback function, execute the callback function
await callback(photo_id, comments)
result.extend(comments)
await asyncio.sleep(crawl_interval)
sub_comments = await self.get_comments_all_sub_comments(
comments, photo_id, crawl_interval, callback
)
result.extend(sub_comments)
return result
async def get_comments_all_sub_comments(
self,
comments: List[Dict],
photo_id,
crawl_interval: float = 1.0,
callback: Optional[Callable] = None,
) -> List[Dict]:
if not config.ENABLE_GET_SUB_COMMENTS:
utils.logger.info(
f"[KuaiShouClient.get_comments_all_sub_comments] Crawling sub_comment mode is not enabled"
)
return []
result = []
for comment in comments:
# V2 API uses hasSubComments (boolean) instead of subCommentsPcursor (string)
has_sub_comments = comment.get("hasSubComments", False)
if not has_sub_comments:
continue
# V2 API uses comment_id (int) instead of commentId (string)
root_comment_id = comment.get("comment_id")
if not root_comment_id:
continue
sub_comment_pcursor = ""
while sub_comment_pcursor != "no_more":
comments_res = await self.get_video_sub_comments(
photo_id, root_comment_id, sub_comment_pcursor
)
# V2 API returns data at top level
sub_comment_pcursor = comments_res.get("pcursorV2", "no_more")
sub_comments = comments_res.get("subCommentsV2", [])
if callback and sub_comments:
await callback(photo_id, sub_comments)
await asyncio.sleep(crawl_interval)
result.extend(sub_comments)
return result
async def get_creator_info(self, user_id: str) -> Dict:
visionProfile = await self.get_creator_profile(user_id)
return visionProfile.get("userProfile")
async def get_all_videos_by_creator(
self,
user_id: str,
crawl_interval: float = 1.0,
callback: Optional[Callable] = None,
) -> List[Dict]:
result = []
pcursor = ""
while pcursor != "no_more":
videos_res = await self.get_video_by_creater(user_id, pcursor)
if not videos_res:
utils.logger.error(
f"[KuaiShouClient.get_all_videos_by_creator] The current creator may have been banned by ks, so they cannot access the data."
)
break
vision_profile_photo_list = videos_res.get("visionProfilePhotoList", {})
pcursor = vision_profile_photo_list.get("pcursor", "")
videos = vision_profile_photo_list.get("feeds", [])
utils.logger.info(
f"[KuaiShouClient.get_all_videos_by_creator] got user_id:{user_id} videos len : {len(videos)}"
)
if callback:
await callback(videos)
await asyncio.sleep(crawl_interval)
result.extend(videos)
return result | --- +++ @@ -88,6 +88,12 @@ )
async def request_rest_v2(self, uri: str, data: dict) -> Dict:
+ """
+ Make REST API V2 request (for comment endpoints)
+ :param uri: API endpoint path
+ :param data: request body
+ :return: response data
+ """
await self._refresh_proxy_if_expired()
json_str = json.dumps(data, separators=(",", ":"), ensure_ascii=False)
@@ -105,6 +111,7 @@ return result
async def pong(self) -> bool:
+ """get a note to check if login state is ok"""
utils.logger.info("[KuaiShouClient.pong] Begin pong kuaishou...")
ping_flag = False
try:
@@ -133,6 +140,13 @@ async def search_info_by_keyword(
self, keyword: str, pcursor: str, search_session_id: str = ""
):
+ """
+ KuaiShou web search api
+ :param keyword: search keyword
+ :param pcursor: limite page curson
+ :param search_session_id: search session id
+ :return:
+ """
post_data = {
"operationName": "visionSearchPhoto",
"variables": {
@@ -146,6 +160,11 @@ return await self.post("", post_data)
async def get_video_info(self, photo_id: str) -> Dict:
+ """
+ Kuaishou web video detail api
+ :param photo_id:
+ :return:
+ """
post_data = {
"operationName": "visionVideoDetail",
"variables": {"photoId": photo_id, "page": "search"},
@@ -154,6 +173,11 @@ return await self.post("", post_data)
async def get_video_comments(self, photo_id: str, pcursor: str = "") -> Dict:
+ """Get video first-level comments using REST API V2
+ :param photo_id: video id you want to fetch
+ :param pcursor: pagination cursor, defaults to ""
+ :return: dict with rootCommentsV2, pcursorV2, commentCountV2
+ """
post_data = {
"photoId": photo_id,
"pcursor": pcursor,
@@ -163,6 +187,12 @@ async def get_video_sub_comments(
self, photo_id: str, root_comment_id: int, pcursor: str = ""
) -> Dict:
+ """Get video second-level comments using REST API V2
+ :param photo_id: video id you want to fetch
+ :param root_comment_id: parent comment id (must be int type)
+ :param pcursor: pagination cursor, defaults to ""
+ :return: dict with subCommentsV2, pcursorV2
+ """
post_data = {
"photoId": photo_id,
"pcursor": pcursor,
@@ -193,6 +223,14 @@ callback: Optional[Callable] = None,
max_count: int = 10,
):
+ """
+ Get video all comments including sub comments (V2 REST API)
+ :param photo_id: video id
+ :param crawl_interval: delay between requests (seconds)
+ :param callback: callback function for processing comments
+ :param max_count: max number of comments to fetch
+ :return: list of all comments
+ """
result = []
pcursor = ""
@@ -221,6 +259,16 @@ crawl_interval: float = 1.0,
callback: Optional[Callable] = None,
) -> List[Dict]:
+ """
+ Get all second-level comments under specified first-level comments (V2 REST API)
+ Args:
+ comments: Comment list
+ photo_id: Video ID
+ crawl_interval: Delay unit for crawling comments once (seconds)
+ callback: Callback after one comment crawl ends
+ Returns:
+ List of sub comments
+ """
if not config.ENABLE_GET_SUB_COMMENTS:
utils.logger.info(
f"[KuaiShouClient.get_comments_all_sub_comments] Crawling sub_comment mode is not enabled"
@@ -256,6 +304,10 @@ return result
async def get_creator_info(self, user_id: str) -> Dict:
+ """
+ eg: https://www.kuaishou.com/profile/3x4jtnbfter525a
+ Kuaishou user homepage
+ """
visionProfile = await self.get_creator_profile(user_id)
return visionProfile.get("userProfile")
@@ -266,6 +318,15 @@ crawl_interval: float = 1.0,
callback: Optional[Callable] = None,
) -> List[Dict]:
+ """
+ Get all posts published by the specified user, this method will continue to find all post information under a user
+ Args:
+ user_id: User ID
+ crawl_interval: Delay unit for crawling once (seconds)
+ callback: Update callback function after one page crawl ends
+ Returns:
+
+ """
result = []
pcursor = ""
@@ -289,4 +350,4 @@ await callback(videos)
await asyncio.sleep(crawl_interval)
result.extend(videos)
- return result+ return result
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/kuaishou/client.py |
Add professional docstrings to my codebase | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/bilibili/help.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : relakkes@gmail.com
# @Time : 2023/12/2 23:26
# @Desc : bilibili request parameter signing
# Reverse engineering implementation reference: https://socialsisteryi.github.io/bilibili-API-collect/docs/misc/sign/wbi.html#wbi%E7%AD%BE%E5%90%8D%E7%AE%97%E6%B3%95
import re
import urllib.parse
from hashlib import md5
from typing import Dict
from model.m_bilibili import VideoUrlInfo, CreatorUrlInfo
from tools import utils
class BilibiliSign:
def __init__(self, img_key: str, sub_key: str):
self.img_key = img_key
self.sub_key = sub_key
self.map_table = [
46, 47, 18, 2, 53, 8, 23, 32, 15, 50, 10, 31, 58, 3, 45, 35, 27, 43, 5, 49,
33, 9, 42, 19, 29, 28, 14, 39, 12, 38, 41, 13, 37, 48, 7, 16, 24, 55, 40,
61, 26, 17, 0, 1, 60, 51, 30, 4, 22, 25, 54, 21, 56, 59, 6, 63, 57, 62, 11,
36, 20, 34, 44, 52
]
def get_salt(self) -> str:
salt = ""
mixin_key = self.img_key + self.sub_key
for mt in self.map_table:
salt += mixin_key[mt]
return salt[:32]
def sign(self, req_data: Dict) -> Dict:
current_ts = utils.get_unix_timestamp()
req_data.update({"wts": current_ts})
req_data = dict(sorted(req_data.items()))
req_data = {
# Filter "!'()*" characters from values
k: ''.join(filter(lambda ch: ch not in "!'()*", str(v)))
for k, v
in req_data.items()
}
query = urllib.parse.urlencode(req_data)
salt = self.get_salt()
wbi_sign = md5((query + salt).encode()).hexdigest() # Calculate w_rid
req_data['w_rid'] = wbi_sign
return req_data
def parse_video_info_from_url(url: str) -> VideoUrlInfo:
# If the input is already a BV number, return directly
if url.startswith("BV"):
return VideoUrlInfo(video_id=url)
# Use regex to extract BV number
# Match /video/BV... or /video/av... format
bv_pattern = r'/video/(BV[a-zA-Z0-9]+)'
match = re.search(bv_pattern, url)
if match:
video_id = match.group(1)
return VideoUrlInfo(video_id=video_id)
raise ValueError(f"Unable to parse video ID from URL: {url}")
def parse_creator_info_from_url(url: str) -> CreatorUrlInfo:
# If the input is already a numeric ID, return directly
if url.isdigit():
return CreatorUrlInfo(creator_id=url)
# Use regex to extract UID
# Match /space.bilibili.com/number format
uid_pattern = r'space\.bilibili\.com/(\d+)'
match = re.search(uid_pattern, url)
if match:
creator_id = match.group(1)
return CreatorUrlInfo(creator_id=creator_id)
raise ValueError(f"Unable to parse creator ID from URL: {url}")
if __name__ == '__main__':
# Test video URL parsing
video_url1 = "https://www.bilibili.com/video/BV1dwuKzmE26/?spm_id_from=333.1387.homepage.video_card.click"
video_url2 = "BV1d54y1g7db"
print("Video URL parsing test:")
print(f"URL1: {video_url1} -> {parse_video_info_from_url(video_url1)}")
print(f"URL2: {video_url2} -> {parse_video_info_from_url(video_url2)}")
# Test creator URL parsing
creator_url1 = "https://space.bilibili.com/434377496?spm_id_from=333.1007.0.0"
creator_url2 = "20813884"
print("\nCreator URL parsing test:")
print(f"URL1: {creator_url1} -> {parse_creator_info_from_url(creator_url1)}")
print(f"URL2: {creator_url2} -> {parse_creator_info_from_url(creator_url2)}") | --- +++ @@ -44,6 +44,10 @@ ]
def get_salt(self) -> str:
+ """
+ Get the salted key
+ :return:
+ """
salt = ""
mixin_key = self.img_key + self.sub_key
for mt in self.map_table:
@@ -51,6 +55,12 @@ return salt[:32]
def sign(self, req_data: Dict) -> Dict:
+ """
+ Add current timestamp to request parameters, sort keys in dictionary order,
+ then URL encode the parameters and combine with salt to generate md5 for w_rid parameter
+ :param req_data:
+ :return:
+ """
current_ts = utils.get_unix_timestamp()
req_data.update({"wts": current_ts})
req_data = dict(sorted(req_data.items()))
@@ -68,6 +78,16 @@
def parse_video_info_from_url(url: str) -> VideoUrlInfo:
+ """
+ Parse video ID from Bilibili video URL
+ Args:
+ url: Bilibili video link
+ - https://www.bilibili.com/video/BV1dwuKzmE26/?spm_id_from=333.1387.homepage.video_card.click
+ - https://www.bilibili.com/video/BV1d54y1g7db
+ - BV1d54y1g7db (directly pass BV number)
+ Returns:
+ VideoUrlInfo: Object containing video ID
+ """
# If the input is already a BV number, return directly
if url.startswith("BV"):
return VideoUrlInfo(video_id=url)
@@ -85,6 +105,16 @@
def parse_creator_info_from_url(url: str) -> CreatorUrlInfo:
+ """
+ Parse creator ID from Bilibili creator space URL
+ Args:
+ url: Bilibili creator space link
+ - https://space.bilibili.com/434377496?spm_id_from=333.1007.0.0
+ - https://space.bilibili.com/20813884
+ - 434377496 (directly pass UID)
+ Returns:
+ CreatorUrlInfo: Object containing creator ID
+ """
# If the input is already a numeric ID, return directly
if url.isdigit():
return CreatorUrlInfo(creator_id=url)
@@ -114,4 +144,4 @@ creator_url2 = "20813884"
print("\nCreator URL parsing test:")
print(f"URL1: {creator_url1} -> {parse_creator_info_from_url(creator_url1)}")
- print(f"URL2: {creator_url2} -> {parse_creator_info_from_url(creator_url2)}")+ print(f"URL2: {creator_url2} -> {parse_creator_info_from_url(creator_url2)}")
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/bilibili/help.py |
Add detailed documentation for each class | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/xhs/exception.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
from httpx import RequestError
class DataFetchError(RequestError):
class IPBlockError(RequestError):
class NoteNotFoundError(RequestError): | --- +++ @@ -22,9 +22,12 @@
class DataFetchError(RequestError):
+ """something error when fetch"""
class IPBlockError(RequestError):
+ """fetch so fast that the server block us ip"""
-class NoteNotFoundError(RequestError):+class NoteNotFoundError(RequestError):
+ """Note does not exist or is abnormal"""
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/xhs/exception.py |
Create documentation for each function signature | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/weibo/login.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : relakkes@gmail.com
# @Time : 2023/12/23 15:42
# @Desc : Weibo login implementation
import asyncio
import functools
import sys
from typing import Optional
from playwright.async_api import BrowserContext, Page
from tenacity import (RetryError, retry, retry_if_result, stop_after_attempt,
wait_fixed)
import config
from base.base_crawler import AbstractLogin
from tools import utils
class WeiboLogin(AbstractLogin):
def __init__(self,
login_type: str,
browser_context: BrowserContext,
context_page: Page,
login_phone: Optional[str] = "",
cookie_str: str = ""
):
config.LOGIN_TYPE = login_type
self.browser_context = browser_context
self.context_page = context_page
self.login_phone = login_phone
self.cookie_str = cookie_str
self.weibo_sso_login_url = "https://passport.weibo.com/sso/signin?entry=miniblog&source=miniblog"
async def begin(self):
utils.logger.info("[WeiboLogin.begin] Begin login weibo ...")
if config.LOGIN_TYPE == "qrcode":
await self.login_by_qrcode()
elif config.LOGIN_TYPE == "phone":
await self.login_by_mobile()
elif config.LOGIN_TYPE == "cookie":
await self.login_by_cookies()
else:
raise ValueError(
"[WeiboLogin.begin] Invalid Login Type Currently only supported qrcode or phone or cookie ...")
@retry(stop=stop_after_attempt(600), wait=wait_fixed(1), retry=retry_if_result(lambda value: value is False))
async def check_login_state(self, no_logged_in_session: str) -> bool:
current_cookie = await self.browser_context.cookies()
_, cookie_dict = utils.convert_cookies(current_cookie)
if cookie_dict.get("SSOLoginState"):
return True
current_web_session = cookie_dict.get("WBPSESS")
if current_web_session != no_logged_in_session:
return True
return False
async def login_by_qrcode(self):
utils.logger.info("[WeiboLogin.login_by_qrcode] Begin login weibo by qrcode ...")
await self.context_page.goto(self.weibo_sso_login_url)
# find login qrcode
qrcode_img_selector = "xpath=//img[@class='w-full h-full']"
base64_qrcode_img = await utils.find_login_qrcode(
self.context_page,
selector=qrcode_img_selector
)
if not base64_qrcode_img:
utils.logger.info("[WeiboLogin.login_by_qrcode] login failed , have not found qrcode please check ....")
sys.exit()
# show login qrcode
partial_show_qrcode = functools.partial(utils.show_qrcode, base64_qrcode_img)
asyncio.get_running_loop().run_in_executor(executor=None, func=partial_show_qrcode)
utils.logger.info(f"[WeiboLogin.login_by_qrcode] Waiting for scan code login, remaining time is 20s")
# get not logged session
current_cookie = await self.browser_context.cookies()
_, cookie_dict = utils.convert_cookies(current_cookie)
no_logged_in_session = cookie_dict.get("WBPSESS")
try:
await self.check_login_state(no_logged_in_session)
except RetryError:
utils.logger.info("[WeiboLogin.login_by_qrcode] Login weibo failed by qrcode login method ...")
sys.exit()
wait_redirect_seconds = 5
utils.logger.info(
f"[WeiboLogin.login_by_qrcode] Login successful then wait for {wait_redirect_seconds} seconds redirect ...")
await asyncio.sleep(wait_redirect_seconds)
async def login_by_mobile(self):
pass
async def login_by_cookies(self):
utils.logger.info("[WeiboLogin.login_by_qrcode] Begin login weibo by cookie ...")
for key, value in utils.convert_str_cookie_to_dict(self.cookie_str).items():
await self.browser_context.add_cookies([{
'name': key,
'value': value,
'domain': ".weibo.cn",
'path': "/"
}]) | --- +++ @@ -53,6 +53,7 @@ self.weibo_sso_login_url = "https://passport.weibo.com/sso/signin?entry=miniblog&source=miniblog"
async def begin(self):
+ """Start login weibo"""
utils.logger.info("[WeiboLogin.begin] Begin login weibo ...")
if config.LOGIN_TYPE == "qrcode":
await self.login_by_qrcode()
@@ -67,6 +68,11 @@
@retry(stop=stop_after_attempt(600), wait=wait_fixed(1), retry=retry_if_result(lambda value: value is False))
async def check_login_state(self, no_logged_in_session: str) -> bool:
+ """
+ Check if the current login status is successful and return True otherwise return False
+ retry decorator will retry 20 times if the return value is False, and the retry interval is 1 second
+ if max retry times reached, raise RetryError
+ """
current_cookie = await self.browser_context.cookies()
_, cookie_dict = utils.convert_cookies(current_cookie)
if cookie_dict.get("SSOLoginState"):
@@ -77,6 +83,7 @@ return False
async def login_by_qrcode(self):
+ """login weibo website and keep webdriver login state"""
utils.logger.info("[WeiboLogin.login_by_qrcode] Begin login weibo by qrcode ...")
await self.context_page.goto(self.weibo_sso_login_url)
# find login qrcode
@@ -122,4 +129,4 @@ 'value': value,
'domain': ".weibo.cn",
'path': "/"
- }])+ }])
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/weibo/login.py |
Add professional docstrings to my codebase | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/xhs/playwright_sign.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# Generate Xiaohongshu signature by calling window.mnsv2 via Playwright injection
import hashlib
import json
import time
from typing import Any, Dict, Optional, Union
from urllib.parse import urlparse, quote
from playwright.async_api import Page
from .xhs_sign import b64_encode, encode_utf8, get_trace_id, mrc
def _build_sign_string(uri: str, data: Optional[Union[Dict, str]] = None, method: str = "POST") -> str:
if method.upper() == "POST":
# POST request uses JSON format
c = uri
if data is not None:
if isinstance(data, dict):
c += json.dumps(data, separators=(",", ":"), ensure_ascii=False)
elif isinstance(data, str):
c += data
return c
else:
# GET request uses query string format
if not data or (isinstance(data, dict) and len(data) == 0):
return uri
if isinstance(data, dict):
params = []
for key in data.keys():
value = data[key]
if isinstance(value, list):
value_str = ",".join(str(v) for v in value)
elif value is not None:
value_str = str(value)
else:
value_str = ""
# Use URL encoding (safe parameter preserves certain characters from encoding)
# Note: httpx will encode commas, equals signs, etc., we need to handle the same way
value_str = quote(value_str, safe='')
params.append(f"{key}={value_str}")
return f"{uri}?{'&'.join(params)}"
elif isinstance(data, str):
return f"{uri}?{data}"
return uri
def _md5_hex(s: str) -> str:
return hashlib.md5(s.encode("utf-8")).hexdigest()
def _build_xs_payload(x3_value: str, data_type: str = "object") -> str:
s = {
"x0": "4.2.1",
"x1": "xhs-pc-web",
"x2": "Mac OS",
"x3": x3_value,
"x4": data_type,
}
return "XYS_" + b64_encode(encode_utf8(json.dumps(s, separators=(",", ":"))))
def _build_xs_common(a1: str, b1: str, x_s: str, x_t: str) -> str:
payload = {
"s0": 3,
"s1": "",
"x0": "1",
"x1": "4.2.2",
"x2": "Mac OS",
"x3": "xhs-pc-web",
"x4": "4.74.0",
"x5": a1,
"x6": x_t,
"x7": x_s,
"x8": b1,
"x9": mrc(x_t + x_s + b1),
"x10": 154,
"x11": "normal",
}
return b64_encode(encode_utf8(json.dumps(payload, separators=(",", ":"))))
async def get_b1_from_localstorage(page: Page) -> str:
try:
local_storage = await page.evaluate("() => window.localStorage")
return local_storage.get("b1", "")
except Exception:
return ""
async def call_mnsv2(page: Page, sign_str: str, md5_str: str) -> str:
sign_str_escaped = sign_str.replace("\\", "\\\\").replace("'", "\\'").replace("\n", "\\n")
md5_str_escaped = md5_str.replace("\\", "\\\\").replace("'", "\\'")
try:
result = await page.evaluate(f"window.mnsv2('{sign_str_escaped}', '{md5_str_escaped}')")
return result if result else ""
except Exception:
return ""
async def sign_xs_with_playwright(
page: Page,
uri: str,
data: Optional[Union[Dict, str]] = None,
method: str = "POST",
) -> str:
sign_str = _build_sign_string(uri, data, method)
md5_str = _md5_hex(sign_str)
x3_value = await call_mnsv2(page, sign_str, md5_str)
data_type = "object" if isinstance(data, (dict, list)) else "string"
return _build_xs_payload(x3_value, data_type)
async def sign_with_playwright(
page: Page,
uri: str,
data: Optional[Union[Dict, str]] = None,
a1: str = "",
method: str = "POST",
) -> Dict[str, Any]:
b1 = await get_b1_from_localstorage(page)
x_s = await sign_xs_with_playwright(page, uri, data, method)
x_t = str(int(time.time() * 1000))
return {
"x-s": x_s,
"x-t": x_t,
"x-s-common": _build_xs_common(a1, b1, x_s, x_t),
"x-b3-traceid": get_trace_id(),
}
async def pre_headers_with_playwright(
page: Page,
url: str,
cookie_dict: Dict[str, str],
params: Optional[Dict] = None,
payload: Optional[Dict] = None,
) -> Dict[str, str]:
a1_value = cookie_dict.get("a1", "")
uri = urlparse(url).path
# Determine request data and method
if params is not None:
data = params
method = "GET"
elif payload is not None:
data = payload
method = "POST"
else:
raise ValueError("params or payload is required")
signs = await sign_with_playwright(page, uri, data, a1_value, method)
return {
"X-S": signs["x-s"],
"X-T": signs["x-t"],
"x-S-Common": signs["x-s-common"],
"X-B3-Traceid": signs["x-b3-traceid"],
} | --- +++ @@ -30,6 +30,16 @@
def _build_sign_string(uri: str, data: Optional[Union[Dict, str]] = None, method: str = "POST") -> str:
+ """Build string to be signed
+
+ Args:
+ uri: API path
+ data: Request data
+ method: Request method (GET or POST)
+
+ Returns:
+ String to be signed
+ """
if method.upper() == "POST":
# POST request uses JSON format
c = uri
@@ -65,10 +75,12 @@
def _md5_hex(s: str) -> str:
+ """Calculate MD5 hash value"""
return hashlib.md5(s.encode("utf-8")).hexdigest()
def _build_xs_payload(x3_value: str, data_type: str = "object") -> str:
+ """Build x-s signature"""
s = {
"x0": "4.2.1",
"x1": "xhs-pc-web",
@@ -80,6 +92,7 @@
def _build_xs_common(a1: str, b1: str, x_s: str, x_t: str) -> str:
+ """Build x-s-common request header"""
payload = {
"s0": 3,
"s1": "",
@@ -100,6 +113,7 @@
async def get_b1_from_localstorage(page: Page) -> str:
+ """Get b1 value from localStorage"""
try:
local_storage = await page.evaluate("() => window.localStorage")
return local_storage.get("b1", "")
@@ -108,6 +122,17 @@
async def call_mnsv2(page: Page, sign_str: str, md5_str: str) -> str:
+ """
+ Call window.mnsv2 function via playwright
+
+ Args:
+ page: playwright Page object
+ sign_str: String to be signed (uri + JSON.stringify(data))
+ md5_str: MD5 hash value of sign_str
+
+ Returns:
+ Signature string returned by mnsv2
+ """
sign_str_escaped = sign_str.replace("\\", "\\\\").replace("'", "\\'").replace("\n", "\\n")
md5_str_escaped = md5_str.replace("\\", "\\\\").replace("'", "\\'")
@@ -124,6 +149,18 @@ data: Optional[Union[Dict, str]] = None,
method: str = "POST",
) -> str:
+ """
+ Generate x-s signature via playwright injection
+
+ Args:
+ page: playwright Page object (must have Xiaohongshu page open)
+ uri: API path, e.g., "/api/sns/web/v1/search/notes"
+ data: Request data (GET params or POST payload)
+ method: Request method (GET or POST)
+
+ Returns:
+ x-s signature string
+ """
sign_str = _build_sign_string(uri, data, method)
md5_str = _md5_hex(sign_str)
x3_value = await call_mnsv2(page, sign_str, md5_str)
@@ -138,6 +175,19 @@ a1: str = "",
method: str = "POST",
) -> Dict[str, Any]:
+ """
+ Generate complete signature request headers via playwright
+
+ Args:
+ page: playwright Page object (must have Xiaohongshu page open)
+ uri: API path
+ data: Request data
+ a1: a1 value from cookie
+ method: Request method (GET or POST)
+
+ Returns:
+ Dictionary containing x-s, x-t, x-s-common, x-b3-traceid
+ """
b1 = await get_b1_from_localstorage(page)
x_s = await sign_xs_with_playwright(page, uri, data, method)
x_t = str(int(time.time() * 1000))
@@ -157,6 +207,20 @@ params: Optional[Dict] = None,
payload: Optional[Dict] = None,
) -> Dict[str, str]:
+ """
+ Generate request header signature using playwright injection method
+ Can directly replace _pre_headers method in client.py
+
+ Args:
+ page: playwright Page object
+ url: Request URL
+ cookie_dict: Cookie dictionary
+ params: GET request parameters
+ payload: POST request parameters
+
+ Returns:
+ Signed request header dictionary
+ """
a1_value = cookie_dict.get("a1", "")
uri = urlparse(url).path
@@ -177,4 +241,4 @@ "X-T": signs["x-t"],
"x-S-Common": signs["x-s-common"],
"X-B3-Traceid": signs["x-b3-traceid"],
- }+ }
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/xhs/playwright_sign.py |
Document classes and their methods | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/zhihu/client.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
import asyncio
import json
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union
from urllib.parse import urlencode
import httpx
from httpx import Response
from playwright.async_api import BrowserContext, Page
from tenacity import retry, stop_after_attempt, wait_fixed
import config
from base.base_crawler import AbstractApiClient
from constant import zhihu as zhihu_constant
from model.m_zhihu import ZhihuComment, ZhihuContent, ZhihuCreator
from proxy.proxy_mixin import ProxyRefreshMixin
from tools import utils
if TYPE_CHECKING:
from proxy.proxy_ip_pool import ProxyIpPool
from .exception import DataFetchError, ForbiddenError
from .field import SearchSort, SearchTime, SearchType
from .help import ZhihuExtractor, sign
class ZhiHuClient(AbstractApiClient, ProxyRefreshMixin):
def __init__(
self,
timeout=10,
proxy=None,
*,
headers: Dict[str, str],
playwright_page: Page,
cookie_dict: Dict[str, str],
proxy_ip_pool: Optional["ProxyIpPool"] = None,
):
self.proxy = proxy
self.timeout = timeout
self.default_headers = headers
self.cookie_dict = cookie_dict
self._extractor = ZhihuExtractor()
# Initialize proxy pool (from ProxyRefreshMixin)
self.init_proxy_pool(proxy_ip_pool)
async def _pre_headers(self, url: str) -> Dict:
d_c0 = self.cookie_dict.get("d_c0")
if not d_c0:
raise Exception("d_c0 not found in cookies")
sign_res = sign(url, self.default_headers["cookie"])
headers = self.default_headers.copy()
headers['x-zst-81'] = sign_res["x-zst-81"]
headers['x-zse-96'] = sign_res["x-zse-96"]
return headers
@retry(stop=stop_after_attempt(3), wait=wait_fixed(1))
async def request(self, method, url, **kwargs) -> Union[str, Any]:
# Check if proxy is expired before each request
await self._refresh_proxy_if_expired()
# return response.text
return_response = kwargs.pop('return_response', False)
async with httpx.AsyncClient(proxy=self.proxy) as client:
response = await client.request(method, url, timeout=self.timeout, **kwargs)
if response.status_code != 200:
utils.logger.error(f"[ZhiHuClient.request] Requset Url: {url}, Request error: {response.text}")
if response.status_code == 403:
raise ForbiddenError(response.text)
elif response.status_code == 404: # Content without comments also returns 404
return {}
raise DataFetchError(response.text)
if return_response:
return response.text
try:
data: Dict = response.json()
if data.get("error"):
utils.logger.error(f"[ZhiHuClient.request] Request error: {data}")
raise DataFetchError(data.get("error", {}).get("message"))
return data
except json.JSONDecodeError:
utils.logger.error(f"[ZhiHuClient.request] Request error: {response.text}")
raise DataFetchError(response.text)
async def get(self, uri: str, params=None, **kwargs) -> Union[Response, Dict, str]:
final_uri = uri
if isinstance(params, dict):
final_uri += '?' + urlencode(params)
headers = await self._pre_headers(final_uri)
base_url = (zhihu_constant.ZHIHU_URL if "/p/" not in uri else zhihu_constant.ZHIHU_ZHUANLAN_URL)
return await self.request(method="GET", url=base_url + final_uri, headers=headers, **kwargs)
async def pong(self) -> bool:
utils.logger.info("[ZhiHuClient.pong] Begin to pong zhihu...")
ping_flag = False
try:
res = await self.get_current_user_info()
if res.get("uid") and res.get("name"):
ping_flag = True
utils.logger.info("[ZhiHuClient.pong] Ping zhihu successfully")
else:
utils.logger.error(f"[ZhiHuClient.pong] Ping zhihu failed, response data: {res}")
except Exception as e:
utils.logger.error(f"[ZhiHuClient.pong] Ping zhihu failed: {e}, and try to login again...")
ping_flag = False
return ping_flag
async def update_cookies(self, browser_context: BrowserContext):
cookie_str, cookie_dict = utils.convert_cookies(await browser_context.cookies())
self.default_headers["cookie"] = cookie_str
self.cookie_dict = cookie_dict
async def get_current_user_info(self) -> Dict:
params = {"include": "email,is_active,is_bind_phone"}
return await self.get("/api/v4/me", params)
async def get_note_by_keyword(
self,
keyword: str,
page: int = 1,
page_size: int = 20,
sort: SearchSort = SearchSort.DEFAULT,
note_type: SearchType = SearchType.DEFAULT,
search_time: SearchTime = SearchTime.DEFAULT,
) -> List[ZhihuContent]:
uri = "/api/v4/search_v3"
params = {
"gk_version": "gz-gaokao",
"t": "general",
"q": keyword,
"correction": 1,
"offset": (page - 1) * page_size,
"limit": page_size,
"filter_fields": "",
"lc_idx": (page - 1) * page_size,
"show_all_topics": 0,
"search_source": "Filter",
"time_interval": search_time.value,
"sort": sort.value,
"vertical": note_type.value,
}
search_res = await self.get(uri, params)
utils.logger.info(f"[ZhiHuClient.get_note_by_keyword] Search result: {search_res}")
return self._extractor.extract_contents_from_search(search_res)
async def get_root_comments(
self,
content_id: str,
content_type: str,
offset: str = "",
limit: int = 10,
order_by: str = "score",
) -> Dict:
uri = f"/api/v4/comment_v5/{content_type}s/{content_id}/root_comment"
params = {"order": order_by, "offset": offset, "limit": limit}
return await self.get(uri, params)
# uri = f"/api/v4/{content_type}s/{content_id}/root_comments"
# params = {
# "order": order_by,
# "offset": offset,
# "limit": limit
# }
# return await self.get(uri, params)
async def get_child_comments(
self,
root_comment_id: str,
offset: str = "",
limit: int = 10,
order_by: str = "sort",
) -> Dict:
uri = f"/api/v4/comment_v5/comment/{root_comment_id}/child_comment"
params = {
"order": order_by,
"offset": offset,
"limit": limit,
}
return await self.get(uri, params)
async def get_note_all_comments(
self,
content: ZhihuContent,
crawl_interval: float = 1.0,
callback: Optional[Callable] = None,
) -> List[ZhihuComment]:
result: List[ZhihuComment] = []
is_end: bool = False
offset: str = ""
prev_offset: str = ""
limit: int = 10
while not is_end:
prev_offset = offset
root_comment_res = await self.get_root_comments(content.content_id, content.content_type, offset, limit)
if not root_comment_res:
break
paging_info = root_comment_res.get("paging", {})
is_end = paging_info.get("is_end")
offset = self._extractor.extract_offset(paging_info)
comments = self._extractor.extract_comments(content, root_comment_res.get("data"))
if not comments:
break
if prev_offset == offset:
break
if callback:
await callback(comments)
result.extend(comments)
await self.get_comments_all_sub_comments(content, comments, crawl_interval=crawl_interval, callback=callback)
await asyncio.sleep(crawl_interval)
return result
async def get_comments_all_sub_comments(
self,
content: ZhihuContent,
comments: List[ZhihuComment],
crawl_interval: float = 1.0,
callback: Optional[Callable] = None,
) -> List[ZhihuComment]:
if not config.ENABLE_GET_SUB_COMMENTS:
return []
all_sub_comments: List[ZhihuComment] = []
for parment_comment in comments:
if parment_comment.sub_comment_count == 0:
continue
is_end: bool = False
offset: str = ""
prev_offset: str = ""
limit: int = 10
while not is_end:
prev_offset = offset
child_comment_res = await self.get_child_comments(parment_comment.comment_id, offset, limit)
if not child_comment_res:
break
paging_info = child_comment_res.get("paging", {})
is_end = paging_info.get("is_end")
offset = self._extractor.extract_offset(paging_info)
sub_comments = self._extractor.extract_comments(content, child_comment_res.get("data"))
if not sub_comments:
break
if prev_offset == offset:
break
if callback:
await callback(sub_comments)
all_sub_comments.extend(sub_comments)
await asyncio.sleep(crawl_interval)
return all_sub_comments
async def get_creator_info(self, url_token: str) -> Optional[ZhihuCreator]:
uri = f"/people/{url_token}"
html_content: str = await self.get(uri, return_response=True)
return self._extractor.extract_creator(url_token, html_content)
async def get_creator_answers(self, url_token: str, offset: int = 0, limit: int = 20) -> Dict:
uri = f"/api/v4/members/{url_token}/answers"
params = {
"include":
"data[*].is_normal,admin_closed_comment,reward_info,is_collapsed,annotation_action,annotation_detail,collapse_reason,collapsed_by,suggest_edit,comment_count,can_comment,content,editable_content,attachment,voteup_count,reshipment_settings,comment_permission,created_time,updated_time,review_info,excerpt,paid_info,reaction_instruction,is_labeled,label_info,relationship.is_authorized,voting,is_author,is_thanked,is_nothelp;data[*].vessay_info;data[*].author.badge[?(type=best_answerer)].topics;data[*].author.vip_info;data[*].question.has_publishing_draft,relationship",
"offset": offset,
"limit": limit,
"order_by": "created"
}
return await self.get(uri, params)
async def get_creator_articles(self, url_token: str, offset: int = 0, limit: int = 20) -> Dict:
uri = f"/api/v4/members/{url_token}/articles"
params = {
"include":
"data[*].comment_count,suggest_edit,is_normal,thumbnail_extra_info,thumbnail,can_comment,comment_permission,admin_closed_comment,content,voteup_count,created,updated,upvoted_followees,voting,review_info,reaction_instruction,is_labeled,label_info;data[*].vessay_info;data[*].author.badge[?(type=best_answerer)].topics;data[*].author.vip_info;",
"offset": offset,
"limit": limit,
"order_by": "created"
}
return await self.get(uri, params)
async def get_creator_videos(self, url_token: str, offset: int = 0, limit: int = 20) -> Dict:
uri = f"/api/v4/members/{url_token}/zvideos"
params = {
"include": "similar_zvideo,creation_relationship,reaction_instruction",
"offset": offset,
"limit": limit,
"similar_aggregation": "true",
}
return await self.get(uri, params)
async def get_all_anwser_by_creator(self, creator: ZhihuCreator, crawl_interval: float = 1.0, callback: Optional[Callable] = None) -> List[ZhihuContent]:
all_contents: List[ZhihuContent] = []
is_end: bool = False
offset: int = 0
limit: int = 20
while not is_end:
res = await self.get_creator_answers(creator.url_token, offset, limit)
if not res:
break
utils.logger.info(f"[ZhiHuClient.get_all_anwser_by_creator] Get creator {creator.url_token} answers: {res}")
paging_info = res.get("paging", {})
is_end = paging_info.get("is_end")
contents = self._extractor.extract_content_list_from_creator(res.get("data"))
if callback:
await callback(contents)
all_contents.extend(contents)
offset += limit
await asyncio.sleep(crawl_interval)
return all_contents
async def get_all_articles_by_creator(
self,
creator: ZhihuCreator,
crawl_interval: float = 1.0,
callback: Optional[Callable] = None,
) -> List[ZhihuContent]:
all_contents: List[ZhihuContent] = []
is_end: bool = False
offset: int = 0
limit: int = 20
while not is_end:
res = await self.get_creator_articles(creator.url_token, offset, limit)
if not res:
break
paging_info = res.get("paging", {})
is_end = paging_info.get("is_end")
contents = self._extractor.extract_content_list_from_creator(res.get("data"))
if callback:
await callback(contents)
all_contents.extend(contents)
offset += limit
await asyncio.sleep(crawl_interval)
return all_contents
async def get_all_videos_by_creator(
self,
creator: ZhihuCreator,
crawl_interval: float = 1.0,
callback: Optional[Callable] = None,
) -> List[ZhihuContent]:
all_contents: List[ZhihuContent] = []
is_end: bool = False
offset: int = 0
limit: int = 20
while not is_end:
res = await self.get_creator_videos(creator.url_token, offset, limit)
if not res:
break
paging_info = res.get("paging", {})
is_end = paging_info.get("is_end")
contents = self._extractor.extract_content_list_from_creator(res.get("data"))
if callback:
await callback(contents)
all_contents.extend(contents)
offset += limit
await asyncio.sleep(crawl_interval)
return all_contents
async def get_answer_info(
self,
question_id: str,
answer_id: str,
) -> Optional[ZhihuContent]:
uri = f"/question/{question_id}/answer/{answer_id}"
response_html = await self.get(uri, return_response=True)
return self._extractor.extract_answer_content_from_html(response_html)
async def get_article_info(self, article_id: str) -> Optional[ZhihuContent]:
uri = f"/p/{article_id}"
response_html = await self.get(uri, return_response=True)
return self._extractor.extract_article_content_from_html(response_html)
async def get_video_info(self, video_id: str) -> Optional[ZhihuContent]:
uri = f"/zvideo/{video_id}"
response_html = await self.get(uri, return_response=True)
return self._extractor.extract_zvideo_content_from_html(response_html) | --- +++ @@ -64,6 +64,13 @@ self.init_proxy_pool(proxy_ip_pool)
async def _pre_headers(self, url: str) -> Dict:
+ """
+ Sign request headers
+ Args:
+ url: Request URL with query parameters
+ Returns:
+
+ """
d_c0 = self.cookie_dict.get("d_c0")
if not d_c0:
raise Exception("d_c0 not found in cookies")
@@ -75,6 +82,16 @@
@retry(stop=stop_after_attempt(3), wait=wait_fixed(1))
async def request(self, method, url, **kwargs) -> Union[str, Any]:
+ """
+ Wrapper for httpx common request method with response handling
+ Args:
+ method: Request method
+ url: Request URL
+ **kwargs: Other request parameters such as headers, body, etc.
+
+ Returns:
+
+ """
# Check if proxy is expired before each request
await self._refresh_proxy_if_expired()
@@ -106,6 +123,15 @@ raise DataFetchError(response.text)
async def get(self, uri: str, params=None, **kwargs) -> Union[Response, Dict, str]:
+ """
+ GET request with header signing
+ Args:
+ uri: Request URI
+ params: Request parameters
+
+ Returns:
+
+ """
final_uri = uri
if isinstance(params, dict):
final_uri += '?' + urlencode(params)
@@ -114,6 +140,11 @@ return await self.request(method="GET", url=base_url + final_uri, headers=headers, **kwargs)
async def pong(self) -> bool:
+ """
+ Check if login status is still valid
+ Returns:
+
+ """
utils.logger.info("[ZhiHuClient.pong] Begin to pong zhihu...")
ping_flag = False
try:
@@ -129,11 +160,24 @@ return ping_flag
async def update_cookies(self, browser_context: BrowserContext):
+ """
+ Update cookies method provided by API client, typically called after successful login
+ Args:
+ browser_context: Browser context object
+
+ Returns:
+
+ """
cookie_str, cookie_dict = utils.convert_cookies(await browser_context.cookies())
self.default_headers["cookie"] = cookie_str
self.cookie_dict = cookie_dict
async def get_current_user_info(self) -> Dict:
+ """
+ Get current logged-in user information
+ Returns:
+
+ """
params = {"include": "email,is_active,is_bind_phone"}
return await self.get("/api/v4/me", params)
@@ -146,6 +190,19 @@ note_type: SearchType = SearchType.DEFAULT,
search_time: SearchTime = SearchTime.DEFAULT,
) -> List[ZhihuContent]:
+ """
+ Search by keyword
+ Args:
+ keyword: Search keyword
+ page: Page number
+ page_size: Page size
+ sort: Sorting method
+ note_type: Search result type
+ search_time: Time range for search results
+
+ Returns:
+
+ """
uri = "/api/v4/search_v3"
params = {
"gk_version": "gz-gaokao",
@@ -174,6 +231,18 @@ limit: int = 10,
order_by: str = "score",
) -> Dict:
+ """
+ Get root-level comments for content
+ Args:
+ content_id: Content ID
+ content_type: Content type (answer, article, zvideo)
+ offset:
+ limit:
+ order_by:
+
+ Returns:
+
+ """
uri = f"/api/v4/comment_v5/{content_type}s/{content_id}/root_comment"
params = {"order": order_by, "offset": offset, "limit": limit}
return await self.get(uri, params)
@@ -192,6 +261,17 @@ limit: int = 10,
order_by: str = "sort",
) -> Dict:
+ """
+ Get child comments under a root comment
+ Args:
+ root_comment_id:
+ offset:
+ limit:
+ order_by:
+
+ Returns:
+
+ """
uri = f"/api/v4/comment_v5/comment/{root_comment_id}/child_comment"
params = {
"order": order_by,
@@ -206,6 +286,16 @@ crawl_interval: float = 1.0,
callback: Optional[Callable] = None,
) -> List[ZhihuComment]:
+ """
+ Get all root-level comments for a specified post, this method will retrieve all comment information under a post
+ Args:
+ content: Content detail object (question|article|video)
+ crawl_interval: Crawl delay interval in seconds
+ callback: Callback after completing one crawl
+
+ Returns:
+
+ """
result: List[ZhihuComment] = []
is_end: bool = False
offset: str = ""
@@ -242,6 +332,17 @@ crawl_interval: float = 1.0,
callback: Optional[Callable] = None,
) -> List[ZhihuComment]:
+ """
+ Get all sub-comments under specified comments
+ Args:
+ content: Content detail object (question|article|video)
+ comments: Comment list
+ crawl_interval: Crawl delay interval in seconds
+ callback: Callback after completing one crawl
+
+ Returns:
+
+ """
if not config.ENABLE_GET_SUB_COMMENTS:
return []
@@ -278,11 +379,30 @@ return all_sub_comments
async def get_creator_info(self, url_token: str) -> Optional[ZhihuCreator]:
+ """
+ Get creator information
+ Args:
+ url_token:
+
+ Returns:
+
+ """
uri = f"/people/{url_token}"
html_content: str = await self.get(uri, return_response=True)
return self._extractor.extract_creator(url_token, html_content)
async def get_creator_answers(self, url_token: str, offset: int = 0, limit: int = 20) -> Dict:
+ """
+ Get creator's answers
+ Args:
+ url_token:
+ offset:
+ limit:
+
+ Returns:
+
+
+ """
uri = f"/api/v4/members/{url_token}/answers"
params = {
"include":
@@ -294,6 +414,16 @@ return await self.get(uri, params)
async def get_creator_articles(self, url_token: str, offset: int = 0, limit: int = 20) -> Dict:
+ """
+ Get creator's articles
+ Args:
+ url_token:
+ offset:
+ limit:
+
+ Returns:
+
+ """
uri = f"/api/v4/members/{url_token}/articles"
params = {
"include":
@@ -305,6 +435,16 @@ return await self.get(uri, params)
async def get_creator_videos(self, url_token: str, offset: int = 0, limit: int = 20) -> Dict:
+ """
+ Get creator's videos
+ Args:
+ url_token:
+ offset:
+ limit:
+
+ Returns:
+
+ """
uri = f"/api/v4/members/{url_token}/zvideos"
params = {
"include": "similar_zvideo,creation_relationship,reaction_instruction",
@@ -315,6 +455,16 @@ return await self.get(uri, params)
async def get_all_anwser_by_creator(self, creator: ZhihuCreator, crawl_interval: float = 1.0, callback: Optional[Callable] = None) -> List[ZhihuContent]:
+ """
+ Get all answers by creator
+ Args:
+ creator: Creator information
+ crawl_interval: Crawl delay interval in seconds
+ callback: Callback after completing one crawl
+
+ Returns:
+
+ """
all_contents: List[ZhihuContent] = []
is_end: bool = False
offset: int = 0
@@ -340,6 +490,16 @@ crawl_interval: float = 1.0,
callback: Optional[Callable] = None,
) -> List[ZhihuContent]:
+ """
+ Get all articles by creator
+ Args:
+ creator:
+ crawl_interval:
+ callback:
+
+ Returns:
+
+ """
all_contents: List[ZhihuContent] = []
is_end: bool = False
offset: int = 0
@@ -364,6 +524,16 @@ crawl_interval: float = 1.0,
callback: Optional[Callable] = None,
) -> List[ZhihuContent]:
+ """
+ Get all videos by creator
+ Args:
+ creator:
+ crawl_interval:
+ callback:
+
+ Returns:
+
+ """
all_contents: List[ZhihuContent] = []
is_end: bool = False
offset: int = 0
@@ -387,16 +557,41 @@ question_id: str,
answer_id: str,
) -> Optional[ZhihuContent]:
+ """
+ Get answer information
+ Args:
+ question_id:
+ answer_id:
+
+ Returns:
+
+ """
uri = f"/question/{question_id}/answer/{answer_id}"
response_html = await self.get(uri, return_response=True)
return self._extractor.extract_answer_content_from_html(response_html)
async def get_article_info(self, article_id: str) -> Optional[ZhihuContent]:
+ """
+ Get article information
+ Args:
+ article_id:
+
+ Returns:
+
+ """
uri = f"/p/{article_id}"
response_html = await self.get(uri, return_response=True)
return self._extractor.extract_article_content_from_html(response_html)
async def get_video_info(self, video_id: str) -> Optional[ZhihuContent]:
+ """
+ Get video information
+ Args:
+ video_id:
+
+ Returns:
+
+ """
uri = f"/zvideo/{video_id}"
response_html = await self.get(uri, return_response=True)
- return self._extractor.extract_zvideo_content_from_html(response_html)+ return self._extractor.extract_zvideo_content_from_html(response_html)
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/zhihu/client.py |
Document all endpoints with docstrings | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/cache/abs_cache.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : relakkes@gmail.com
# @Name : Programmer AJiang-Relakkes
# @Time : 2024/6/2 11:06
# @Desc : Abstract class
from abc import ABC, abstractmethod
from typing import Any, List, Optional
class AbstractCache(ABC):
@abstractmethod
def get(self, key: str) -> Optional[Any]:
raise NotImplementedError
@abstractmethod
def set(self, key: str, value: Any, expire_time: int) -> None:
raise NotImplementedError
@abstractmethod
def keys(self, pattern: str) -> List[str]:
raise NotImplementedError | --- +++ @@ -32,12 +32,31 @@
@abstractmethod
def get(self, key: str) -> Optional[Any]:
+ """
+ Get the value of a key from the cache.
+ This is an abstract method. Subclasses must implement this method.
+ :param key: The key
+ :return:
+ """
raise NotImplementedError
@abstractmethod
def set(self, key: str, value: Any, expire_time: int) -> None:
+ """
+ Set the value of a key in the cache.
+ This is an abstract method. Subclasses must implement this method.
+ :param key: The key
+ :param value: The value
+ :param expire_time: Expiration time
+ :return:
+ """
raise NotImplementedError
@abstractmethod
def keys(self, pattern: str) -> List[str]:
- raise NotImplementedError+ """
+ Get all keys matching the pattern
+ :param pattern: Matching pattern
+ :return:
+ """
+ raise NotImplementedError
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/cache/abs_cache.py |
Write docstrings that follow conventions | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/kuaishou/core.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
import asyncio
import os
# import random # Removed as we now use fixed config.CRAWLER_MAX_SLEEP_SEC intervals
import time
from asyncio import Task
from typing import Dict, List, Optional, Tuple
from playwright.async_api import (
BrowserContext,
BrowserType,
Page,
Playwright,
async_playwright,
)
import config
from base.base_crawler import AbstractCrawler
from model.m_kuaishou import VideoUrlInfo, CreatorUrlInfo
from proxy.proxy_ip_pool import IpInfoModel, create_ip_pool
from store import kuaishou as kuaishou_store
from tools import utils
from tools.cdp_browser import CDPBrowserManager
from var import comment_tasks_var, crawler_type_var, source_keyword_var
from .client import KuaiShouClient
from .exception import DataFetchError
from .help import parse_video_info_from_url, parse_creator_info_from_url
from .login import KuaishouLogin
class KuaishouCrawler(AbstractCrawler):
context_page: Page
ks_client: KuaiShouClient
browser_context: BrowserContext
cdp_manager: Optional[CDPBrowserManager]
def __init__(self):
self.index_url = "https://www.kuaishou.com"
self.user_agent = utils.get_user_agent()
self.cdp_manager = None
self.ip_proxy_pool = None # Proxy IP pool, used for automatic proxy refresh
async def start(self):
playwright_proxy_format, httpx_proxy_format = None, None
if config.ENABLE_IP_PROXY:
self.ip_proxy_pool = await create_ip_pool(
config.IP_PROXY_POOL_COUNT, enable_validate_ip=True
)
ip_proxy_info: IpInfoModel = await self.ip_proxy_pool.get_proxy()
playwright_proxy_format, httpx_proxy_format = utils.format_proxy_info(
ip_proxy_info
)
async with async_playwright() as playwright:
# Select startup mode based on configuration
if config.ENABLE_CDP_MODE:
utils.logger.info("[KuaishouCrawler] Launching browser using CDP mode")
self.browser_context = await self.launch_browser_with_cdp(
playwright,
playwright_proxy_format,
self.user_agent,
headless=config.CDP_HEADLESS,
)
else:
utils.logger.info("[KuaishouCrawler] Launching browser using standard mode")
# Launch a browser context.
chromium = playwright.chromium
self.browser_context = await self.launch_browser(
chromium, None, self.user_agent, headless=config.HEADLESS
)
# stealth.min.js is a js script to prevent the website from detecting the crawler.
await self.browser_context.add_init_script(path="libs/stealth.min.js")
self.context_page = await self.browser_context.new_page()
await self.context_page.goto(f"{self.index_url}?isHome=1")
# Create a client to interact with the kuaishou website.
self.ks_client = await self.create_ks_client(httpx_proxy_format)
if not await self.ks_client.pong():
login_obj = KuaishouLogin(
login_type=config.LOGIN_TYPE,
login_phone=httpx_proxy_format,
browser_context=self.browser_context,
context_page=self.context_page,
cookie_str=config.COOKIES,
)
await login_obj.begin()
await self.ks_client.update_cookies(
browser_context=self.browser_context
)
crawler_type_var.set(config.CRAWLER_TYPE)
if config.CRAWLER_TYPE == "search":
# Search for videos and retrieve their comment information.
await self.search()
elif config.CRAWLER_TYPE == "detail":
# Get the information and comments of the specified post
await self.get_specified_videos()
elif config.CRAWLER_TYPE == "creator":
# Get creator's information and their videos and comments
await self.get_creators_and_videos()
else:
pass
utils.logger.info("[KuaishouCrawler.start] Kuaishou Crawler finished ...")
async def search(self):
utils.logger.info("[KuaishouCrawler.search] Begin search kuaishou keywords")
ks_limit_count = 20 # kuaishou limit page fixed value
if config.CRAWLER_MAX_NOTES_COUNT < ks_limit_count:
config.CRAWLER_MAX_NOTES_COUNT = ks_limit_count
start_page = config.START_PAGE
for keyword in config.KEYWORDS.split(","):
search_session_id = ""
source_keyword_var.set(keyword)
utils.logger.info(
f"[KuaishouCrawler.search] Current search keyword: {keyword}"
)
page = 1
while (
page - start_page + 1
) * ks_limit_count <= config.CRAWLER_MAX_NOTES_COUNT:
if page < start_page:
utils.logger.info(f"[KuaishouCrawler.search] Skip page: {page}")
page += 1
continue
utils.logger.info(
f"[KuaishouCrawler.search] search kuaishou keyword: {keyword}, page: {page}"
)
video_id_list: List[str] = []
videos_res = await self.ks_client.search_info_by_keyword(
keyword=keyword,
pcursor=str(page),
search_session_id=search_session_id,
)
if not videos_res:
utils.logger.error(
f"[KuaishouCrawler.search] search info by keyword:{keyword} not found data"
)
continue
vision_search_photo: Dict = videos_res.get("visionSearchPhoto")
if vision_search_photo.get("result") != 1:
utils.logger.error(
f"[KuaishouCrawler.search] search info by keyword:{keyword} not found data "
)
continue
search_session_id = vision_search_photo.get("searchSessionId", "")
for video_detail in vision_search_photo.get("feeds"):
video_id_list.append(video_detail.get("photo", {}).get("id"))
await kuaishou_store.update_kuaishou_video(video_item=video_detail)
# batch fetch video comments
page += 1
# Sleep after page navigation
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[KuaishouCrawler.search] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after page {page-1}")
await self.batch_get_video_comments(video_id_list)
async def get_specified_videos(self):
utils.logger.info("[KuaishouCrawler.get_specified_videos] Parsing video URLs...")
video_ids = []
for video_url in config.KS_SPECIFIED_ID_LIST:
try:
video_info = parse_video_info_from_url(video_url)
video_ids.append(video_info.video_id)
utils.logger.info(f"Parsed video ID: {video_info.video_id} from {video_url}")
except ValueError as e:
utils.logger.error(f"Failed to parse video URL: {e}")
continue
semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
task_list = [
self.get_video_info_task(video_id=video_id, semaphore=semaphore)
for video_id in video_ids
]
video_details = await asyncio.gather(*task_list)
for video_detail in video_details:
if video_detail is not None:
await kuaishou_store.update_kuaishou_video(video_detail)
await self.batch_get_video_comments(video_ids)
async def get_video_info_task(
self, video_id: str, semaphore: asyncio.Semaphore
) -> Optional[Dict]:
async with semaphore:
try:
result = await self.ks_client.get_video_info(video_id)
# Sleep after fetching video details
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[KuaishouCrawler.get_video_info_task] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after fetching video details {video_id}")
utils.logger.info(
f"[KuaishouCrawler.get_video_info_task] Get video_id:{video_id} info result: {result} ..."
)
return result.get("visionVideoDetail")
except DataFetchError as ex:
utils.logger.error(
f"[KuaishouCrawler.get_video_info_task] Get video detail error: {ex}"
)
return None
except KeyError as ex:
utils.logger.error(
f"[KuaishouCrawler.get_video_info_task] have not fund video detail video_id:{video_id}, err: {ex}"
)
return None
async def batch_get_video_comments(self, video_id_list: List[str]):
if not config.ENABLE_GET_COMMENTS:
utils.logger.info(
f"[KuaishouCrawler.batch_get_video_comments] Crawling comment mode is not enabled"
)
return
utils.logger.info(
f"[KuaishouCrawler.batch_get_video_comments] video ids:{video_id_list}"
)
semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
task_list: List[Task] = []
for video_id in video_id_list:
task = asyncio.create_task(
self.get_comments(video_id, semaphore), name=video_id
)
task_list.append(task)
comment_tasks_var.set(task_list)
await asyncio.gather(*task_list)
async def get_comments(self, video_id: str, semaphore: asyncio.Semaphore):
async with semaphore:
try:
utils.logger.info(
f"[KuaishouCrawler.get_comments] begin get video_id: {video_id} comments ..."
)
# Sleep before fetching comments
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[KuaishouCrawler.get_comments] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds before fetching comments for video {video_id}")
await self.ks_client.get_video_all_comments(
photo_id=video_id,
crawl_interval=config.CRAWLER_MAX_SLEEP_SEC,
callback=kuaishou_store.batch_update_ks_video_comments,
max_count=config.CRAWLER_MAX_COMMENTS_COUNT_SINGLENOTES,
)
except DataFetchError as ex:
utils.logger.error(
f"[KuaishouCrawler.get_comments] get video_id: {video_id} comment error: {ex}"
)
except Exception as e:
utils.logger.error(
f"[KuaishouCrawler.get_comments] may be been blocked, err:{e}"
)
# use time.sleeep block main coroutine instead of asyncio.sleep and cacel running comment task
# maybe kuaishou block our request, we will take a nap and update the cookie again
current_running_tasks = comment_tasks_var.get()
for task in current_running_tasks:
task.cancel()
time.sleep(20)
await self.context_page.goto(f"{self.index_url}?isHome=1")
await self.ks_client.update_cookies(
browser_context=self.browser_context
)
async def create_ks_client(self, httpx_proxy: Optional[str]) -> KuaiShouClient:
utils.logger.info(
"[KuaishouCrawler.create_ks_client] Begin create kuaishou API client ..."
)
cookie_str, cookie_dict = utils.convert_cookies(
await self.browser_context.cookies()
)
ks_client_obj = KuaiShouClient(
proxy=httpx_proxy,
headers={
"User-Agent": self.user_agent,
"Cookie": cookie_str,
"Origin": self.index_url,
"Referer": self.index_url,
"Content-Type": "application/json;charset=UTF-8",
},
playwright_page=self.context_page,
cookie_dict=cookie_dict,
proxy_ip_pool=self.ip_proxy_pool, # Pass proxy pool for automatic refresh
)
return ks_client_obj
async def launch_browser(
self,
chromium: BrowserType,
playwright_proxy: Optional[Dict],
user_agent: Optional[str],
headless: bool = True,
) -> BrowserContext:
utils.logger.info(
"[KuaishouCrawler.launch_browser] Begin create browser context ..."
)
if config.SAVE_LOGIN_STATE:
user_data_dir = os.path.join(
os.getcwd(), "browser_data", config.USER_DATA_DIR % config.PLATFORM
) # type: ignore
browser_context = await chromium.launch_persistent_context(
user_data_dir=user_data_dir,
accept_downloads=True,
headless=headless,
proxy=playwright_proxy, # type: ignore
viewport={"width": 1920, "height": 1080},
user_agent=user_agent,
channel="chrome", # Use system's stable Chrome version
)
return browser_context
else:
browser = await chromium.launch(headless=headless, proxy=playwright_proxy, channel="chrome") # type: ignore
browser_context = await browser.new_context(
viewport={"width": 1920, "height": 1080}, user_agent=user_agent
)
return browser_context
async def launch_browser_with_cdp(
self,
playwright: Playwright,
playwright_proxy: Optional[Dict],
user_agent: Optional[str],
headless: bool = True,
) -> BrowserContext:
try:
self.cdp_manager = CDPBrowserManager()
browser_context = await self.cdp_manager.launch_and_connect(
playwright=playwright,
playwright_proxy=playwright_proxy,
user_agent=user_agent,
headless=headless,
)
# Display browser information
browser_info = await self.cdp_manager.get_browser_info()
utils.logger.info(f"[KuaishouCrawler] CDP browser info: {browser_info}")
return browser_context
except Exception as e:
utils.logger.error(
f"[KuaishouCrawler] CDP mode launch failed, fallback to standard mode: {e}"
)
# Fallback to standard mode
chromium = playwright.chromium
return await self.launch_browser(
chromium, playwright_proxy, user_agent, headless
)
async def get_creators_and_videos(self) -> None:
utils.logger.info(
"[KuaiShouCrawler.get_creators_and_videos] Begin get kuaishou creators"
)
for creator_url in config.KS_CREATOR_ID_LIST:
try:
# Parse creator URL to get user_id
creator_info: CreatorUrlInfo = parse_creator_info_from_url(creator_url)
utils.logger.info(f"[KuaiShouCrawler.get_creators_and_videos] Parse creator URL info: {creator_info}")
user_id = creator_info.user_id
# get creator detail info from web html content
createor_info: Dict = await self.ks_client.get_creator_info(user_id=user_id)
if createor_info:
await kuaishou_store.save_creator(user_id, creator=createor_info)
except ValueError as e:
utils.logger.error(f"[KuaiShouCrawler.get_creators_and_videos] Failed to parse creator URL: {e}")
continue
# Get all video information of the creator
all_video_list = await self.ks_client.get_all_videos_by_creator(
user_id=user_id,
crawl_interval=config.CRAWLER_MAX_SLEEP_SEC,
callback=self.fetch_creator_video_detail,
)
video_ids = [
video_item.get("photo", {}).get("id") for video_item in all_video_list
]
await self.batch_get_video_comments(video_ids)
async def fetch_creator_video_detail(self, video_list: List[Dict]):
semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
task_list = [
self.get_video_info_task(post_item.get("photo", {}).get("id"), semaphore)
for post_item in video_list
]
video_details = await asyncio.gather(*task_list)
for video_detail in video_details:
if video_detail is not None:
await kuaishou_store.update_kuaishou_video(video_detail)
async def close(self):
# If using CDP mode, need special handling
if self.cdp_manager:
await self.cdp_manager.cleanup()
self.cdp_manager = None
else:
await self.browser_context.close()
utils.logger.info("[KuaishouCrawler.close] Browser context closed ...") | --- +++ @@ -181,6 +181,7 @@ await self.batch_get_video_comments(video_id_list)
async def get_specified_videos(self):
+ """Get the information and comments of the specified post"""
utils.logger.info("[KuaishouCrawler.get_specified_videos] Parsing video URLs...")
video_ids = []
for video_url in config.KS_SPECIFIED_ID_LIST:
@@ -206,6 +207,7 @@ async def get_video_info_task(
self, video_id: str, semaphore: asyncio.Semaphore
) -> Optional[Dict]:
+ """Get video detail task"""
async with semaphore:
try:
result = await self.ks_client.get_video_info(video_id)
@@ -230,6 +232,11 @@ return None
async def batch_get_video_comments(self, video_id_list: List[str]):
+ """
+ batch get video comments
+ :param video_id_list:
+ :return:
+ """
if not config.ENABLE_GET_COMMENTS:
utils.logger.info(
f"[KuaishouCrawler.batch_get_video_comments] Crawling comment mode is not enabled"
@@ -251,6 +258,12 @@ await asyncio.gather(*task_list)
async def get_comments(self, video_id: str, semaphore: asyncio.Semaphore):
+ """
+ get comment for video id
+ :param video_id:
+ :param semaphore:
+ :return:
+ """
async with semaphore:
try:
utils.logger.info(
@@ -287,6 +300,7 @@ )
async def create_ks_client(self, httpx_proxy: Optional[str]) -> KuaiShouClient:
+ """Create ks client"""
utils.logger.info(
"[KuaishouCrawler.create_ks_client] Begin create kuaishou API client ..."
)
@@ -315,6 +329,7 @@ user_agent: Optional[str],
headless: bool = True,
) -> BrowserContext:
+ """Launch browser and create browser context"""
utils.logger.info(
"[KuaishouCrawler.launch_browser] Begin create browser context ..."
)
@@ -346,6 +361,9 @@ user_agent: Optional[str],
headless: bool = True,
) -> BrowserContext:
+ """
+ Launch browser using CDP mode
+ """
try:
self.cdp_manager = CDPBrowserManager()
browser_context = await self.cdp_manager.launch_and_connect(
@@ -372,6 +390,7 @@ )
async def get_creators_and_videos(self) -> None:
+ """Get creator's videos and retrieve their comment information."""
utils.logger.info(
"[KuaiShouCrawler.get_creators_and_videos] Begin get kuaishou creators"
)
@@ -403,6 +422,9 @@ await self.batch_get_video_comments(video_ids)
async def fetch_creator_video_detail(self, video_list: List[Dict]):
+ """
+ Concurrently obtain the specified post list and save the data
+ """
semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
task_list = [
self.get_video_info_task(post_item.get("photo", {}).get("id"), semaphore)
@@ -415,10 +437,11 @@ await kuaishou_store.update_kuaishou_video(video_detail)
async def close(self):
+ """Close browser context"""
# If using CDP mode, need special handling
if self.cdp_manager:
await self.cdp_manager.cleanup()
self.cdp_manager = None
else:
await self.browser_context.close()
- utils.logger.info("[KuaishouCrawler.close] Browser context closed ...")+ utils.logger.info("[KuaishouCrawler.close] Browser context closed ...")
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/kuaishou/core.py |
Add docstrings for production code | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/cache/local_cache.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : relakkes@gmail.com
# @Name : Programmer AJiang-Relakkes
# @Time : 2024/6/2 11:05
# @Desc : Local cache
import asyncio
import time
from typing import Any, Dict, List, Optional, Tuple
from cache.abs_cache import AbstractCache
class ExpiringLocalCache(AbstractCache):
def __init__(self, cron_interval: int = 10):
self._cron_interval = cron_interval
self._cache_container: Dict[str, Tuple[Any, float]] = {}
self._cron_task: Optional[asyncio.Task] = None
# Start scheduled cleanup task
self._schedule_clear()
def __del__(self):
if self._cron_task is not None:
self._cron_task.cancel()
def get(self, key: str) -> Optional[Any]:
value, expire_time = self._cache_container.get(key, (None, 0))
if value is None:
return None
# If the key has expired, delete it and return None
if expire_time < time.time():
del self._cache_container[key]
return None
return value
def set(self, key: str, value: Any, expire_time: int) -> None:
self._cache_container[key] = (value, time.time() + expire_time)
def keys(self, pattern: str) -> List[str]:
if pattern == '*':
return list(self._cache_container.keys())
# For local cache wildcard, temporarily replace * with empty string
if '*' in pattern:
pattern = pattern.replace('*', '')
return [key for key in self._cache_container.keys() if pattern in key]
def _schedule_clear(self):
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
self._cron_task = loop.create_task(self._start_clear_cron())
def _clear(self):
for key, (value, expire_time) in self._cache_container.items():
if expire_time < time.time():
del self._cache_container[key]
async def _start_clear_cron(self):
while True:
self._clear()
await asyncio.sleep(self._cron_interval)
if __name__ == '__main__':
cache = ExpiringLocalCache(cron_interval=2)
cache.set('name', 'Programmer AJiang-Relakkes', 3)
print(cache.get('key'))
print(cache.keys("*"))
time.sleep(4)
print(cache.get('key'))
del cache
time.sleep(1)
print("done") | --- +++ @@ -34,6 +34,11 @@ class ExpiringLocalCache(AbstractCache):
def __init__(self, cron_interval: int = 10):
+ """
+ Initialize local cache
+ :param cron_interval: Time interval for scheduled cache cleanup
+ :return:
+ """
self._cron_interval = cron_interval
self._cache_container: Dict[str, Tuple[Any, float]] = {}
self._cron_task: Optional[asyncio.Task] = None
@@ -41,10 +46,19 @@ self._schedule_clear()
def __del__(self):
+ """
+ Destructor function, cleanup scheduled task
+ :return:
+ """
if self._cron_task is not None:
self._cron_task.cancel()
def get(self, key: str) -> Optional[Any]:
+ """
+ Get the value of a key from the cache
+ :param key:
+ :return:
+ """
value, expire_time = self._cache_container.get(key, (None, 0))
if value is None:
return None
@@ -57,9 +71,21 @@ return value
def set(self, key: str, value: Any, expire_time: int) -> None:
+ """
+ Set the value of a key in the cache
+ :param key:
+ :param value:
+ :param expire_time:
+ :return:
+ """
self._cache_container[key] = (value, time.time() + expire_time)
def keys(self, pattern: str) -> List[str]:
+ """
+ Get all keys matching the pattern
+ :param pattern: Matching pattern
+ :return:
+ """
if pattern == '*':
return list(self._cache_container.keys())
@@ -70,6 +96,10 @@ return [key for key in self._cache_container.keys() if pattern in key]
def _schedule_clear(self):
+ """
+ Start scheduled cleanup task
+ :return:
+ """
try:
loop = asyncio.get_event_loop()
@@ -80,11 +110,19 @@ self._cron_task = loop.create_task(self._start_clear_cron())
def _clear(self):
+ """
+ Clean up cache based on expiration time
+ :return:
+ """
for key, (value, expire_time) in self._cache_container.items():
if expire_time < time.time():
del self._cache_container[key]
async def _start_clear_cron(self):
+ """
+ Start scheduled cleanup task
+ :return:
+ """
while True:
self._clear()
await asyncio.sleep(self._cron_interval)
@@ -99,4 +137,4 @@ print(cache.get('key'))
del cache
time.sleep(1)
- print("done")+ print("done")
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/cache/local_cache.py |
Add docstrings to incomplete code | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/tieba/login.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
import asyncio
import functools
import sys
from typing import Optional
from playwright.async_api import BrowserContext, Page
from tenacity import (RetryError, retry, retry_if_result, stop_after_attempt,
wait_fixed)
import config
from base.base_crawler import AbstractLogin
from tools import utils
class BaiduTieBaLogin(AbstractLogin):
def __init__(self,
login_type: str,
browser_context: BrowserContext,
context_page: Page,
login_phone: Optional[str] = "",
cookie_str: str = ""
):
config.LOGIN_TYPE = login_type
self.browser_context = browser_context
self.context_page = context_page
self.login_phone = login_phone
self.cookie_str = cookie_str
@retry(stop=stop_after_attempt(600), wait=wait_fixed(1), retry=retry_if_result(lambda value: value is False))
async def check_login_state(self) -> bool:
current_cookie = await self.browser_context.cookies()
_, cookie_dict = utils.convert_cookies(current_cookie)
stoken = cookie_dict.get("STOKEN")
ptoken = cookie_dict.get("PTOKEN")
if stoken or ptoken:
return True
return False
async def begin(self):
utils.logger.info("[BaiduTieBaLogin.begin] Begin login baidutieba ...")
if config.LOGIN_TYPE == "qrcode":
await self.login_by_qrcode()
elif config.LOGIN_TYPE == "phone":
await self.login_by_mobile()
elif config.LOGIN_TYPE == "cookie":
await self.login_by_cookies()
else:
raise ValueError("[BaiduTieBaLogin.begin]Invalid Login Type Currently only supported qrcode or phone or cookies ...")
async def login_by_mobile(self):
pass
async def login_by_qrcode(self):
utils.logger.info("[BaiduTieBaLogin.login_by_qrcode] Begin login baidutieba by qrcode ...")
qrcode_img_selector = "xpath=//img[@class='tang-pass-qrcode-img']"
# find login qrcode
base64_qrcode_img = await utils.find_login_qrcode(
self.context_page,
selector=qrcode_img_selector
)
if not base64_qrcode_img:
utils.logger.info("[BaiduTieBaLogin.login_by_qrcode] login failed , have not found qrcode please check ....")
# if this website does not automatically popup login dialog box, we will manual click login button
await asyncio.sleep(0.5)
login_button_ele = self.context_page.locator("xpath=//li[@class='u_login']")
await login_button_ele.click()
base64_qrcode_img = await utils.find_login_qrcode(
self.context_page,
selector=qrcode_img_selector
)
if not base64_qrcode_img:
utils.logger.info("[BaiduTieBaLogin.login_by_qrcode] login failed , have not found qrcode please check ....")
sys.exit()
# show login qrcode
# fix issue #12
# we need to use partial function to call show_qrcode function and run in executor
# then current asyncio event loop will not be blocked
partial_show_qrcode = functools.partial(utils.show_qrcode, base64_qrcode_img)
asyncio.get_running_loop().run_in_executor(executor=None, func=partial_show_qrcode)
utils.logger.info(f"[BaiduTieBaLogin.login_by_qrcode] waiting for scan code login, remaining time is 120s")
try:
await self.check_login_state()
except RetryError:
utils.logger.info("[BaiduTieBaLogin.login_by_qrcode] Login baidutieba failed by qrcode login method ...")
sys.exit()
wait_redirect_seconds = 5
utils.logger.info(f"[BaiduTieBaLogin.login_by_qrcode] Login successful then wait for {wait_redirect_seconds} seconds redirect ...")
await asyncio.sleep(wait_redirect_seconds)
async def login_by_cookies(self):
utils.logger.info("[BaiduTieBaLogin.login_by_cookies] Begin login baidutieba by cookie ...")
for key, value in utils.convert_str_cookie_to_dict(self.cookie_str).items():
await self.browser_context.add_cookies([{
'name': key,
'value': value,
'domain': ".baidu.com",
'path': "/"
}]) | --- +++ @@ -49,6 +49,12 @@
@retry(stop=stop_after_attempt(600), wait=wait_fixed(1), retry=retry_if_result(lambda value: value is False))
async def check_login_state(self) -> bool:
+ """
+ Poll to check if login status is successful, return True if successful, otherwise return False
+
+ Returns:
+
+ """
current_cookie = await self.browser_context.cookies()
_, cookie_dict = utils.convert_cookies(current_cookie)
stoken = cookie_dict.get("STOKEN")
@@ -58,6 +64,7 @@ return False
async def begin(self):
+ """Start login baidutieba"""
utils.logger.info("[BaiduTieBaLogin.begin] Begin login baidutieba ...")
if config.LOGIN_TYPE == "qrcode":
await self.login_by_qrcode()
@@ -69,9 +76,11 @@ raise ValueError("[BaiduTieBaLogin.begin]Invalid Login Type Currently only supported qrcode or phone or cookies ...")
async def login_by_mobile(self):
+ """Login baidutieba by mobile"""
pass
async def login_by_qrcode(self):
+ """login baidutieba website and keep webdriver login state"""
utils.logger.info("[BaiduTieBaLogin.login_by_qrcode] Begin login baidutieba by qrcode ...")
qrcode_img_selector = "xpath=//img[@class='tang-pass-qrcode-img']"
# find login qrcode
@@ -112,6 +121,7 @@ await asyncio.sleep(wait_redirect_seconds)
async def login_by_cookies(self):
+ """login baidutieba website by cookies"""
utils.logger.info("[BaiduTieBaLogin.login_by_cookies] Begin login baidutieba by cookie ...")
for key, value in utils.convert_str_cookie_to_dict(self.cookie_str).items():
await self.browser_context.add_cookies([{
@@ -119,4 +129,4 @@ 'value': value,
'domain': ".baidu.com",
'path': "/"
- }])+ }])
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/tieba/login.py |
Document all endpoints with docstrings | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/douyin/help.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : relakkes@gmail.com
# @Name: Programmer Ajiang-Relakkes
# @Time : 2024/6/10 02:24
# @Desc : Get a_bogus parameter, for learning and communication only, do not use for commercial purposes, contact author to delete if infringement
import random
import re
from typing import Optional
import execjs
from playwright.async_api import Page
from model.m_douyin import VideoUrlInfo, CreatorUrlInfo
from tools.crawler_util import extract_url_params_to_dict
douyin_sign_obj = execjs.compile(open('libs/douyin.js', encoding='utf-8-sig').read())
def get_web_id():
def e(t):
if t is not None:
return str(t ^ (int(16 * random.random()) >> (t // 4)))
else:
return ''.join(
[str(int(1e7)), '-', str(int(1e3)), '-', str(int(4e3)), '-', str(int(8e3)), '-', str(int(1e11))]
)
web_id = ''.join(
e(int(x)) if x in '018' else x for x in e(None)
)
return web_id.replace('-', '')[:19]
async def get_a_bogus(url: str, params: str, post_data: dict, user_agent: str, page: Page = None):
return get_a_bogus_from_js(url, params, user_agent)
def get_a_bogus_from_js(url: str, params: str, user_agent: str):
sign_js_name = "sign_datail"
if "/reply" in url:
sign_js_name = "sign_reply"
return douyin_sign_obj.call(sign_js_name, params, user_agent)
async def get_a_bogus_from_playwright(params: str, post_data: dict, user_agent: str, page: Page):
if not post_data:
post_data = ""
a_bogus = await page.evaluate(
"([params, post_data, ua]) => window.bdms.init._v[2].p[42].apply(null, [0, 1, 8, params, post_data, ua])",
[params, post_data, user_agent])
return a_bogus
def parse_video_info_from_url(url: str) -> VideoUrlInfo:
# If it's a pure numeric ID, return directly
if url.isdigit():
return VideoUrlInfo(aweme_id=url, url_type="normal")
# Check if it's a short link (v.douyin.com)
if "v.douyin.com" in url or url.startswith("http") and len(url) < 50 and "video" not in url:
return VideoUrlInfo(aweme_id="", url_type="short") # Requires client parsing
# Try to extract modal_id from URL parameters
params = extract_url_params_to_dict(url)
modal_id = params.get("modal_id")
if modal_id:
return VideoUrlInfo(aweme_id=modal_id, url_type="modal")
# Extract ID from standard video URL: /video/number
video_pattern = r'/video/(\d+)'
match = re.search(video_pattern, url)
if match:
aweme_id = match.group(1)
return VideoUrlInfo(aweme_id=aweme_id, url_type="normal")
raise ValueError(f"Unable to parse video ID from URL: {url}")
def parse_creator_info_from_url(url: str) -> CreatorUrlInfo:
# If it's a pure ID format (usually starts with MS4wLjABAAAA), return directly
if url.startswith("MS4wLjABAAAA") or (not url.startswith("http") and "douyin.com" not in url):
return CreatorUrlInfo(sec_user_id=url)
# Extract sec_user_id from creator homepage URL: /user/xxx
user_pattern = r'/user/([^/?]+)'
match = re.search(user_pattern, url)
if match:
sec_user_id = match.group(1)
return CreatorUrlInfo(sec_user_id=sec_user_id)
raise ValueError(f"Unable to parse creator ID from URL: {url}")
if __name__ == '__main__':
# Test video URL parsing
print("=== Video URL Parsing Test ===")
test_urls = [
"https://www.douyin.com/video/7525082444551310602",
"https://www.douyin.com/user/MS4wLjABAAAATJPY7LAlaa5X-c8uNdWkvz0jUGgpw4eeXIwu_8BhvqE?from_tab_name=main&modal_id=7525082444551310602",
"https://www.douyin.com/root/search/python?aid=b733a3b0-4662-4639-9a72-c2318fba9f3f&modal_id=7471165520058862848&type=general",
"7525082444551310602",
]
for url in test_urls:
try:
result = parse_video_info_from_url(url)
print(f"✓ URL: {url[:80]}...")
print(f" Result: {result}\n")
except Exception as e:
print(f"✗ URL: {url}")
print(f" Error: {e}\n")
# Test creator URL parsing
print("=== Creator URL Parsing Test ===")
test_creator_urls = [
"https://www.douyin.com/user/MS4wLjABAAAATJPY7LAlaa5X-c8uNdWkvz0jUGgpw4eeXIwu_8BhvqE?from_tab_name=main",
"MS4wLjABAAAATJPY7LAlaa5X-c8uNdWkvz0jUGgpw4eeXIwu_8BhvqE",
]
for url in test_creator_urls:
try:
result = parse_creator_info_from_url(url)
print(f"✓ URL: {url[:80]}...")
print(f" Result: {result}\n")
except Exception as e:
print(f"✗ URL: {url}")
print(f" Error: {e}\n") | --- +++ @@ -37,6 +37,11 @@ douyin_sign_obj = execjs.compile(open('libs/douyin.js', encoding='utf-8-sig').read())
def get_web_id():
+ """
+ Generate random webid
+ Returns:
+
+ """
def e(t):
if t is not None:
@@ -54,9 +59,22 @@
async def get_a_bogus(url: str, params: str, post_data: dict, user_agent: str, page: Page = None):
+ """
+ Get a_bogus parameter, currently does not support POST request type signature
+ """
return get_a_bogus_from_js(url, params, user_agent)
def get_a_bogus_from_js(url: str, params: str, user_agent: str):
+ """
+ Get a_bogus parameter through js
+ Args:
+ url:
+ params:
+ user_agent:
+
+ Returns:
+
+ """
sign_js_name = "sign_datail"
if "/reply" in url:
sign_js_name = "sign_reply"
@@ -65,6 +83,12 @@
async def get_a_bogus_from_playwright(params: str, post_data: dict, user_agent: str, page: Page):
+ """
+ Get a_bogus parameter through playwright
+ playwright version is deprecated
+ Returns:
+
+ """
if not post_data:
post_data = ""
a_bogus = await page.evaluate(
@@ -75,6 +99,21 @@
def parse_video_info_from_url(url: str) -> VideoUrlInfo:
+ """
+ Parse video ID from Douyin video URL
+ Supports the following formats:
+ 1. Normal video link: https://www.douyin.com/video/7525082444551310602
+ 2. Link with modal_id parameter:
+ - https://www.douyin.com/user/MS4wLjABAAAATJPY7LAlaa5X-c8uNdWkvz0jUGgpw4eeXIwu_8BhvqE?modal_id=7525082444551310602
+ - https://www.douyin.com/root/search/python?modal_id=7471165520058862848
+ 3. Short link: https://v.douyin.com/iF12345ABC/ (requires client parsing)
+ 4. Pure ID: 7525082444551310602
+
+ Args:
+ url: Douyin video link or ID
+ Returns:
+ VideoUrlInfo: Object containing video ID
+ """
# If it's a pure numeric ID, return directly
if url.isdigit():
return VideoUrlInfo(aweme_id=url, url_type="normal")
@@ -100,6 +139,17 @@
def parse_creator_info_from_url(url: str) -> CreatorUrlInfo:
+ """
+ Parse creator ID (sec_user_id) from Douyin creator homepage URL
+ Supports the following formats:
+ 1. Creator homepage: https://www.douyin.com/user/MS4wLjABAAAATJPY7LAlaa5X-c8uNdWkvz0jUGgpw4eeXIwu_8BhvqE?from_tab_name=main
+ 2. Pure ID: MS4wLjABAAAATJPY7LAlaa5X-c8uNdWkvz0jUGgpw4eeXIwu_8BhvqE
+
+ Args:
+ url: Douyin creator homepage link or sec_user_id
+ Returns:
+ CreatorUrlInfo: Object containing creator ID
+ """
# If it's a pure ID format (usually starts with MS4wLjABAAAA), return directly
if url.startswith("MS4wLjABAAAA") or (not url.startswith("http") and "douyin.com" not in url):
return CreatorUrlInfo(sec_user_id=url)
@@ -145,4 +195,4 @@ print(f" Result: {result}\n")
except Exception as e:
print(f"✗ URL: {url}")
- print(f" Error: {e}\n")+ print(f" Error: {e}\n")
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/douyin/help.py |
Add docstrings for internal functions | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/tieba/help.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
import html
import json
import re
from typing import Dict, List, Tuple
from urllib.parse import parse_qs, unquote
from parsel import Selector
from constant import baidu_tieba as const
from model.m_baidu_tieba import TiebaComment, TiebaCreator, TiebaNote
from tools import utils
GENDER_MALE = "sex_male"
GENDER_FEMALE = "sex_female"
class TieBaExtractor:
def __init__(self):
pass
@staticmethod
def extract_search_note_list(page_content: str) -> List[TiebaNote]:
xpath_selector = "//div[@class='s_post']"
post_list = Selector(text=page_content).xpath(xpath_selector)
result: List[TiebaNote] = []
for post in post_list:
tieba_note = TiebaNote(note_id=post.xpath(".//span[@class='p_title']/a/@data-tid").get(default='').strip(),
title=post.xpath(".//span[@class='p_title']/a/text()").get(default='').strip(),
desc=post.xpath(".//div[@class='p_content']/text()").get(default='').strip(),
note_url=const.TIEBA_URL + post.xpath(".//span[@class='p_title']/a/@href").get(
default=''),
user_nickname=post.xpath(".//a[starts-with(@href, '/home/main')]/font/text()").get(
default='').strip(), user_link=const.TIEBA_URL + post.xpath(
".//a[starts-with(@href, '/home/main')]/@href").get(default=''),
tieba_name=post.xpath(".//a[@class='p_forum']/font/text()").get(default='').strip(),
tieba_link=const.TIEBA_URL + post.xpath(".//a[@class='p_forum']/@href").get(
default=''),
publish_time=post.xpath(".//font[@class='p_green p_date']/text()").get(
default='').strip(), )
result.append(tieba_note)
return result
def extract_tieba_note_list(self, page_content: str) -> List[TiebaNote]:
page_content = page_content.replace('<!--', "")
content_selector = Selector(text=page_content)
xpath_selector = "//ul[@id='thread_list']/li"
post_list = content_selector.xpath(xpath_selector)
result: List[TiebaNote] = []
for post_selector in post_list:
post_field_value: Dict = self.extract_data_field_value(post_selector)
if not post_field_value:
continue
note_id = str(post_field_value.get("id"))
tieba_note = TiebaNote(note_id=note_id,
title=post_selector.xpath(".//a[@class='j_th_tit ']/text()").get(default='').strip(),
desc=post_selector.xpath(
".//div[@class='threadlist_abs threadlist_abs_onlyline ']/text()").get(
default='').strip(), note_url=const.TIEBA_URL + f"/p/{note_id}",
user_link=const.TIEBA_URL + post_selector.xpath(
".//a[@class='frs-author-name j_user_card ']/@href").get(default='').strip(),
user_nickname=post_field_value.get("authoer_nickname") or post_field_value.get(
"author_name"),
tieba_name=content_selector.xpath("//a[@class='card_title_fname']/text()").get(
default='').strip(), tieba_link=const.TIEBA_URL + content_selector.xpath(
"//a[@class='card_title_fname']/@href").get(default=''),
total_replay_num=post_field_value.get("reply_num", 0))
result.append(tieba_note)
return result
def extract_note_detail(self, page_content: str) -> TiebaNote:
content_selector = Selector(text=page_content)
first_floor_selector = content_selector.xpath("//div[@class='p_postlist'][1]")
only_view_author_link = content_selector.xpath("//*[@id='lzonly_cntn']/@href").get(default='').strip()
note_id = only_view_author_link.split("?")[0].split("/")[-1]
# Post reply count and reply page count
thread_num_infos = content_selector.xpath(
"//div[@id='thread_theme_5']//li[@class='l_reply_num']//span[@class='red']")
# IP location and publish time
other_info_content = content_selector.xpath(".//div[@class='post-tail-wrap']").get(default="").strip()
ip_location, publish_time = self.extract_ip_and_pub_time(other_info_content)
note = TiebaNote(note_id=note_id, title=content_selector.xpath("//title/text()").get(default='').strip(),
desc=content_selector.xpath("//meta[@name='description']/@content").get(default='').strip(),
note_url=const.TIEBA_URL + f"/p/{note_id}",
user_link=const.TIEBA_URL + first_floor_selector.xpath(
".//a[@class='p_author_face ']/@href").get(default='').strip(),
user_nickname=first_floor_selector.xpath(
".//a[@class='p_author_name j_user_card']/text()").get(default='').strip(),
user_avatar=first_floor_selector.xpath(".//a[@class='p_author_face ']/img/@src").get(
default='').strip(),
tieba_name=content_selector.xpath("//a[@class='card_title_fname']/text()").get(
default='').strip(), tieba_link=const.TIEBA_URL + content_selector.xpath(
"//a[@class='card_title_fname']/@href").get(default=''), ip_location=ip_location,
publish_time=publish_time,
total_replay_num=thread_num_infos[0].xpath("./text()").get(default='').strip(),
total_replay_page=thread_num_infos[1].xpath("./text()").get(default='').strip(), )
note.title = note.title.replace(f"【{note.tieba_name}】_Baidu Tieba", "")
return note
def extract_tieba_note_parment_comments(self, page_content: str, note_id: str) -> List[TiebaComment]:
xpath_selector = "//div[@class='l_post l_post_bright j_l_post clearfix ']"
comment_list = Selector(text=page_content).xpath(xpath_selector)
result: List[TiebaComment] = []
for comment_selector in comment_list:
comment_field_value: Dict = self.extract_data_field_value(comment_selector)
if not comment_field_value:
continue
tieba_name = comment_selector.xpath("//a[@class='card_title_fname']/text()").get(default='').strip()
other_info_content = comment_selector.xpath(".//div[@class='post-tail-wrap']").get(default="").strip()
ip_location, publish_time = self.extract_ip_and_pub_time(other_info_content)
tieba_comment = TiebaComment(comment_id=str(comment_field_value.get("content").get("post_id")),
sub_comment_count=comment_field_value.get("content").get("comment_num"),
content=utils.extract_text_from_html(
comment_field_value.get("content").get("content")),
note_url=const.TIEBA_URL + f"/p/{note_id}",
user_link=const.TIEBA_URL + comment_selector.xpath(
".//a[@class='p_author_face ']/@href").get(default='').strip(),
user_nickname=comment_selector.xpath(
".//a[@class='p_author_name j_user_card']/text()").get(default='').strip(),
user_avatar=comment_selector.xpath(
".//a[@class='p_author_face ']/img/@src").get(default='').strip(),
tieba_id=str(comment_field_value.get("content").get("forum_id", "")),
tieba_name=tieba_name, tieba_link=f"https://tieba.baidu.com/f?kw={tieba_name}",
ip_location=ip_location, publish_time=publish_time, note_id=note_id, )
result.append(tieba_comment)
return result
def extract_tieba_note_sub_comments(self, page_content: str, parent_comment: TiebaComment) -> List[TiebaComment]:
selector = Selector(page_content)
comments = []
comment_ele_list = selector.xpath("//li[@class='lzl_single_post j_lzl_s_p first_no_border']")
comment_ele_list.extend(selector.xpath("//li[@class='lzl_single_post j_lzl_s_p ']"))
for comment_ele in comment_ele_list:
comment_value = self.extract_data_field_value(comment_ele)
if not comment_value:
continue
comment_user_a_selector = comment_ele.xpath("./a[@class='j_user_card lzl_p_p']")[0]
content = utils.extract_text_from_html(
comment_ele.xpath(".//span[@class='lzl_content_main']").get(default=""))
comment = TiebaComment(
comment_id=str(comment_value.get("spid")), content=content,
user_link=comment_user_a_selector.xpath("./@href").get(default=""),
user_nickname=comment_value.get("showname"),
user_avatar=comment_user_a_selector.xpath("./img/@src").get(default=""),
publish_time=comment_ele.xpath(".//span[@class='lzl_time']/text()").get(default="").strip(),
parent_comment_id=parent_comment.comment_id,
note_id=parent_comment.note_id, note_url=parent_comment.note_url,
tieba_id=parent_comment.tieba_id, tieba_name=parent_comment.tieba_name,
tieba_link=parent_comment.tieba_link)
comments.append(comment)
return comments
def extract_creator_info(self, html_content: str) -> TiebaCreator:
selector = Selector(text=html_content)
user_link_selector = selector.xpath("//p[@class='space']/a")
user_link: str = user_link_selector.xpath("./@href").get(default='')
user_link_params: Dict = parse_qs(unquote(user_link.split("?")[-1]))
user_name = user_link_params.get("un")[0] if user_link_params.get("un") else ""
user_id = user_link_params.get("id")[0] if user_link_params.get("id") else ""
userinfo_userdata_selector = selector.xpath("//div[@class='userinfo_userdata']")
follow_fans_selector = selector.xpath("//span[@class='concern_num']")
follows, fans = 0, 0
if len(follow_fans_selector) == 2:
follows, fans = self.extract_follow_and_fans(follow_fans_selector)
user_content = userinfo_userdata_selector.get(default='')
return TiebaCreator(user_id=user_id, user_name=user_name,
nickname=selector.xpath(".//span[@class='userinfo_username ']/text()").get(
default='').strip(),
avatar=selector.xpath(".//div[@class='userinfo_left_head']//img/@src").get(
default='').strip(),
gender=self.extract_gender(user_content),
ip_location=self.extract_ip(user_content),
follows=follows,
fans=fans,
registration_duration=self.extract_registration_duration(user_content)
)
@staticmethod
def extract_tieba_thread_id_list_from_creator_page(
html_content: str
) -> List[str]:
selector = Selector(text=html_content)
thread_id_list = []
xpath_selector = (
"//ul[@class='new_list clearfix']//div[@class='thread_name']/a[1]/@href"
)
thread_url_list = selector.xpath(xpath_selector).getall()
for thread_url in thread_url_list:
thread_id = thread_url.split("?")[0].split("/")[-1]
thread_id_list.append(thread_id)
return thread_id_list
def extract_ip_and_pub_time(self, html_content: str) -> Tuple[str, str]:
pattern_pub_time = re.compile(r'<span class="tail-info">(\d{4}-\d{2}-\d{2} \d{2}:\d{2})</span>')
time_match = pattern_pub_time.search(html_content)
pub_time = time_match.group(1) if time_match else ""
return self.extract_ip(html_content), pub_time
@staticmethod
def extract_ip(html_content: str) -> str:
pattern_ip = re.compile(r'IP属地:(\S+)</span>')
ip_match = pattern_ip.search(html_content)
ip = ip_match.group(1) if ip_match else ""
return ip
@staticmethod
def extract_gender(html_content: str) -> str:
if GENDER_MALE in html_content:
return 'Male'
elif GENDER_FEMALE in html_content:
return 'Female'
return 'Unknown'
@staticmethod
def extract_follow_and_fans(selectors: List[Selector]) -> Tuple[str, str]:
pattern = re.compile(r'<span class="concern_num">\(<a[^>]*>(\d+)</a>\)</span>')
follow_match = pattern.findall(selectors[0].get())
fans_match = pattern.findall(selectors[1].get())
follows = follow_match[0] if follow_match else 0
fans = fans_match[0] if fans_match else 0
return follows, fans
@staticmethod
def extract_registration_duration(html_content: str) -> str:
pattern = re.compile(r'<span>吧龄:(\S+)</span>')
match = pattern.search(html_content)
return match.group(1) if match else ""
@staticmethod
def extract_data_field_value(selector: Selector) -> Dict:
data_field_value = selector.xpath("./@data-field").get(default='').strip()
if not data_field_value or data_field_value == "{}":
return {}
try:
# First use html.unescape to handle escape characters, then json.loads to convert JSON string to Python dictionary
unescaped_json_str = html.unescape(data_field_value)
data_field_dict_value = json.loads(unescaped_json_str)
except Exception as ex:
print(f"extract_data_field_value, error: {ex}, trying alternative parsing method")
data_field_dict_value = {}
return data_field_dict_value
def test_extract_search_note_list():
with open("test_data/search_keyword_notes.html", "r", encoding="utf-8") as f:
content = f.read()
extractor = TieBaExtractor()
result = extractor.extract_search_note_list(content)
print(result)
def test_extract_note_detail():
with open("test_data/note_detail.html", "r", encoding="utf-8") as f:
content = f.read()
extractor = TieBaExtractor()
result = extractor.extract_note_detail(content)
print(result.model_dump())
def test_extract_tieba_note_parment_comments():
with open("test_data/note_comments.html", "r", encoding="utf-8") as f:
content = f.read()
extractor = TieBaExtractor()
result = extractor.extract_tieba_note_parment_comments(content, "123456")
print(result)
def test_extract_tieba_note_sub_comments():
with open("test_data/note_sub_comments.html", "r", encoding="utf-8") as f:
content = f.read()
extractor = TieBaExtractor()
fake_parment_comment = TiebaComment(comment_id="123456", content="content", user_link="user_link",
user_nickname="user_nickname", user_avatar="user_avatar",
publish_time="publish_time", parent_comment_id="parent_comment_id",
note_id="note_id", note_url="note_url", tieba_id="tieba_id",
tieba_name="tieba_name", )
result = extractor.extract_tieba_note_sub_comments(content, fake_parment_comment)
print(result)
def test_extract_tieba_note_list():
with open("test_data/tieba_note_list.html", "r", encoding="utf-8") as f:
content = f.read()
extractor = TieBaExtractor()
result = extractor.extract_tieba_note_list(content)
print(result)
pass
def test_extract_creator_info():
with open("test_data/creator_info.html", "r", encoding="utf-8") as f:
content = f.read()
extractor = TieBaExtractor()
result = extractor.extract_creator_info(content)
print(result.model_dump_json())
if __name__ == '__main__':
# test_extract_search_note_list()
# test_extract_note_detail()
# test_extract_tieba_note_parment_comments()
# test_extract_tieba_note_list()
test_extract_creator_info() | --- +++ @@ -41,6 +41,14 @@
@staticmethod
def extract_search_note_list(page_content: str) -> List[TiebaNote]:
+ """
+ Extract Tieba post list from keyword search result pages, still missing reply count and reply page data
+ Args:
+ page_content: HTML string of page content
+
+ Returns:
+ List of Tieba post objects
+ """
xpath_selector = "//div[@class='s_post']"
post_list = Selector(text=page_content).xpath(xpath_selector)
result: List[TiebaNote] = []
@@ -62,6 +70,14 @@ return result
def extract_tieba_note_list(self, page_content: str) -> List[TiebaNote]:
+ """
+ Extract Tieba post list from Tieba page
+ Args:
+ page_content: HTML string of page content
+
+ Returns:
+ List of Tieba post objects
+ """
page_content = page_content.replace('<!--', "")
content_selector = Selector(text=page_content)
xpath_selector = "//ul[@id='thread_list']/li"
@@ -89,6 +105,14 @@ return result
def extract_note_detail(self, page_content: str) -> TiebaNote:
+ """
+ Extract Tieba post details from post detail page
+ Args:
+ page_content: HTML string of page content
+
+ Returns:
+ Tieba post detail object
+ """
content_selector = Selector(text=page_content)
first_floor_selector = content_selector.xpath("//div[@class='p_postlist'][1]")
only_view_author_link = content_selector.xpath("//*[@id='lzonly_cntn']/@href").get(default='').strip()
@@ -118,6 +142,15 @@ return note
def extract_tieba_note_parment_comments(self, page_content: str, note_id: str) -> List[TiebaComment]:
+ """
+ Extract Tieba post first-level comments from comment page
+ Args:
+ page_content: HTML string of page content
+ note_id: Post ID
+
+ Returns:
+ List of first-level comment objects
+ """
xpath_selector = "//div[@class='l_post l_post_bright j_l_post clearfix ']"
comment_list = Selector(text=page_content).xpath(xpath_selector)
result: List[TiebaComment] = []
@@ -146,6 +179,15 @@ return result
def extract_tieba_note_sub_comments(self, page_content: str, parent_comment: TiebaComment) -> List[TiebaComment]:
+ """
+ Extract Tieba post second-level comments from sub-comment page
+ Args:
+ page_content: HTML string of page content
+ parent_comment: Parent comment object
+
+ Returns:
+ List of second-level comment objects
+ """
selector = Selector(page_content)
comments = []
comment_ele_list = selector.xpath("//li[@class='lzl_single_post j_lzl_s_p first_no_border']")
@@ -172,6 +214,14 @@ return comments
def extract_creator_info(self, html_content: str) -> TiebaCreator:
+ """
+ Extract Tieba creator information from creator homepage
+ Args:
+ html_content: HTML string of creator homepage
+
+ Returns:
+ Tieba creator object
+ """
selector = Selector(text=html_content)
user_link_selector = selector.xpath("//p[@class='space']/a")
user_link: str = user_link_selector.xpath("./@href").get(default='')
@@ -200,6 +250,14 @@ def extract_tieba_thread_id_list_from_creator_page(
html_content: str
) -> List[str]:
+ """
+ Extract post ID list from Tieba creator's homepage
+ Args:
+ html_content: HTML string of creator homepage
+
+ Returns:
+ List of post IDs
+ """
selector = Selector(text=html_content)
thread_id_list = []
xpath_selector = (
@@ -212,6 +270,14 @@ return thread_id_list
def extract_ip_and_pub_time(self, html_content: str) -> Tuple[str, str]:
+ """
+ Extract IP location and publish time from HTML content
+ Args:
+ html_content: HTML string
+
+ Returns:
+ Tuple of (IP location, publish time)
+ """
pattern_pub_time = re.compile(r'<span class="tail-info">(\d{4}-\d{2}-\d{2} \d{2}:\d{2})</span>')
time_match = pattern_pub_time.search(html_content)
pub_time = time_match.group(1) if time_match else ""
@@ -219,6 +285,14 @@
@staticmethod
def extract_ip(html_content: str) -> str:
+ """
+ Extract IP location from HTML content
+ Args:
+ html_content: HTML string
+
+ Returns:
+ IP location string
+ """
pattern_ip = re.compile(r'IP属地:(\S+)</span>')
ip_match = pattern_ip.search(html_content)
ip = ip_match.group(1) if ip_match else ""
@@ -226,6 +300,14 @@
@staticmethod
def extract_gender(html_content: str) -> str:
+ """
+ Extract gender from HTML content
+ Args:
+ html_content: HTML string
+
+ Returns:
+ Gender string ('Male', 'Female', or 'Unknown')
+ """
if GENDER_MALE in html_content:
return 'Male'
elif GENDER_FEMALE in html_content:
@@ -234,6 +316,14 @@
@staticmethod
def extract_follow_and_fans(selectors: List[Selector]) -> Tuple[str, str]:
+ """
+ Extract follow count and fan count from selectors
+ Args:
+ selectors: List of selector objects
+
+ Returns:
+ Tuple of (follow count, fan count)
+ """
pattern = re.compile(r'<span class="concern_num">\(<a[^>]*>(\d+)</a>\)</span>')
follow_match = pattern.findall(selectors[0].get())
fans_match = pattern.findall(selectors[1].get())
@@ -243,12 +333,31 @@
@staticmethod
def extract_registration_duration(html_content: str) -> str:
+ """
+ Extract Tieba age from HTML content
+ Example: "<span>吧龄:1.9年</span>"
+ Returns: "1.9年"
+
+ Args:
+ html_content: HTML string
+
+ Returns:
+ Tieba age string
+ """
pattern = re.compile(r'<span>吧龄:(\S+)</span>')
match = pattern.search(html_content)
return match.group(1) if match else ""
@staticmethod
def extract_data_field_value(selector: Selector) -> Dict:
+ """
+ Extract data-field value from selector
+ Args:
+ selector: Selector object
+
+ Returns:
+ Dictionary containing data-field value
+ """
data_field_value = selector.xpath("./@data-field").get(default='').strip()
if not data_field_value or data_field_value == "{}":
return {}
@@ -321,4 +430,4 @@ # test_extract_note_detail()
# test_extract_tieba_note_parment_comments()
# test_extract_tieba_note_list()
- test_extract_creator_info()+ test_extract_creator_info()
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/tieba/help.py |
Improve my code by adding docstrings | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/xhs/help.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
import ctypes
import json
import random
import time
import urllib.parse
from model.m_xiaohongshu import NoteUrlInfo, CreatorUrlInfo
from tools.crawler_util import extract_url_params_to_dict
def sign(a1="", b1="", x_s="", x_t=""):
common = {
"s0": 3, # getPlatformCode
"s1": "",
"x0": "1", # localStorage.getItem("b1b1")
"x1": "4.2.2", # version
"x2": "Mac OS",
"x3": "xhs-pc-web",
"x4": "4.74.0",
"x5": a1, # cookie of a1
"x6": x_t,
"x7": x_s,
"x8": b1, # localStorage.getItem("b1")
"x9": mrc(x_t + x_s + b1),
"x10": 154, # getSigCount
"x11": "normal"
}
encode_str = encodeUtf8(json.dumps(common, separators=(',', ':')))
x_s_common = b64Encode(encode_str)
x_b3_traceid = get_b3_trace_id()
return {
"x-s": x_s,
"x-t": x_t,
"x-s-common": x_s_common,
"x-b3-traceid": x_b3_traceid
}
def get_b3_trace_id():
re = "abcdef0123456789"
je = 16
e = ""
for t in range(16):
e += re[random.randint(0, je - 1)]
return e
def mrc(e):
ie = [
0, 1996959894, 3993919788, 2567524794, 124634137, 1886057615, 3915621685,
2657392035, 249268274, 2044508324, 3772115230, 2547177864, 162941995,
2125561021, 3887607047, 2428444049, 498536548, 1789927666, 4089016648,
2227061214, 450548861, 1843258603, 4107580753, 2211677639, 325883990,
1684777152, 4251122042, 2321926636, 335633487, 1661365465, 4195302755,
2366115317, 997073096, 1281953886, 3579855332, 2724688242, 1006888145,
1258607687, 3524101629, 2768942443, 901097722, 1119000684, 3686517206,
2898065728, 853044451, 1172266101, 3705015759, 2882616665, 651767980,
1373503546, 3369554304, 3218104598, 565507253, 1454621731, 3485111705,
3099436303, 671266974, 1594198024, 3322730930, 2970347812, 795835527,
1483230225, 3244367275, 3060149565, 1994146192, 31158534, 2563907772,
4023717930, 1907459465, 112637215, 2680153253, 3904427059, 2013776290,
251722036, 2517215374, 3775830040, 2137656763, 141376813, 2439277719,
3865271297, 1802195444, 476864866, 2238001368, 4066508878, 1812370925,
453092731, 2181625025, 4111451223, 1706088902, 314042704, 2344532202,
4240017532, 1658658271, 366619977, 2362670323, 4224994405, 1303535960,
984961486, 2747007092, 3569037538, 1256170817, 1037604311, 2765210733,
3554079995, 1131014506, 879679996, 2909243462, 3663771856, 1141124467,
855842277, 2852801631, 3708648649, 1342533948, 654459306, 3188396048,
3373015174, 1466479909, 544179635, 3110523913, 3462522015, 1591671054,
702138776, 2966460450, 3352799412, 1504918807, 783551873, 3082640443,
3233442989, 3988292384, 2596254646, 62317068, 1957810842, 3939845945,
2647816111, 81470997, 1943803523, 3814918930, 2489596804, 225274430,
2053790376, 3826175755, 2466906013, 167816743, 2097651377, 4027552580,
2265490386, 503444072, 1762050814, 4150417245, 2154129355, 426522225,
1852507879, 4275313526, 2312317920, 282753626, 1742555852, 4189708143,
2394877945, 397917763, 1622183637, 3604390888, 2714866558, 953729732,
1340076626, 3518719985, 2797360999, 1068828381, 1219638859, 3624741850,
2936675148, 906185462, 1090812512, 3747672003, 2825379669, 829329135,
1181335161, 3412177804, 3160834842, 628085408, 1382605366, 3423369109,
3138078467, 570562233, 1426400815, 3317316542, 2998733608, 733239954,
1555261956, 3268935591, 3050360625, 752459403, 1541320221, 2607071920,
3965973030, 1969922972, 40735498, 2617837225, 3943577151, 1913087877,
83908371, 2512341634, 3803740692, 2075208622, 213261112, 2463272603,
3855990285, 2094854071, 198958881, 2262029012, 4057260610, 1759359992,
534414190, 2176718541, 4139329115, 1873836001, 414664567, 2282248934,
4279200368, 1711684554, 285281116, 2405801727, 4167216745, 1634467795,
376229701, 2685067896, 3608007406, 1308918612, 956543938, 2808555105,
3495958263, 1231636301, 1047427035, 2932959818, 3654703836, 1088359270,
936918000, 2847714899, 3736837829, 1202900863, 817233897, 3183342108,
3401237130, 1404277552, 615818150, 3134207493, 3453421203, 1423857449,
601450431, 3009837614, 3294710456, 1567103746, 711928724, 3020668471,
3272380065, 1510334235, 755167117,
]
o = -1
def right_without_sign(num: int, bit: int=0) -> int:
val = ctypes.c_uint32(num).value >> bit
MAX32INT = 4294967295
return (val + (MAX32INT + 1)) % (2 * (MAX32INT + 1)) - MAX32INT - 1
for n in range(57):
o = ie[(o & 255) ^ ord(e[n])] ^ right_without_sign(o, 8)
return o ^ -1 ^ 3988292384
lookup = [
"Z",
"m",
"s",
"e",
"r",
"b",
"B",
"o",
"H",
"Q",
"t",
"N",
"P",
"+",
"w",
"O",
"c",
"z",
"a",
"/",
"L",
"p",
"n",
"g",
"G",
"8",
"y",
"J",
"q",
"4",
"2",
"K",
"W",
"Y",
"j",
"0",
"D",
"S",
"f",
"d",
"i",
"k",
"x",
"3",
"V",
"T",
"1",
"6",
"I",
"l",
"U",
"A",
"F",
"M",
"9",
"7",
"h",
"E",
"C",
"v",
"u",
"R",
"X",
"5",
]
def tripletToBase64(e):
return (
lookup[63 & (e >> 18)] +
lookup[63 & (e >> 12)] +
lookup[(e >> 6) & 63] +
lookup[e & 63]
)
def encodeChunk(e, t, r):
m = []
for b in range(t, r, 3):
n = (16711680 & (e[b] << 16)) + \
((e[b + 1] << 8) & 65280) + (e[b + 2] & 255)
m.append(tripletToBase64(n))
return ''.join(m)
def b64Encode(e):
P = len(e)
W = P % 3
U = []
z = 16383
H = 0
Z = P - W
while H < Z:
U.append(encodeChunk(e, H, Z if H + z > Z else H + z))
H += z
if 1 == W:
F = e[P - 1]
U.append(lookup[F >> 2] + lookup[(F << 4) & 63] + "==")
elif 2 == W:
F = (e[P - 2] << 8) + e[P - 1]
U.append(lookup[F >> 10] + lookup[63 & (F >> 4)] +
lookup[(F << 2) & 63] + "=")
return "".join(U)
def encodeUtf8(e):
b = []
m = urllib.parse.quote(e, safe='~()*!.\'')
w = 0
while w < len(m):
T = m[w]
if T == "%":
E = m[w + 1] + m[w + 2]
S = int(E, 16)
b.append(S)
w += 2
else:
b.append(ord(T[0]))
w += 1
return b
def base36encode(number, alphabet='0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
if not isinstance(number, int):
raise TypeError('number must be an integer')
base36 = ''
sign = ''
if number < 0:
sign = '-'
number = -number
if 0 <= number < len(alphabet):
return sign + alphabet[number]
while number != 0:
number, i = divmod(number, len(alphabet))
base36 = alphabet[i] + base36
return sign + base36
def base36decode(number):
return int(number, 36)
def get_search_id():
e = int(time.time() * 1000) << 64
t = int(random.uniform(0, 2147483646))
return base36encode((e + t))
img_cdns = [
"https://sns-img-qc.xhscdn.com",
"https://sns-img-hw.xhscdn.com",
"https://sns-img-bd.xhscdn.com",
"https://sns-img-qn.xhscdn.com",
]
def get_img_url_by_trace_id(trace_id: str, format_type: str = "png"):
return f"{random.choice(img_cdns)}/{trace_id}?imageView2/format/{format_type}"
def get_img_urls_by_trace_id(trace_id: str, format_type: str = "png"):
return [f"{cdn}/{trace_id}?imageView2/format/{format_type}" for cdn in img_cdns]
def get_trace_id(img_url: str):
# Browser-uploaded images have an additional /spectrum/ path
return f"spectrum/{img_url.split('/')[-1]}" if img_url.find("spectrum") != -1 else img_url.split("/")[-1]
def parse_note_info_from_note_url(url: str) -> NoteUrlInfo:
note_id = url.split("/")[-1].split("?")[0]
params = extract_url_params_to_dict(url)
xsec_token = params.get("xsec_token", "")
xsec_source = params.get("xsec_source", "")
return NoteUrlInfo(note_id=note_id, xsec_token=xsec_token, xsec_source=xsec_source)
def parse_creator_info_from_url(url: str) -> CreatorUrlInfo:
# If it's a pure ID format (24 hexadecimal characters), return directly
if len(url) == 24 and all(c in "0123456789abcdef" for c in url):
return CreatorUrlInfo(user_id=url, xsec_token="", xsec_source="")
# Extract user_id from URL: /user/profile/xxx
import re
user_pattern = r'/user/profile/([^/?]+)'
match = re.search(user_pattern, url)
if match:
user_id = match.group(1)
# Extract xsec_token and xsec_source parameters
params = extract_url_params_to_dict(url)
xsec_token = params.get("xsec_token", "")
xsec_source = params.get("xsec_source", "")
return CreatorUrlInfo(user_id=user_id, xsec_token=xsec_token, xsec_source=xsec_source)
raise ValueError(f"Unable to parse creator info from URL: {url}")
if __name__ == '__main__':
_img_url = "https://sns-img-bd.xhscdn.com/7a3abfaf-90c1-a828-5de7-022c80b92aa3"
# Get image URL addresses under multiple CDNs for a single image
# final_img_urls = get_img_urls_by_trace_id(get_trace_id(_img_url))
final_img_url = get_img_url_by_trace_id(get_trace_id(_img_url))
print(final_img_url)
# Test creator URL parsing
print("\n=== Creator URL Parsing Test ===")
test_creator_urls = [
"https://www.xiaohongshu.com/user/profile/5eb8e1d400000000010075ae?xsec_token=AB1nWBKCo1vE2HEkfoJUOi5B6BE5n7wVrbdpHoWIj5xHw=&xsec_source=pc_feed",
"5eb8e1d400000000010075ae",
]
for url in test_creator_urls:
try:
result = parse_creator_info_from_url(url)
print(f"✓ URL: {url[:80]}...")
print(f" Result: {result}\n")
except Exception as e:
print(f"✗ URL: {url}")
print(f" Error: {e}\n") | --- +++ @@ -29,6 +29,9 @@
def sign(a1="", b1="", x_s="", x_t=""):
+ """
+ takes in a URI (uniform resource identifier), an optional data dictionary, and an optional ctime parameter. It returns a dictionary containing two keys: "x-s" and "x-t".
+ """
common = {
"s0": 3, # getPlatformCode
"s1": "",
@@ -247,6 +250,7 @@
def base36encode(number, alphabet='0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
+ """Converts an integer to a base36 string."""
if not isinstance(number, int):
raise TypeError('number must be an integer')
@@ -298,6 +302,13 @@
def parse_note_info_from_note_url(url: str) -> NoteUrlInfo:
+ """
+ Parse note information from Xiaohongshu note URL
+ Args:
+ url: "https://www.xiaohongshu.com/explore/66fad51c000000001b0224b8?xsec_token=AB3rO-QopW5sgrJ41GwN01WCXh6yWPxjSoFI9D5JIMgKw=&xsec_source=pc_search"
+ Returns:
+
+ """
note_id = url.split("/")[-1].split("?")[0]
params = extract_url_params_to_dict(url)
xsec_token = params.get("xsec_token", "")
@@ -306,6 +317,17 @@
def parse_creator_info_from_url(url: str) -> CreatorUrlInfo:
+ """
+ Parse creator information from Xiaohongshu creator homepage URL
+ Supports the following formats:
+ 1. Full URL: "https://www.xiaohongshu.com/user/profile/5eb8e1d400000000010075ae?xsec_token=AB1nWBKCo1vE2HEkfoJUOi5B6BE5n7wVrbdpHoWIj5xHw=&xsec_source=pc_feed"
+ 2. Pure ID: "5eb8e1d400000000010075ae"
+
+ Args:
+ url: Creator homepage URL or user_id
+ Returns:
+ CreatorUrlInfo: Object containing user_id, xsec_token, xsec_source
+ """
# If it's a pure ID format (24 hexadecimal characters), return directly
if len(url) == 24 and all(c in "0123456789abcdef" for c in url):
return CreatorUrlInfo(user_id=url, xsec_token="", xsec_source="")
@@ -345,4 +367,4 @@ print(f" Result: {result}\n")
except Exception as e:
print(f"✗ URL: {url}")
- print(f" Error: {e}\n")+ print(f" Error: {e}\n")
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/xhs/help.py |
Add inline docstrings for readability | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/xhs/login.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
import asyncio
import functools
import sys
from typing import Optional
from playwright.async_api import BrowserContext, Page
from tenacity import (RetryError, retry, retry_if_result, stop_after_attempt,
wait_fixed)
import config
from base.base_crawler import AbstractLogin
from cache.cache_factory import CacheFactory
from tools import utils
class XiaoHongShuLogin(AbstractLogin):
def __init__(self,
login_type: str,
browser_context: BrowserContext,
context_page: Page,
login_phone: Optional[str] = "",
cookie_str: str = ""
):
config.LOGIN_TYPE = login_type
self.browser_context = browser_context
self.context_page = context_page
self.login_phone = login_phone
self.cookie_str = cookie_str
@retry(stop=stop_after_attempt(600), wait=wait_fixed(1), retry=retry_if_result(lambda value: value is False))
async def check_login_state(self, no_logged_in_session: str) -> bool:
# 1. Priority check: Check if the "Me" (Profile) node appears in the sidebar
try:
# Selector for elements containing "Me" text with a link pointing to the profile
# XPath Explanation: Find a span with text "Me" inside an anchor tag (<a>)
# whose href attribute contains "/user/profile/"
user_profile_selector = "xpath=//a[contains(@href, '/user/profile/')]//span[text()='我']"
# Set a short timeout since this is called within a retry loop
is_visible = await self.context_page.is_visible(user_profile_selector, timeout=500)
if is_visible:
utils.logger.info("[XiaoHongShuLogin.check_login_state] Login status confirmed by UI element ('Me' button).")
return True
except Exception:
pass
# 2. Alternative: Check for CAPTCHA prompt
if "请通过验证" in await self.context_page.content():
utils.logger.info("[XiaoHongShuLogin.check_login_state] CAPTCHA appeared, please verify manually.")
# 3. Compatibility fallback: Original Cookie-based change detection
current_cookie = await self.browser_context.cookies()
_, cookie_dict = utils.convert_cookies(current_cookie)
current_web_session = cookie_dict.get("web_session")
# If web_session has changed, consider the login successful
if current_web_session and current_web_session != no_logged_in_session:
utils.logger.info("[XiaoHongShuLogin.check_login_state] Login status confirmed by Cookie (web_session changed).")
return True
return False
async def begin(self):
utils.logger.info("[XiaoHongShuLogin.begin] Begin login xiaohongshu ...")
if config.LOGIN_TYPE == "qrcode":
await self.login_by_qrcode()
elif config.LOGIN_TYPE == "phone":
await self.login_by_mobile()
elif config.LOGIN_TYPE == "cookie":
await self.login_by_cookies()
else:
raise ValueError("[XiaoHongShuLogin.begin]I nvalid Login Type Currently only supported qrcode or phone or cookies ...")
async def login_by_mobile(self):
utils.logger.info("[XiaoHongShuLogin.login_by_mobile] Begin login xiaohongshu by mobile ...")
await asyncio.sleep(1)
try:
# After entering Xiaohongshu homepage, the login dialog may not pop up automatically, need to manually click login button
login_button_ele = await self.context_page.wait_for_selector(
selector="xpath=//*[@id='app']/div[1]/div[2]/div[1]/ul/div[1]/button",
timeout=5000
)
await login_button_ele.click()
# The login dialog has two forms: one shows phone number and verification code directly
# The other requires clicking to switch to phone login
element = await self.context_page.wait_for_selector(
selector='xpath=//div[@class="login-container"]//div[@class="other-method"]/div[1]',
timeout=5000
)
await element.click()
except Exception as e:
utils.logger.info("[XiaoHongShuLogin.login_by_mobile] have not found mobile button icon and keep going ...")
await asyncio.sleep(1)
login_container_ele = await self.context_page.wait_for_selector("div.login-container")
input_ele = await login_container_ele.query_selector("label.phone > input")
await input_ele.fill(self.login_phone)
await asyncio.sleep(0.5)
send_btn_ele = await login_container_ele.query_selector("label.auth-code > span")
await send_btn_ele.click() # Click to send verification code
sms_code_input_ele = await login_container_ele.query_selector("label.auth-code > input")
submit_btn_ele = await login_container_ele.query_selector("div.input-container > button")
cache_client = CacheFactory.create_cache(config.CACHE_TYPE_MEMORY)
max_get_sms_code_time = 60 * 2 # Maximum time to get verification code is 2 minutes
no_logged_in_session = ""
while max_get_sms_code_time > 0:
utils.logger.info(f"[XiaoHongShuLogin.login_by_mobile] get sms code from redis remaining time {max_get_sms_code_time}s ...")
await asyncio.sleep(1)
sms_code_key = f"xhs_{self.login_phone}"
sms_code_value = cache_client.get(sms_code_key)
if not sms_code_value:
max_get_sms_code_time -= 1
continue
current_cookie = await self.browser_context.cookies()
_, cookie_dict = utils.convert_cookies(current_cookie)
no_logged_in_session = cookie_dict.get("web_session")
await sms_code_input_ele.fill(value=sms_code_value.decode()) # Enter SMS verification code
await asyncio.sleep(0.5)
agree_privacy_ele = self.context_page.locator("xpath=//div[@class='agreements']//*[local-name()='svg']")
await agree_privacy_ele.click() # Click to agree to privacy policy
await asyncio.sleep(0.5)
await submit_btn_ele.click() # Click login
# TODO: Should also check if the verification code is correct, as it may be incorrect
break
try:
await self.check_login_state(no_logged_in_session)
except RetryError:
utils.logger.info("[XiaoHongShuLogin.login_by_mobile] Login xiaohongshu failed by mobile login method ...")
sys.exit()
wait_redirect_seconds = 5
utils.logger.info(f"[XiaoHongShuLogin.login_by_mobile] Login successful then wait for {wait_redirect_seconds} seconds redirect ...")
await asyncio.sleep(wait_redirect_seconds)
async def login_by_qrcode(self):
utils.logger.info("[XiaoHongShuLogin.login_by_qrcode] Begin login xiaohongshu by qrcode ...")
# login_selector = "div.login-container > div.left > div.qrcode > img"
qrcode_img_selector = "xpath=//img[@class='qrcode-img']"
# find login qrcode
base64_qrcode_img = await utils.find_login_qrcode(
self.context_page,
selector=qrcode_img_selector
)
if not base64_qrcode_img:
utils.logger.info("[XiaoHongShuLogin.login_by_qrcode] login failed , have not found qrcode please check ....")
# if this website does not automatically popup login dialog box, we will manual click login button
await asyncio.sleep(0.5)
login_button_ele = self.context_page.locator("xpath=//*[@id='app']/div[1]/div[2]/div[1]/ul/div[1]/button")
await login_button_ele.click()
base64_qrcode_img = await utils.find_login_qrcode(
self.context_page,
selector=qrcode_img_selector
)
if not base64_qrcode_img:
sys.exit()
# get not logged session
current_cookie = await self.browser_context.cookies()
_, cookie_dict = utils.convert_cookies(current_cookie)
no_logged_in_session = cookie_dict.get("web_session")
# show login qrcode
# fix issue #12
# we need to use partial function to call show_qrcode function and run in executor
# then current asyncio event loop will not be blocked
partial_show_qrcode = functools.partial(utils.show_qrcode, base64_qrcode_img)
asyncio.get_running_loop().run_in_executor(executor=None, func=partial_show_qrcode)
utils.logger.info(f"[XiaoHongShuLogin.login_by_qrcode] waiting for scan code login, remaining time is 120s")
try:
await self.check_login_state(no_logged_in_session)
except RetryError:
utils.logger.info("[XiaoHongShuLogin.login_by_qrcode] Login xiaohongshu failed by qrcode login method ...")
sys.exit()
wait_redirect_seconds = 5
utils.logger.info(f"[XiaoHongShuLogin.login_by_qrcode] Login successful then wait for {wait_redirect_seconds} seconds redirect ...")
await asyncio.sleep(wait_redirect_seconds)
async def login_by_cookies(self):
utils.logger.info("[XiaoHongShuLogin.login_by_cookies] Begin login xiaohongshu by cookie ...")
for key, value in utils.convert_str_cookie_to_dict(self.cookie_str).items():
if key != "web_session": # Only set web_session cookie attribute
continue
await self.browser_context.add_cookies([{
'name': key,
'value': value,
'domain': ".xiaohongshu.com",
'path': "/"
}]) | --- +++ @@ -50,6 +50,9 @@
@retry(stop=stop_after_attempt(600), wait=wait_fixed(1), retry=retry_if_result(lambda value: value is False))
async def check_login_state(self, no_logged_in_session: str) -> bool:
+ """
+ Verify login status using dual-check: UI elements and Cookies.
+ """
# 1. Priority check: Check if the "Me" (Profile) node appears in the sidebar
try:
# Selector for elements containing "Me" text with a link pointing to the profile
@@ -82,6 +85,7 @@ return False
async def begin(self):
+ """Start login xiaohongshu"""
utils.logger.info("[XiaoHongShuLogin.begin] Begin login xiaohongshu ...")
if config.LOGIN_TYPE == "qrcode":
await self.login_by_qrcode()
@@ -93,6 +97,7 @@ raise ValueError("[XiaoHongShuLogin.begin]I nvalid Login Type Currently only supported qrcode or phone or cookies ...")
async def login_by_mobile(self):
+ """Login xiaohongshu by mobile"""
utils.logger.info("[XiaoHongShuLogin.login_by_mobile] Begin login xiaohongshu by mobile ...")
await asyncio.sleep(1)
try:
@@ -160,6 +165,7 @@ await asyncio.sleep(wait_redirect_seconds)
async def login_by_qrcode(self):
+ """login xiaohongshu website and keep webdriver login state"""
utils.logger.info("[XiaoHongShuLogin.login_by_qrcode] Begin login xiaohongshu by qrcode ...")
# login_selector = "div.login-container > div.left > div.qrcode > img"
qrcode_img_selector = "xpath=//img[@class='qrcode-img']"
@@ -205,6 +211,7 @@ await asyncio.sleep(wait_redirect_seconds)
async def login_by_cookies(self):
+ """login xiaohongshu website by cookies"""
utils.logger.info("[XiaoHongShuLogin.login_by_cookies] Begin login xiaohongshu by cookie ...")
for key, value in utils.convert_str_cookie_to_dict(self.cookie_str).items():
if key != "web_session": # Only set web_session cookie attribute
@@ -214,4 +221,4 @@ 'value': value,
'domain': ".xiaohongshu.com",
'path': "/"
- }])+ }])
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/xhs/login.py |
Add docstrings to clarify complex logic | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/bilibili/login.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : relakkes@gmail.com
# @Time : 2023/12/2 18:44
# @Desc : bilibili login implementation class
import asyncio
import functools
import sys
from typing import Optional
from playwright.async_api import BrowserContext, Page
from tenacity import (RetryError, retry, retry_if_result, stop_after_attempt,
wait_fixed)
import config
from base.base_crawler import AbstractLogin
from tools import utils
class BilibiliLogin(AbstractLogin):
def __init__(self,
login_type: str,
browser_context: BrowserContext,
context_page: Page,
login_phone: Optional[str] = "",
cookie_str: str = ""
):
config.LOGIN_TYPE = login_type
self.browser_context = browser_context
self.context_page = context_page
self.login_phone = login_phone
self.cookie_str = cookie_str
async def begin(self):
utils.logger.info("[BilibiliLogin.begin] Begin login Bilibili ...")
if config.LOGIN_TYPE == "qrcode":
await self.login_by_qrcode()
elif config.LOGIN_TYPE == "phone":
await self.login_by_mobile()
elif config.LOGIN_TYPE == "cookie":
await self.login_by_cookies()
else:
raise ValueError(
"[BilibiliLogin.begin] Invalid Login Type Currently only supported qrcode or phone or cookie ...")
@retry(stop=stop_after_attempt(600), wait=wait_fixed(1), retry=retry_if_result(lambda value: value is False))
async def check_login_state(self) -> bool:
current_cookie = await self.browser_context.cookies()
_, cookie_dict = utils.convert_cookies(current_cookie)
if cookie_dict.get("SESSDATA", "") or cookie_dict.get("DedeUserID"):
return True
return False
async def login_by_qrcode(self):
utils.logger.info("[BilibiliLogin.login_by_qrcode] Begin login bilibili by qrcode ...")
# click login button
login_button_ele = self.context_page.locator(
"xpath=//div[@class='right-entry__outside go-login-btn']//div"
)
await login_button_ele.click()
await asyncio.sleep(1)
# find login qrcode
qrcode_img_selector = "//div[@class='login-scan-box']//img"
base64_qrcode_img = await utils.find_login_qrcode(
self.context_page,
selector=qrcode_img_selector
)
if not base64_qrcode_img:
utils.logger.info("[BilibiliLogin.login_by_qrcode] login failed , have not found qrcode please check ....")
sys.exit()
# show login qrcode
partial_show_qrcode = functools.partial(utils.show_qrcode, base64_qrcode_img)
asyncio.get_running_loop().run_in_executor(executor=None, func=partial_show_qrcode)
utils.logger.info(f"[BilibiliLogin.login_by_qrcode] Waiting for scan code login, remaining time is 20s")
try:
await self.check_login_state()
except RetryError:
utils.logger.info("[BilibiliLogin.login_by_qrcode] Login bilibili failed by qrcode login method ...")
sys.exit()
wait_redirect_seconds = 5
utils.logger.info(
f"[BilibiliLogin.login_by_qrcode] Login successful then wait for {wait_redirect_seconds} seconds redirect ...")
await asyncio.sleep(wait_redirect_seconds)
async def login_by_mobile(self):
pass
async def login_by_cookies(self):
utils.logger.info("[BilibiliLogin.login_by_qrcode] Begin login bilibili by cookie ...")
for key, value in utils.convert_str_cookie_to_dict(self.cookie_str).items():
await self.browser_context.add_cookies([{
'name': key,
'value': value,
'domain': ".bilibili.com",
'path': "/"
}]) | --- +++ @@ -52,6 +52,7 @@ self.cookie_str = cookie_str
async def begin(self):
+ """Start login bilibili"""
utils.logger.info("[BilibiliLogin.begin] Begin login Bilibili ...")
if config.LOGIN_TYPE == "qrcode":
await self.login_by_qrcode()
@@ -65,6 +66,11 @@
@retry(stop=stop_after_attempt(600), wait=wait_fixed(1), retry=retry_if_result(lambda value: value is False))
async def check_login_state(self) -> bool:
+ """
+ Check if the current login status is successful and return True otherwise return False
+ retry decorator will retry 20 times if the return value is False, and the retry interval is 1 second
+ if max retry times reached, raise RetryError
+ """
current_cookie = await self.browser_context.cookies()
_, cookie_dict = utils.convert_cookies(current_cookie)
if cookie_dict.get("SESSDATA", "") or cookie_dict.get("DedeUserID"):
@@ -72,6 +78,7 @@ return False
async def login_by_qrcode(self):
+ """login bilibili website and keep webdriver login state"""
utils.logger.info("[BilibiliLogin.login_by_qrcode] Begin login bilibili by qrcode ...")
# click login button
@@ -117,4 +124,4 @@ 'value': value,
'domain': ".bilibili.com",
'path': "/"
- }])+ }])
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/bilibili/login.py |
Document all public functions with docstrings | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/xhs/client.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
import asyncio
import json
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union
from urllib.parse import urlencode
import httpx
from playwright.async_api import BrowserContext, Page
from tenacity import retry, stop_after_attempt, wait_fixed, retry_if_not_exception_type
import config
from base.base_crawler import AbstractApiClient
from proxy.proxy_mixin import ProxyRefreshMixin
from tools import utils
if TYPE_CHECKING:
from proxy.proxy_ip_pool import ProxyIpPool
from .exception import DataFetchError, IPBlockError, NoteNotFoundError
from .field import SearchNoteType, SearchSortType
from .help import get_search_id
from .extractor import XiaoHongShuExtractor
from .playwright_sign import sign_with_playwright
class XiaoHongShuClient(AbstractApiClient, ProxyRefreshMixin):
def __init__(
self,
timeout=60, # If media crawling is enabled, Xiaohongshu long videos need longer timeout
proxy=None,
*,
headers: Dict[str, str],
playwright_page: Page,
cookie_dict: Dict[str, str],
proxy_ip_pool: Optional["ProxyIpPool"] = None,
):
self.proxy = proxy
self.timeout = timeout
self.headers = headers
self._host = "https://edith.xiaohongshu.com"
self._domain = "https://www.xiaohongshu.com"
self.IP_ERROR_STR = "Network connection error, please check network settings or restart"
self.IP_ERROR_CODE = 300012
self.NOTE_NOT_FOUND_CODE = -510000
self.NOTE_ABNORMAL_STR = "Note status abnormal, please check later"
self.NOTE_ABNORMAL_CODE = -510001
self.playwright_page = playwright_page
self.cookie_dict = cookie_dict
self._extractor = XiaoHongShuExtractor()
# Initialize proxy pool (from ProxyRefreshMixin)
self.init_proxy_pool(proxy_ip_pool)
async def _pre_headers(self, url: str, params: Optional[Dict] = None, payload: Optional[Dict] = None) -> Dict:
a1_value = self.cookie_dict.get("a1", "")
# Determine request data, method and URI
if params is not None:
data = params
method = "GET"
elif payload is not None:
data = payload
method = "POST"
else:
raise ValueError("params or payload is required")
# Generate signature using playwright injection method
signs = await sign_with_playwright(
page=self.playwright_page,
uri=url,
data=data,
a1=a1_value,
method=method,
)
headers = {
"X-S": signs["x-s"],
"X-T": signs["x-t"],
"x-S-Common": signs["x-s-common"],
"X-B3-Traceid": signs["x-b3-traceid"],
}
self.headers.update(headers)
return self.headers
@retry(stop=stop_after_attempt(3), wait=wait_fixed(1), retry=retry_if_not_exception_type(NoteNotFoundError))
async def request(self, method, url, **kwargs) -> Union[str, Any]:
# Check if proxy is expired before each request
await self._refresh_proxy_if_expired()
# return response.text
return_response = kwargs.pop("return_response", False)
async with httpx.AsyncClient(proxy=self.proxy) as client:
response = await client.request(method, url, timeout=self.timeout, **kwargs)
if response.status_code == 471 or response.status_code == 461:
# someday someone maybe will bypass captcha
verify_type = response.headers["Verifytype"]
verify_uuid = response.headers["Verifyuuid"]
msg = f"CAPTCHA appeared, request failed, Verifytype: {verify_type}, Verifyuuid: {verify_uuid}, Response: {response}"
utils.logger.error(msg)
raise Exception(msg)
if return_response:
return response.text
data: Dict = response.json()
if data["success"]:
return data.get("data", data.get("success", {}))
elif data["code"] == self.IP_ERROR_CODE:
raise IPBlockError(self.IP_ERROR_STR)
elif data["code"] in (self.NOTE_NOT_FOUND_CODE, self.NOTE_ABNORMAL_CODE):
raise NoteNotFoundError(f"Note not found or abnormal, code: {data['code']}")
else:
err_msg = data.get("msg", None) or f"{response.text}"
raise DataFetchError(err_msg)
async def get(self, uri: str, params: Optional[Dict] = None) -> Dict:
headers = await self._pre_headers(uri, params)
full_url = f"{self._host}{uri}"
return await self.request(
method="GET", url=full_url, headers=headers, params=params
)
async def post(self, uri: str, data: dict, **kwargs) -> Dict:
headers = await self._pre_headers(uri, payload=data)
json_str = json.dumps(data, separators=(",", ":"), ensure_ascii=False)
return await self.request(
method="POST",
url=f"{self._host}{uri}",
data=json_str,
headers=headers,
**kwargs,
)
async def get_note_media(self, url: str) -> Union[bytes, None]:
# Check if proxy is expired before request
await self._refresh_proxy_if_expired()
async with httpx.AsyncClient(proxy=self.proxy) as client:
try:
response = await client.request("GET", url, timeout=self.timeout)
response.raise_for_status()
if not response.reason_phrase == "OK":
utils.logger.error(
f"[XiaoHongShuClient.get_note_media] request {url} err, res:{response.text}"
)
return None
else:
return response.content
except (
httpx.HTTPError
) as exc: # some wrong when call httpx.request method, such as connection error, client error, server error or response status code is not 2xx
utils.logger.error(
f"[XiaoHongShuClient.get_aweme_media] {exc.__class__.__name__} for {exc.request.url} - {exc}"
) # Keep original exception type name for developer debugging
return None
async def query_self(self) -> Optional[Dict]:
uri = "/api/sns/web/v1/user/selfinfo"
headers = await self._pre_headers(uri, params={})
async with httpx.AsyncClient(proxy=self.proxy) as client:
response = await client.get(f"{self._host}{uri}", headers=headers)
if response.status_code == 200:
return response.json()
return None
async def pong(self) -> bool:
utils.logger.info("[XiaoHongShuClient.pong] Begin to check login state...")
ping_flag = False
try:
self_info: Dict = await self.query_self()
if self_info and self_info.get("data", {}).get("result", {}).get("success"):
ping_flag = True
except Exception as e:
utils.logger.error(
f"[XiaoHongShuClient.pong] Check login state failed: {e}, and try to login again..."
)
ping_flag = False
utils.logger.info(f"[XiaoHongShuClient.pong] Login state result: {ping_flag}")
return ping_flag
async def update_cookies(self, browser_context: BrowserContext):
cookie_str, cookie_dict = utils.convert_cookies(await browser_context.cookies())
self.headers["Cookie"] = cookie_str
self.cookie_dict = cookie_dict
async def get_note_by_keyword(
self,
keyword: str,
search_id: str = get_search_id(),
page: int = 1,
page_size: int = 20,
sort: SearchSortType = SearchSortType.GENERAL,
note_type: SearchNoteType = SearchNoteType.ALL,
) -> Dict:
uri = "/api/sns/web/v1/search/notes"
data = {
"keyword": keyword,
"page": page,
"page_size": page_size,
"search_id": search_id,
"sort": sort.value,
"note_type": note_type.value,
}
return await self.post(uri, data)
async def get_note_by_id(
self,
note_id: str,
xsec_source: str,
xsec_token: str,
) -> Dict:
if xsec_source == "":
xsec_source = "pc_search"
data = {
"source_note_id": note_id,
"image_formats": ["jpg", "webp", "avif"],
"extra": {"need_body_topic": 1},
"xsec_source": xsec_source,
"xsec_token": xsec_token,
}
uri = "/api/sns/web/v1/feed"
res = await self.post(uri, data)
if res and res.get("items"):
res_dict: Dict = res["items"][0]["note_card"]
return res_dict
# When crawling frequently, some notes may have results while others don't
utils.logger.error(
f"[XiaoHongShuClient.get_note_by_id] get note id:{note_id} empty and res:{res}"
)
return dict()
async def get_note_comments(
self,
note_id: str,
xsec_token: str,
cursor: str = "",
) -> Dict:
uri = "/api/sns/web/v2/comment/page"
params = {
"note_id": note_id,
"cursor": cursor,
"top_comment_id": "",
"image_formats": "jpg,webp,avif",
"xsec_token": xsec_token,
}
return await self.get(uri, params)
async def get_note_sub_comments(
self,
note_id: str,
root_comment_id: str,
xsec_token: str,
num: int = 10,
cursor: str = "",
):
uri = "/api/sns/web/v2/comment/sub/page"
params = {
"note_id": note_id,
"root_comment_id": root_comment_id,
"num": str(num),
"cursor": cursor,
"image_formats": "jpg,webp,avif",
"top_comment_id": "",
"xsec_token": xsec_token,
}
return await self.get(uri, params)
async def get_note_all_comments(
self,
note_id: str,
xsec_token: str,
crawl_interval: float = 1.0,
callback: Optional[Callable] = None,
max_count: int = 10,
) -> List[Dict]:
result = []
comments_has_more = True
comments_cursor = ""
while comments_has_more and len(result) < max_count:
comments_res = await self.get_note_comments(
note_id=note_id, xsec_token=xsec_token, cursor=comments_cursor
)
comments_has_more = comments_res.get("has_more", False)
comments_cursor = comments_res.get("cursor", "")
if "comments" not in comments_res:
utils.logger.info(
f"[XiaoHongShuClient.get_note_all_comments] No 'comments' key found in response: {comments_res}"
)
break
comments = comments_res["comments"]
if len(result) + len(comments) > max_count:
comments = comments[: max_count - len(result)]
if callback:
await callback(note_id, comments)
await asyncio.sleep(crawl_interval)
result.extend(comments)
sub_comments = await self.get_comments_all_sub_comments(
comments=comments,
xsec_token=xsec_token,
crawl_interval=crawl_interval,
callback=callback,
)
result.extend(sub_comments)
return result
async def get_comments_all_sub_comments(
self,
comments: List[Dict],
xsec_token: str,
crawl_interval: float = 1.0,
callback: Optional[Callable] = None,
) -> List[Dict]:
if not config.ENABLE_GET_SUB_COMMENTS:
utils.logger.info(
f"[XiaoHongShuCrawler.get_comments_all_sub_comments] Crawling sub_comment mode is not enabled"
)
return []
result = []
for comment in comments:
try:
note_id = comment.get("note_id")
sub_comments = comment.get("sub_comments")
if sub_comments and callback:
await callback(note_id, sub_comments)
sub_comment_has_more = comment.get("sub_comment_has_more")
if not sub_comment_has_more:
continue
root_comment_id = comment.get("id")
sub_comment_cursor = comment.get("sub_comment_cursor")
while sub_comment_has_more:
try:
comments_res = await self.get_note_sub_comments(
note_id=note_id,
root_comment_id=root_comment_id,
xsec_token=xsec_token,
num=10,
cursor=sub_comment_cursor,
)
if comments_res is None:
utils.logger.info(
f"[XiaoHongShuClient.get_comments_all_sub_comments] No response found for note_id: {note_id}"
)
break
sub_comment_has_more = comments_res.get("has_more", False)
sub_comment_cursor = comments_res.get("cursor", "")
if "comments" not in comments_res:
utils.logger.info(
f"[XiaoHongShuClient.get_comments_all_sub_comments] No 'comments' key found in response: {comments_res}"
)
break
comments = comments_res["comments"]
if callback:
await callback(note_id, comments)
await asyncio.sleep(crawl_interval)
result.extend(comments)
except DataFetchError as e:
utils.logger.warning(
f"[XiaoHongShuClient.get_comments_all_sub_comments] Failed to get sub-comments for note_id: {note_id}, root_comment_id: {root_comment_id}, error: {e}. Skipping this comment's sub-comments."
)
break # Break out of the sub-comment acquisition loop of the current comment and continue processing the next comment
except Exception as e:
utils.logger.error(
f"[XiaoHongShuClient.get_comments_all_sub_comments] Unexpected error when getting sub-comments for note_id: {note_id}, root_comment_id: {root_comment_id}, error: {e}"
)
break
except Exception as e:
utils.logger.error(
f"[XiaoHongShuClient.get_comments_all_sub_comments] Error processing comment: {comment.get('id', 'unknown')}, error: {e}. Continuing with next comment."
)
continue # Continue to next comment
return result
async def get_creator_info(
self, user_id: str, xsec_token: str = "", xsec_source: str = ""
) -> Dict:
# Build URI, add xsec parameters to URL if available
uri = f"/user/profile/{user_id}"
if xsec_token and xsec_source:
uri = f"{uri}?xsec_token={xsec_token}&xsec_source={xsec_source}"
html_content = await self.request(
"GET", self._domain + uri, return_response=True, headers=self.headers
)
return self._extractor.extract_creator_info_from_html(html_content)
async def get_notes_by_creator(
self,
creator: str,
cursor: str,
page_size: int = 30,
xsec_token: str = "",
xsec_source: str = "pc_feed",
) -> Dict:
uri = f"/api/sns/web/v1/user_posted"
params = {
"num": page_size,
"cursor": cursor,
"user_id": creator,
"xsec_token": xsec_token,
"xsec_source": xsec_source,
}
return await self.get(uri, params)
async def get_all_notes_by_creator(
self,
user_id: str,
crawl_interval: float = 1.0,
callback: Optional[Callable] = None,
xsec_token: str = "",
xsec_source: str = "pc_feed",
) -> List[Dict]:
result = []
notes_has_more = True
notes_cursor = ""
while notes_has_more and len(result) < config.CRAWLER_MAX_NOTES_COUNT:
notes_res = await self.get_notes_by_creator(
user_id, notes_cursor, xsec_token=xsec_token, xsec_source=xsec_source
)
if not notes_res:
utils.logger.error(
f"[XiaoHongShuClient.get_notes_by_creator] The current creator may have been banned by xhs, so they cannot access the data."
)
break
notes_has_more = notes_res.get("has_more", False)
notes_cursor = notes_res.get("cursor", "")
if "notes" not in notes_res:
utils.logger.info(
f"[XiaoHongShuClient.get_all_notes_by_creator] No 'notes' key found in response: {notes_res}"
)
break
notes = notes_res["notes"]
utils.logger.info(
f"[XiaoHongShuClient.get_all_notes_by_creator] got user_id:{user_id} notes len : {len(notes)}"
)
remaining = config.CRAWLER_MAX_NOTES_COUNT - len(result)
if remaining <= 0:
break
notes_to_add = notes[:remaining]
if callback:
await callback(notes_to_add)
result.extend(notes_to_add)
await asyncio.sleep(crawl_interval)
utils.logger.info(
f"[XiaoHongShuClient.get_all_notes_by_creator] Finished getting notes for user {user_id}, total: {len(result)}"
)
return result
async def get_note_short_url(self, note_id: str) -> Dict:
uri = f"/api/sns/web/short_url"
data = {"original_url": f"{self._domain}/discovery/item/{note_id}"}
return await self.post(uri, data=data, return_response=True)
@retry(stop=stop_after_attempt(3), wait=wait_fixed(1))
async def get_note_by_id_from_html(
self,
note_id: str,
xsec_source: str,
xsec_token: str,
enable_cookie: bool = False,
) -> Optional[Dict]:
url = (
"https://www.xiaohongshu.com/explore/"
+ note_id
+ f"?xsec_token={xsec_token}&xsec_source={xsec_source}"
)
copy_headers = self.headers.copy()
if not enable_cookie:
del copy_headers["Cookie"]
html = await self.request(
method="GET", url=url, return_response=True, headers=copy_headers
)
return self._extractor.extract_note_detail_from_html(note_id, html) | --- +++ @@ -70,6 +70,16 @@ self.init_proxy_pool(proxy_ip_pool)
async def _pre_headers(self, url: str, params: Optional[Dict] = None, payload: Optional[Dict] = None) -> Dict:
+ """Request header parameter signing (using playwright injection method)
+
+ Args:
+ url: Request URL
+ params: GET request parameters
+ payload: POST request parameters
+
+ Returns:
+ Dict: Signed request header parameters
+ """
a1_value = self.cookie_dict.get("a1", "")
# Determine request data, method and URI
@@ -102,6 +112,16 @@
@retry(stop=stop_after_attempt(3), wait=wait_fixed(1), retry=retry_if_not_exception_type(NoteNotFoundError))
async def request(self, method, url, **kwargs) -> Union[str, Any]:
+ """
+ Wrapper for httpx common request method, processes request response
+ Args:
+ method: Request method
+ url: Request URL
+ **kwargs: Other request parameters, such as headers, body, etc.
+
+ Returns:
+
+ """
# Check if proxy is expired before each request
await self._refresh_proxy_if_expired()
@@ -132,6 +152,15 @@ raise DataFetchError(err_msg)
async def get(self, uri: str, params: Optional[Dict] = None) -> Dict:
+ """
+ GET request, signs request headers
+ Args:
+ uri: Request route
+ params: Request parameters
+
+ Returns:
+
+ """
headers = await self._pre_headers(uri, params)
full_url = f"{self._host}{uri}"
@@ -140,6 +169,15 @@ )
async def post(self, uri: str, data: dict, **kwargs) -> Dict:
+ """
+ POST request, signs request headers
+ Args:
+ uri: Request route
+ data: Request body parameters
+
+ Returns:
+
+ """
headers = await self._pre_headers(uri, payload=data)
json_str = json.dumps(data, separators=(",", ":"), ensure_ascii=False)
return await self.request(
@@ -174,6 +212,11 @@ return None
async def query_self(self) -> Optional[Dict]:
+ """
+ Query self user info to check login state
+ Returns:
+ Dict: User info if logged in, None otherwise
+ """
uri = "/api/sns/web/v1/user/selfinfo"
headers = await self._pre_headers(uri, params={})
async with httpx.AsyncClient(proxy=self.proxy) as client:
@@ -183,6 +226,11 @@ return None
async def pong(self) -> bool:
+ """
+ Check if login state is still valid by querying self user info
+ Returns:
+ bool: True if logged in, False otherwise
+ """
utils.logger.info("[XiaoHongShuClient.pong] Begin to check login state...")
ping_flag = False
try:
@@ -198,6 +246,14 @@ return ping_flag
async def update_cookies(self, browser_context: BrowserContext):
+ """
+ Update cookies method provided by API client, usually called after successful login
+ Args:
+ browser_context: Browser context object
+
+ Returns:
+
+ """
cookie_str, cookie_dict = utils.convert_cookies(await browser_context.cookies())
self.headers["Cookie"] = cookie_str
self.cookie_dict = cookie_dict
@@ -211,6 +267,18 @@ sort: SearchSortType = SearchSortType.GENERAL,
note_type: SearchNoteType = SearchNoteType.ALL,
) -> Dict:
+ """
+ Search notes by keyword
+ Args:
+ keyword: Keyword parameter
+ page: Page number
+ page_size: Page data length
+ sort: Search result sorting specification
+ note_type: Type of note to search
+
+ Returns:
+
+ """
uri = "/api/sns/web/v1/search/notes"
data = {
"keyword": keyword,
@@ -228,6 +296,16 @@ xsec_source: str,
xsec_token: str,
) -> Dict:
+ """
+ Get note detail API
+ Args:
+ note_id: Note ID
+ xsec_source: Channel source
+ xsec_token: Token returned from search keyword result list
+
+ Returns:
+
+ """
if xsec_source == "":
xsec_source = "pc_search"
@@ -255,6 +333,16 @@ xsec_token: str,
cursor: str = "",
) -> Dict:
+ """
+ Get first-level comments API
+ Args:
+ note_id: Note ID
+ xsec_token: Verification token
+ cursor: Pagination cursor
+
+ Returns:
+
+ """
uri = "/api/sns/web/v2/comment/page"
params = {
"note_id": note_id,
@@ -273,6 +361,18 @@ num: int = 10,
cursor: str = "",
):
+ """
+ Get sub-comments under specified parent comment API
+ Args:
+ note_id: Post ID of sub-comments
+ root_comment_id: Root comment ID
+ xsec_token: Verification token
+ num: Pagination quantity
+ cursor: Pagination cursor
+
+ Returns:
+
+ """
uri = "/api/sns/web/v2/comment/sub/page"
params = {
"note_id": note_id,
@@ -293,6 +393,17 @@ callback: Optional[Callable] = None,
max_count: int = 10,
) -> List[Dict]:
+ """
+ Get all first-level comments under specified note, this method will continuously find all comment information under a post
+ Args:
+ note_id: Note ID
+ xsec_token: Verification token
+ crawl_interval: Crawl delay per note (seconds)
+ callback: Callback after one note crawl ends
+ max_count: Maximum number of comments to crawl per note
+ Returns:
+
+ """
result = []
comments_has_more = True
comments_cursor = ""
@@ -330,6 +441,17 @@ crawl_interval: float = 1.0,
callback: Optional[Callable] = None,
) -> List[Dict]:
+ """
+ Get all second-level comments under specified first-level comments, this method will continuously find all second-level comment information under first-level comments
+ Args:
+ comments: Comment list
+ xsec_token: Verification token
+ crawl_interval: Crawl delay per comment (seconds)
+ callback: Callback after one comment crawl ends
+
+ Returns:
+
+ """
if not config.ENABLE_GET_SUB_COMMENTS:
utils.logger.info(
f"[XiaoHongShuCrawler.get_comments_all_sub_comments] Crawling sub_comment mode is not enabled"
@@ -398,6 +520,18 @@ async def get_creator_info(
self, user_id: str, xsec_token: str = "", xsec_source: str = ""
) -> Dict:
+ """
+ Get user profile brief information by parsing user homepage HTML
+ The PC user homepage has window.__INITIAL_STATE__ variable, just parse it
+
+ Args:
+ user_id: User ID
+ xsec_token: Verification token (optional, pass if included in URL)
+ xsec_source: Channel source (optional, pass if included in URL)
+
+ Returns:
+ Dict: Creator information
+ """
# Build URI, add xsec parameters to URL if available
uri = f"/user/profile/{user_id}"
if xsec_token and xsec_source:
@@ -416,6 +550,18 @@ xsec_token: str = "",
xsec_source: str = "pc_feed",
) -> Dict:
+ """
+ Get creator's notes
+ Args:
+ creator: Creator ID
+ cursor: Last note ID from previous page
+ page_size: Page data length
+ xsec_token: Verification token
+ xsec_source: Channel source
+
+ Returns:
+
+ """
uri = f"/api/sns/web/v1/user_posted"
params = {
"num": page_size,
@@ -434,6 +580,18 @@ xsec_token: str = "",
xsec_source: str = "pc_feed",
) -> List[Dict]:
+ """
+ Get all posts published by specified user, this method will continuously find all post information under a user
+ Args:
+ user_id: User ID
+ crawl_interval: Crawl delay (seconds)
+ callback: Update callback function after one pagination crawl ends
+ xsec_token: Verification token
+ xsec_source: Channel source
+
+ Returns:
+
+ """
result = []
notes_has_more = True
notes_cursor = ""
@@ -477,6 +635,14 @@ return result
async def get_note_short_url(self, note_id: str) -> Dict:
+ """
+ Get note short URL
+ Args:
+ note_id: Note ID
+
+ Returns:
+
+ """
uri = f"/api/sns/web/short_url"
data = {"original_url": f"{self._domain}/discovery/item/{note_id}"}
return await self.post(uri, data=data, return_response=True)
@@ -489,6 +655,19 @@ xsec_token: str,
enable_cookie: bool = False,
) -> Optional[Dict]:
+ """
+ Get note details by parsing note detail page HTML, this interface may fail, retry 3 times here
+ copy from https://github.com/ReaJason/xhs/blob/eb1c5a0213f6fbb592f0a2897ee552847c69ea2d/xhs/core.py#L217-L259
+ thanks for ReaJason
+ Args:
+ note_id:
+ xsec_source:
+ xsec_token:
+ enable_cookie:
+
+ Returns:
+
+ """
url = (
"https://www.xiaohongshu.com/explore/"
+ note_id
@@ -502,4 +681,4 @@ method="GET", url=url, return_response=True, headers=copy_headers
)
- return self._extractor.extract_note_detail_from_html(note_id, html)+ return self._extractor.extract_note_detail_from_html(note_id, html)
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/xhs/client.py |
Create simple docstrings for beginners | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/zhihu/help.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
import json
from typing import Dict, List, Optional
from urllib.parse import parse_qs, urlparse
import execjs
from parsel import Selector
from constant import zhihu as zhihu_constant
from model.m_zhihu import ZhihuComment, ZhihuContent, ZhihuCreator
from tools import utils
from tools.crawler_util import extract_text_from_html
ZHIHU_SGIN_JS = None
def sign(url: str, cookies: str) -> Dict:
global ZHIHU_SGIN_JS
if not ZHIHU_SGIN_JS:
with open("libs/zhihu.js", mode="r", encoding="utf-8-sig") as f:
ZHIHU_SGIN_JS = execjs.compile(f.read())
return ZHIHU_SGIN_JS.call("get_sign", url, cookies)
class ZhihuExtractor:
def __init__(self):
pass
def extract_contents_from_search(self, json_data: Dict) -> List[ZhihuContent]:
if not json_data:
return []
search_result: List[Dict] = json_data.get("data", [])
search_result = [s_item for s_item in search_result if s_item.get("type") in ['search_result', 'zvideo']]
return self._extract_content_list([sr_item.get("object") for sr_item in search_result if sr_item.get("object")])
def _extract_content_list(self, content_list: List[Dict]) -> List[ZhihuContent]:
if not content_list:
return []
res: List[ZhihuContent] = []
for content in content_list:
if content.get("type") == zhihu_constant.ANSWER_NAME:
res.append(self._extract_answer_content(content))
elif content.get("type") == zhihu_constant.ARTICLE_NAME:
res.append(self._extract_article_content(content))
elif content.get("type") == zhihu_constant.VIDEO_NAME:
res.append(self._extract_zvideo_content(content))
else:
continue
return res
def _extract_answer_content(self, answer: Dict) -> ZhihuContent:
res = ZhihuContent()
res.content_id = answer.get("id")
res.content_type = answer.get("type")
res.content_text = extract_text_from_html(answer.get("content", ""))
res.question_id = answer.get("question").get("id")
res.content_url = f"{zhihu_constant.ZHIHU_URL}/question/{res.question_id}/answer/{res.content_id}"
res.title = extract_text_from_html(answer.get("title", ""))
res.desc = extract_text_from_html(answer.get("description", "") or answer.get("excerpt", ""))
res.created_time = answer.get("created_time")
res.updated_time = answer.get("updated_time")
res.voteup_count = answer.get("voteup_count", 0)
res.comment_count = answer.get("comment_count", 0)
# extract author info
author_info = self._extract_content_or_comment_author(answer.get("author"))
res.user_id = author_info.user_id
res.user_link = author_info.user_link
res.user_nickname = author_info.user_nickname
res.user_avatar = author_info.user_avatar
res.user_url_token = author_info.url_token
return res
def _extract_article_content(self, article: Dict) -> ZhihuContent:
res = ZhihuContent()
res.content_id = article.get("id")
res.content_type = article.get("type")
res.content_text = extract_text_from_html(article.get("content"))
res.content_url = f"{zhihu_constant.ZHIHU_ZHUANLAN_URL}/p/{res.content_id}"
res.title = extract_text_from_html(article.get("title"))
res.desc = extract_text_from_html(article.get("excerpt"))
res.created_time = article.get("created_time", 0) or article.get("created", 0)
res.updated_time = article.get("updated_time", 0) or article.get("updated", 0)
res.voteup_count = article.get("voteup_count", 0)
res.comment_count = article.get("comment_count", 0)
# extract author info
author_info = self._extract_content_or_comment_author(article.get("author"))
res.user_id = author_info.user_id
res.user_link = author_info.user_link
res.user_nickname = author_info.user_nickname
res.user_avatar = author_info.user_avatar
res.user_url_token = author_info.url_token
return res
def _extract_zvideo_content(self, zvideo: Dict) -> ZhihuContent:
res = ZhihuContent()
if "video" in zvideo and isinstance(zvideo.get("video"), dict): # This indicates data from the creator's homepage video list API
res.content_url = f"{zhihu_constant.ZHIHU_URL}/zvideo/{res.content_id}"
res.created_time = zvideo.get("published_at")
res.updated_time = zvideo.get("updated_at")
else:
res.content_url = zvideo.get("video_url")
res.created_time = zvideo.get("created_at")
res.content_id = zvideo.get("id")
res.content_type = zvideo.get("type")
res.title = extract_text_from_html(zvideo.get("title"))
res.desc = extract_text_from_html(zvideo.get("description"))
res.voteup_count = zvideo.get("voteup_count")
res.comment_count = zvideo.get("comment_count")
# extract author info
author_info = self._extract_content_or_comment_author(zvideo.get("author"))
res.user_id = author_info.user_id
res.user_link = author_info.user_link
res.user_nickname = author_info.user_nickname
res.user_avatar = author_info.user_avatar
res.user_url_token = author_info.url_token
return res
@staticmethod
def _extract_content_or_comment_author(author: Dict) -> ZhihuCreator:
res = ZhihuCreator()
try:
if not author:
return res
if not author.get("id"):
author = author.get("member")
res.user_id = author.get("id")
res.user_link = f"{zhihu_constant.ZHIHU_URL}/people/{author.get('url_token')}"
res.user_nickname = author.get("name")
res.user_avatar = author.get("avatar_url")
res.url_token = author.get("url_token")
except Exception as e :
utils.logger.warning(
f"[ZhihuExtractor._extract_content_or_comment_author] User Maybe Blocked. {e}"
)
return res
def extract_comments(self, page_content: ZhihuContent, comments: List[Dict]) -> List[ZhihuComment]:
if not comments:
return []
res: List[ZhihuComment] = []
for comment in comments:
if comment.get("type") != "comment":
continue
res.append(self._extract_comment(page_content, comment))
return res
def _extract_comment(self, page_content: ZhihuContent, comment: Dict) -> ZhihuComment:
res = ZhihuComment()
res.comment_id = str(comment.get("id", ""))
res.parent_comment_id = comment.get("reply_comment_id")
res.content = extract_text_from_html(comment.get("content"))
res.publish_time = comment.get("created_time")
res.ip_location = self._extract_comment_ip_location(comment.get("comment_tag", []))
res.sub_comment_count = comment.get("child_comment_count")
res.like_count = comment.get("like_count") if comment.get("like_count") else 0
res.dislike_count = comment.get("dislike_count") if comment.get("dislike_count") else 0
res.content_id = page_content.content_id
res.content_type = page_content.content_type
# extract author info
author_info = self._extract_content_or_comment_author(comment.get("author"))
res.user_id = author_info.user_id
res.user_link = author_info.user_link
res.user_nickname = author_info.user_nickname
res.user_avatar = author_info.user_avatar
return res
@staticmethod
def _extract_comment_ip_location(comment_tags: List[Dict]) -> str:
if not comment_tags:
return ""
for ct in comment_tags:
if ct.get("type") == "ip_info":
return ct.get("text")
return ""
@staticmethod
def extract_offset(paging_info: Dict) -> str:
# https://www.zhihu.com/api/v4/comment_v5/zvideos/1424368906836807681/root_comment?limit=10&offset=456770961_10125996085_0&order_by=score
next_url = paging_info.get("next")
if not next_url:
return ""
parsed_url = urlparse(next_url)
query_params = parse_qs(parsed_url.query)
offset = query_params.get('offset', [""])[0]
return offset
@staticmethod
def _foramt_gender_text(gender: int) -> str:
if gender == 1:
return "Male"
elif gender == 0:
return "Female"
else:
return "Unknown"
def extract_creator(self, user_url_token: str, html_content: str) -> Optional[ZhihuCreator]:
if not html_content:
return None
js_init_data = Selector(text=html_content).xpath("//script[@id='js-initialData']/text()").get(default="").strip()
if not js_init_data:
return None
js_init_data_dict: Dict = json.loads(js_init_data)
users_info: Dict = js_init_data_dict.get("initialState", {}).get("entities", {}).get("users", {})
if not users_info:
return None
creator_info: Dict = users_info.get(user_url_token)
if not creator_info:
return None
res = ZhihuCreator()
res.user_id = creator_info.get("id")
res.user_link = f"{zhihu_constant.ZHIHU_URL}/people/{user_url_token}"
res.user_nickname = creator_info.get("name")
res.user_avatar = creator_info.get("avatarUrl")
res.url_token = creator_info.get("urlToken") or user_url_token
res.gender = self._foramt_gender_text(creator_info.get("gender"))
res.ip_location = creator_info.get("ipInfo")
res.follows = creator_info.get("followingCount")
res.fans = creator_info.get("followerCount")
res.anwser_count = creator_info.get("answerCount")
res.video_count = creator_info.get("zvideoCount")
res.question_count = creator_info.get("questionCount")
res.article_count = creator_info.get("articlesCount")
res.column_count = creator_info.get("columnsCount")
res.get_voteup_count = creator_info.get("voteupCount")
return res
def extract_content_list_from_creator(self, anwser_list: List[Dict]) -> List[ZhihuContent]:
if not anwser_list:
return []
return self._extract_content_list(anwser_list)
def extract_answer_content_from_html(self, html_content: str) -> Optional[ZhihuContent]:
js_init_data: str = Selector(text=html_content).xpath("//script[@id='js-initialData']/text()").get(default="")
if not js_init_data:
return None
json_data: Dict = json.loads(js_init_data)
answer_info: Dict = json_data.get("initialState", {}).get("entities", {}).get("answers", {})
if not answer_info:
return None
return self._extract_answer_content(answer_info.get(list(answer_info.keys())[0]))
def extract_article_content_from_html(self, html_content: str) -> Optional[ZhihuContent]:
js_init_data: str = Selector(text=html_content).xpath("//script[@id='js-initialData']/text()").get(default="")
if not js_init_data:
return None
json_data: Dict = json.loads(js_init_data)
article_info: Dict = json_data.get("initialState", {}).get("entities", {}).get("articles", {})
if not article_info:
return None
return self._extract_article_content(article_info.get(list(article_info.keys())[0]))
def extract_zvideo_content_from_html(self, html_content: str) -> Optional[ZhihuContent]:
js_init_data: str = Selector(text=html_content).xpath("//script[@id='js-initialData']/text()").get(default="")
if not js_init_data:
return None
json_data: Dict = json.loads(js_init_data)
zvideo_info: Dict = json_data.get("initialState", {}).get("entities", {}).get("zvideos", {})
users: Dict = json_data.get("initialState", {}).get("entities", {}).get("users", {})
if not zvideo_info:
return None
# handler user info and video info
video_detail_info: Dict = zvideo_info.get(list(zvideo_info.keys())[0])
if not video_detail_info:
return None
if isinstance(video_detail_info.get("author"), str):
author_name: str = video_detail_info.get("author")
video_detail_info["author"] = users.get(author_name)
return self._extract_zvideo_content(video_detail_info)
def judge_zhihu_url(note_detail_url: str) -> str:
if "/answer/" in note_detail_url:
return zhihu_constant.ANSWER_NAME
elif "/p/" in note_detail_url:
return zhihu_constant.ARTICLE_NAME
elif "/zvideo/" in note_detail_url:
return zhihu_constant.VIDEO_NAME
else:
return "" | --- +++ @@ -35,6 +35,15 @@
def sign(url: str, cookies: str) -> Dict:
+ """
+ zhihu sign algorithm
+ Args:
+ url: request url with query string
+ cookies: request cookies with d_c0 key
+
+ Returns:
+
+ """
global ZHIHU_SGIN_JS
if not ZHIHU_SGIN_JS:
with open("libs/zhihu.js", mode="r", encoding="utf-8-sig") as f:
@@ -48,6 +57,14 @@ pass
def extract_contents_from_search(self, json_data: Dict) -> List[ZhihuContent]:
+ """
+ extract zhihu contents
+ Args:
+ json_data: zhihu json data
+
+ Returns:
+
+ """
if not json_data:
return []
@@ -57,6 +74,14 @@
def _extract_content_list(self, content_list: List[Dict]) -> List[ZhihuContent]:
+ """
+ extract zhihu content list
+ Args:
+ content_list:
+
+ Returns:
+
+ """
if not content_list:
return []
@@ -73,6 +98,13 @@ return res
def _extract_answer_content(self, answer: Dict) -> ZhihuContent:
+ """
+ extract zhihu answer content
+ Args:
+ answer: zhihu answer
+
+ Returns:
+ """
res = ZhihuContent()
res.content_id = answer.get("id")
res.content_type = answer.get("type")
@@ -96,6 +128,14 @@ return res
def _extract_article_content(self, article: Dict) -> ZhihuContent:
+ """
+ extract zhihu article content
+ Args:
+ article: zhihu article
+
+ Returns:
+
+ """
res = ZhihuContent()
res.content_id = article.get("id")
res.content_type = article.get("type")
@@ -118,6 +158,14 @@ return res
def _extract_zvideo_content(self, zvideo: Dict) -> ZhihuContent:
+ """
+ extract zhihu zvideo content
+ Args:
+ zvideo:
+
+ Returns:
+
+ """
res = ZhihuContent()
if "video" in zvideo and isinstance(zvideo.get("video"), dict): # This indicates data from the creator's homepage video list API
@@ -145,6 +193,14 @@
@staticmethod
def _extract_content_or_comment_author(author: Dict) -> ZhihuCreator:
+ """
+ extract zhihu author
+ Args:
+ author:
+
+ Returns:
+
+ """
res = ZhihuCreator()
try:
if not author:
@@ -164,6 +220,15 @@ return res
def extract_comments(self, page_content: ZhihuContent, comments: List[Dict]) -> List[ZhihuComment]:
+ """
+ extract zhihu comments
+ Args:
+ page_content: zhihu content object
+ comments: zhihu comments
+
+ Returns:
+
+ """
if not comments:
return []
res: List[ZhihuComment] = []
@@ -174,6 +239,15 @@ return res
def _extract_comment(self, page_content: ZhihuContent, comment: Dict) -> ZhihuComment:
+ """
+ extract zhihu comment
+ Args:
+ page_content: comment with content object
+ comment: zhihu comment
+
+ Returns:
+
+ """
res = ZhihuComment()
res.comment_id = str(comment.get("id", ""))
res.parent_comment_id = comment.get("reply_comment_id")
@@ -196,6 +270,14 @@
@staticmethod
def _extract_comment_ip_location(comment_tags: List[Dict]) -> str:
+ """
+ extract comment ip location
+ Args:
+ comment_tags:
+
+ Returns:
+
+ """
if not comment_tags:
return ""
@@ -207,6 +289,14 @@
@staticmethod
def extract_offset(paging_info: Dict) -> str:
+ """
+ extract offset
+ Args:
+ paging_info:
+
+ Returns:
+
+ """
# https://www.zhihu.com/api/v4/comment_v5/zvideos/1424368906836807681/root_comment?limit=10&offset=456770961_10125996085_0&order_by=score
next_url = paging_info.get("next")
if not next_url:
@@ -219,6 +309,14 @@
@staticmethod
def _foramt_gender_text(gender: int) -> str:
+ """
+ format gender text
+ Args:
+ gender:
+
+ Returns:
+
+ """
if gender == 1:
return "Male"
elif gender == 0:
@@ -228,6 +326,15 @@
def extract_creator(self, user_url_token: str, html_content: str) -> Optional[ZhihuCreator]:
+ """
+ extract zhihu creator
+ Args:
+ user_url_token : zhihu creator url token
+ html_content: zhihu creator html content
+
+ Returns:
+
+ """
if not html_content:
return None
@@ -264,6 +371,14 @@
def extract_content_list_from_creator(self, anwser_list: List[Dict]) -> List[ZhihuContent]:
+ """
+ extract content list from creator
+ Args:
+ anwser_list:
+
+ Returns:
+
+ """
if not anwser_list:
return []
@@ -273,6 +388,14 @@
def extract_answer_content_from_html(self, html_content: str) -> Optional[ZhihuContent]:
+ """
+ extract zhihu answer content from html
+ Args:
+ html_content:
+
+ Returns:
+
+ """
js_init_data: str = Selector(text=html_content).xpath("//script[@id='js-initialData']/text()").get(default="")
if not js_init_data:
return None
@@ -284,6 +407,14 @@ return self._extract_answer_content(answer_info.get(list(answer_info.keys())[0]))
def extract_article_content_from_html(self, html_content: str) -> Optional[ZhihuContent]:
+ """
+ extract zhihu article content from html
+ Args:
+ html_content:
+
+ Returns:
+
+ """
js_init_data: str = Selector(text=html_content).xpath("//script[@id='js-initialData']/text()").get(default="")
if not js_init_data:
return None
@@ -295,6 +426,14 @@ return self._extract_article_content(article_info.get(list(article_info.keys())[0]))
def extract_zvideo_content_from_html(self, html_content: str) -> Optional[ZhihuContent]:
+ """
+ extract zhihu zvideo content from html
+ Args:
+ html_content:
+
+ Returns:
+
+ """
js_init_data: str = Selector(text=html_content).xpath("//script[@id='js-initialData']/text()").get(default="")
if not js_init_data:
return None
@@ -316,6 +455,17 @@
def judge_zhihu_url(note_detail_url: str) -> str:
+ """
+ judge zhihu url type
+ Args:
+ note_detail_url:
+ eg1: https://www.zhihu.com/question/123456789/answer/123456789 # answer
+ eg2: https://www.zhihu.com/p/123456789 # article
+ eg3: https://www.zhihu.com/zvideo/123456789 # zvideo
+
+ Returns:
+
+ """
if "/answer/" in note_detail_url:
return zhihu_constant.ANSWER_NAME
elif "/p/" in note_detail_url:
@@ -323,4 +473,4 @@ elif "/zvideo/" in note_detail_url:
return zhihu_constant.VIDEO_NAME
else:
- return ""+ return ""
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/zhihu/help.py |
Generate helpful docstrings for debugging | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/api/routers/websocket.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
import asyncio
from typing import Set, Optional
from fastapi import APIRouter, WebSocket, WebSocketDisconnect
from ..services import crawler_manager
router = APIRouter(tags=["websocket"])
class ConnectionManager:
def __init__(self):
self.active_connections: Set[WebSocket] = set()
async def connect(self, websocket: WebSocket):
await websocket.accept()
self.active_connections.add(websocket)
def disconnect(self, websocket: WebSocket):
self.active_connections.discard(websocket)
async def broadcast(self, message: dict):
if not self.active_connections:
return
disconnected = []
for connection in list(self.active_connections):
try:
await connection.send_json(message)
except Exception:
disconnected.append(connection)
# Clean up disconnected connections
for conn in disconnected:
self.disconnect(conn)
manager = ConnectionManager()
async def log_broadcaster():
queue = crawler_manager.get_log_queue()
while True:
try:
# Get log entry from queue
entry = await queue.get()
# Broadcast to all WebSocket connections
await manager.broadcast(entry.model_dump())
except asyncio.CancelledError:
break
except Exception as e:
print(f"Log broadcaster error: {e}")
await asyncio.sleep(0.1)
# Global broadcast task
_broadcaster_task: Optional[asyncio.Task] = None
def start_broadcaster():
global _broadcaster_task
if _broadcaster_task is None or _broadcaster_task.done():
_broadcaster_task = asyncio.create_task(log_broadcaster())
@router.websocket("/ws/logs")
async def websocket_logs(websocket: WebSocket):
print("[WS] New connection attempt")
try:
# Ensure broadcast task is running
start_broadcaster()
await manager.connect(websocket)
print(f"[WS] Connected, active connections: {len(manager.active_connections)}")
# Send existing logs
for log in crawler_manager.logs:
try:
await websocket.send_json(log.model_dump())
except Exception as e:
print(f"[WS] Error sending existing log: {e}")
break
print(f"[WS] Sent {len(crawler_manager.logs)} existing logs, entering main loop")
while True:
# Keep connection alive, receive heartbeat or any message
try:
data = await asyncio.wait_for(
websocket.receive_text(),
timeout=30.0
)
if data == "ping":
await websocket.send_text("pong")
except asyncio.TimeoutError:
# Send ping to keep connection alive
try:
await websocket.send_text("ping")
except Exception as e:
print(f"[WS] Error sending ping: {e}")
break
except WebSocketDisconnect:
print("[WS] Client disconnected")
except Exception as e:
print(f"[WS] Error: {type(e).__name__}: {e}")
finally:
manager.disconnect(websocket)
print(f"[WS] Cleanup done, active connections: {len(manager.active_connections)}")
@router.websocket("/ws/status")
async def websocket_status(websocket: WebSocket):
await websocket.accept()
try:
while True:
# Send status every second
status = crawler_manager.get_status()
await websocket.send_json(status)
await asyncio.sleep(1)
except WebSocketDisconnect:
pass
except Exception:
pass | --- +++ @@ -27,6 +27,7 @@
class ConnectionManager:
+ """WebSocket connection manager"""
def __init__(self):
self.active_connections: Set[WebSocket] = set()
@@ -39,6 +40,7 @@ self.active_connections.discard(websocket)
async def broadcast(self, message: dict):
+ """Broadcast message to all connections"""
if not self.active_connections:
return
@@ -58,6 +60,7 @@
async def log_broadcaster():
+ """Background task: read logs from queue and broadcast"""
queue = crawler_manager.get_log_queue()
while True:
try:
@@ -77,6 +80,7 @@
def start_broadcaster():
+ """Start broadcast task"""
global _broadcaster_task
if _broadcaster_task is None or _broadcaster_task.done():
_broadcaster_task = asyncio.create_task(log_broadcaster())
@@ -84,6 +88,7 @@
@router.websocket("/ws/logs")
async def websocket_logs(websocket: WebSocket):
+ """WebSocket log stream"""
print("[WS] New connection attempt")
try:
@@ -131,6 +136,7 @@
@router.websocket("/ws/status")
async def websocket_status(websocket: WebSocket):
+ """WebSocket status stream"""
await websocket.accept()
try:
@@ -142,4 +148,4 @@ except WebSocketDisconnect:
pass
except Exception:
- pass+ pass
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/api/routers/websocket.py |
Write docstrings that follow conventions | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/zhihu/login.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
import asyncio
import functools
import sys
from typing import Optional
from playwright.async_api import BrowserContext, Page
from tenacity import (RetryError, retry, retry_if_result, stop_after_attempt,
wait_fixed)
import config
from base.base_crawler import AbstractLogin
from tools import utils
class ZhiHuLogin(AbstractLogin):
def __init__(self,
login_type: str,
browser_context: BrowserContext,
context_page: Page,
login_phone: Optional[str] = "",
cookie_str: str = ""
):
config.LOGIN_TYPE = login_type
self.browser_context = browser_context
self.context_page = context_page
self.login_phone = login_phone
self.cookie_str = cookie_str
@retry(stop=stop_after_attempt(600), wait=wait_fixed(1), retry=retry_if_result(lambda value: value is False))
async def check_login_state(self) -> bool:
current_cookie = await self.browser_context.cookies()
_, cookie_dict = utils.convert_cookies(current_cookie)
current_web_session = cookie_dict.get("z_c0")
if current_web_session:
return True
return False
async def begin(self):
utils.logger.info("[ZhiHu.begin] Begin login zhihu ...")
if config.LOGIN_TYPE == "qrcode":
await self.login_by_qrcode()
elif config.LOGIN_TYPE == "phone":
await self.login_by_mobile()
elif config.LOGIN_TYPE == "cookie":
await self.login_by_cookies()
else:
raise ValueError("[ZhiHu.begin]I nvalid Login Type Currently only supported qrcode or phone or cookies ...")
async def login_by_mobile(self):
# todo implement login by mobile
async def login_by_qrcode(self):
utils.logger.info("[ZhiHu.login_by_qrcode] Begin login zhihu by qrcode ...")
qrcode_img_selector = "canvas.Qrcode-qrcode"
# find login qrcode
base64_qrcode_img = await utils.find_qrcode_img_from_canvas(
self.context_page,
canvas_selector=qrcode_img_selector
)
if not base64_qrcode_img:
utils.logger.info("[ZhiHu.login_by_qrcode] login failed , have not found qrcode please check ....")
if not base64_qrcode_img:
sys.exit()
# show login qrcode
# fix issue #12
# we need to use partial function to call show_qrcode function and run in executor
# then current asyncio event loop will not be blocked
partial_show_qrcode = functools.partial(utils.show_qrcode, base64_qrcode_img)
asyncio.get_running_loop().run_in_executor(executor=None, func=partial_show_qrcode)
utils.logger.info(f"[ZhiHu.login_by_qrcode] waiting for scan code login, remaining time is 120s")
try:
await self.check_login_state()
except RetryError:
utils.logger.info("[ZhiHu.login_by_qrcode] Login zhihu failed by qrcode login method ...")
sys.exit()
wait_redirect_seconds = 5
utils.logger.info(
f"[ZhiHu.login_by_qrcode] Login successful then wait for {wait_redirect_seconds} seconds redirect ...")
await asyncio.sleep(wait_redirect_seconds)
async def login_by_cookies(self):
utils.logger.info("[ZhiHu.login_by_cookies] Begin login zhihu by cookie ...")
for key, value in utils.convert_str_cookie_to_dict(self.cookie_str).items():
await self.browser_context.add_cookies([{
'name': key,
'value': value,
'domain': ".zhihu.com",
'path': "/"
}]) | --- +++ @@ -50,6 +50,11 @@
@retry(stop=stop_after_attempt(600), wait=wait_fixed(1), retry=retry_if_result(lambda value: value is False))
async def check_login_state(self) -> bool:
+ """
+ Check if the current login status is successful and return True otherwise return False
+ Returns:
+
+ """
current_cookie = await self.browser_context.cookies()
_, cookie_dict = utils.convert_cookies(current_cookie)
current_web_session = cookie_dict.get("z_c0")
@@ -58,6 +63,7 @@ return False
async def begin(self):
+ """Start login zhihu"""
utils.logger.info("[ZhiHu.begin] Begin login zhihu ...")
if config.LOGIN_TYPE == "qrcode":
await self.login_by_qrcode()
@@ -69,9 +75,11 @@ raise ValueError("[ZhiHu.begin]I nvalid Login Type Currently only supported qrcode or phone or cookies ...")
async def login_by_mobile(self):
+ """Login zhihu by mobile"""
# todo implement login by mobile
async def login_by_qrcode(self):
+ """login zhihu website and keep webdriver login state"""
utils.logger.info("[ZhiHu.login_by_qrcode] Begin login zhihu by qrcode ...")
qrcode_img_selector = "canvas.Qrcode-qrcode"
# find login qrcode
@@ -105,6 +113,7 @@ await asyncio.sleep(wait_redirect_seconds)
async def login_by_cookies(self):
+ """login zhihu website by cookies"""
utils.logger.info("[ZhiHu.login_by_cookies] Begin login zhihu by cookie ...")
for key, value in utils.convert_str_cookie_to_dict(self.cookie_str).items():
await self.browser_context.add_cookies([{
@@ -112,4 +121,4 @@ 'value': value,
'domain': ".zhihu.com",
'path': "/"
- }])+ }])
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/zhihu/login.py |
Write reusable docstrings | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/kuaishou/login.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
import asyncio
import functools
import sys
from typing import Optional
from playwright.async_api import BrowserContext, Page
from tenacity import (RetryError, retry, retry_if_result, stop_after_attempt,
wait_fixed)
import config
from base.base_crawler import AbstractLogin
from tools import utils
class KuaishouLogin(AbstractLogin):
def __init__(self,
login_type: str,
browser_context: BrowserContext,
context_page: Page,
login_phone: Optional[str] = "",
cookie_str: str = ""
):
config.LOGIN_TYPE = login_type
self.browser_context = browser_context
self.context_page = context_page
self.login_phone = login_phone
self.cookie_str = cookie_str
async def begin(self):
utils.logger.info("[KuaishouLogin.begin] Begin login kuaishou ...")
if config.LOGIN_TYPE == "qrcode":
await self.login_by_qrcode()
elif config.LOGIN_TYPE == "phone":
await self.login_by_mobile()
elif config.LOGIN_TYPE == "cookie":
await self.login_by_cookies()
else:
raise ValueError("[KuaishouLogin.begin] Invalid Login Type Currently only supported qrcode or phone or cookie ...")
@retry(stop=stop_after_attempt(600), wait=wait_fixed(1), retry=retry_if_result(lambda value: value is False))
async def check_login_state(self) -> bool:
current_cookie = await self.browser_context.cookies()
_, cookie_dict = utils.convert_cookies(current_cookie)
kuaishou_pass_token = cookie_dict.get("passToken")
if kuaishou_pass_token:
return True
return False
async def login_by_qrcode(self):
utils.logger.info("[KuaishouLogin.login_by_qrcode] Begin login kuaishou by qrcode ...")
# click login button
login_button_ele = self.context_page.locator(
"xpath=//p[text()='登录']"
)
await login_button_ele.click()
# find login qrcode
qrcode_img_selector = "//div[@class='qrcode-img']//img"
base64_qrcode_img = await utils.find_login_qrcode(
self.context_page,
selector=qrcode_img_selector
)
if not base64_qrcode_img:
utils.logger.info("[KuaishouLogin.login_by_qrcode] login failed , have not found qrcode please check ....")
sys.exit()
# show login qrcode
partial_show_qrcode = functools.partial(utils.show_qrcode, base64_qrcode_img)
asyncio.get_running_loop().run_in_executor(executor=None, func=partial_show_qrcode)
utils.logger.info(f"[KuaishouLogin.login_by_qrcode] waiting for scan code login, remaining time is 20s")
try:
await self.check_login_state()
except RetryError:
utils.logger.info("[KuaishouLogin.login_by_qrcode] Login kuaishou failed by qrcode login method ...")
sys.exit()
wait_redirect_seconds = 5
utils.logger.info(f"[KuaishouLogin.login_by_qrcode] Login successful then wait for {wait_redirect_seconds} seconds redirect ...")
await asyncio.sleep(wait_redirect_seconds)
async def login_by_mobile(self):
pass
async def login_by_cookies(self):
utils.logger.info("[KuaishouLogin.login_by_cookies] Begin login kuaishou by cookie ...")
for key, value in utils.convert_str_cookie_to_dict(self.cookie_str).items():
await self.browser_context.add_cookies([{
'name': key,
'value': value,
'domain': ".kuaishou.com",
'path': "/"
}]) | --- +++ @@ -47,6 +47,7 @@ self.cookie_str = cookie_str
async def begin(self):
+ """Start login xiaohongshu"""
utils.logger.info("[KuaishouLogin.begin] Begin login kuaishou ...")
if config.LOGIN_TYPE == "qrcode":
await self.login_by_qrcode()
@@ -59,6 +60,11 @@
@retry(stop=stop_after_attempt(600), wait=wait_fixed(1), retry=retry_if_result(lambda value: value is False))
async def check_login_state(self) -> bool:
+ """
+ Check if the current login status is successful and return True otherwise return False
+ retry decorator will retry 20 times if the return value is False, and the retry interval is 1 second
+ if max retry times reached, raise RetryError
+ """
current_cookie = await self.browser_context.cookies()
_, cookie_dict = utils.convert_cookies(current_cookie)
kuaishou_pass_token = cookie_dict.get("passToken")
@@ -67,6 +73,7 @@ return False
async def login_by_qrcode(self):
+ """login kuaishou website and keep webdriver login state"""
utils.logger.info("[KuaishouLogin.login_by_qrcode] Begin login kuaishou by qrcode ...")
# click login button
@@ -112,4 +119,4 @@ 'value': value,
'domain': ".kuaishou.com",
'path': "/"
- }])+ }])
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/kuaishou/login.py |
Provide docstrings following PEP 257 | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/cmd_arg/arg.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
from __future__ import annotations
import sys
from enum import Enum
from types import SimpleNamespace
from typing import Iterable, Optional, Sequence, Type, TypeVar
import typer
from typing_extensions import Annotated
import config
from tools.utils import str2bool
EnumT = TypeVar("EnumT", bound=Enum)
class PlatformEnum(str, Enum):
XHS = "xhs"
DOUYIN = "dy"
KUAISHOU = "ks"
BILIBILI = "bili"
WEIBO = "wb"
TIEBA = "tieba"
ZHIHU = "zhihu"
class LoginTypeEnum(str, Enum):
QRCODE = "qrcode"
PHONE = "phone"
COOKIE = "cookie"
class CrawlerTypeEnum(str, Enum):
SEARCH = "search"
DETAIL = "detail"
CREATOR = "creator"
class SaveDataOptionEnum(str, Enum):
CSV = "csv"
DB = "db"
JSON = "json"
JSONL = "jsonl"
SQLITE = "sqlite"
MONGODB = "mongodb"
EXCEL = "excel"
POSTGRES = "postgres"
class InitDbOptionEnum(str, Enum):
SQLITE = "sqlite"
MYSQL = "mysql"
POSTGRES = "postgres"
def _to_bool(value: bool | str) -> bool:
if isinstance(value, bool):
return value
return str2bool(value)
def _coerce_enum(
enum_cls: Type[EnumT],
value: EnumT | str,
default: EnumT,
) -> EnumT:
if isinstance(value, enum_cls):
return value
try:
return enum_cls(value)
except ValueError:
typer.secho(
f"⚠️ Config value '{value}' is not within the supported range of {enum_cls.__name__}, falling back to default value '{default.value}'.",
fg=typer.colors.YELLOW,
)
return default
def _normalize_argv(argv: Optional[Sequence[str]]) -> Iterable[str]:
if argv is None:
return list(sys.argv[1:])
return list(argv)
def _inject_init_db_default(args: Sequence[str]) -> list[str]:
normalized: list[str] = []
i = 0
while i < len(args):
arg = args[i]
normalized.append(arg)
if arg == "--init_db":
next_arg = args[i + 1] if i + 1 < len(args) else None
if not next_arg or next_arg.startswith("-"):
normalized.append(InitDbOptionEnum.SQLITE.value)
i += 1
return normalized
async def parse_cmd(argv: Optional[Sequence[str]] = None):
app = typer.Typer(add_completion=False)
@app.callback(invoke_without_command=True)
def main(
platform: Annotated[
PlatformEnum,
typer.Option(
"--platform",
help="Media platform selection (xhs=XiaoHongShu | dy=Douyin | ks=Kuaishou | bili=Bilibili | wb=Weibo | tieba=Baidu Tieba | zhihu=Zhihu)",
rich_help_panel="Basic Configuration",
),
] = _coerce_enum(PlatformEnum, config.PLATFORM, PlatformEnum.XHS),
lt: Annotated[
LoginTypeEnum,
typer.Option(
"--lt",
help="Login type (qrcode=QR Code | phone=Phone | cookie=Cookie)",
rich_help_panel="Account Configuration",
),
] = _coerce_enum(LoginTypeEnum, config.LOGIN_TYPE, LoginTypeEnum.QRCODE),
crawler_type: Annotated[
CrawlerTypeEnum,
typer.Option(
"--type",
help="Crawler type (search=Search | detail=Detail | creator=Creator)",
rich_help_panel="Basic Configuration",
),
] = _coerce_enum(CrawlerTypeEnum, config.CRAWLER_TYPE, CrawlerTypeEnum.SEARCH),
start: Annotated[
int,
typer.Option(
"--start",
help="Starting page number",
rich_help_panel="Basic Configuration",
),
] = config.START_PAGE,
keywords: Annotated[
str,
typer.Option(
"--keywords",
help="Enter keywords, multiple keywords separated by commas",
rich_help_panel="Basic Configuration",
),
] = config.KEYWORDS,
get_comment: Annotated[
str,
typer.Option(
"--get_comment",
help="Whether to crawl first-level comments, supports yes/true/t/y/1 or no/false/f/n/0",
rich_help_panel="Comment Configuration",
show_default=True,
),
] = str(config.ENABLE_GET_COMMENTS),
get_sub_comment: Annotated[
str,
typer.Option(
"--get_sub_comment",
help="Whether to crawl second-level comments, supports yes/true/t/y/1 or no/false/f/n/0",
rich_help_panel="Comment Configuration",
show_default=True,
),
] = str(config.ENABLE_GET_SUB_COMMENTS),
headless: Annotated[
str,
typer.Option(
"--headless",
help="Whether to enable headless mode (applies to both Playwright and CDP), supports yes/true/t/y/1 or no/false/f/n/0",
rich_help_panel="Runtime Configuration",
show_default=True,
),
] = str(config.HEADLESS),
save_data_option: Annotated[
SaveDataOptionEnum,
typer.Option(
"--save_data_option",
help="Data save option (csv=CSV file | db=MySQL database | json=JSON file | jsonl=JSONL file | sqlite=SQLite database | mongodb=MongoDB database | excel=Excel file | postgres=PostgreSQL database)",
rich_help_panel="Storage Configuration",
),
] = _coerce_enum(
SaveDataOptionEnum, config.SAVE_DATA_OPTION, SaveDataOptionEnum.JSONL
),
init_db: Annotated[
Optional[InitDbOptionEnum],
typer.Option(
"--init_db",
help="Initialize database table structure (sqlite | mysql | postgres)",
rich_help_panel="Storage Configuration",
),
] = None,
cookies: Annotated[
str,
typer.Option(
"--cookies",
help="Cookie value used for Cookie login method",
rich_help_panel="Account Configuration",
),
] = config.COOKIES,
specified_id: Annotated[
str,
typer.Option(
"--specified_id",
help="Post/video ID list in detail mode, multiple IDs separated by commas (supports full URL or ID)",
rich_help_panel="Basic Configuration",
),
] = "",
creator_id: Annotated[
str,
typer.Option(
"--creator_id",
help="Creator ID list in creator mode, multiple IDs separated by commas (supports full URL or ID)",
rich_help_panel="Basic Configuration",
),
] = "",
max_comments_count_singlenotes: Annotated[
int,
typer.Option(
"--max_comments_count_singlenotes",
help="Maximum number of first-level comments to crawl per post/video",
rich_help_panel="Comment Configuration",
),
] = config.CRAWLER_MAX_COMMENTS_COUNT_SINGLENOTES,
max_concurrency_num: Annotated[
int,
typer.Option(
"--max_concurrency_num",
help="Maximum number of concurrent crawlers",
rich_help_panel="Performance Configuration",
),
] = config.MAX_CONCURRENCY_NUM,
save_data_path: Annotated[
str,
typer.Option(
"--save_data_path",
help="Data save path, default is empty and will save to data folder",
rich_help_panel="Storage Configuration",
),
] = config.SAVE_DATA_PATH,
enable_ip_proxy: Annotated[
str,
typer.Option(
"--enable_ip_proxy",
help="Whether to enable IP proxy, supports yes/true/t/y/1 or no/false/f/n/0",
rich_help_panel="Proxy Configuration",
show_default=True,
),
] = str(config.ENABLE_IP_PROXY),
ip_proxy_pool_count: Annotated[
int,
typer.Option(
"--ip_proxy_pool_count",
help="IP proxy pool count",
rich_help_panel="Proxy Configuration",
),
] = config.IP_PROXY_POOL_COUNT,
ip_proxy_provider_name: Annotated[
str,
typer.Option(
"--ip_proxy_provider_name",
help="IP proxy provider name (kuaidaili | wandouhttp)",
rich_help_panel="Proxy Configuration",
),
] = config.IP_PROXY_PROVIDER_NAME,
) -> SimpleNamespace:
enable_comment = _to_bool(get_comment)
enable_sub_comment = _to_bool(get_sub_comment)
enable_headless = _to_bool(headless)
enable_ip_proxy_value = _to_bool(enable_ip_proxy)
init_db_value = init_db.value if init_db else None
# Parse specified_id and creator_id into lists
specified_id_list = [id.strip() for id in specified_id.split(",") if id.strip()] if specified_id else []
creator_id_list = [id.strip() for id in creator_id.split(",") if id.strip()] if creator_id else []
# override global config
config.PLATFORM = platform.value
config.LOGIN_TYPE = lt.value
config.CRAWLER_TYPE = crawler_type.value
config.START_PAGE = start
config.KEYWORDS = keywords
config.ENABLE_GET_COMMENTS = enable_comment
config.ENABLE_GET_SUB_COMMENTS = enable_sub_comment
config.HEADLESS = enable_headless
config.CDP_HEADLESS = enable_headless
config.SAVE_DATA_OPTION = save_data_option.value
config.COOKIES = cookies
config.CRAWLER_MAX_COMMENTS_COUNT_SINGLENOTES = max_comments_count_singlenotes
config.MAX_CONCURRENCY_NUM = max_concurrency_num
config.SAVE_DATA_PATH = save_data_path
config.ENABLE_IP_PROXY = enable_ip_proxy_value
config.IP_PROXY_POOL_COUNT = ip_proxy_pool_count
config.IP_PROXY_PROVIDER_NAME = ip_proxy_provider_name
# Set platform-specific ID lists for detail/creator mode
if specified_id_list:
if platform == PlatformEnum.XHS:
config.XHS_SPECIFIED_NOTE_URL_LIST = specified_id_list
elif platform == PlatformEnum.BILIBILI:
config.BILI_SPECIFIED_ID_LIST = specified_id_list
elif platform == PlatformEnum.DOUYIN:
config.DY_SPECIFIED_ID_LIST = specified_id_list
elif platform == PlatformEnum.WEIBO:
config.WEIBO_SPECIFIED_ID_LIST = specified_id_list
elif platform == PlatformEnum.KUAISHOU:
config.KS_SPECIFIED_ID_LIST = specified_id_list
if creator_id_list:
if platform == PlatformEnum.XHS:
config.XHS_CREATOR_ID_LIST = creator_id_list
elif platform == PlatformEnum.BILIBILI:
config.BILI_CREATOR_ID_LIST = creator_id_list
elif platform == PlatformEnum.DOUYIN:
config.DY_CREATOR_ID_LIST = creator_id_list
elif platform == PlatformEnum.WEIBO:
config.WEIBO_CREATOR_ID_LIST = creator_id_list
elif platform == PlatformEnum.KUAISHOU:
config.KS_CREATOR_ID_LIST = creator_id_list
return SimpleNamespace(
platform=config.PLATFORM,
lt=config.LOGIN_TYPE,
type=config.CRAWLER_TYPE,
start=config.START_PAGE,
keywords=config.KEYWORDS,
get_comment=config.ENABLE_GET_COMMENTS,
get_sub_comment=config.ENABLE_GET_SUB_COMMENTS,
headless=config.HEADLESS,
save_data_option=config.SAVE_DATA_OPTION,
init_db=init_db_value,
cookies=config.COOKIES,
specified_id=specified_id,
creator_id=creator_id,
)
command = typer.main.get_command(app)
cli_args = _normalize_argv(argv)
cli_args = _inject_init_db_default(cli_args)
try:
result = command.main(args=cli_args, standalone_mode=False)
if isinstance(result, int): # help/options handled by Typer; propagate exit code
raise SystemExit(result)
return result
except typer.Exit as exc: # pragma: no cover - CLI exit paths
raise SystemExit(exc.exit_code) from exc | --- +++ @@ -37,6 +37,7 @@
class PlatformEnum(str, Enum):
+ """Supported media platform enumeration"""
XHS = "xhs"
DOUYIN = "dy"
@@ -48,6 +49,7 @@
class LoginTypeEnum(str, Enum):
+ """Login type enumeration"""
QRCODE = "qrcode"
PHONE = "phone"
@@ -55,6 +57,7 @@
class CrawlerTypeEnum(str, Enum):
+ """Crawler type enumeration"""
SEARCH = "search"
DETAIL = "detail"
@@ -62,6 +65,7 @@
class SaveDataOptionEnum(str, Enum):
+ """Data save option enumeration"""
CSV = "csv"
DB = "db"
@@ -74,6 +78,7 @@
class InitDbOptionEnum(str, Enum):
+ """Database initialization option"""
SQLITE = "sqlite"
MYSQL = "mysql"
@@ -91,6 +96,7 @@ value: EnumT | str,
default: EnumT,
) -> EnumT:
+ """Safely convert a raw config value to an enum member."""
if isinstance(value, enum_cls):
return value
@@ -112,6 +118,7 @@
def _inject_init_db_default(args: Sequence[str]) -> list[str]:
+ """Ensure bare --init_db defaults to sqlite for backward compatibility."""
normalized: list[str] = []
i = 0
@@ -129,6 +136,7 @@
async def parse_cmd(argv: Optional[Sequence[str]] = None):
+ """Parse command line arguments using Typer."""
app = typer.Typer(add_completion=False)
@@ -293,6 +301,7 @@ ),
] = config.IP_PROXY_PROVIDER_NAME,
) -> SimpleNamespace:
+ """MediaCrawler 命令行入口"""
enable_comment = _to_bool(get_comment)
enable_sub_comment = _to_bool(get_sub_comment)
@@ -375,4 +384,4 @@ raise SystemExit(result)
return result
except typer.Exit as exc: # pragma: no cover - CLI exit paths
- raise SystemExit(exc.exit_code) from exc+ raise SystemExit(exc.exit_code) from exc
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/cmd_arg/arg.py |
Add docstrings that explain inputs and outputs | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/douyin/core.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
import asyncio
import os
import random
from asyncio import Task
from typing import Any, Dict, List, Optional, Tuple
from playwright.async_api import (
BrowserContext,
BrowserType,
Page,
Playwright,
async_playwright,
)
import config
from base.base_crawler import AbstractCrawler
from proxy.proxy_ip_pool import IpInfoModel, create_ip_pool
from store import douyin as douyin_store
from tools import utils
from tools.cdp_browser import CDPBrowserManager
from var import crawler_type_var, source_keyword_var
from .client import DouYinClient
from .exception import DataFetchError
from .field import PublishTimeType
from .help import parse_video_info_from_url, parse_creator_info_from_url
from .login import DouYinLogin
class DouYinCrawler(AbstractCrawler):
context_page: Page
dy_client: DouYinClient
browser_context: BrowserContext
cdp_manager: Optional[CDPBrowserManager]
def __init__(self) -> None:
self.index_url = "https://www.douyin.com"
self.cdp_manager = None
self.ip_proxy_pool = None # Proxy IP pool for automatic proxy refresh
async def start(self) -> None:
playwright_proxy_format, httpx_proxy_format = None, None
if config.ENABLE_IP_PROXY:
self.ip_proxy_pool = await create_ip_pool(config.IP_PROXY_POOL_COUNT, enable_validate_ip=True)
ip_proxy_info: IpInfoModel = await self.ip_proxy_pool.get_proxy()
playwright_proxy_format, httpx_proxy_format = utils.format_proxy_info(ip_proxy_info)
async with async_playwright() as playwright:
# Select startup mode based on configuration
if config.ENABLE_CDP_MODE:
utils.logger.info("[DouYinCrawler] 使用CDP模式启动浏览器")
self.browser_context = await self.launch_browser_with_cdp(
playwright,
playwright_proxy_format,
None,
headless=config.CDP_HEADLESS,
)
else:
utils.logger.info("[DouYinCrawler] 使用标准模式启动浏览器")
# Launch a browser context.
chromium = playwright.chromium
self.browser_context = await self.launch_browser(
chromium,
playwright_proxy_format,
user_agent=None,
headless=config.HEADLESS,
)
# stealth.min.js is a js script to prevent the website from detecting the crawler.
await self.browser_context.add_init_script(path="libs/stealth.min.js")
self.context_page = await self.browser_context.new_page()
await self.context_page.goto(self.index_url)
self.dy_client = await self.create_douyin_client(httpx_proxy_format)
if not await self.dy_client.pong(browser_context=self.browser_context):
login_obj = DouYinLogin(
login_type=config.LOGIN_TYPE,
login_phone="", # you phone number
browser_context=self.browser_context,
context_page=self.context_page,
cookie_str=config.COOKIES,
)
await login_obj.begin()
await self.dy_client.update_cookies(browser_context=self.browser_context)
crawler_type_var.set(config.CRAWLER_TYPE)
if config.CRAWLER_TYPE == "search":
# Search for notes and retrieve their comment information.
await self.search()
elif config.CRAWLER_TYPE == "detail":
# Get the information and comments of the specified post
await self.get_specified_awemes()
elif config.CRAWLER_TYPE == "creator":
# Get the information and comments of the specified creator
await self.get_creators_and_videos()
utils.logger.info("[DouYinCrawler.start] Douyin Crawler finished ...")
async def search(self) -> None:
utils.logger.info("[DouYinCrawler.search] Begin search douyin keywords")
dy_limit_count = 10 # douyin limit page fixed value
if config.CRAWLER_MAX_NOTES_COUNT < dy_limit_count:
config.CRAWLER_MAX_NOTES_COUNT = dy_limit_count
start_page = config.START_PAGE # start page number
for keyword in config.KEYWORDS.split(","):
source_keyword_var.set(keyword)
utils.logger.info(f"[DouYinCrawler.search] Current keyword: {keyword}")
aweme_list: List[str] = []
page = 0
dy_search_id = ""
while (page - start_page + 1) * dy_limit_count <= config.CRAWLER_MAX_NOTES_COUNT:
if page < start_page:
utils.logger.info(f"[DouYinCrawler.search] Skip {page}")
page += 1
continue
try:
utils.logger.info(f"[DouYinCrawler.search] search douyin keyword: {keyword}, page: {page}")
posts_res = await self.dy_client.search_info_by_keyword(
keyword=keyword,
offset=page * dy_limit_count - dy_limit_count,
publish_time=PublishTimeType(config.PUBLISH_TIME_TYPE),
search_id=dy_search_id,
)
if posts_res.get("data") is None or posts_res.get("data") == []:
utils.logger.info(f"[DouYinCrawler.search] search douyin keyword: {keyword}, page: {page} is empty,{posts_res.get('data')}`")
break
except DataFetchError:
utils.logger.error(f"[DouYinCrawler.search] search douyin keyword: {keyword} failed")
break
page += 1
if "data" not in posts_res:
utils.logger.error(f"[DouYinCrawler.search] search douyin keyword: {keyword} failed,账号也许被风控了。")
break
dy_search_id = posts_res.get("extra", {}).get("logid", "")
page_aweme_list = []
for post_item in posts_res.get("data"):
try:
aweme_info: Dict = (post_item.get("aweme_info") or post_item.get("aweme_mix_info", {}).get("mix_items")[0])
except TypeError:
continue
aweme_list.append(aweme_info.get("aweme_id", ""))
page_aweme_list.append(aweme_info.get("aweme_id", ""))
await douyin_store.update_douyin_aweme(aweme_item=aweme_info)
await self.get_aweme_media(aweme_item=aweme_info)
# Batch get note comments for the current page
await self.batch_get_note_comments(page_aweme_list)
# Sleep after each page navigation
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[DouYinCrawler.search] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after page {page-1}")
utils.logger.info(f"[DouYinCrawler.search] keyword:{keyword}, aweme_list:{aweme_list}")
async def get_specified_awemes(self):
utils.logger.info("[DouYinCrawler.get_specified_awemes] Parsing video URLs...")
aweme_id_list = []
for video_url in config.DY_SPECIFIED_ID_LIST:
try:
video_info = parse_video_info_from_url(video_url)
# Handling short links
if video_info.url_type == "short":
utils.logger.info(f"[DouYinCrawler.get_specified_awemes] Resolving short link: {video_url}")
resolved_url = await self.dy_client.resolve_short_url(video_url)
if resolved_url:
# Extract video ID from parsed URL
video_info = parse_video_info_from_url(resolved_url)
utils.logger.info(f"[DouYinCrawler.get_specified_awemes] Short link resolved to aweme ID: {video_info.aweme_id}")
else:
utils.logger.error(f"[DouYinCrawler.get_specified_awemes] Failed to resolve short link: {video_url}")
continue
aweme_id_list.append(video_info.aweme_id)
utils.logger.info(f"[DouYinCrawler.get_specified_awemes] Parsed aweme ID: {video_info.aweme_id} from {video_url}")
except ValueError as e:
utils.logger.error(f"[DouYinCrawler.get_specified_awemes] Failed to parse video URL: {e}")
continue
semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
task_list = [self.get_aweme_detail(aweme_id=aweme_id, semaphore=semaphore) for aweme_id in aweme_id_list]
aweme_details = await asyncio.gather(*task_list)
for aweme_detail in aweme_details:
if aweme_detail is not None:
await douyin_store.update_douyin_aweme(aweme_item=aweme_detail)
await self.get_aweme_media(aweme_item=aweme_detail)
await self.batch_get_note_comments(aweme_id_list)
    async def get_aweme_detail(self, aweme_id: str, semaphore: asyncio.Semaphore) -> Any:
        """Fetch the detail of a single aweme (Douyin post), bounded by a semaphore.

        Args:
            aweme_id: Douyin post (aweme) ID to fetch.
            semaphore: Limits the number of concurrent detail requests.

        Returns:
            The detail object returned by the client, or None when the fetch
            fails (DataFetchError) or the response lacks expected keys (KeyError).
        """
        async with semaphore:
            try:
                result = await self.dy_client.get_video_by_id(aweme_id)
                # Sleep after fetching aweme detail
                await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
                utils.logger.info(f"[DouYinCrawler.get_aweme_detail] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after fetching aweme {aweme_id}")
                return result
            except DataFetchError as ex:
                utils.logger.error(f"[DouYinCrawler.get_aweme_detail] Get aweme detail error: {ex}")
                return None
            except KeyError as ex:
                utils.logger.error(f"[DouYinCrawler.get_aweme_detail] have not fund note detail aweme_id:{aweme_id}, err: {ex}")
                return None
    async def batch_get_note_comments(self, aweme_list: List[str]) -> None:
        """Concurrently crawl comments for a batch of awemes.

        Does nothing when config.ENABLE_GET_COMMENTS is disabled. Each aweme ID
        is crawled by its own task (bounded inside get_comments by a shared
        semaphore of config.MAX_CONCURRENCY_NUM).

        Args:
            aweme_list: List of aweme IDs whose comments should be fetched.
        """
        if not config.ENABLE_GET_COMMENTS:
            utils.logger.info(f"[DouYinCrawler.batch_get_note_comments] Crawling comment mode is not enabled")
            return
        task_list: List[Task] = []
        semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
        for aweme_id in aweme_list:
            # Name each task after its aweme ID to ease debugging
            task = asyncio.create_task(self.get_comments(aweme_id, semaphore), name=aweme_id)
            task_list.append(task)
        if len(task_list) > 0:
            await asyncio.wait(task_list)
    async def get_comments(self, aweme_id: str, semaphore: asyncio.Semaphore) -> None:
        """Fetch all comments (and optionally sub-comments) of one aweme.

        Comments are streamed to douyin_store.batch_update_dy_aweme_comments via
        the client callback; the total is capped by
        config.CRAWLER_MAX_COMMENTS_COUNT_SINGLENOTES.

        Args:
            aweme_id: Aweme ID whose comments are crawled.
            semaphore: Limits the number of concurrent comment crawls.
        """
        async with semaphore:
            try:
                # Pass the list of keywords to the get_aweme_all_comments method
                # Use fixed crawling interval
                crawl_interval = config.CRAWLER_MAX_SLEEP_SEC
                await self.dy_client.get_aweme_all_comments(
                    aweme_id=aweme_id,
                    crawl_interval=crawl_interval,
                    is_fetch_sub_comments=config.ENABLE_GET_SUB_COMMENTS,
                    callback=douyin_store.batch_update_dy_aweme_comments,
                    max_count=config.CRAWLER_MAX_COMMENTS_COUNT_SINGLENOTES,
                )
                # Sleep after fetching comments
                await asyncio.sleep(crawl_interval)
                utils.logger.info(f"[DouYinCrawler.get_comments] Sleeping for {crawl_interval} seconds after fetching comments for aweme {aweme_id}")
                utils.logger.info(f"[DouYinCrawler.get_comments] aweme_id: {aweme_id} comments have all been obtained and filtered ...")
            except DataFetchError as e:
                # A failed comment crawl is logged but does not abort the batch
                utils.logger.error(f"[DouYinCrawler.get_comments] aweme_id: {aweme_id} get comments failed, error: {e}")
    async def get_creators_and_videos(self) -> None:
        """Crawl the creators configured in config.DY_CREATOR_ID_LIST and all their videos.

        For each creator URL: parse the sec_user_id, persist the creator info,
        fetch every post of the creator (details handled page-by-page via the
        fetch_creator_video_detail callback), then batch-crawl comments for all
        of that creator's videos.
        """
        utils.logger.info("[DouYinCrawler.get_creators_and_videos] Begin get douyin creators")
        utils.logger.info("[DouYinCrawler.get_creators_and_videos] Parsing creator URLs...")
        for creator_url in config.DY_CREATOR_ID_LIST:
            try:
                creator_info_parsed = parse_creator_info_from_url(creator_url)
                user_id = creator_info_parsed.sec_user_id
                utils.logger.info(f"[DouYinCrawler.get_creators_and_videos] Parsed sec_user_id: {user_id} from {creator_url}")
            except ValueError as e:
                # Malformed creator URL: skip it and continue with the rest
                utils.logger.error(f"[DouYinCrawler.get_creators_and_videos] Failed to parse creator URL: {e}")
                continue
            creator_info: Dict = await self.dy_client.get_user_info(user_id)
            if creator_info:
                await douyin_store.save_creator(user_id, creator=creator_info)
            # Get all video information of the creator
            all_video_list = await self.dy_client.get_all_user_aweme_posts(sec_user_id=user_id, callback=self.fetch_creator_video_detail)
            video_ids = [video_item.get("aweme_id") for video_item in all_video_list]
            await self.batch_get_note_comments(video_ids)
    async def fetch_creator_video_detail(self, video_list: List[Dict]):
        """Concurrently fetch and persist the details of one page of creator posts.

        Used as the per-page callback of get_all_user_aweme_posts; fetches are
        bounded by config.MAX_CONCURRENCY_NUM and failed fetches (None) are skipped.

        Args:
            video_list: One page of post dicts, each carrying an "aweme_id" key.
        """
        semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
        task_list = [self.get_aweme_detail(post_item.get("aweme_id"), semaphore) for post_item in video_list]
        note_details = await asyncio.gather(*task_list)
        for aweme_item in note_details:
            if aweme_item is not None:
                await douyin_store.update_douyin_aweme(aweme_item=aweme_item)
                await self.get_aweme_media(aweme_item=aweme_item)
    async def create_douyin_client(self, httpx_proxy: Optional[str]) -> DouYinClient:
        """Build a DouYinClient bound to the current browser session.

        Cookies and the user agent are lifted from the live Playwright context so
        API requests share the logged-in browser identity.

        Args:
            httpx_proxy: Proxy URL for httpx requests, or None for a direct connection.

        Returns:
            A configured DouYinClient instance.
        """
        cookie_str, cookie_dict = utils.convert_cookies(await self.browser_context.cookies())  # type: ignore
        douyin_client = DouYinClient(
            proxy=httpx_proxy,
            headers={
                # Use the real browser UA so API requests match the page session
                "User-Agent": await self.context_page.evaluate("() => navigator.userAgent"),
                "Cookie": cookie_str,
                "Host": "www.douyin.com",
                "Origin": "https://www.douyin.com/",
                "Referer": "https://www.douyin.com/",
                "Content-Type": "application/json;charset=UTF-8",
            },
            playwright_page=self.context_page,
            cookie_dict=cookie_dict,
            proxy_ip_pool=self.ip_proxy_pool,  # Pass proxy pool for automatic refresh
        )
        return douyin_client
    async def launch_browser(
        self,
        chromium: BrowserType,
        playwright_proxy: Optional[Dict],
        user_agent: Optional[str],
        headless: bool = True,
    ) -> BrowserContext:
        """Launch Chromium and return a browser context.

        When config.SAVE_LOGIN_STATE is enabled, a persistent context backed by a
        per-platform user-data directory is used so login state survives restarts;
        otherwise a fresh, ephemeral context is created.

        Args:
            chromium: Playwright Chromium browser type.
            playwright_proxy: Optional Playwright proxy settings dict.
            user_agent: Optional user-agent string override.
            headless: Whether to run the browser headless.

        Returns:
            A Playwright BrowserContext with a 1920x1080 viewport.
        """
        if config.SAVE_LOGIN_STATE:
            user_data_dir = os.path.join(os.getcwd(), "browser_data", config.USER_DATA_DIR % config.PLATFORM)  # type: ignore
            browser_context = await chromium.launch_persistent_context(
                user_data_dir=user_data_dir,
                accept_downloads=True,
                headless=headless,
                proxy=playwright_proxy,  # type: ignore
                viewport={
                    "width": 1920,
                    "height": 1080
                },
                user_agent=user_agent,
            )  # type: ignore
            return browser_context
        else:
            browser = await chromium.launch(headless=headless, proxy=playwright_proxy)  # type: ignore
            browser_context = await browser.new_context(viewport={"width": 1920, "height": 1080}, user_agent=user_agent)
            return browser_context
    async def launch_browser_with_cdp(
        self,
        playwright: Playwright,
        playwright_proxy: Optional[Dict],
        user_agent: Optional[str],
        headless: bool = True,
    ) -> BrowserContext:
        """Launch the browser in CDP (Chrome DevTools Protocol) mode.

        Connects through a CDPBrowserManager and injects an anti-detection
        (stealth) script. On any failure, falls back to the standard
        launch_browser path.

        Args:
            playwright: The Playwright driver instance.
            playwright_proxy: Optional Playwright proxy settings dict.
            user_agent: Optional user-agent string override.
            headless: Whether to run the browser headless.

        Returns:
            A Playwright BrowserContext (CDP-backed, or standard on fallback).
        """
        try:
            self.cdp_manager = CDPBrowserManager()
            browser_context = await self.cdp_manager.launch_and_connect(
                playwright=playwright,
                playwright_proxy=playwright_proxy,
                user_agent=user_agent,
                headless=headless,
            )
            # Add anti-detection script
            await self.cdp_manager.add_stealth_script()
            # Show browser information
            browser_info = await self.cdp_manager.get_browser_info()
            utils.logger.info(f"[DouYinCrawler] CDP浏览器信息: {browser_info}")
            return browser_context
        except Exception as e:
            utils.logger.error(f"[DouYinCrawler] CDP模式启动失败,回退到标准模式: {e}")
            # Fall back to standard mode
            chromium = playwright.chromium
            return await self.launch_browser(chromium, playwright_proxy, user_agent, headless)
    async def close(self) -> None:
        """Close the browser session, using CDP cleanup when CDP mode is active."""
        # If you use CDP mode, special processing is required
        if self.cdp_manager:
            await self.cdp_manager.cleanup()
            self.cdp_manager = None
        else:
            await self.browser_context.close()
        utils.logger.info("[DouYinCrawler.close] Browser context closed ...")
    async def get_aweme_media(self, aweme_item: Dict):
        """Download the media of an aweme, dispatching by media type.

        Posts with image URLs are treated as picture notes (get_aweme_images);
        everything else is treated as a video (get_aweme_video). No-op unless
        config.ENABLE_GET_MEIDAS is enabled.

        Args:
            aweme_item: Aweme detail dict as returned by the Douyin API.
        """
        if not config.ENABLE_GET_MEIDAS:
            utils.logger.info(f"[DouYinCrawler.get_aweme_media] Crawling image mode is not enabled")
            return
        # List of note urls. If it is a short video type, an empty list will be returned.
        note_download_url: List[str] = douyin_store._extract_note_image_list(aweme_item)
        # The video URL will always exist, but when it is a short video type, the file is actually an audio file.
        video_download_url: str = douyin_store._extract_video_download_url(aweme_item)
        # TODO: Douyin does not adopt the audio and video separation strategy, so the audio can be separated from the original video and will not be extracted for the time being.
        if note_download_url:
            await self.get_aweme_images(aweme_item)
        else:
            await self.get_aweme_video(aweme_item)
    async def get_aweme_images(self, aweme_item: Dict):
        """Download all images of a picture-note aweme and persist them.

        Prefer calling get_aweme_media, which dispatches to this method only for
        image posts. Images are saved as zero-padded "NNN.jpeg" files; failed
        downloads are skipped silently.

        Args:
            aweme_item: Aweme detail dict as returned by the Douyin API.
        """
        if not config.ENABLE_GET_MEIDAS:
            return
        aweme_id = aweme_item.get("aweme_id")
        # List of note urls. If it is a short video type, an empty list will be returned.
        note_download_url: List[str] = douyin_store._extract_note_image_list(aweme_item)
        if not note_download_url:
            return
        picNum = 0
        for url in note_download_url:
            if not url:
                continue
            content = await self.dy_client.get_aweme_media(url)
            # Randomized short pause between downloads to throttle requests
            await asyncio.sleep(random.random())
            if content is None:
                continue
            extension_file_name = f"{picNum:>03d}.jpeg"
            picNum += 1
            await douyin_store.update_dy_aweme_image(aweme_id, content, extension_file_name)
    async def get_aweme_video(self, aweme_item: Dict):
        """Download the video file of an aweme and persist it as "video.mp4".

        Prefer calling get_aweme_media, which dispatches to this method for
        non-picture posts. Returns silently when media crawling is disabled,
        no download URL is found, or the download fails.

        Args:
            aweme_item: Aweme detail dict as returned by the Douyin API.
        """
        if not config.ENABLE_GET_MEIDAS:
            return
        aweme_id = aweme_item.get("aweme_id")
        # The video URL will always exist, but when it is a short video type, the file is actually an audio file.
        video_download_url: str = douyin_store._extract_video_download_url(aweme_item)
        if not video_download_url:
            return
        content = await self.dy_client.get_aweme_media(video_download_url)
        # Randomized short pause after the download to throttle requests
        await asyncio.sleep(random.random())
        if content is None:
            return
        extension_file_name = f"video.mp4"
        await douyin_store.update_dy_aweme_video(aweme_id, content, extension_file_name)
async def get_specified_awemes(self):
+ """Get the information and comments of the specified post from URLs or IDs"""
utils.logger.info("[DouYinCrawler.get_specified_awemes] Parsing video URLs...")
aweme_id_list = []
for video_url in config.DY_SPECIFIED_ID_LIST:
@@ -205,6 +206,7 @@ await self.batch_get_note_comments(aweme_id_list)
async def get_aweme_detail(self, aweme_id: str, semaphore: asyncio.Semaphore) -> Any:
+ """Get note detail"""
async with semaphore:
try:
result = await self.dy_client.get_video_by_id(aweme_id)
@@ -220,6 +222,9 @@ return None
async def batch_get_note_comments(self, aweme_list: List[str]) -> None:
+ """
+ Batch get note comments
+ """
if not config.ENABLE_GET_COMMENTS:
utils.logger.info(f"[DouYinCrawler.batch_get_note_comments] Crawling comment mode is not enabled")
return
@@ -253,6 +258,9 @@ utils.logger.error(f"[DouYinCrawler.get_comments] aweme_id: {aweme_id} get comments failed, error: {e}")
async def get_creators_and_videos(self) -> None:
+ """
+ Get the information and videos of the specified creator from URLs or IDs
+ """
utils.logger.info("[DouYinCrawler.get_creators_and_videos] Begin get douyin creators")
utils.logger.info("[DouYinCrawler.get_creators_and_videos] Parsing creator URLs...")
@@ -276,6 +284,9 @@ await self.batch_get_note_comments(video_ids)
async def fetch_creator_video_detail(self, video_list: List[Dict]):
+ """
+ Concurrently obtain the specified post list and save the data
+ """
semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
task_list = [self.get_aweme_detail(post_item.get("aweme_id"), semaphore) for post_item in video_list]
@@ -286,6 +297,7 @@ await self.get_aweme_media(aweme_item=aweme_item)
async def create_douyin_client(self, httpx_proxy: Optional[str]) -> DouYinClient:
+ """Create douyin client"""
cookie_str, cookie_dict = utils.convert_cookies(await self.browser_context.cookies()) # type: ignore
douyin_client = DouYinClient(
proxy=httpx_proxy,
@@ -310,6 +322,7 @@ user_agent: Optional[str],
headless: bool = True,
) -> BrowserContext:
+ """Launch browser and create browser context"""
if config.SAVE_LOGIN_STATE:
user_data_dir = os.path.join(os.getcwd(), "browser_data", config.USER_DATA_DIR % config.PLATFORM) # type: ignore
browser_context = await chromium.launch_persistent_context(
@@ -336,6 +349,9 @@ user_agent: Optional[str],
headless: bool = True,
) -> BrowserContext:
+ """
+ 使用CDP模式启动浏览器
+ """
try:
self.cdp_manager = CDPBrowserManager()
browser_context = await self.cdp_manager.launch_and_connect(
@@ -361,6 +377,7 @@ return await self.launch_browser(chromium, playwright_proxy, user_agent, headless)
async def close(self) -> None:
+ """Close browser context"""
# If you use CDP mode, special processing is required
if self.cdp_manager:
await self.cdp_manager.cleanup()
@@ -370,6 +387,12 @@ utils.logger.info("[DouYinCrawler.close] Browser context closed ...")
async def get_aweme_media(self, aweme_item: Dict):
+ """
+ 获取抖音媒体,自动判断媒体类型是短视频还是帖子图片并下载
+
+ Args:
+ aweme_item (Dict): 抖音作品详情
+ """
if not config.ENABLE_GET_MEIDAS:
utils.logger.info(f"[DouYinCrawler.get_aweme_media] Crawling image mode is not enabled")
return
@@ -384,6 +407,12 @@ await self.get_aweme_video(aweme_item)
async def get_aweme_images(self, aweme_item: Dict):
+ """
+ get aweme images. please use get_aweme_media
+
+ Args:
+ aweme_item (Dict): 抖音作品详情
+ """
if not config.ENABLE_GET_MEIDAS:
return
aweme_id = aweme_item.get("aweme_id")
@@ -405,6 +434,12 @@ await douyin_store.update_dy_aweme_image(aweme_id, content, extension_file_name)
async def get_aweme_video(self, aweme_item: Dict):
+ """
+ get aweme videos. please use get_aweme_media
+
+ Args:
+ aweme_item (Dict): 抖音作品详情
+ """
if not config.ENABLE_GET_MEIDAS:
return
aweme_id = aweme_item.get("aweme_id")
@@ -419,4 +454,4 @@ if content is None:
return
extension_file_name = f"video.mp4"
- await douyin_store.update_dy_aweme_video(aweme_id, content, extension_file_name)+ await douyin_store.update_dy_aweme_video(aweme_id, content, extension_file_name)
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/douyin/core.py |
Add concise docstrings to each method | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/cache/redis_cache.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : relakkes@gmail.com
# @Name : Programmer AJiang-Relakkes
# @Time : 2024/5/29 22:57
# @Desc : RedisCache implementation
import pickle
import time
from typing import Any, List
from redis import Redis
from redis.exceptions import ResponseError
from cache.abs_cache import AbstractCache
from config import db_config
class RedisCache(AbstractCache):
def __init__(self) -> None:
# Connect to redis, return redis client
self._redis_client = self._connet_redis()
@staticmethod
def _connet_redis() -> Redis:
return Redis(
host=db_config.REDIS_DB_HOST,
port=db_config.REDIS_DB_PORT,
db=db_config.REDIS_DB_NUM,
password=db_config.REDIS_DB_PWD,
)
def get(self, key: str) -> Any:
value = self._redis_client.get(key)
if value is None:
return None
return pickle.loads(value)
def set(self, key: str, value: Any, expire_time: int) -> None:
self._redis_client.set(key, pickle.dumps(value), ex=expire_time)
def keys(self, pattern: str) -> List[str]:
try:
# Try KEYS command first (faster for standard Redis)
return [key.decode() if isinstance(key, bytes) else key for key in self._redis_client.keys(pattern)]
except ResponseError as e:
# If KEYS is not supported (e.g., Redis Cluster or cloud Redis), use SCAN
if "unknown command" in str(e).lower() or "keys" in str(e).lower():
keys_list: List[str] = []
cursor = 0
while True:
cursor, keys = self._redis_client.scan(cursor=cursor, match=pattern, count=100)
keys_list.extend([key.decode() if isinstance(key, bytes) else key for key in keys])
if cursor == 0:
break
return keys_list
else:
# Re-raise if it's a different error
raise
if __name__ == '__main__':
redis_cache = RedisCache()
# basic usage
redis_cache.set("name", "Programmer AJiang-Relakkes", 1)
print(redis_cache.get("name")) # Relakkes
print(redis_cache.keys("*")) # ['name']
time.sleep(2)
print(redis_cache.get("name")) # None
# special python type usage
# list
redis_cache.set("list", [1, 2, 3], 10)
_value = redis_cache.get("list")
print(_value, f"value type:{type(_value)}") # [1, 2, 3] | --- +++ @@ -42,6 +42,10 @@
@staticmethod
def _connet_redis() -> Redis:
+ """
+ Connect to redis, return redis client, configure redis connection information as needed
+ :return:
+ """
return Redis(
host=db_config.REDIS_DB_HOST,
port=db_config.REDIS_DB_PORT,
@@ -50,15 +54,31 @@ )
def get(self, key: str) -> Any:
+ """
+ Get the value of a key from the cache and deserialize it
+ :param key:
+ :return:
+ """
value = self._redis_client.get(key)
if value is None:
return None
return pickle.loads(value)
def set(self, key: str, value: Any, expire_time: int) -> None:
+ """
+ Set the value of a key in the cache and serialize it
+ :param key:
+ :param value:
+ :param expire_time:
+ :return:
+ """
self._redis_client.set(key, pickle.dumps(value), ex=expire_time)
def keys(self, pattern: str) -> List[str]:
+ """
+ Get all keys matching the pattern
+ First try KEYS command, if not supported fallback to SCAN
+ """
try:
# Try KEYS command first (faster for standard Redis)
return [key.decode() if isinstance(key, bytes) else key for key in self._redis_client.keys(pattern)]
@@ -91,4 +111,4 @@ # list
redis_cache.set("list", [1, 2, 3], 10)
_value = redis_cache.get("list")
- print(_value, f"value type:{type(_value)}") # [1, 2, 3]+ print(_value, f"value type:{type(_value)}") # [1, 2, 3]
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/cache/redis_cache.py |
Add docstrings for better understanding | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/xhs/extractor.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
import json
import re
from typing import Dict, Optional
import humps
class XiaoHongShuExtractor:
def __init__(self):
pass
def extract_note_detail_from_html(self, note_id: str, html: str) -> Optional[Dict]:
if "noteDetailMap" not in html:
# Either a CAPTCHA appeared or the note doesn't exist
return None
state = re.findall(r"window.__INITIAL_STATE__=({.*})</script>", html)[
0
].replace("undefined", '""')
if state != "{}":
note_dict = humps.decamelize(json.loads(state))
return note_dict["note"]["note_detail_map"][note_id]["note"]
return None
def extract_creator_info_from_html(self, html: str) -> Optional[Dict]:
match = re.search(
r"<script>window.__INITIAL_STATE__=(.+)<\/script>", html, re.M
)
if match is None:
return None
info = json.loads(match.group(1).replace(":undefined", ":null"), strict=False)
if info is None:
return None
return info.get("user").get("userPageData") | --- +++ @@ -29,6 +29,14 @@ pass
def extract_note_detail_from_html(self, note_id: str, html: str) -> Optional[Dict]:
+ """Extract note details from HTML
+
+ Args:
+ html (str): HTML string
+
+ Returns:
+ Dict: Note details dictionary
+ """
if "noteDetailMap" not in html:
# Either a CAPTCHA appeared or the note doesn't exist
return None
@@ -42,6 +50,14 @@ return None
def extract_creator_info_from_html(self, html: str) -> Optional[Dict]:
+ """Extract user information from HTML
+
+ Args:
+ html (str): HTML string
+
+ Returns:
+ Dict: User information dictionary
+ """
match = re.search(
r"<script>window.__INITIAL_STATE__=(.+)<\/script>", html, re.M
)
@@ -50,4 +66,4 @@ info = json.loads(match.group(1).replace(":undefined", ":null"), strict=False)
if info is None:
return None
- return info.get("user").get("userPageData")+ return info.get("user").get("userPageData")
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/xhs/extractor.py |
Add docstrings to incomplete code | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/weibo/client.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : relakkes@gmail.com
# @Time : 2023/12/23 15:40
# @Desc : Weibo crawler API request client
import asyncio
import copy
import json
import re
from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Union
from urllib.parse import parse_qs, unquote, urlencode
import httpx
from httpx import Response
from playwright.async_api import BrowserContext, Page
from tenacity import retry, stop_after_attempt, wait_fixed
import config
from proxy.proxy_mixin import ProxyRefreshMixin
from tools import utils
if TYPE_CHECKING:
from proxy.proxy_ip_pool import ProxyIpPool
from .exception import DataFetchError
from .field import SearchType
class WeiboClient(ProxyRefreshMixin):
def __init__(
self,
timeout=60, # If media crawling is enabled, Weibo images need a longer timeout
proxy=None,
*,
headers: Dict[str, str],
playwright_page: Page,
cookie_dict: Dict[str, str],
proxy_ip_pool: Optional["ProxyIpPool"] = None,
):
self.proxy = proxy
self.timeout = timeout
self.headers = headers
self._host = "https://m.weibo.cn"
self.playwright_page = playwright_page
self.cookie_dict = cookie_dict
self._image_agent_host = "https://i1.wp.com/"
# Initialize proxy pool (from ProxyRefreshMixin)
self.init_proxy_pool(proxy_ip_pool)
@retry(stop=stop_after_attempt(5), wait=wait_fixed(3))
async def request(self, method, url, **kwargs) -> Union[Response, Dict]:
# Check if proxy is expired before each request
await self._refresh_proxy_if_expired()
enable_return_response = kwargs.pop("return_response", False)
async with httpx.AsyncClient(proxy=self.proxy) as client:
response = await client.request(method, url, timeout=self.timeout, **kwargs)
if enable_return_response:
return response
try:
data: Dict = response.json()
except json.decoder.JSONDecodeError:
# issue: #771 Search API returns error 432, retry multiple times + update h5 cookies
utils.logger.error(f"[WeiboClient.request] request {method}:{url} err code: {response.status_code} res:{response.text}")
await self.playwright_page.goto(self._host)
await asyncio.sleep(2)
await self.update_cookies(browser_context=self.playwright_page.context)
raise DataFetchError(f"get response code error: {response.status_code}")
ok_code = data.get("ok")
if ok_code == 0: # response error
utils.logger.error(f"[WeiboClient.request] request {method}:{url} err, res:{data}")
raise DataFetchError(data.get("msg", "response error"))
elif ok_code != 1: # unknown error
utils.logger.error(f"[WeiboClient.request] request {method}:{url} err, res:{data}")
raise DataFetchError(data.get("msg", "unknown error"))
else: # response right
return data.get("data", {})
async def get(self, uri: str, params=None, headers=None, **kwargs) -> Union[Response, Dict]:
final_uri = uri
if isinstance(params, dict):
final_uri = (f"{uri}?"
f"{urlencode(params)}")
if headers is None:
headers = self.headers
return await self.request(method="GET", url=f"{self._host}{final_uri}", headers=headers, **kwargs)
async def post(self, uri: str, data: dict) -> Dict:
json_str = json.dumps(data, separators=(',', ':'), ensure_ascii=False)
return await self.request(method="POST", url=f"{self._host}{uri}", data=json_str, headers=self.headers)
async def pong(self) -> bool:
utils.logger.info("[WeiboClient.pong] Begin pong weibo...")
ping_flag = False
try:
uri = "/api/config"
resp_data: Dict = await self.request(method="GET", url=f"{self._host}{uri}", headers=self.headers)
if resp_data.get("login"):
ping_flag = True
else:
utils.logger.error(f"[WeiboClient.pong] cookie may be invalid and again login...")
except Exception as e:
utils.logger.error(f"[WeiboClient.pong] Pong weibo failed: {e}, and try to login again...")
ping_flag = False
return ping_flag
async def update_cookies(self, browser_context: BrowserContext, urls: Optional[List[str]] = None):
if urls:
cookies = await browser_context.cookies(urls=urls)
utils.logger.info(f"[WeiboClient.update_cookies] Updating cookies for specific URLs: {urls}")
else:
cookies = await browser_context.cookies()
utils.logger.info("[WeiboClient.update_cookies] Updating all cookies")
cookie_str, cookie_dict = utils.convert_cookies(cookies)
self.headers["Cookie"] = cookie_str
self.cookie_dict = cookie_dict
utils.logger.info(f"[WeiboClient.update_cookies] Cookie updated successfully, total: {len(cookie_dict)} cookies")
async def get_note_by_keyword(
self,
keyword: str,
page: int = 1,
search_type: SearchType = SearchType.DEFAULT,
) -> Dict:
uri = "/api/container/getIndex"
containerid = f"100103type={search_type.value}&q={keyword}"
params = {
"containerid": containerid,
"page_type": "searchall",
"page": page,
}
return await self.get(uri, params)
async def get_note_comments(self, mid_id: str, max_id: int, max_id_type: int = 0) -> Dict:
uri = "/comments/hotflow"
params = {
"id": mid_id,
"mid": mid_id,
"max_id_type": max_id_type,
}
if max_id > 0:
params.update({"max_id": max_id})
referer_url = f"https://m.weibo.cn/detail/{mid_id}"
headers = copy.copy(self.headers)
headers["Referer"] = referer_url
return await self.get(uri, params, headers=headers)
async def get_note_all_comments(
self,
note_id: str,
crawl_interval: float = 1.0,
callback: Optional[Callable] = None,
max_count: int = 10,
):
result = []
is_end = False
max_id = -1
max_id_type = 0
while not is_end and len(result) < max_count:
comments_res = await self.get_note_comments(note_id, max_id, max_id_type)
max_id: int = comments_res.get("max_id")
max_id_type: int = comments_res.get("max_id_type")
comment_list: List[Dict] = comments_res.get("data", [])
is_end = max_id == 0
if len(result) + len(comment_list) > max_count:
comment_list = comment_list[:max_count - len(result)]
if callback: # If callback function exists, execute it
await callback(note_id, comment_list)
await asyncio.sleep(crawl_interval)
result.extend(comment_list)
sub_comment_result = await self.get_comments_all_sub_comments(note_id, comment_list, callback)
result.extend(sub_comment_result)
return result
@staticmethod
async def get_comments_all_sub_comments(
note_id: str,
comment_list: List[Dict],
callback: Optional[Callable] = None,
) -> List[Dict]:
if not config.ENABLE_GET_SUB_COMMENTS:
utils.logger.info(f"[WeiboClient.get_comments_all_sub_comments] Crawling sub_comment mode is not enabled")
return []
res_sub_comments = []
for comment in comment_list:
sub_comments = comment.get("comments")
if sub_comments and isinstance(sub_comments, list):
await callback(note_id, sub_comments)
res_sub_comments.extend(sub_comments)
return res_sub_comments
async def get_note_info_by_id(self, note_id: str) -> Dict:
url = f"{self._host}/detail/{note_id}"
async with httpx.AsyncClient(proxy=self.proxy) as client:
response = await client.request("GET", url, timeout=self.timeout, headers=self.headers)
if response.status_code != 200:
raise DataFetchError(f"get weibo detail err: {response.text}")
match = re.search(r'var \$render_data = (\[.*?\])\[0\]', response.text, re.DOTALL)
if match:
render_data_json = match.group(1)
render_data_dict = json.loads(render_data_json)
note_detail = render_data_dict[0].get("status")
note_item = {"mblog": note_detail}
return note_item
else:
utils.logger.info(f"[WeiboClient.get_note_info_by_id] $render_data value not found")
return dict()
async def get_note_image(self, image_url: str) -> bytes:
image_url = image_url[8:] # Remove https://
sub_url = image_url.split("/")
image_url = ""
for i in range(len(sub_url)):
if i == 1:
image_url += "large/" # Get high-resolution images
elif i == len(sub_url) - 1:
image_url += sub_url[i]
else:
image_url += sub_url[i] + "/"
# Weibo image hosting has anti-hotlinking, so proxy access is needed
# Since Weibo images are accessed through i1.wp.com, we need to concatenate the URL
final_uri = (f"{self._image_agent_host}"
f"{image_url}")
async with httpx.AsyncClient(proxy=self.proxy) as client:
try:
response = await client.request("GET", final_uri, timeout=self.timeout)
response.raise_for_status()
if not response.reason_phrase == "OK":
utils.logger.error(f"[WeiboClient.get_note_image] request {final_uri} err, res:{response.text}")
return None
else:
return response.content
except httpx.HTTPError as exc: # some wrong when call httpx.request method, such as connection error, client error, server error or response status code is not 2xx
utils.logger.error(f"[DouYinClient.get_aweme_media] {exc.__class__.__name__} for {exc.request.url} - {exc}") # Keep original exception type name for developer debugging
return None
async def get_creator_container_info(self, creator_id: str) -> Dict:
response = await self.get(f"/u/{creator_id}", return_response=True)
m_weibocn_params = response.cookies.get("M_WEIBOCN_PARAMS")
if not m_weibocn_params:
raise DataFetchError("get containerid failed")
m_weibocn_params_dict = parse_qs(unquote(m_weibocn_params))
return {"fid_container_id": m_weibocn_params_dict.get("fid", [""])[0], "lfid_container_id": m_weibocn_params_dict.get("lfid", [""])[0]}
async def get_creator_info_by_id(self, creator_id: str) -> Dict:
uri = "/api/container/getIndex"
containerid = f"100505{creator_id}"
params = {
"jumpfrom": "weibocom",
"type": "uid",
"value": creator_id,
"containerid":containerid,
}
user_res = await self.get(uri, params)
return user_res
async def get_notes_by_creator(
self,
creator: str,
container_id: str,
since_id: str = "0",
) -> Dict:
uri = "/api/container/getIndex"
params = {
"jumpfrom": "weibocom",
"type": "uid",
"value": creator,
"containerid": container_id,
"since_id": since_id,
}
return await self.get(uri, params)
async def get_all_notes_by_creator_id(
self,
creator_id: str,
container_id: str,
crawl_interval: float = 1.0,
callback: Optional[Callable] = None,
) -> List[Dict]:
result = []
notes_has_more = True
since_id = ""
crawler_total_count = 0
while notes_has_more:
notes_res = await self.get_notes_by_creator(creator_id, container_id, since_id)
if not notes_res:
utils.logger.error(f"[WeiboClient.get_notes_by_creator] The current creator may have been banned by Weibo, so they cannot access the data.")
break
since_id = notes_res.get("cardlistInfo", {}).get("since_id", "0")
if "cards" not in notes_res:
utils.logger.info(f"[WeiboClient.get_all_notes_by_creator] No 'notes' key found in response: {notes_res}")
break
notes = notes_res["cards"]
utils.logger.info(f"[WeiboClient.get_all_notes_by_creator] got user_id:{creator_id} notes len : {len(notes)}")
notes = [note for note in notes if note.get("card_type") == 9]
if callback:
await callback(notes)
await asyncio.sleep(crawl_interval)
result.extend(notes)
crawler_total_count += 10
notes_has_more = notes_res.get("cardlistInfo", {}).get("total", 0) > crawler_total_count
return result | --- +++ @@ -114,6 +114,7 @@ return await self.request(method="POST", url=f"{self._host}{uri}", data=json_str, headers=self.headers)
async def pong(self) -> bool:
+ """get a note to check if login state is ok"""
utils.logger.info("[WeiboClient.pong] Begin pong weibo...")
ping_flag = False
try:
@@ -129,6 +130,12 @@ return ping_flag
async def update_cookies(self, browser_context: BrowserContext, urls: Optional[List[str]] = None):
+ """
+ Update cookies from browser context
+ :param browser_context: Browser context
+ :param urls: Optional list of URLs to filter cookies (e.g., ["https://m.weibo.cn"])
+ If provided, only cookies for these URLs will be retrieved
+ """
if urls:
cookies = await browser_context.cookies(urls=urls)
utils.logger.info(f"[WeiboClient.update_cookies] Updating cookies for specific URLs: {urls}")
@@ -147,6 +154,13 @@ page: int = 1,
search_type: SearchType = SearchType.DEFAULT,
) -> Dict:
+ """
+ search note by keyword
+ :param keyword: Search keyword for Weibo
+ :param page: Pagination parameter - current page number
+ :param search_type: Search type, see SearchType enum in weibo/field.py
+ :return:
+ """
uri = "/api/container/getIndex"
containerid = f"100103type={search_type.value}&q={keyword}"
params = {
@@ -157,6 +171,12 @@ return await self.get(uri, params)
async def get_note_comments(self, mid_id: str, max_id: int, max_id_type: int = 0) -> Dict:
+ """get notes comments
+ :param mid_id: Weibo ID
+ :param max_id: Pagination parameter ID
+ :param max_id_type: Pagination parameter ID type
+ :return:
+ """
uri = "/comments/hotflow"
params = {
"id": mid_id,
@@ -178,6 +198,14 @@ callback: Optional[Callable] = None,
max_count: int = 10,
):
+ """
+ get note all comments include sub comments
+ :param note_id:
+ :param crawl_interval:
+ :param callback:
+ :param max_count:
+ :return:
+ """
result = []
is_end = False
max_id = -1
@@ -204,6 +232,16 @@ comment_list: List[Dict],
callback: Optional[Callable] = None,
) -> List[Dict]:
+ """
+ Get all sub-comments of comments
+ Args:
+ note_id:
+ comment_list:
+ callback:
+
+ Returns:
+
+ """
if not config.ENABLE_GET_SUB_COMMENTS:
utils.logger.info(f"[WeiboClient.get_comments_all_sub_comments] Crawling sub_comment mode is not enabled")
return []
@@ -217,6 +255,11 @@ return res_sub_comments
async def get_note_info_by_id(self, note_id: str) -> Dict:
+ """
+ Get note details by note ID
+ :param note_id:
+ :return:
+ """
url = f"{self._host}/detail/{note_id}"
async with httpx.AsyncClient(proxy=self.proxy) as client:
response = await client.request("GET", url, timeout=self.timeout, headers=self.headers)
@@ -262,6 +305,16 @@ return None
async def get_creator_container_info(self, creator_id: str) -> Dict:
+ """
+ Get user's container ID, container information represents the real API request path
+ fid_container_id: Container ID for user's Weibo detail API
+ lfid_container_id: Container ID for user's Weibo list API
+ Args:
+ creator_id: User ID
+
+ Returns: Dictionary with container IDs
+
+ """
response = await self.get(f"/u/{creator_id}", return_response=True)
m_weibocn_params = response.cookies.get("M_WEIBOCN_PARAMS")
if not m_weibocn_params:
@@ -270,6 +323,14 @@ return {"fid_container_id": m_weibocn_params_dict.get("fid", [""])[0], "lfid_container_id": m_weibocn_params_dict.get("lfid", [""])[0]}
async def get_creator_info_by_id(self, creator_id: str) -> Dict:
+ """
+ Get user details by user ID
+ Args:
+ creator_id:
+
+ Returns:
+
+ """
uri = "/api/container/getIndex"
containerid = f"100505{creator_id}"
params = {
@@ -287,6 +348,15 @@ container_id: str,
since_id: str = "0",
) -> Dict:
+ """
+ Get creator's notes
+ Args:
+ creator: Creator ID
+ container_id: Container ID
+ since_id: ID of the last note from previous page
+ Returns:
+
+ """
uri = "/api/container/getIndex"
params = {
@@ -305,6 +375,17 @@ crawl_interval: float = 1.0,
callback: Optional[Callable] = None,
) -> List[Dict]:
+ """
+ Get all posts published by a specified user, this method will continuously fetch all posts from a user
+ Args:
+ creator_id: Creator user ID
+ container_id: Container ID for the user
+ crawl_interval: Interval between requests in seconds
+ callback: Optional callback function to process notes
+
+ Returns: List of all notes
+
+ """
result = []
notes_has_more = True
since_id = ""
@@ -328,4 +409,4 @@ result.extend(notes)
crawler_total_count += 10
notes_has_more = notes_res.get("cardlistInfo", {}).get("total", 0) > crawler_total_count
- return result+ return result
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/media_platform/weibo/client.py |
Generate documentation strings for clarity | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/store/xhs/xhs_store_media.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : helloteemo
# @Time : 2024/7/11 22:35
# @Desc : Xiaohongshu media storage
import pathlib
from typing import Dict
import aiofiles
from base.base_crawler import AbstractStoreImage, AbstractStoreVideo
from tools import utils
import config
class XiaoHongShuImage(AbstractStoreImage):
def __init__(self):
if config.SAVE_DATA_PATH:
self.image_store_path = f"{config.SAVE_DATA_PATH}/xhs/images"
else:
self.image_store_path = "data/xhs/images"
async def store_image(self, image_content_item: Dict):
await self.save_image(image_content_item.get("notice_id"), image_content_item.get("pic_content"), image_content_item.get("extension_file_name"))
def make_save_file_name(self, notice_id: str, extension_file_name: str) -> str:
return f"{self.image_store_path}/{notice_id}/{extension_file_name}"
async def save_image(self, notice_id: str, pic_content: str, extension_file_name):
pathlib.Path(self.image_store_path + "/" + notice_id).mkdir(parents=True, exist_ok=True)
save_file_name = self.make_save_file_name(notice_id, extension_file_name)
async with aiofiles.open(save_file_name, 'wb') as f:
await f.write(pic_content)
utils.logger.info(f"[XiaoHongShuImageStoreImplement.save_image] save image {save_file_name} success ...")
class XiaoHongShuVideo(AbstractStoreVideo):
def __init__(self):
if config.SAVE_DATA_PATH:
self.video_store_path = f"{config.SAVE_DATA_PATH}/xhs/videos"
else:
self.video_store_path = "data/xhs/videos"
async def store_video(self, video_content_item: Dict):
await self.save_video(video_content_item.get("notice_id"), video_content_item.get("video_content"), video_content_item.get("extension_file_name"))
def make_save_file_name(self, notice_id: str, extension_file_name: str) -> str:
return f"{self.video_store_path}/{notice_id}/{extension_file_name}"
async def save_video(self, notice_id: str, video_content: str, extension_file_name):
pathlib.Path(self.video_store_path + "/" + notice_id).mkdir(parents=True, exist_ok=True)
save_file_name = self.make_save_file_name(notice_id, extension_file_name)
async with aiofiles.open(save_file_name, 'wb') as f:
await f.write(video_content)
utils.logger.info(f"[XiaoHongShuVideoStoreImplement.save_video] save video {save_file_name} success ...") | --- +++ @@ -39,12 +39,42 @@ self.image_store_path = "data/xhs/images"
async def store_image(self, image_content_item: Dict):
+ """
+ store content
+
+ Args:
+ image_content_item:
+
+ Returns:
+
+ """
await self.save_image(image_content_item.get("notice_id"), image_content_item.get("pic_content"), image_content_item.get("extension_file_name"))
def make_save_file_name(self, notice_id: str, extension_file_name: str) -> str:
+ """
+ make save file name by store type
+
+ Args:
+ notice_id: notice id
+ extension_file_name: image filename with extension
+
+ Returns:
+
+ """
return f"{self.image_store_path}/{notice_id}/{extension_file_name}"
async def save_image(self, notice_id: str, pic_content: str, extension_file_name):
+ """
+ save image to local
+
+ Args:
+ notice_id: notice id
+ pic_content: image content
+ extension_file_name: image filename with extension
+
+ Returns:
+
+ """
pathlib.Path(self.image_store_path + "/" + notice_id).mkdir(parents=True, exist_ok=True)
save_file_name = self.make_save_file_name(notice_id, extension_file_name)
async with aiofiles.open(save_file_name, 'wb') as f:
@@ -60,14 +90,44 @@ self.video_store_path = "data/xhs/videos"
async def store_video(self, video_content_item: Dict):
+ """
+ store content
+
+ Args:
+ video_content_item:
+
+ Returns:
+
+ """
await self.save_video(video_content_item.get("notice_id"), video_content_item.get("video_content"), video_content_item.get("extension_file_name"))
def make_save_file_name(self, notice_id: str, extension_file_name: str) -> str:
+ """
+ make save file name by store type
+
+ Args:
+ notice_id: notice id
+ extension_file_name: video filename with extension
+
+ Returns:
+
+ """
return f"{self.video_store_path}/{notice_id}/{extension_file_name}"
async def save_video(self, notice_id: str, video_content: str, extension_file_name):
+ """
+ save video to local
+
+ Args:
+ notice_id: notice id
+ video_content: video content
+ extension_file_name: video filename with extension
+
+ Returns:
+
+ """
pathlib.Path(self.video_store_path + "/" + notice_id).mkdir(parents=True, exist_ok=True)
save_file_name = self.make_save_file_name(notice_id, extension_file_name)
async with aiofiles.open(save_file_name, 'wb') as f:
await f.write(video_content)
- utils.logger.info(f"[XiaoHongShuVideoStoreImplement.save_video] save video {save_file_name} success ...")+ utils.logger.info(f"[XiaoHongShuVideoStoreImplement.save_video] save video {save_file_name} success ...")
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/store/xhs/xhs_store_media.py |
Turn comments into proper docstrings | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/store/zhihu/_store_impl.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : persist1@126.com
# @Time : 2025/9/5 19:34
# @Desc : Zhihu storage implementation class
import asyncio
import csv
import json
import os
import pathlib
from typing import Dict
import aiofiles
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
import config
from base.base_crawler import AbstractStore
from database.db_session import get_session
from database.models import ZhihuContent, ZhihuComment, ZhihuCreator
from tools import utils, words
from var import crawler_type_var
from tools.async_file_writer import AsyncFileWriter
from database.mongodb_store_base import MongoDBStoreBase
def calculate_number_of_files(file_store_path: str) -> int:
if not os.path.exists(file_store_path):
return 1
try:
return max([int(file_name.split("_")[0]) for file_name in os.listdir(file_store_path)]) + 1
except ValueError:
return 1
class ZhihuCsvStoreImplement(AbstractStore):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.writer = AsyncFileWriter(platform="zhihu", crawler_type=crawler_type_var.get())
async def store_content(self, content_item: Dict):
await self.writer.write_to_csv(item_type="contents", item=content_item)
async def store_comment(self, comment_item: Dict):
await self.writer.write_to_csv(item_type="comments", item=comment_item)
async def store_creator(self, creator: Dict):
await self.writer.write_to_csv(item_type="creators", item=creator)
class ZhihuDbStoreImplement(AbstractStore):
async def store_content(self, content_item: Dict):
content_id = content_item.get("content_id")
async with get_session() as session:
stmt = select(ZhihuContent).where(ZhihuContent.content_id == content_id)
result = await session.execute(stmt)
existing_content = result.scalars().first()
if existing_content:
for key, value in content_item.items():
if hasattr(existing_content, key):
setattr(existing_content, key, value)
else:
if "add_ts" not in content_item:
content_item["add_ts"] = utils.get_current_timestamp()
new_content = ZhihuContent(**content_item)
session.add(new_content)
await session.commit()
async def store_comment(self, comment_item: Dict):
comment_id = comment_item.get("comment_id")
async with get_session() as session:
stmt = select(ZhihuComment).where(ZhihuComment.comment_id == comment_id)
result = await session.execute(stmt)
existing_comment = result.scalars().first()
if existing_comment:
for key, value in comment_item.items():
if hasattr(existing_comment, key):
setattr(existing_comment, key, value)
else:
if "add_ts" not in comment_item:
comment_item["add_ts"] = utils.get_current_timestamp()
new_comment = ZhihuComment(**comment_item)
session.add(new_comment)
await session.commit()
async def store_creator(self, creator: Dict):
user_id = creator.get("user_id")
async with get_session() as session:
stmt = select(ZhihuCreator).where(ZhihuCreator.user_id == user_id)
result = await session.execute(stmt)
existing_creator = result.scalars().first()
if existing_creator:
for key, value in creator.items():
if hasattr(existing_creator, key):
setattr(existing_creator, key, value)
else:
if "add_ts" not in creator:
creator["add_ts"] = utils.get_current_timestamp()
new_creator = ZhihuCreator(**creator)
session.add(new_creator)
await session.commit()
class ZhihuJsonStoreImplement(AbstractStore):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.writer = AsyncFileWriter(platform="zhihu", crawler_type=crawler_type_var.get())
async def store_content(self, content_item: Dict):
await self.writer.write_single_item_to_json(item_type="contents", item=content_item)
async def store_comment(self, comment_item: Dict):
await self.writer.write_single_item_to_json(item_type="comments", item=comment_item)
async def store_creator(self, creator: Dict):
await self.writer.write_single_item_to_json(item_type="creators", item=creator)
class ZhihuJsonlStoreImplement(AbstractStore):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.writer = AsyncFileWriter(platform="zhihu", crawler_type=crawler_type_var.get())
async def store_content(self, content_item: Dict):
await self.writer.write_to_jsonl(item_type="contents", item=content_item)
async def store_comment(self, comment_item: Dict):
await self.writer.write_to_jsonl(item_type="comments", item=comment_item)
async def store_creator(self, creator: Dict):
await self.writer.write_to_jsonl(item_type="creators", item=creator)
class ZhihuSqliteStoreImplement(ZhihuDbStoreImplement):
pass
class ZhihuMongoStoreImplement(AbstractStore):
def __init__(self):
self.mongo_store = MongoDBStoreBase(collection_prefix="zhihu")
async def store_content(self, content_item: Dict):
note_id = content_item.get("note_id")
if not note_id:
return
await self.mongo_store.save_or_update(
collection_suffix="contents",
query={"note_id": note_id},
data=content_item
)
utils.logger.info(f"[ZhihuMongoStoreImplement.store_content] Saved note {note_id} to MongoDB")
async def store_comment(self, comment_item: Dict):
comment_id = comment_item.get("comment_id")
if not comment_id:
return
await self.mongo_store.save_or_update(
collection_suffix="comments",
query={"comment_id": comment_id},
data=comment_item
)
utils.logger.info(f"[ZhihuMongoStoreImplement.store_comment] Saved comment {comment_id} to MongoDB")
async def store_creator(self, creator_item: Dict):
user_id = creator_item.get("user_id")
if not user_id:
return
await self.mongo_store.save_or_update(
collection_suffix="creators",
query={"user_id": user_id},
data=creator_item
)
utils.logger.info(f"[ZhihuMongoStoreImplement.store_creator] Saved creator {user_id} to MongoDB")
class ZhihuExcelStoreImplement:
def __new__(cls, *args, **kwargs):
from store.excel_store_base import ExcelStoreBase
return ExcelStoreBase.get_instance(
platform="zhihu",
crawler_type=crawler_type_var.get()
) | --- +++ @@ -43,6 +43,12 @@ from database.mongodb_store_base import MongoDBStoreBase
def calculate_number_of_files(file_store_path: str) -> int:
+ """Calculate the prefix sorting number for data save files, supporting writing to different files for each run
+ Args:
+ file_store_path;
+ Returns:
+ file nums
+ """
if not os.path.exists(file_store_path):
return 1
try:
@@ -57,17 +63,46 @@ self.writer = AsyncFileWriter(platform="zhihu", crawler_type=crawler_type_var.get())
async def store_content(self, content_item: Dict):
+ """
+ Zhihu content CSV storage implementation
+ Args:
+ content_item: note item dict
+
+ Returns:
+
+ """
await self.writer.write_to_csv(item_type="contents", item=content_item)
async def store_comment(self, comment_item: Dict):
+ """
+ Zhihu comment CSV storage implementation
+ Args:
+ comment_item: comment item dict
+
+ Returns:
+
+ """
await self.writer.write_to_csv(item_type="comments", item=comment_item)
async def store_creator(self, creator: Dict):
+ """
+ Zhihu content CSV storage implementation
+ Args:
+ creator: creator dict
+
+ Returns:
+
+ """
await self.writer.write_to_csv(item_type="creators", item=creator)
class ZhihuDbStoreImplement(AbstractStore):
async def store_content(self, content_item: Dict):
+ """
+ Zhihu content DB storage implementation
+ Args:
+ content_item: content item dict
+ """
content_id = content_item.get("content_id")
async with get_session() as session:
stmt = select(ZhihuContent).where(ZhihuContent.content_id == content_id)
@@ -85,6 +120,11 @@ await session.commit()
async def store_comment(self, comment_item: Dict):
+ """
+ Zhihu content DB storage implementation
+ Args:
+ comment_item: comment item dict
+ """
comment_id = comment_item.get("comment_id")
async with get_session() as session:
stmt = select(ZhihuComment).where(ZhihuComment.comment_id == comment_id)
@@ -102,6 +142,11 @@ await session.commit()
async def store_creator(self, creator: Dict):
+ """
+ Zhihu content DB storage implementation
+ Args:
+ creator: creator dict
+ """
user_id = creator.get("user_id")
async with get_session() as session:
stmt = select(ZhihuCreator).where(ZhihuCreator.user_id == user_id)
@@ -125,12 +170,36 @@ self.writer = AsyncFileWriter(platform="zhihu", crawler_type=crawler_type_var.get())
async def store_content(self, content_item: Dict):
+ """
+ content JSON storage implementation
+ Args:
+ content_item:
+
+ Returns:
+
+ """
await self.writer.write_single_item_to_json(item_type="contents", item=content_item)
async def store_comment(self, comment_item: Dict):
+ """
+ comment JSON storage implementation
+ Args:
+ comment_item:
+
+ Returns:
+
+ """
await self.writer.write_single_item_to_json(item_type="comments", item=comment_item)
async def store_creator(self, creator: Dict):
+ """
+ Zhihu content JSON storage implementation
+ Args:
+ creator: creator dict
+
+ Returns:
+
+ """
await self.writer.write_single_item_to_json(item_type="creators", item=creator)
@@ -150,15 +219,24 @@
class ZhihuSqliteStoreImplement(ZhihuDbStoreImplement):
+ """
+ Zhihu content SQLite storage implementation
+ """
pass
class ZhihuMongoStoreImplement(AbstractStore):
+ """Zhihu MongoDB storage implementation"""
def __init__(self):
self.mongo_store = MongoDBStoreBase(collection_prefix="zhihu")
async def store_content(self, content_item: Dict):
+ """
+ Store content to MongoDB
+ Args:
+ content_item: Content data
+ """
note_id = content_item.get("note_id")
if not note_id:
return
@@ -171,6 +249,11 @@ utils.logger.info(f"[ZhihuMongoStoreImplement.store_content] Saved note {note_id} to MongoDB")
async def store_comment(self, comment_item: Dict):
+ """
+ Store comment to MongoDB
+ Args:
+ comment_item: Comment data
+ """
comment_id = comment_item.get("comment_id")
if not comment_id:
return
@@ -183,6 +266,11 @@ utils.logger.info(f"[ZhihuMongoStoreImplement.store_comment] Saved comment {comment_id} to MongoDB")
async def store_creator(self, creator_item: Dict):
+ """
+ Store creator information to MongoDB
+ Args:
+ creator_item: Creator data
+ """
user_id = creator_item.get("user_id")
if not user_id:
return
@@ -196,10 +284,11 @@
class ZhihuExcelStoreImplement:
+ """Zhihu Excel storage implementation - Global singleton"""
def __new__(cls, *args, **kwargs):
from store.excel_store_base import ExcelStoreBase
return ExcelStoreBase.get_instance(
platform="zhihu",
crawler_type=crawler_type_var.get()
- )+ )
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/store/zhihu/_store_impl.py |
Add docstrings to improve readability | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/store/xhs/__init__.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : relakkes@gmail.com
# @Time : 2024/1/14 17:34
# @Desc :
from typing import List
import config
from var import source_keyword_var
from .xhs_store_media import *
from ._store_impl import *
class XhsStoreFactory:
STORES = {
"csv": XhsCsvStoreImplement,
"db": XhsDbStoreImplement,
"postgres": XhsDbStoreImplement,
"json": XhsJsonStoreImplement,
"jsonl": XhsJsonlStoreImplement,
"sqlite": XhsSqliteStoreImplement,
"mongodb": XhsMongoStoreImplement,
"excel": XhsExcelStoreImplement,
}
@staticmethod
def create_store() -> AbstractStore:
store_class = XhsStoreFactory.STORES.get(config.SAVE_DATA_OPTION)
if not store_class:
raise ValueError("[XhsStoreFactory.create_store] Invalid save option only supported csv or db or json or sqlite or mongodb or excel ...")
return store_class()
def get_video_url_arr(note_item: Dict) -> List:
if note_item.get('type') != 'video':
return []
video_dict = note_item.get('video')
if not video_dict:
return []
videoArr = []
consumer = video_dict.get('consumer', {})
originVideoKey = consumer.get('origin_video_key', '')
if originVideoKey == '':
originVideoKey = consumer.get('originVideoKey', '')
# Fallback with watermark
if originVideoKey == '':
media = video_dict.get('media', {})
stream = media.get('stream', {})
videos = stream.get('h264')
if type(videos).__name__ == 'list':
videoArr = [v.get('master_url') for v in videos]
else:
videoArr = [f"http://sns-video-bd.xhscdn.com/{originVideoKey}"]
return videoArr
async def update_xhs_note(note_item: Dict):
note_id = note_item.get("note_id")
user_info = note_item.get("user", {})
interact_info = note_item.get("interact_info", {})
image_list: List[Dict] = note_item.get("image_list", [])
tag_list: List[Dict] = note_item.get("tag_list", [])
for img in image_list:
if img.get('url_default') != '':
img.update({'url': img.get('url_default')})
video_url = ','.join(get_video_url_arr(note_item))
local_db_item = {
"note_id": note_item.get("note_id"), # Note ID
"type": note_item.get("type"), # Note type
"title": note_item.get("title") or note_item.get("desc", "")[:255], # Note title
"desc": note_item.get("desc", ""), # Note description
"video_url": video_url, # Note video url
"time": note_item.get("time"), # Note publish time
"last_update_time": note_item.get("last_update_time", 0), # Note last update time
"user_id": user_info.get("user_id"), # User ID
"nickname": user_info.get("nickname"), # User nickname
"avatar": user_info.get("avatar"), # User avatar
"liked_count": interact_info.get("liked_count"), # Like count
"collected_count": interact_info.get("collected_count"), # Collection count
"comment_count": interact_info.get("comment_count"), # Comment count
"share_count": interact_info.get("share_count"), # Share count
"ip_location": note_item.get("ip_location", ""), # IP location
"image_list": ','.join([img.get('url', '') for img in image_list]), # Image URLs
"tag_list": ','.join([tag.get('name', '') for tag in tag_list if tag.get('type') == 'topic']), # Tags
"last_modify_ts": utils.get_current_timestamp(), # Last modification timestamp (Generated by MediaCrawler, mainly used to record the latest update time of a record in DB storage)
"note_url": f"https://www.xiaohongshu.com/explore/{note_id}?xsec_token={note_item.get('xsec_token')}&xsec_source=pc_search", # Note URL
"source_keyword": source_keyword_var.get(), # Search keyword
"xsec_token": note_item.get("xsec_token"), # xsec_token
}
utils.logger.info(f"[store.xhs.update_xhs_note] xhs note: {local_db_item}")
await XhsStoreFactory.create_store().store_content(local_db_item)
async def batch_update_xhs_note_comments(note_id: str, comments: List[Dict]):
if not comments:
return
for comment_item in comments:
await update_xhs_note_comment(note_id, comment_item)
async def update_xhs_note_comment(note_id: str, comment_item: Dict):
user_info = comment_item.get("user_info", {})
comment_id = comment_item.get("id")
comment_pictures = [item.get("url_default", "") for item in comment_item.get("pictures", [])]
target_comment = comment_item.get("target_comment", {})
local_db_item = {
"comment_id": comment_id, # Comment ID
"create_time": comment_item.get("create_time"), # Comment time
"ip_location": comment_item.get("ip_location"), # IP location
"note_id": note_id, # Note ID
"content": comment_item.get("content"), # Comment content
"user_id": user_info.get("user_id"), # User ID
"nickname": user_info.get("nickname"), # User nickname
"avatar": user_info.get("image"), # User avatar
"sub_comment_count": comment_item.get("sub_comment_count", 0), # Sub-comment count
"pictures": ",".join(comment_pictures), # Comment pictures
"parent_comment_id": target_comment.get("id", 0), # Parent comment ID
"last_modify_ts": utils.get_current_timestamp(), # Last modification timestamp (Generated by MediaCrawler, mainly used to record the latest update time of a record in DB storage)
"like_count": comment_item.get("like_count", 0),
}
utils.logger.info(f"[store.xhs.update_xhs_note_comment] xhs note comment:{local_db_item}")
await XhsStoreFactory.create_store().store_comment(local_db_item)
async def save_creator(user_id: str, creator: Dict):
user_info = creator.get('basicInfo', {})
follows = 0
fans = 0
interaction = 0
for i in creator.get('interactions'):
if i.get('type') == 'follows':
follows = i.get('count')
elif i.get('type') == 'fans':
fans = i.get('count')
elif i.get('type') == 'interaction':
interaction = i.get('count')
def get_gender(gender):
if gender == 1:
return 'Female'
elif gender == 0:
return 'Male'
else:
return None
local_db_item = {
'user_id': user_id, # User ID
'nickname': user_info.get('nickname'), # Nickname
'gender': get_gender(user_info.get('gender')), # Gender
'avatar': user_info.get('images'), # Avatar
'desc': user_info.get('desc'), # Personal description
'ip_location': user_info.get('ipLocation'), # IP location
'follows': follows, # Following count
'fans': fans, # Fans count
'interaction': interaction, # Interaction count
'tag_list': json.dumps({tag.get('tagType'): tag.get('name')
for tag in creator.get('tags')}, ensure_ascii=False), # Tags
"last_modify_ts": utils.get_current_timestamp(), # Last modification timestamp (Generated by MediaCrawler, mainly used to record the latest update time of a record in DB storage)
}
utils.logger.info(f"[store.xhs.save_creator] creator:{local_db_item}")
await XhsStoreFactory.create_store().store_creator(local_db_item)
async def update_xhs_note_image(note_id, pic_content, extension_file_name):
await XiaoHongShuImage().store_image({"notice_id": note_id, "pic_content": pic_content, "extension_file_name": extension_file_name})
async def update_xhs_note_video(note_id, video_content, extension_file_name):
await XiaoHongShuVideo().store_video({"notice_id": note_id, "video_content": video_content, "extension_file_name": extension_file_name}) | --- +++ @@ -51,6 +51,14 @@
def get_video_url_arr(note_item: Dict) -> List:
+ """
+ Get video url array
+ Args:
+ note_item:
+
+ Returns:
+
+ """
if note_item.get('type') != 'video':
return []
@@ -77,6 +85,14 @@
async def update_xhs_note(note_item: Dict):
+ """
+ Update Xiaohongshu note
+ Args:
+ note_item:
+
+ Returns:
+
+ """
note_id = note_item.get("note_id")
user_info = note_item.get("user", {})
interact_info = note_item.get("interact_info", {})
@@ -117,6 +133,15 @@
async def batch_update_xhs_note_comments(note_id: str, comments: List[Dict]):
+ """
+ Batch update Xiaohongshu note comments
+ Args:
+ note_id:
+ comments:
+
+ Returns:
+
+ """
if not comments:
return
for comment_item in comments:
@@ -124,6 +149,15 @@
async def update_xhs_note_comment(note_id: str, comment_item: Dict):
+ """
+ Update Xiaohongshu note comment
+ Args:
+ note_id:
+ comment_item:
+
+ Returns:
+
+ """
user_info = comment_item.get("user_info", {})
comment_id = comment_item.get("id")
comment_pictures = [item.get("url_default", "") for item in comment_item.get("pictures", [])]
@@ -148,6 +182,15 @@
async def save_creator(user_id: str, creator: Dict):
+ """
+ Save Xiaohongshu creator
+ Args:
+ user_id:
+ creator:
+
+ Returns:
+
+ """
user_info = creator.get('basicInfo', {})
follows = 0
@@ -188,10 +231,30 @@
async def update_xhs_note_image(note_id, pic_content, extension_file_name):
+ """
+ Update Xiaohongshu note image
+ Args:
+ note_id:
+ pic_content:
+ extension_file_name:
+
+ Returns:
+
+ """
await XiaoHongShuImage().store_image({"notice_id": note_id, "pic_content": pic_content, "extension_file_name": extension_file_name})
async def update_xhs_note_video(note_id, video_content, extension_file_name):
-
- await XiaoHongShuVideo().store_video({"notice_id": note_id, "video_content": video_content, "extension_file_name": extension_file_name})+ """
+ Update Xiaohongshu note video
+ Args:
+ note_id:
+ video_content:
+ extension_file_name:
+
+ Returns:
+
+ """
+
+ await XiaoHongShuVideo().store_video({"notice_id": note_id, "video_content": video_content, "extension_file_name": extension_file_name})
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/store/xhs/__init__.py |
Add docstrings to improve readability | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/store/xhs/_store_impl.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# @Author : persist1@126.com
# @Time : 2025/9/5 19:34
# @Desc : Xiaohongshu storage implementation class
import json
import os
from datetime import datetime
from typing import List, Dict, Any
from sqlalchemy import select, update, delete
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import Session
from base.base_crawler import AbstractStore
from database.db_session import get_session
from database.models import XhsNote, XhsNoteComment, XhsCreator
from tools.async_file_writer import AsyncFileWriter
from tools.time_util import get_current_timestamp
from var import crawler_type_var
from database.mongodb_store_base import MongoDBStoreBase
from tools import utils
from store.excel_store_base import ExcelStoreBase
class XhsCsvStoreImplement(AbstractStore):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.writer = AsyncFileWriter(platform="xhs", crawler_type=crawler_type_var.get())
async def store_content(self, content_item: Dict):
await self.writer.write_to_csv(item_type="contents", item=content_item)
async def store_comment(self, comment_item: Dict):
await self.writer.write_to_csv(item_type="comments", item=comment_item)
async def store_creator(self, creator_item: Dict):
pass
def flush(self):
pass
class XhsJsonStoreImplement(AbstractStore):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.writer = AsyncFileWriter(platform="xhs", crawler_type=crawler_type_var.get())
async def store_content(self, content_item: Dict):
await self.writer.write_single_item_to_json(item_type="contents", item=content_item)
async def store_comment(self, comment_item: Dict):
await self.writer.write_single_item_to_json(item_type="comments", item=comment_item)
async def store_creator(self, creator_item: Dict):
pass
def flush(self):
pass
class XhsJsonlStoreImplement(AbstractStore):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.writer = AsyncFileWriter(platform="xhs", crawler_type=crawler_type_var.get())
async def store_content(self, content_item: Dict):
await self.writer.write_to_jsonl(item_type="contents", item=content_item)
async def store_comment(self, comment_item: Dict):
await self.writer.write_to_jsonl(item_type="comments", item=comment_item)
async def store_creator(self, creator_item: Dict):
pass
def flush(self):
pass
class XhsDbStoreImplement(AbstractStore):
def __init__(self, **kwargs):
super().__init__(**kwargs)
async def store_content(self, content_item: Dict):
note_id = content_item.get("note_id")
if not note_id:
return
async with get_session() as session:
if await self.content_is_exist(session, note_id):
await self.update_content(session, content_item)
else:
await self.add_content(session, content_item)
async def add_content(self, session: AsyncSession, content_item: Dict):
add_ts = int(get_current_timestamp())
last_modify_ts = int(get_current_timestamp())
note = XhsNote(
user_id=content_item.get("user_id"),
nickname=content_item.get("nickname"),
avatar=content_item.get("avatar"),
ip_location=content_item.get("ip_location"),
add_ts=add_ts,
last_modify_ts=last_modify_ts,
note_id=content_item.get("note_id"),
type=content_item.get("type"),
title=content_item.get("title"),
desc=content_item.get("desc"),
video_url=content_item.get("video_url"),
time=content_item.get("time"),
last_update_time=content_item.get("last_update_time"),
liked_count=str(content_item.get("liked_count")),
collected_count=str(content_item.get("collected_count")),
comment_count=str(content_item.get("comment_count")),
share_count=str(content_item.get("share_count")),
image_list=json.dumps(content_item.get("image_list")),
tag_list=json.dumps(content_item.get("tag_list")),
note_url=content_item.get("note_url"),
source_keyword=content_item.get("source_keyword", ""),
xsec_token=content_item.get("xsec_token", "")
)
session.add(note)
async def update_content(self, session: AsyncSession, content_item: Dict):
note_id = content_item.get("note_id")
last_modify_ts = int(get_current_timestamp())
update_data = {
"last_modify_ts": last_modify_ts,
"liked_count": str(content_item.get("liked_count")),
"collected_count": str(content_item.get("collected_count")),
"comment_count": str(content_item.get("comment_count")),
"share_count": str(content_item.get("share_count")),
"last_update_time": content_item.get("last_update_time"),
}
stmt = update(XhsNote).where(XhsNote.note_id == note_id).values(**update_data)
await session.execute(stmt)
async def content_is_exist(self, session: AsyncSession, note_id: str) -> bool:
stmt = select(XhsNote).where(XhsNote.note_id == note_id)
result = await session.execute(stmt)
return result.first() is not None
async def store_comment(self, comment_item: Dict):
if not comment_item:
return
async with get_session() as session:
comment_id = comment_item.get("comment_id")
if not comment_id:
return
if await self.comment_is_exist(session, comment_id):
await self.update_comment(session, comment_item)
else:
await self.add_comment(session, comment_item)
async def add_comment(self, session: AsyncSession, comment_item: Dict):
add_ts = int(get_current_timestamp())
last_modify_ts = int(get_current_timestamp())
comment = XhsNoteComment(
user_id=comment_item.get("user_id"),
nickname=comment_item.get("nickname"),
avatar=comment_item.get("avatar"),
ip_location=comment_item.get("ip_location"),
add_ts=add_ts,
last_modify_ts=last_modify_ts,
comment_id=comment_item.get("comment_id"),
create_time=comment_item.get("create_time"),
note_id=comment_item.get("note_id"),
content=comment_item.get("content"),
sub_comment_count=int(comment_item.get("sub_comment_count", 0) or 0),
pictures=json.dumps(comment_item.get("pictures")),
parent_comment_id=str(comment_item.get("parent_comment_id", "")),
like_count=str(comment_item.get("like_count"))
)
session.add(comment)
async def update_comment(self, session: AsyncSession, comment_item: Dict):
comment_id = comment_item.get("comment_id")
last_modify_ts = int(get_current_timestamp())
update_data = {
"last_modify_ts": last_modify_ts,
"like_count": str(comment_item.get("like_count")),
"sub_comment_count": int(comment_item.get("sub_comment_count", 0) or 0),
}
stmt = update(XhsNoteComment).where(XhsNoteComment.comment_id == comment_id).values(**update_data)
await session.execute(stmt)
async def comment_is_exist(self, session: AsyncSession, comment_id: str) -> bool:
stmt = select(XhsNoteComment).where(XhsNoteComment.comment_id == comment_id)
result = await session.execute(stmt)
return result.first() is not None
async def store_creator(self, creator_item: Dict):
user_id = creator_item.get("user_id")
if not user_id:
return
async with get_session() as session:
if await self.creator_is_exist(session, user_id):
await self.update_creator(session, creator_item)
else:
await self.add_creator(session, creator_item)
async def add_creator(self, session: AsyncSession, creator_item: Dict):
add_ts = int(get_current_timestamp())
last_modify_ts = int(get_current_timestamp())
creator = XhsCreator(
user_id=creator_item.get("user_id"),
nickname=creator_item.get("nickname"),
avatar=creator_item.get("avatar"),
ip_location=creator_item.get("ip_location"),
add_ts=add_ts,
last_modify_ts=last_modify_ts,
desc=creator_item.get("desc"),
gender=creator_item.get("gender"),
follows=str(creator_item.get("follows")),
fans=str(creator_item.get("fans")),
interaction=str(creator_item.get("interaction")),
tag_list=json.dumps(creator_item.get("tag_list"))
)
session.add(creator)
async def update_creator(self, session: AsyncSession, creator_item: Dict):
user_id = creator_item.get("user_id")
last_modify_ts = int(get_current_timestamp())
update_data = {
"last_modify_ts": last_modify_ts,
"nickname": creator_item.get("nickname"),
"avatar": creator_item.get("avatar"),
"desc": creator_item.get("desc"),
"follows": str(creator_item.get("follows")),
"fans": str(creator_item.get("fans")),
"interaction": str(creator_item.get("interaction")),
"tag_list": json.dumps(creator_item.get("tag_list"))
}
stmt = update(XhsCreator).where(XhsCreator.user_id == user_id).values(**update_data)
await session.execute(stmt)
async def creator_is_exist(self, session: AsyncSession, user_id: str) -> bool:
stmt = select(XhsCreator).where(XhsCreator.user_id == user_id)
result = await session.execute(stmt)
return result.first() is not None
async def get_all_content(self) -> List[Dict]:
async with get_session() as session:
stmt = select(XhsNote)
result = await session.execute(stmt)
return [item.__dict__ for item in result.scalars().all()]
async def get_all_comments(self) -> List[Dict]:
async with get_session() as session:
stmt = select(XhsNoteComment)
result = await session.execute(stmt)
return [item.__dict__ for item in result.scalars().all()]
class XhsSqliteStoreImplement(XhsDbStoreImplement):
def __init__(self, **kwargs):
super().__init__(**kwargs)
class XhsMongoStoreImplement(AbstractStore):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.mongo_store = MongoDBStoreBase(collection_prefix="xhs")
async def store_content(self, content_item: Dict):
note_id = content_item.get("note_id")
if not note_id:
return
await self.mongo_store.save_or_update(
collection_suffix="contents",
query={"note_id": note_id},
data=content_item
)
utils.logger.info(f"[XhsMongoStoreImplement.store_content] Saved note {note_id} to MongoDB")
async def store_comment(self, comment_item: Dict):
comment_id = comment_item.get("comment_id")
if not comment_id:
return
await self.mongo_store.save_or_update(
collection_suffix="comments",
query={"comment_id": comment_id},
data=comment_item
)
utils.logger.info(f"[XhsMongoStoreImplement.store_comment] Saved comment {comment_id} to MongoDB")
async def store_creator(self, creator_item: Dict):
user_id = creator_item.get("user_id")
if not user_id:
return
await self.mongo_store.save_or_update(
collection_suffix="creators",
query={"user_id": user_id},
data=creator_item
)
utils.logger.info(f"[XhsMongoStoreImplement.store_creator] Saved creator {user_id} to MongoDB")
class XhsExcelStoreImplement:
def __new__(cls, *args, **kwargs):
from store.excel_store_base import ExcelStoreBase
return ExcelStoreBase.get_instance(
platform="xhs",
crawler_type=crawler_type_var.get()
) | --- +++ @@ -45,9 +45,19 @@ self.writer = AsyncFileWriter(platform="xhs", crawler_type=crawler_type_var.get())
async def store_content(self, content_item: Dict):
+ """
+ store content data to csv file
+ :param content_item:
+ :return:
+ """
await self.writer.write_to_csv(item_type="contents", item=content_item)
async def store_comment(self, comment_item: Dict):
+ """
+ store comment data to csv file
+ :param comment_item:
+ :return:
+ """
await self.writer.write_to_csv(item_type="comments", item=comment_item)
@@ -64,15 +74,29 @@ self.writer = AsyncFileWriter(platform="xhs", crawler_type=crawler_type_var.get())
async def store_content(self, content_item: Dict):
+ """
+ store content data to json file
+ :param content_item:
+ :return:
+ """
await self.writer.write_single_item_to_json(item_type="contents", item=content_item)
async def store_comment(self, comment_item: Dict):
+ """
+ store comment data to json file
+ :param comment_item:
+ :return:
+ """
await self.writer.write_single_item_to_json(item_type="comments", item=comment_item)
async def store_creator(self, creator_item: Dict):
pass
def flush(self):
+ """
+ flush data to json file
+ :return:
+ """
pass
@@ -275,12 +299,18 @@
class XhsMongoStoreImplement(AbstractStore):
+ """Xiaohongshu MongoDB storage implementation"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.mongo_store = MongoDBStoreBase(collection_prefix="xhs")
async def store_content(self, content_item: Dict):
+ """
+ Store note content to MongoDB
+ Args:
+ content_item: Note content data
+ """
note_id = content_item.get("note_id")
if not note_id:
return
@@ -293,6 +323,11 @@ utils.logger.info(f"[XhsMongoStoreImplement.store_content] Saved note {note_id} to MongoDB")
async def store_comment(self, comment_item: Dict):
+ """
+ Store comment to MongoDB
+ Args:
+ comment_item: Comment data
+ """
comment_id = comment_item.get("comment_id")
if not comment_id:
return
@@ -305,6 +340,11 @@ utils.logger.info(f"[XhsMongoStoreImplement.store_comment] Saved comment {comment_id} to MongoDB")
async def store_creator(self, creator_item: Dict):
+ """
+ Store creator information to MongoDB
+ Args:
+ creator_item: Creator data
+ """
user_id = creator_item.get("user_id")
if not user_id:
return
@@ -318,10 +358,11 @@
class XhsExcelStoreImplement:
+ """Xiaohongshu Excel storage implementation - Global singleton"""
def __new__(cls, *args, **kwargs):
from store.excel_store_base import ExcelStoreBase
return ExcelStoreBase.get_instance(
platform="xhs",
crawler_type=crawler_type_var.get()
- )+ )
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/store/xhs/_store_impl.py |
Add docstrings to incomplete code | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/store/tieba/_store_impl.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : persist1@126.com
# @Time : 2025/9/5 19:34
# @Desc : Tieba storage implementation class
import asyncio
import csv
import json
import os
import pathlib
from typing import Dict
import aiofiles
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
import config
from base.base_crawler import AbstractStore
from database.models import TiebaNote, TiebaComment, TiebaCreator
from tools import utils, words
from database.db_session import get_session
from var import crawler_type_var
from tools.async_file_writer import AsyncFileWriter
from database.mongodb_store_base import MongoDBStoreBase
def calculate_number_of_files(file_store_path: str) -> int:
if not os.path.exists(file_store_path):
return 1
try:
return max([int(file_name.split("_")[0]) for file_name in os.listdir(file_store_path)]) + 1
except ValueError:
return 1
class TieBaCsvStoreImplement(AbstractStore):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.writer = AsyncFileWriter(platform="tieba", crawler_type=crawler_type_var.get())
async def store_content(self, content_item: Dict):
await self.writer.write_to_csv(item_type="contents", item=content_item)
async def store_comment(self, comment_item: Dict):
await self.writer.write_to_csv(item_type="comments", item=comment_item)
async def store_creator(self, creator: Dict):
await self.writer.write_to_csv(item_type="creators", item=creator)
class TieBaDbStoreImplement(AbstractStore):
async def store_content(self, content_item: Dict):
note_id = content_item.get("note_id")
async with get_session() as session:
stmt = select(TiebaNote).where(TiebaNote.note_id == note_id)
res = await session.execute(stmt)
db_note = res.scalar_one_or_none()
if db_note:
for key, value in content_item.items():
setattr(db_note, key, value)
else:
db_note = TiebaNote(**content_item)
session.add(db_note)
await session.commit()
async def store_comment(self, comment_item: Dict):
comment_id = comment_item.get("comment_id")
async with get_session() as session:
stmt = select(TiebaComment).where(TiebaComment.comment_id == comment_id)
res = await session.execute(stmt)
db_comment = res.scalar_one_or_none()
if db_comment:
for key, value in comment_item.items():
setattr(db_comment, key, value)
else:
db_comment = TiebaComment(**comment_item)
session.add(db_comment)
await session.commit()
async def store_creator(self, creator: Dict):
user_id = creator.get("user_id")
async with get_session() as session:
stmt = select(TiebaCreator).where(TiebaCreator.user_id == user_id)
res = await session.execute(stmt)
db_creator = res.scalar_one_or_none()
if db_creator:
for key, value in creator.items():
setattr(db_creator, key, value)
else:
db_creator = TiebaCreator(**creator)
session.add(db_creator)
await session.commit()
class TieBaJsonStoreImplement(AbstractStore):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.writer = AsyncFileWriter(platform="tieba", crawler_type=crawler_type_var.get())
async def store_content(self, content_item: Dict):
await self.writer.write_single_item_to_json(item_type="contents", item=content_item)
async def store_comment(self, comment_item: Dict):
await self.writer.write_single_item_to_json(item_type="comments", item=comment_item)
async def store_creator(self, creator: Dict):
await self.writer.write_single_item_to_json(item_type="creators", item=creator)
class TieBaJsonlStoreImplement(AbstractStore):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.writer = AsyncFileWriter(platform="tieba", crawler_type=crawler_type_var.get())
async def store_content(self, content_item: Dict):
await self.writer.write_to_jsonl(item_type="contents", item=content_item)
async def store_comment(self, comment_item: Dict):
await self.writer.write_to_jsonl(item_type="comments", item=comment_item)
async def store_creator(self, creator: Dict):
await self.writer.write_to_jsonl(item_type="creators", item=creator)
class TieBaSqliteStoreImplement(TieBaDbStoreImplement):
pass
class TieBaMongoStoreImplement(AbstractStore):
def __init__(self):
self.mongo_store = MongoDBStoreBase(collection_prefix="tieba")
async def store_content(self, content_item: Dict):
note_id = content_item.get("note_id")
if not note_id:
return
await self.mongo_store.save_or_update(
collection_suffix="contents",
query={"note_id": note_id},
data=content_item
)
utils.logger.info(f"[TieBaMongoStoreImplement.store_content] Saved note {note_id} to MongoDB")
async def store_comment(self, comment_item: Dict):
comment_id = comment_item.get("comment_id")
if not comment_id:
return
await self.mongo_store.save_or_update(
collection_suffix="comments",
query={"comment_id": comment_id},
data=comment_item
)
utils.logger.info(f"[TieBaMongoStoreImplement.store_comment] Saved comment {comment_id} to MongoDB")
async def store_creator(self, creator_item: Dict):
user_id = creator_item.get("user_id")
if not user_id:
return
await self.mongo_store.save_or_update(
collection_suffix="creators",
query={"user_id": user_id},
data=creator_item
)
utils.logger.info(f"[TieBaMongoStoreImplement.store_creator] Saved creator {user_id} to MongoDB")
class TieBaExcelStoreImplement:
def __new__(cls, *args, **kwargs):
from store.excel_store_base import ExcelStoreBase
return ExcelStoreBase.get_instance(
platform="tieba",
crawler_type=crawler_type_var.get()
) | --- +++ @@ -44,6 +44,12 @@
def calculate_number_of_files(file_store_path: str) -> int:
+ """Calculate the prefix sorting number for data save files, supporting writing to different files for each run
+ Args:
+ file_store_path;
+ Returns:
+ file nums
+ """
if not os.path.exists(file_store_path):
return 1
try:
@@ -58,17 +64,46 @@ self.writer = AsyncFileWriter(platform="tieba", crawler_type=crawler_type_var.get())
async def store_content(self, content_item: Dict):
+ """
+ tieba content CSV storage implementation
+ Args:
+ content_item: note item dict
+
+ Returns:
+
+ """
await self.writer.write_to_csv(item_type="contents", item=content_item)
async def store_comment(self, comment_item: Dict):
+ """
+ tieba comment CSV storage implementation
+ Args:
+ comment_item: comment item dict
+
+ Returns:
+
+ """
await self.writer.write_to_csv(item_type="comments", item=comment_item)
async def store_creator(self, creator: Dict):
+ """
+ tieba content CSV storage implementation
+ Args:
+ creator: creator dict
+
+ Returns:
+
+ """
await self.writer.write_to_csv(item_type="creators", item=creator)
class TieBaDbStoreImplement(AbstractStore):
async def store_content(self, content_item: Dict):
+ """
+ tieba content DB storage implementation
+ Args:
+ content_item: content item dict
+ """
note_id = content_item.get("note_id")
async with get_session() as session:
stmt = select(TiebaNote).where(TiebaNote.note_id == note_id)
@@ -83,6 +118,11 @@ await session.commit()
async def store_comment(self, comment_item: Dict):
+ """
+ tieba content DB storage implementation
+ Args:
+ comment_item: comment item dict
+ """
comment_id = comment_item.get("comment_id")
async with get_session() as session:
stmt = select(TiebaComment).where(TiebaComment.comment_id == comment_id)
@@ -97,6 +137,11 @@ await session.commit()
async def store_creator(self, creator: Dict):
+ """
+ tieba content DB storage implementation
+ Args:
+ creator: creator dict
+ """
user_id = creator.get("user_id")
async with get_session() as session:
stmt = select(TiebaCreator).where(TiebaCreator.user_id == user_id)
@@ -117,12 +162,36 @@ self.writer = AsyncFileWriter(platform="tieba", crawler_type=crawler_type_var.get())
async def store_content(self, content_item: Dict):
+ """
+ tieba content JSON storage implementation
+ Args:
+ content_item: note item dict
+
+ Returns:
+
+ """
await self.writer.write_single_item_to_json(item_type="contents", item=content_item)
async def store_comment(self, comment_item: Dict):
+ """
+ tieba comment JSON storage implementation
+ Args:
+ comment_item: comment item dict
+
+ Returns:
+
+ """
await self.writer.write_single_item_to_json(item_type="comments", item=comment_item)
async def store_creator(self, creator: Dict):
+ """
+ tieba content JSON storage implementation
+ Args:
+ creator: creator dict
+
+ Returns:
+
+ """
await self.writer.write_single_item_to_json(item_type="creators", item=creator)
@@ -142,15 +211,24 @@
class TieBaSqliteStoreImplement(TieBaDbStoreImplement):
+ """
+ Tieba sqlite store implement
+ """
pass
class TieBaMongoStoreImplement(AbstractStore):
+ """Tieba MongoDB storage implementation"""
def __init__(self):
self.mongo_store = MongoDBStoreBase(collection_prefix="tieba")
async def store_content(self, content_item: Dict):
+ """
+ Store post content to MongoDB
+ Args:
+ content_item: Post content data
+ """
note_id = content_item.get("note_id")
if not note_id:
return
@@ -163,6 +241,11 @@ utils.logger.info(f"[TieBaMongoStoreImplement.store_content] Saved note {note_id} to MongoDB")
async def store_comment(self, comment_item: Dict):
+ """
+ Store comment to MongoDB
+ Args:
+ comment_item: Comment data
+ """
comment_id = comment_item.get("comment_id")
if not comment_id:
return
@@ -175,6 +258,11 @@ utils.logger.info(f"[TieBaMongoStoreImplement.store_comment] Saved comment {comment_id} to MongoDB")
async def store_creator(self, creator_item: Dict):
+ """
+ Store creator information to MongoDB
+ Args:
+ creator_item: Creator data
+ """
user_id = creator_item.get("user_id")
if not user_id:
return
@@ -188,10 +276,11 @@
class TieBaExcelStoreImplement:
+ """Tieba Excel storage implementation - Global singleton"""
def __new__(cls, *args, **kwargs):
from store.excel_store_base import ExcelStoreBase
return ExcelStoreBase.get_instance(
platform="tieba",
crawler_type=crawler_type_var.get()
- )+ )
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/store/tieba/_store_impl.py |
Add well-formatted docstrings | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/store/weibo/__init__.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : relakkes@gmail.com
# @Time : 2024/1/14 21:34
# @Desc :
import re
from typing import List
from var import source_keyword_var
from .weibo_store_media import *
from ._store_impl import *
class WeibostoreFactory:
STORES = {
"csv": WeiboCsvStoreImplement,
"db": WeiboDbStoreImplement,
"postgres": WeiboDbStoreImplement,
"json": WeiboJsonStoreImplement,
"jsonl": WeiboJsonlStoreImplement,
"sqlite": WeiboSqliteStoreImplement,
"mongodb": WeiboMongoStoreImplement,
"excel": WeiboExcelStoreImplement,
}
@staticmethod
def create_store() -> AbstractStore:
store_class = WeibostoreFactory.STORES.get(config.SAVE_DATA_OPTION)
if not store_class:
raise ValueError("[WeibotoreFactory.create_store] Invalid save option only supported csv or db or json or sqlite or mongodb or excel ...")
return store_class()
async def batch_update_weibo_notes(note_list: List[Dict]):
if not note_list:
return
for note_item in note_list:
await update_weibo_note(note_item)
async def update_weibo_note(note_item: Dict):
if not note_item:
return
mblog: Dict = note_item.get("mblog")
user_info: Dict = mblog.get("user")
note_id = mblog.get("id")
content_text = mblog.get("text")
clean_text = re.sub(r"<.*?>", "", content_text)
save_content_item = {
# Weibo information
"note_id": note_id,
"content": clean_text,
"create_time": utils.rfc2822_to_timestamp(mblog.get("created_at")),
"create_date_time": str(utils.rfc2822_to_china_datetime(mblog.get("created_at"))),
"liked_count": str(mblog.get("attitudes_count", 0)),
"comments_count": str(mblog.get("comments_count", 0)),
"shared_count": str(mblog.get("reposts_count", 0)),
"last_modify_ts": utils.get_current_timestamp(),
"note_url": f"https://m.weibo.cn/detail/{note_id}",
"ip_location": mblog.get("region_name", "").replace("发布于 ", ""),
# User information
"user_id": str(user_info.get("id")),
"nickname": user_info.get("screen_name", ""),
"gender": user_info.get("gender", ""),
"profile_url": user_info.get("profile_url", ""),
"avatar": user_info.get("profile_image_url", ""),
"source_keyword": source_keyword_var.get(),
}
utils.logger.info(f"[store.weibo.update_weibo_note] weibo note id:{note_id}, title:{save_content_item.get('content')[:24]} ...")
await WeibostoreFactory.create_store().store_content(content_item=save_content_item)
async def batch_update_weibo_note_comments(note_id: str, comments: List[Dict]):
if not comments:
return
for comment_item in comments:
await update_weibo_note_comment(note_id, comment_item)
async def update_weibo_note_comment(note_id: str, comment_item: Dict):
if not comment_item or not note_id:
return
comment_id = str(comment_item.get("id"))
user_info: Dict = comment_item.get("user")
content_text = comment_item.get("text")
clean_text = re.sub(r"<.*?>", "", content_text)
save_comment_item = {
"comment_id": comment_id,
"create_time": utils.rfc2822_to_timestamp(comment_item.get("created_at")),
"create_date_time": str(utils.rfc2822_to_china_datetime(comment_item.get("created_at"))),
"note_id": note_id,
"content": clean_text,
"sub_comment_count": str(comment_item.get("total_number", 0)),
"comment_like_count": str(comment_item.get("like_count", 0)),
"last_modify_ts": utils.get_current_timestamp(),
"ip_location": comment_item.get("source", "").replace("来自", ""),
"parent_comment_id": comment_item.get("rootid", ""),
# User information
"user_id": str(user_info.get("id")),
"nickname": user_info.get("screen_name", ""),
"gender": user_info.get("gender", ""),
"profile_url": user_info.get("profile_url", ""),
"avatar": user_info.get("profile_image_url", ""),
}
utils.logger.info(f"[store.weibo.update_weibo_note_comment] Weibo note comment: {comment_id}, content: {save_comment_item.get('content', '')[:24]} ...")
await WeibostoreFactory.create_store().store_comment(comment_item=save_comment_item)
async def update_weibo_note_image(picid: str, pic_content, extension_file_name):
await WeiboStoreImage().store_image({"pic_id": picid, "pic_content": pic_content, "extension_file_name": extension_file_name})
async def save_creator(user_id: str, user_info: Dict):
local_db_item = {
'user_id': user_id,
'nickname': user_info.get('screen_name'),
'gender': 'Female' if user_info.get('gender') == "f" else 'Male',
'avatar': user_info.get('avatar_hd'),
'desc': user_info.get('description'),
'ip_location': user_info.get("source", "").replace("来自", ""),
'follows': user_info.get('follow_count', ''),
'fans': user_info.get('followers_count', ''),
'tag_list': '',
"last_modify_ts": utils.get_current_timestamp(),
}
utils.logger.info(f"[store.weibo.save_creator] creator:{local_db_item}")
await WeibostoreFactory.create_store().store_creator(local_db_item) | --- +++ @@ -52,6 +52,14 @@
async def batch_update_weibo_notes(note_list: List[Dict]):
+ """
+ Batch update weibo notes
+ Args:
+ note_list:
+
+ Returns:
+
+ """
if not note_list:
return
for note_item in note_list:
@@ -59,6 +67,14 @@
async def update_weibo_note(note_item: Dict):
+ """
+ Update weibo note
+ Args:
+ note_item:
+
+ Returns:
+
+ """
if not note_item:
return
@@ -93,6 +109,15 @@
async def batch_update_weibo_note_comments(note_id: str, comments: List[Dict]):
+ """
+ Batch update weibo note comments
+ Args:
+ note_id:
+ comments:
+
+ Returns:
+
+ """
if not comments:
return
for comment_item in comments:
@@ -100,6 +125,15 @@
async def update_weibo_note_comment(note_id: str, comment_item: Dict):
+ """
+ Update weibo note comment
+ Args:
+ note_id: weibo note id
+ comment_item: weibo comment item
+
+ Returns:
+
+ """
if not comment_item or not note_id:
return
comment_id = str(comment_item.get("id"))
@@ -130,10 +164,29 @@
async def update_weibo_note_image(picid: str, pic_content, extension_file_name):
+ """
+ Save weibo note image to local
+ Args:
+ picid:
+ pic_content:
+ extension_file_name:
+
+ Returns:
+
+ """
await WeiboStoreImage().store_image({"pic_id": picid, "pic_content": pic_content, "extension_file_name": extension_file_name})
async def save_creator(user_id: str, user_info: Dict):
+ """
+ Save creator information to local
+ Args:
+ user_id:
+ user_info:
+
+ Returns:
+
+ """
local_db_item = {
'user_id': user_id,
'nickname': user_info.get('screen_name'),
@@ -147,4 +200,4 @@ "last_modify_ts": utils.get_current_timestamp(),
}
utils.logger.info(f"[store.weibo.save_creator] creator:{local_db_item}")
- await WeibostoreFactory.create_store().store_creator(local_db_item)+ await WeibostoreFactory.create_store().store_creator(local_db_item)
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/store/weibo/__init__.py |
Write beginner-friendly docstrings | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/store/weibo/_store_impl.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : persist1@126.com
# @Time : 2025/9/5 19:34
# @Desc : Weibo storage implementation class
import asyncio
import csv
import json
import os
import pathlib
from typing import Dict
import aiofiles
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
import config
from base.base_crawler import AbstractStore
from database.models import WeiboCreator, WeiboNote, WeiboNoteComment
from tools import utils, words
from tools.async_file_writer import AsyncFileWriter
from database.db_session import get_session
from var import crawler_type_var
from database.mongodb_store_base import MongoDBStoreBase
def calculate_number_of_files(file_store_path: str) -> int:
if not os.path.exists(file_store_path):
return 1
try:
return max([int(file_name.split("_")[0]) for file_name in os.listdir(file_store_path)]) + 1
except ValueError:
return 1
class WeiboCsvStoreImplement(AbstractStore):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.writer = AsyncFileWriter(platform="weibo", crawler_type=crawler_type_var.get())
async def store_content(self, content_item: Dict):
await self.writer.write_to_csv(item_type="contents", item=content_item)
async def store_comment(self, comment_item: Dict):
await self.writer.write_to_csv(item_type="comments", item=comment_item)
async def store_creator(self, creator: Dict):
await self.writer.write_to_csv(item_type="creators", item=creator)
class WeiboDbStoreImplement(AbstractStore):
async def store_content(self, content_item: Dict):
note_id = int(content_item.get("note_id"))
content_item["note_id"] = note_id
async with get_session() as session:
stmt = select(WeiboNote).where(WeiboNote.note_id == note_id)
res = await session.execute(stmt)
db_note = res.scalar_one_or_none()
if db_note:
db_note.last_modify_ts = utils.get_current_timestamp()
for key, value in content_item.items():
if hasattr(db_note, key):
setattr(db_note, key, value)
else:
content_item["add_ts"] = utils.get_current_timestamp()
content_item["last_modify_ts"] = utils.get_current_timestamp()
db_note = WeiboNote(**content_item)
session.add(db_note)
await session.commit()
async def store_comment(self, comment_item: Dict):
comment_id = int(comment_item.get("comment_id"))
comment_item["comment_id"] = comment_id
comment_item["note_id"] = int(comment_item.get("note_id", 0) or 0)
comment_item["create_time"] = int(comment_item.get("create_time", 0) or 0)
comment_item["comment_like_count"] = str(comment_item.get("comment_like_count", "0"))
comment_item["sub_comment_count"] = str(comment_item.get("sub_comment_count", "0"))
comment_item["parent_comment_id"] = str(comment_item.get("parent_comment_id", "0"))
async with get_session() as session:
stmt = select(WeiboNoteComment).where(WeiboNoteComment.comment_id == comment_id)
res = await session.execute(stmt)
db_comment = res.scalar_one_or_none()
if db_comment:
db_comment.last_modify_ts = utils.get_current_timestamp()
for key, value in comment_item.items():
if hasattr(db_comment, key):
setattr(db_comment, key, value)
else:
comment_item["add_ts"] = utils.get_current_timestamp()
comment_item["last_modify_ts"] = utils.get_current_timestamp()
db_comment = WeiboNoteComment(**comment_item)
session.add(db_comment)
await session.commit()
async def store_creator(self, creator: Dict):
user_id = int(creator.get("user_id"))
creator["user_id"] = user_id
async with get_session() as session:
stmt = select(WeiboCreator).where(WeiboCreator.user_id == user_id)
res = await session.execute(stmt)
db_creator = res.scalar_one_or_none()
if db_creator:
db_creator.last_modify_ts = utils.get_current_timestamp()
for key, value in creator.items():
if hasattr(db_creator, key):
setattr(db_creator, key, value)
else:
creator["add_ts"] = utils.get_current_timestamp()
creator["last_modify_ts"] = utils.get_current_timestamp()
db_creator = WeiboCreator(**creator)
session.add(db_creator)
await session.commit()
class WeiboJsonStoreImplement(AbstractStore):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.writer = AsyncFileWriter(platform="weibo", crawler_type=crawler_type_var.get())
async def store_content(self, content_item: Dict):
await self.writer.write_single_item_to_json(item_type="contents", item=content_item)
async def store_comment(self, comment_item: Dict):
await self.writer.write_single_item_to_json(item_type="comments", item=comment_item)
async def store_creator(self, creator: Dict):
await self.writer.write_single_item_to_json(item_type="creators", item=creator)
class WeiboJsonlStoreImplement(AbstractStore):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.writer = AsyncFileWriter(platform="weibo", crawler_type=crawler_type_var.get())
async def store_content(self, content_item: Dict):
await self.writer.write_to_jsonl(item_type="contents", item=content_item)
async def store_comment(self, comment_item: Dict):
await self.writer.write_to_jsonl(item_type="comments", item=comment_item)
async def store_creator(self, creator: Dict):
await self.writer.write_to_jsonl(item_type="creators", item=creator)
class WeiboSqliteStoreImplement(WeiboDbStoreImplement):
pass
class WeiboMongoStoreImplement(AbstractStore):
def __init__(self):
self.mongo_store = MongoDBStoreBase(collection_prefix="weibo")
async def store_content(self, content_item: Dict):
note_id = content_item.get("note_id")
if not note_id:
return
await self.mongo_store.save_or_update(
collection_suffix="contents",
query={"note_id": note_id},
data=content_item
)
utils.logger.info(f"[WeiboMongoStoreImplement.store_content] Saved note {note_id} to MongoDB")
async def store_comment(self, comment_item: Dict):
comment_id = comment_item.get("comment_id")
if not comment_id:
return
await self.mongo_store.save_or_update(
collection_suffix="comments",
query={"comment_id": comment_id},
data=comment_item
)
utils.logger.info(f"[WeiboMongoStoreImplement.store_comment] Saved comment {comment_id} to MongoDB")
async def store_creator(self, creator_item: Dict):
user_id = creator_item.get("user_id")
if not user_id:
return
await self.mongo_store.save_or_update(
collection_suffix="creators",
query={"user_id": user_id},
data=creator_item
)
utils.logger.info(f"[WeiboMongoStoreImplement.store_creator] Saved creator {user_id} to MongoDB")
class WeiboExcelStoreImplement:
def __new__(cls, *args, **kwargs):
from store.excel_store_base import ExcelStoreBase
return ExcelStoreBase.get_instance(
platform="weibo",
crawler_type=crawler_type_var.get()
) | --- +++ @@ -44,6 +44,12 @@
def calculate_number_of_files(file_store_path: str) -> int:
+ """Calculate the prefix sorting number for data save files, supporting writing to different files for each run
+ Args:
+ file_store_path;
+ Returns:
+ file nums
+ """
if not os.path.exists(file_store_path):
return 1
try:
@@ -58,18 +64,50 @@ self.writer = AsyncFileWriter(platform="weibo", crawler_type=crawler_type_var.get())
async def store_content(self, content_item: Dict):
+ """
+ Weibo content CSV storage implementation
+ Args:
+ content_item: note item dict
+
+ Returns:
+
+ """
await self.writer.write_to_csv(item_type="contents", item=content_item)
async def store_comment(self, comment_item: Dict):
+ """
+ Weibo comment CSV storage implementation
+ Args:
+ comment_item: comment item dict
+
+ Returns:
+
+ """
await self.writer.write_to_csv(item_type="comments", item=comment_item)
async def store_creator(self, creator: Dict):
+ """
+ Weibo creator CSV storage implementation
+ Args:
+ creator:
+
+ Returns:
+
+ """
await self.writer.write_to_csv(item_type="creators", item=creator)
class WeiboDbStoreImplement(AbstractStore):
async def store_content(self, content_item: Dict):
+ """
+ Weibo content DB storage implementation
+ Args:
+ content_item: content item dict
+
+ Returns:
+
+ """
note_id = int(content_item.get("note_id"))
content_item["note_id"] = note_id
async with get_session() as session:
@@ -89,6 +127,14 @@ await session.commit()
async def store_comment(self, comment_item: Dict):
+ """
+ Weibo content DB storage implementation
+ Args:
+ comment_item: comment item dict
+
+ Returns:
+
+ """
comment_id = int(comment_item.get("comment_id"))
comment_item["comment_id"] = comment_id
comment_item["note_id"] = int(comment_item.get("note_id", 0) or 0)
@@ -114,6 +160,14 @@ await session.commit()
async def store_creator(self, creator: Dict):
+ """
+ Weibo creator DB storage implementation
+ Args:
+ creator:
+
+ Returns:
+
+ """
user_id = int(creator.get("user_id"))
creator["user_id"] = user_id
async with get_session() as session:
@@ -139,12 +193,36 @@ self.writer = AsyncFileWriter(platform="weibo", crawler_type=crawler_type_var.get())
async def store_content(self, content_item: Dict):
+ """
+ content JSON storage implementation
+ Args:
+ content_item:
+
+ Returns:
+
+ """
await self.writer.write_single_item_to_json(item_type="contents", item=content_item)
async def store_comment(self, comment_item: Dict):
+ """
+ comment JSON storage implementation
+ Args:
+ comment_item:
+
+ Returns:
+
+ """
await self.writer.write_single_item_to_json(item_type="comments", item=comment_item)
async def store_creator(self, creator: Dict):
+ """
+ creator JSON storage implementation
+ Args:
+ creator:
+
+ Returns:
+
+ """
await self.writer.write_single_item_to_json(item_type="creators", item=creator)
@@ -164,15 +242,24 @@
class WeiboSqliteStoreImplement(WeiboDbStoreImplement):
+ """
+ Weibo content SQLite storage implementation
+ """
pass
class WeiboMongoStoreImplement(AbstractStore):
+ """Weibo MongoDB storage implementation"""
def __init__(self):
self.mongo_store = MongoDBStoreBase(collection_prefix="weibo")
async def store_content(self, content_item: Dict):
+ """
+ Store Weibo content to MongoDB
+ Args:
+ content_item: Weibo content data
+ """
note_id = content_item.get("note_id")
if not note_id:
return
@@ -185,6 +272,11 @@ utils.logger.info(f"[WeiboMongoStoreImplement.store_content] Saved note {note_id} to MongoDB")
async def store_comment(self, comment_item: Dict):
+ """
+ Store comment to MongoDB
+ Args:
+ comment_item: Comment data
+ """
comment_id = comment_item.get("comment_id")
if not comment_id:
return
@@ -197,6 +289,11 @@ utils.logger.info(f"[WeiboMongoStoreImplement.store_comment] Saved comment {comment_id} to MongoDB")
async def store_creator(self, creator_item: Dict):
+ """
+ Store creator information to MongoDB
+ Args:
+ creator_item: Creator data
+ """
user_id = creator_item.get("user_id")
if not user_id:
return
@@ -210,10 +307,11 @@
class WeiboExcelStoreImplement:
+ """Weibo Excel storage implementation - Global singleton"""
def __new__(cls, *args, **kwargs):
from store.excel_store_base import ExcelStoreBase
return ExcelStoreBase.get_instance(
platform="weibo",
crawler_type=crawler_type_var.get()
- )+ )
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/store/weibo/_store_impl.py |
Write docstrings for backend logic | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/proxy/providers/wandou_http_proxy.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : relakkes@gmail.com
# @Time : 2025/7/31
# @Desc : WanDou HTTP proxy IP implementation
import os
from typing import Dict, List
from urllib.parse import urlencode
import httpx
from proxy import IpCache, IpGetError, ProxyProvider
from proxy.types import IpInfoModel
from tools import utils
class WanDouHttpProxy(ProxyProvider):
def __init__(self, app_key: str, num: int = 100):
self.proxy_brand_name = "WANDOUHTTP"
self.api_path = "https://api.wandouapp.com/"
self.params = {
"app_key": app_key,
"num": num,
}
self.ip_cache = IpCache()
async def get_proxy(self, num: int) -> List[IpInfoModel]:
# Prioritize getting IP from cache
ip_cache_list = self.ip_cache.load_all_ip(
proxy_brand_name=self.proxy_brand_name
)
if len(ip_cache_list) >= num:
return ip_cache_list[:num]
# If the quantity in cache is insufficient, get from IP provider to supplement, then store in cache
need_get_count = num - len(ip_cache_list)
self.params.update({"num": min(need_get_count, 100)}) # Maximum 100
ip_infos = []
async with httpx.AsyncClient() as client:
url = self.api_path + "?" + urlencode(self.params)
utils.logger.info(f"[WanDouHttpProxy.get_proxy] get ip proxy url:{url}")
response = await client.get(
url,
headers={
"User-Agent": "MediaCrawler https://github.com/NanmiCoder/MediaCrawler",
},
)
res_dict: Dict = response.json()
if res_dict.get("code") == 200:
data: List[Dict] = res_dict.get("data", [])
current_ts = utils.get_unix_timestamp()
for ip_item in data:
ip_info_model = IpInfoModel(
ip=ip_item.get("ip"),
port=ip_item.get("port"),
user="", # WanDou HTTP does not require username password authentication
password="",
expired_time_ts=utils.get_unix_time_from_time_str(
ip_item.get("expire_time")
),
)
ip_key = f"WANDOUHTTP_{ip_info_model.ip}_{ip_info_model.port}"
ip_value = ip_info_model.model_dump_json()
ip_infos.append(ip_info_model)
self.ip_cache.set_ip(
ip_key, ip_value, ex=ip_info_model.expired_time_ts - current_ts
)
else:
error_msg = res_dict.get("msg", "unknown error")
# Handle specific error codes
error_code = res_dict.get("code")
if error_code == 10001:
error_msg = "General error, check msg content for specific error information"
elif error_code == 10048:
error_msg = "No available package"
raise IpGetError(f"{error_msg} (code: {error_code})")
return ip_cache_list + ip_infos
def new_wandou_http_proxy() -> WanDouHttpProxy:
# Support both uppercase and lowercase environment variable formats, prioritize uppercase
app_key = os.getenv("WANDOU_APP_KEY") or os.getenv("wandou_app_key", "your_wandou_http_app_key")
return WanDouHttpProxy(app_key=app_key) | --- +++ @@ -35,6 +35,11 @@ class WanDouHttpProxy(ProxyProvider):
def __init__(self, app_key: str, num: int = 100):
+ """
+ WanDou HTTP proxy IP implementation
+ :param app_key: Open app_key, can be obtained through user center
+ :param num: Number of IPs extracted at once, maximum 100
+ """
self.proxy_brand_name = "WANDOUHTTP"
self.api_path = "https://api.wandouapp.com/"
self.params = {
@@ -44,6 +49,10 @@ self.ip_cache = IpCache()
async def get_proxy(self, num: int) -> List[IpInfoModel]:
+ """
+ :param num:
+ :return:
+ """
# Prioritize getting IP from cache
ip_cache_list = self.ip_cache.load_all_ip(
@@ -98,7 +107,16 @@
def new_wandou_http_proxy() -> WanDouHttpProxy:
+ """
+ Construct WanDou HTTP instance
+ Supports two environment variable naming formats:
+ 1. Uppercase format: WANDOU_APP_KEY
+ 2. Lowercase format: wandou_app_key
+ Prioritize uppercase format, use lowercase format if not exists
+ Returns:
+
+ """
# Support both uppercase and lowercase environment variable formats, prioritize uppercase
app_key = os.getenv("WANDOU_APP_KEY") or os.getenv("wandou_app_key", "your_wandou_http_app_key")
- return WanDouHttpProxy(app_key=app_key)+ return WanDouHttpProxy(app_key=app_key)
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/proxy/providers/wandou_http_proxy.py |
Add minimal docstrings for each function | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/store/weibo/weibo_store_media.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : Erm
# @Time : 2024/4/9 17:35
# @Desc : Weibo media storage
import pathlib
from typing import Dict
import aiofiles
from base.base_crawler import AbstractStoreImage, AbstractStoreVideo
from tools import utils
import config
class WeiboStoreImage(AbstractStoreImage):
def __init__(self):
if config.SAVE_DATA_PATH:
self.image_store_path = f"{config.SAVE_DATA_PATH}/weibo/images"
else:
self.image_store_path = "data/weibo/images"
async def store_image(self, image_content_item: Dict):
await self.save_image(image_content_item.get("pic_id"), image_content_item.get("pic_content"), image_content_item.get("extension_file_name"))
def make_save_file_name(self, picid: str, extension_file_name: str) -> str:
return f"{self.image_store_path}/{picid}.{extension_file_name}"
async def save_image(self, picid: str, pic_content: str, extension_file_name="jpg"):
pathlib.Path(self.image_store_path).mkdir(parents=True, exist_ok=True)
save_file_name = self.make_save_file_name(picid, extension_file_name)
async with aiofiles.open(save_file_name, 'wb') as f:
await f.write(pic_content)
utils.logger.info(f"[WeiboImageStoreImplement.save_image] save image {save_file_name} success ...") | --- +++ @@ -39,14 +39,44 @@ self.image_store_path = "data/weibo/images"
async def store_image(self, image_content_item: Dict):
+ """
+ store content
+
+ Args:
+ image_content_item:
+
+ Returns:
+
+ """
await self.save_image(image_content_item.get("pic_id"), image_content_item.get("pic_content"), image_content_item.get("extension_file_name"))
def make_save_file_name(self, picid: str, extension_file_name: str) -> str:
+ """
+ make save file name by store type
+
+ Args:
+ picid: image id
+ extension_file_name: video filename with extension
+
+ Returns:
+
+ """
return f"{self.image_store_path}/{picid}.{extension_file_name}"
async def save_image(self, picid: str, pic_content: str, extension_file_name="jpg"):
+ """
+ save image to local
+
+ Args:
+ picid: image id
+ pic_content: image content
+ extension_file_name: image filename with extension
+
+ Returns:
+
+ """
pathlib.Path(self.image_store_path).mkdir(parents=True, exist_ok=True)
save_file_name = self.make_save_file_name(picid, extension_file_name)
async with aiofiles.open(save_file_name, 'wb') as f:
await f.write(pic_content)
- utils.logger.info(f"[WeiboImageStoreImplement.save_image] save image {save_file_name} success ...")+ utils.logger.info(f"[WeiboImageStoreImplement.save_image] save image {save_file_name} success ...")
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/store/weibo/weibo_store_media.py |
Add concise docstrings to each method | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/proxy/providers/kuaidl_proxy.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : relakkes@gmail.com
# @Time : 2024/4/5 09:43
# @Desc : KuaiDaili HTTP implementation, official documentation: https://www.kuaidaili.com/?ref=ldwkjqipvz6c
import os
import re
from typing import Dict, List
import httpx
from pydantic import BaseModel, Field
from proxy import IpCache, IpInfoModel, ProxyProvider
from proxy.types import ProviderNameEnum
from tools import utils
# KuaiDaili IP proxy expiration time is moved forward by 5 seconds to avoid critical time usage failure
DELTA_EXPIRED_SECOND = 5
class KuaidailiProxyModel(BaseModel):
ip: str = Field("ip")
port: int = Field("port")
expire_ts: int = Field("Expiration time, in seconds, how many seconds until expiration")
def parse_kuaidaili_proxy(proxy_info: str) -> KuaidailiProxyModel:
proxies: List[str] = proxy_info.split(":")
if len(proxies) != 2:
raise Exception("not invalid kuaidaili proxy info")
pattern = r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):(\d{1,5}),(\d+)'
match = re.search(pattern, proxy_info)
if not match.groups():
raise Exception("not match kuaidaili proxy info")
return KuaidailiProxyModel(
ip=match.groups()[0],
port=int(match.groups()[1]),
expire_ts=int(match.groups()[2])
)
class KuaiDaiLiProxy(ProxyProvider):
def __init__(self, kdl_user_name: str, kdl_user_pwd: str, kdl_secret_id: str, kdl_signature: str):
self.kdl_user_name = kdl_user_name
self.kdl_user_pwd = kdl_user_pwd
self.api_base = "https://dps.kdlapi.com/"
self.secret_id = kdl_secret_id
self.signature = kdl_signature
self.ip_cache = IpCache()
self.proxy_brand_name = ProviderNameEnum.KUAI_DAILI_PROVIDER.value
self.params = {
"secret_id": self.secret_id,
"signature": self.signature,
"pt": 1,
"format": "json",
"sep": 1,
"f_et": 1,
}
async def get_proxy(self, num: int) -> List[IpInfoModel]:
uri = "/api/getdps/"
# Prioritize getting IP from cache
ip_cache_list = self.ip_cache.load_all_ip(proxy_brand_name=self.proxy_brand_name)
if len(ip_cache_list) >= num:
return ip_cache_list[:num]
# If the quantity in cache is insufficient, get from IP provider to supplement, then store in cache
need_get_count = num - len(ip_cache_list)
self.params.update({"num": need_get_count})
ip_infos: List[IpInfoModel] = []
async with httpx.AsyncClient() as client:
response = await client.get(self.api_base + uri, params=self.params)
if response.status_code != 200:
utils.logger.error(f"[KuaiDaiLiProxy.get_proxies] statuc code not 200 and response.txt:{response.text}, status code: {response.status_code}")
raise Exception("get ip error from proxy provider and status code not 200 ...")
ip_response: Dict = response.json()
if ip_response.get("code") != 0:
utils.logger.error(f"[KuaiDaiLiProxy.get_proxies] code not 0 and msg:{ip_response.get('msg')}")
raise Exception("get ip error from proxy provider and code not 0 ...")
proxy_list: List[str] = ip_response.get("data", {}).get("proxy_list")
for proxy in proxy_list:
proxy_model = parse_kuaidaili_proxy(proxy)
# expire_ts is relative time (seconds), needs to be converted to absolute timestamp
# Consider expired DELTA_EXPIRED_SECOND seconds in advance to avoid critical time usage failure
ip_info_model = IpInfoModel(
ip=proxy_model.ip,
port=proxy_model.port,
user=self.kdl_user_name,
password=self.kdl_user_pwd,
expired_time_ts=proxy_model.expire_ts + utils.get_unix_timestamp() - DELTA_EXPIRED_SECOND,
)
ip_key = f"{self.proxy_brand_name}_{ip_info_model.ip}_{ip_info_model.port}"
# Cache expiration time uses relative time (seconds), also needs to subtract buffer time
self.ip_cache.set_ip(ip_key, ip_info_model.model_dump_json(), ex=proxy_model.expire_ts - DELTA_EXPIRED_SECOND)
ip_infos.append(ip_info_model)
return ip_cache_list + ip_infos
def new_kuai_daili_proxy() -> KuaiDaiLiProxy:
# Support both uppercase and lowercase environment variable formats, prioritize uppercase
kdl_secret_id = os.getenv("KDL_SECERT_ID") or os.getenv("kdl_secret_id", "your_kuaidaili_secret_id")
kdl_signature = os.getenv("KDL_SIGNATURE") or os.getenv("kdl_signature", "your_kuaidaili_signature")
kdl_user_name = os.getenv("KDL_USER_NAME") or os.getenv("kdl_user_name", "your_kuaidaili_username")
kdl_user_pwd = os.getenv("KDL_USER_PWD") or os.getenv("kdl_user_pwd", "your_kuaidaili_password")
return KuaiDaiLiProxy(
kdl_secret_id=kdl_secret_id,
kdl_signature=kdl_signature,
kdl_user_name=kdl_user_name,
kdl_user_pwd=kdl_user_pwd,
) | --- +++ @@ -44,6 +44,14 @@
def parse_kuaidaili_proxy(proxy_info: str) -> KuaidailiProxyModel:
+ """
+ Parse KuaiDaili IP information
+ Args:
+ proxy_info:
+
+ Returns:
+
+ """
proxies: List[str] = proxy_info.split(":")
if len(proxies) != 2:
raise Exception("not invalid kuaidaili proxy info")
@@ -62,6 +70,12 @@
class KuaiDaiLiProxy(ProxyProvider):
def __init__(self, kdl_user_name: str, kdl_user_pwd: str, kdl_secret_id: str, kdl_signature: str):
+ """
+
+ Args:
+ kdl_user_name:
+ kdl_user_pwd:
+ """
self.kdl_user_name = kdl_user_name
self.kdl_user_pwd = kdl_user_pwd
self.api_base = "https://dps.kdlapi.com/"
@@ -79,6 +93,14 @@ }
async def get_proxy(self, num: int) -> List[IpInfoModel]:
+ """
+ KuaiDaili implementation
+ Args:
+ num:
+
+ Returns:
+
+ """
uri = "/api/getdps/"
# Prioritize getting IP from cache
@@ -125,6 +147,15 @@
def new_kuai_daili_proxy() -> KuaiDaiLiProxy:
+ """
+ Construct KuaiDaili HTTP instance
+ Supports two environment variable naming formats:
+ 1. Uppercase format: KDL_SECERT_ID, KDL_SIGNATURE, KDL_USER_NAME, KDL_USER_PWD
+ 2. Lowercase format: kdl_secret_id, kdl_signature, kdl_user_name, kdl_user_pwd
+ Prioritize uppercase format, use lowercase format if not exists
+ Returns:
+
+ """
# Support both uppercase and lowercase environment variable formats, prioritize uppercase
kdl_secret_id = os.getenv("KDL_SECERT_ID") or os.getenv("kdl_secret_id", "your_kuaidaili_secret_id")
kdl_signature = os.getenv("KDL_SIGNATURE") or os.getenv("kdl_signature", "your_kuaidaili_signature")
@@ -136,4 +167,4 @@ kdl_signature=kdl_signature,
kdl_user_name=kdl_user_name,
kdl_user_pwd=kdl_user_pwd,
- )+ )
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/proxy/providers/kuaidl_proxy.py |
Improve documentation using docstrings | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/store/zhihu/__init__.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
from typing import List
import config
from base.base_crawler import AbstractStore
from model.m_zhihu import ZhihuComment, ZhihuContent, ZhihuCreator
from ._store_impl import (ZhihuCsvStoreImplement,
ZhihuDbStoreImplement,
ZhihuJsonStoreImplement,
ZhihuJsonlStoreImplement,
ZhihuSqliteStoreImplement,
ZhihuMongoStoreImplement,
ZhihuExcelStoreImplement)
from tools import utils
from var import source_keyword_var
class ZhihuStoreFactory:
STORES = {
"csv": ZhihuCsvStoreImplement,
"db": ZhihuDbStoreImplement,
"postgres": ZhihuDbStoreImplement,
"json": ZhihuJsonStoreImplement,
"jsonl": ZhihuJsonlStoreImplement,
"sqlite": ZhihuSqliteStoreImplement,
"mongodb": ZhihuMongoStoreImplement,
"excel": ZhihuExcelStoreImplement,
}
@staticmethod
def create_store() -> AbstractStore:
store_class = ZhihuStoreFactory.STORES.get(config.SAVE_DATA_OPTION)
if not store_class:
raise ValueError("[ZhihuStoreFactory.create_store] Invalid save option only supported csv or db or json or sqlite or mongodb or excel ...")
return store_class()
async def batch_update_zhihu_contents(contents: List[ZhihuContent]):
if not contents:
return
for content_item in contents:
await update_zhihu_content(content_item)
async def update_zhihu_content(content_item: ZhihuContent):
content_item.source_keyword = source_keyword_var.get()
local_db_item = content_item.model_dump()
local_db_item.update({"last_modify_ts": utils.get_current_timestamp()})
utils.logger.info(f"[store.zhihu.update_zhihu_content] zhihu content: {local_db_item}")
await ZhihuStoreFactory.create_store().store_content(local_db_item)
async def batch_update_zhihu_note_comments(comments: List[ZhihuComment]):
if not comments:
return
for comment_item in comments:
await update_zhihu_content_comment(comment_item)
async def update_zhihu_content_comment(comment_item: ZhihuComment):
local_db_item = comment_item.model_dump()
local_db_item.update({"last_modify_ts": utils.get_current_timestamp()})
utils.logger.info(f"[store.zhihu.update_zhihu_note_comment] zhihu content comment:{local_db_item}")
await ZhihuStoreFactory.create_store().store_comment(local_db_item)
async def save_creator(creator: ZhihuCreator):
if not creator:
return
local_db_item = creator.model_dump()
local_db_item.update({"last_modify_ts": utils.get_current_timestamp()})
await ZhihuStoreFactory.create_store().store_creator(local_db_item) | --- +++ @@ -55,6 +55,14 @@ return store_class()
async def batch_update_zhihu_contents(contents: List[ZhihuContent]):
+ """
+ Batch update Zhihu contents
+ Args:
+ contents:
+
+ Returns:
+
+ """
if not contents:
return
@@ -62,6 +70,14 @@ await update_zhihu_content(content_item)
async def update_zhihu_content(content_item: ZhihuContent):
+ """
+ Update Zhihu content
+ Args:
+ content_item:
+
+ Returns:
+
+ """
content_item.source_keyword = source_keyword_var.get()
local_db_item = content_item.model_dump()
local_db_item.update({"last_modify_ts": utils.get_current_timestamp()})
@@ -71,6 +87,14 @@
async def batch_update_zhihu_note_comments(comments: List[ZhihuComment]):
+ """
+ Batch update Zhihu content comments
+ Args:
+ comments:
+
+ Returns:
+
+ """
if not comments:
return
@@ -79,6 +103,14 @@
async def update_zhihu_content_comment(comment_item: ZhihuComment):
+ """
+ Update Zhihu content comment
+ Args:
+ comment_item:
+
+ Returns:
+
+ """
local_db_item = comment_item.model_dump()
local_db_item.update({"last_modify_ts": utils.get_current_timestamp()})
utils.logger.info(f"[store.zhihu.update_zhihu_note_comment] zhihu content comment:{local_db_item}")
@@ -86,8 +118,16 @@
async def save_creator(creator: ZhihuCreator):
+ """
+ Save Zhihu creator information
+ Args:
+ creator:
+
+ Returns:
+
+ """
if not creator:
return
local_db_item = creator.model_dump()
local_db_item.update({"last_modify_ts": utils.get_current_timestamp()})
- await ZhihuStoreFactory.create_store().store_creator(local_db_item)+ await ZhihuStoreFactory.create_store().store_creator(local_db_item)
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/store/zhihu/__init__.py |
Write docstrings describing each step | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : relakkes@gmail.com
# @Time : 2025/11/25
# @Desc : Auto-refresh proxy Mixin class for use by various platform clients
from typing import TYPE_CHECKING, Optional
from tools import utils
if TYPE_CHECKING:
from proxy.proxy_ip_pool import ProxyIpPool
class ProxyRefreshMixin:
_proxy_ip_pool: Optional["ProxyIpPool"] = None
def init_proxy_pool(self, proxy_ip_pool: Optional["ProxyIpPool"]) -> None:
self._proxy_ip_pool = proxy_ip_pool
async def _refresh_proxy_if_expired(self) -> None:
if self._proxy_ip_pool is None:
return
if self._proxy_ip_pool.is_current_proxy_expired():
utils.logger.info(
f"[{self.__class__.__name__}._refresh_proxy_if_expired] Proxy expired, refreshing..."
)
new_proxy = await self._proxy_ip_pool.get_or_refresh_proxy()
# Update httpx proxy URL
if new_proxy.user and new_proxy.password:
self.proxy = f"http://{new_proxy.user}:{new_proxy.password}@{new_proxy.ip}:{new_proxy.port}"
else:
self.proxy = f"http://{new_proxy.ip}:{new_proxy.port}"
utils.logger.info(
f"[{self.__class__.__name__}._refresh_proxy_if_expired] New proxy: {new_proxy.ip}:{new_proxy.port}"
) | --- +++ @@ -32,13 +32,33 @@
class ProxyRefreshMixin:
+ """
+ Auto-refresh proxy Mixin class
+
+ Usage:
+ 1. Let client class inherit this Mixin
+ 2. Call init_proxy_pool(proxy_ip_pool) in client's __init__
+ 3. Call await _refresh_proxy_if_expired() before each request method call
+
+ Requirements:
+ - client class must have self.proxy attribute to store current proxy URL
+ """
_proxy_ip_pool: Optional["ProxyIpPool"] = None
def init_proxy_pool(self, proxy_ip_pool: Optional["ProxyIpPool"]) -> None:
+ """
+ Initialize proxy pool reference
+ Args:
+ proxy_ip_pool: Proxy IP pool instance
+ """
self._proxy_ip_pool = proxy_ip_pool
async def _refresh_proxy_if_expired(self) -> None:
+ """
+ Check if proxy has expired, automatically refresh if so
+ Call this method before each request to ensure proxy is valid
+ """
if self._proxy_ip_pool is None:
return
@@ -54,4 +74,4 @@ self.proxy = f"http://{new_proxy.ip}:{new_proxy.port}"
utils.logger.info(
f"[{self.__class__.__name__}._refresh_proxy_if_expired] New proxy: {new_proxy.ip}:{new_proxy.port}"
- )+ )
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/proxy/proxy_mixin.py |
Generate docstrings for exported functions | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/proxy/providers/jishu_http_proxy.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : relakkes@gmail.com
# @Time : 2024/4/5 09:32
# @Desc : Deprecated!!!!! Shut down!!! JiSu HTTP proxy IP implementation. Please use KuaiDaili implementation (proxy/providers/kuaidl_proxy.py)
import os
from typing import Dict, List
from urllib.parse import urlencode
import httpx
from proxy import IpCache, IpGetError, ProxyProvider
from proxy.types import IpInfoModel
from tools import utils
class JiSuHttpProxy(ProxyProvider):
def __init__(self, key: str, crypto: str, time_validity_period: int):
self.proxy_brand_name = "JISUHTTP"
self.api_path = "https://api.jisuhttp.com"
self.params = {
"key": key,
"crypto": crypto,
"time": time_validity_period, # IP usage duration, supports 3, 5, 10, 15, 30 minute validity
"type": "json", # Data result is json
"port": "2", # IP protocol: 1:HTTP, 2:HTTPS, 3:SOCKS5
"pw": "1", # Whether to use account password authentication, 1: yes, 0: no, no means whitelist authentication; default is 0
"se": "1", # Whether to show IP expiration time when returning JSON format, 1: show, 0: don't show; default is 0
}
self.ip_cache = IpCache()
async def get_proxy(self, num: int) -> List[IpInfoModel]:
# Prioritize getting IP from cache
ip_cache_list = self.ip_cache.load_all_ip(proxy_brand_name=self.proxy_brand_name)
if len(ip_cache_list) >= num:
return ip_cache_list[:num]
# If the quantity in cache is insufficient, get from IP provider to supplement, then store in cache
need_get_count = num - len(ip_cache_list)
self.params.update({"num": need_get_count})
ip_infos = []
async with httpx.AsyncClient() as client:
url = self.api_path + "/fetchips" + '?' + urlencode(self.params)
utils.logger.info(f"[JiSuHttpProxy.get_proxy] get ip proxy url:{url}")
response = await client.get(url, headers={
"User-Agent": "MediaCrawler https://github.com/NanmiCoder/MediaCrawler",
})
res_dict: Dict = response.json()
if res_dict.get("code") == 0:
data: List[Dict] = res_dict.get("data")
current_ts = utils.get_unix_timestamp()
for ip_item in data:
ip_info_model = IpInfoModel(
ip=ip_item.get("ip"),
port=ip_item.get("port"),
user=ip_item.get("user"),
password=ip_item.get("pass"),
expired_time_ts=utils.get_unix_time_from_time_str(ip_item.get("expire")),
)
ip_key = f"JISUHTTP_{ip_info_model.ip}_{ip_info_model.port}_{ip_info_model.user}_{ip_info_model.password}"
ip_value = ip_info_model.json()
ip_infos.append(ip_info_model)
self.ip_cache.set_ip(ip_key, ip_value, ex=ip_info_model.expired_time_ts - current_ts)
else:
raise IpGetError(res_dict.get("msg", "unkown err"))
return ip_cache_list + ip_infos
def new_jisu_http_proxy() -> JiSuHttpProxy:
return JiSuHttpProxy(
key=os.getenv("jisu_key", ""), # Get JiSu HTTP IP extraction key value through environment variable
crypto=os.getenv("jisu_crypto", ""), # Get JiSu HTTP IP extraction encryption signature through environment variable
time_validity_period=30 # 30 minutes (maximum validity)
) | --- +++ @@ -35,6 +35,11 @@ class JiSuHttpProxy(ProxyProvider):
def __init__(self, key: str, crypto: str, time_validity_period: int):
+ """
+ JiSu HTTP proxy IP implementation
+ :param key: Extraction key value (obtain after registering on the official website)
+ :param crypto: Encryption signature (obtain after registering on the official website)
+ """
self.proxy_brand_name = "JISUHTTP"
self.api_path = "https://api.jisuhttp.com"
self.params = {
@@ -49,6 +54,10 @@ self.ip_cache = IpCache()
async def get_proxy(self, num: int) -> List[IpInfoModel]:
+ """
+ :param num:
+ :return:
+ """
# Prioritize getting IP from cache
ip_cache_list = self.ip_cache.load_all_ip(proxy_brand_name=self.proxy_brand_name)
@@ -87,8 +96,13 @@
def new_jisu_http_proxy() -> JiSuHttpProxy:
+ """
+ Construct JiSu HTTP instance
+ Returns:
+
+ """
return JiSuHttpProxy(
key=os.getenv("jisu_key", ""), # Get JiSu HTTP IP extraction key value through environment variable
crypto=os.getenv("jisu_crypto", ""), # Get JiSu HTTP IP extraction encryption signature through environment variable
time_validity_period=30 # 30 minutes (maximum validity)
- )+ )
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/proxy/providers/jishu_http_proxy.py |
Help me add docstrings to my project | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/proxy/base_proxy.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : relakkes@gmail.com
# @Time : 2023/12/2 11:18
# @Desc : Crawler IP acquisition implementation
# @Url : KuaiDaili HTTP implementation, official documentation: https://www.kuaidaili.com/?ref=ldwkjqipvz6c
import json
from abc import ABC, abstractmethod
from typing import List
import config
from cache.abs_cache import AbstractCache
from cache.cache_factory import CacheFactory
from tools.utils import utils
from .types import IpInfoModel
class IpGetError(Exception):
class ProxyProvider(ABC):
@abstractmethod
async def get_proxy(self, num: int) -> List[IpInfoModel]:
raise NotImplementedError
class IpCache:
def __init__(self):
self.cache_client: AbstractCache = CacheFactory.create_cache(cache_type=config.CACHE_TYPE_REDIS)
def set_ip(self, ip_key: str, ip_value_info: str, ex: int):
self.cache_client.set(key=ip_key, value=ip_value_info, expire_time=ex)
def load_all_ip(self, proxy_brand_name: str) -> List[IpInfoModel]:
all_ip_list: List[IpInfoModel] = []
all_ip_keys: List[str] = self.cache_client.keys(pattern=f"{proxy_brand_name}_*")
try:
for ip_key in all_ip_keys:
ip_value = self.cache_client.get(ip_key)
if not ip_value:
continue
all_ip_list.append(IpInfoModel(**json.loads(ip_value)))
except Exception as e:
utils.logger.error("[IpCache.load_all_ip] get ip err from redis db", e)
return all_ip_list | --- +++ @@ -36,11 +36,17 @@
class IpGetError(Exception):
+ """ ip get error"""
class ProxyProvider(ABC):
@abstractmethod
async def get_proxy(self, num: int) -> List[IpInfoModel]:
+ """
+ Abstract method to get IP, different HTTP proxy providers need to implement this method
+ :param num: Number of IPs to extract
+ :return:
+ """
raise NotImplementedError
@@ -50,9 +56,21 @@ self.cache_client: AbstractCache = CacheFactory.create_cache(cache_type=config.CACHE_TYPE_REDIS)
def set_ip(self, ip_key: str, ip_value_info: str, ex: int):
+ """
+ Set IP with expiration time, Redis is responsible for deletion after expiration
+ :param ip_key:
+ :param ip_value_info:
+ :param ex:
+ :return:
+ """
self.cache_client.set(key=ip_key, value=ip_value_info, expire_time=ex)
def load_all_ip(self, proxy_brand_name: str) -> List[IpInfoModel]:
+ """
+ Load all unexpired IP information from Redis
+ :param proxy_brand_name: Proxy provider name
+ :return:
+ """
all_ip_list: List[IpInfoModel] = []
all_ip_keys: List[str] = self.cache_client.keys(pattern=f"{proxy_brand_name}_*")
try:
@@ -63,4 +81,4 @@ all_ip_list.append(IpInfoModel(**json.loads(ip_value)))
except Exception as e:
utils.logger.error("[IpCache.load_all_ip] get ip err from redis db", e)
- return all_ip_list+ return all_ip_list
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/proxy/base_proxy.py |
Generate consistent docstrings | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/proxy/proxy_ip_pool.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : relakkes@gmail.com
# @Time : 2023/12/2 13:45
# @Desc : IP proxy pool implementation
import random
from typing import Dict, List
import httpx
from tenacity import retry, stop_after_attempt, wait_fixed
import config
from proxy.providers import (
new_kuai_daili_proxy,
new_wandou_http_proxy,
)
from tools import utils
from .base_proxy import ProxyProvider
from .types import IpInfoModel, ProviderNameEnum
class ProxyIpPool:
def __init__(
self, ip_pool_count: int, enable_validate_ip: bool, ip_provider: ProxyProvider
) -> None:
self.valid_ip_url = "https://echo.apifox.cn/" # URL to validate if IP is valid
self.ip_pool_count = ip_pool_count
self.enable_validate_ip = enable_validate_ip
self.proxy_list: List[IpInfoModel] = []
self.ip_provider: ProxyProvider = ip_provider
self.current_proxy: IpInfoModel | None = None # Currently used proxy
async def load_proxies(self) -> None:
self.proxy_list = await self.ip_provider.get_proxy(self.ip_pool_count)
async def _is_valid_proxy(self, proxy: IpInfoModel) -> bool:
utils.logger.info(
f"[ProxyIpPool._is_valid_proxy] testing {proxy.ip} is it valid "
)
try:
# httpx 0.28.1 requires passing proxy URL string directly, not a dictionary
if proxy.user and proxy.password:
proxy_url = f"http://{proxy.user}:{proxy.password}@{proxy.ip}:{proxy.port}"
else:
proxy_url = f"http://{proxy.ip}:{proxy.port}"
async with httpx.AsyncClient(proxy=proxy_url) as client:
response = await client.get(self.valid_ip_url)
if response.status_code == 200:
return True
else:
return False
except Exception as e:
utils.logger.info(
f"[ProxyIpPool._is_valid_proxy] testing {proxy.ip} err: {e}"
)
raise e
@retry(stop=stop_after_attempt(3), wait=wait_fixed(1))
async def get_proxy(self) -> IpInfoModel:
if len(self.proxy_list) == 0:
await self._reload_proxies()
proxy = random.choice(self.proxy_list)
self.proxy_list.remove(proxy) # Remove an IP once extracted
if self.enable_validate_ip:
if not await self._is_valid_proxy(proxy):
raise Exception(
"[ProxyIpPool.get_proxy] current ip invalid and again get it"
)
self.current_proxy = proxy # Save currently used proxy
return proxy
def is_current_proxy_expired(self, buffer_seconds: int = 30) -> bool:
if self.current_proxy is None:
return True
return self.current_proxy.is_expired(buffer_seconds)
async def get_or_refresh_proxy(self, buffer_seconds: int = 30) -> IpInfoModel:
if self.is_current_proxy_expired(buffer_seconds):
utils.logger.info(
f"[ProxyIpPool.get_or_refresh_proxy] Current proxy expired or not set, getting new proxy..."
)
return await self.get_proxy()
return self.current_proxy
async def _reload_proxies(self):
self.proxy_list = []
await self.load_proxies()
IpProxyProvider: Dict[str, ProxyProvider] = {
ProviderNameEnum.KUAI_DAILI_PROVIDER.value: new_kuai_daili_proxy(),
ProviderNameEnum.WANDOU_HTTP_PROVIDER.value: new_wandou_http_proxy(),
}
async def create_ip_pool(ip_pool_count: int, enable_validate_ip: bool) -> ProxyIpPool:
pool = ProxyIpPool(
ip_pool_count=ip_pool_count,
enable_validate_ip=enable_validate_ip,
ip_provider=IpProxyProvider.get(config.IP_PROXY_PROVIDER_NAME),
)
await pool.load_proxies()
return pool
if __name__ == "__main__":
pass | --- +++ @@ -43,6 +43,13 @@ def __init__(
self, ip_pool_count: int, enable_validate_ip: bool, ip_provider: ProxyProvider
) -> None:
+ """
+
+ Args:
+ ip_pool_count:
+ enable_validate_ip:
+ ip_provider:
+ """
self.valid_ip_url = "https://echo.apifox.cn/" # URL to validate if IP is valid
self.ip_pool_count = ip_pool_count
self.enable_validate_ip = enable_validate_ip
@@ -51,9 +58,19 @@ self.current_proxy: IpInfoModel | None = None # Currently used proxy
async def load_proxies(self) -> None:
+ """
+ Load IP proxies
+ Returns:
+
+ """
self.proxy_list = await self.ip_provider.get_proxy(self.ip_pool_count)
async def _is_valid_proxy(self, proxy: IpInfoModel) -> bool:
+ """
+ Validate if proxy IP is valid
+ :param proxy:
+ :return:
+ """
utils.logger.info(
f"[ProxyIpPool._is_valid_proxy] testing {proxy.ip} is it valid "
)
@@ -78,6 +95,10 @@
@retry(stop=stop_after_attempt(3), wait=wait_fixed(1))
async def get_proxy(self) -> IpInfoModel:
+ """
+ Randomly extract a proxy IP from the proxy pool
+ :return:
+ """
if len(self.proxy_list) == 0:
await self._reload_proxies()
@@ -92,11 +113,26 @@ return proxy
def is_current_proxy_expired(self, buffer_seconds: int = 30) -> bool:
+ """
+ Check if current proxy has expired
+ Args:
+ buffer_seconds: Buffer time (seconds), how many seconds ahead to consider expired
+ Returns:
+ bool: True means expired or no current proxy, False means still valid
+ """
if self.current_proxy is None:
return True
return self.current_proxy.is_expired(buffer_seconds)
async def get_or_refresh_proxy(self, buffer_seconds: int = 30) -> IpInfoModel:
+ """
+ Get current proxy, automatically refresh if expired
+ Call this method before each request to ensure proxy is valid
+ Args:
+ buffer_seconds: Buffer time (seconds), how many seconds ahead to consider expired
+ Returns:
+ IpInfoModel: Valid proxy IP information
+ """
if self.is_current_proxy_expired(buffer_seconds):
utils.logger.info(
f"[ProxyIpPool.get_or_refresh_proxy] Current proxy expired or not set, getting new proxy..."
@@ -105,6 +141,10 @@ return self.current_proxy
async def _reload_proxies(self):
+ """
+ Reload proxy pool
+ :return:
+ """
self.proxy_list = []
await self.load_proxies()
@@ -116,6 +156,12 @@
async def create_ip_pool(ip_pool_count: int, enable_validate_ip: bool) -> ProxyIpPool:
+ """
+ Create IP proxy pool
+ :param ip_pool_count: Number of IPs in the pool
+ :param enable_validate_ip: Whether to enable IP proxy validation
+ :return:
+ """
pool = ProxyIpPool(
ip_pool_count=ip_pool_count,
enable_validate_ip=enable_validate_ip,
@@ -126,4 +172,4 @@
if __name__ == "__main__":
- pass+ pass
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/proxy/proxy_ip_pool.py |
Document functions with clear intent | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/tools/file_header_manager.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
import os
import re
import sys
from typing import List, Tuple
# Project configuration
REPO_URL = "https://github.com/NanmiCoder/MediaCrawler"
GITHUB_PROFILE = "https://github.com/NanmiCoder"
EMAIL = "relakkes@gmail.com"
COPYRIGHT_YEAR = "2025"
LICENSE_TYPE = "NON-COMMERCIAL LEARNING LICENSE 1.1"
# Disclaimer standard text
DISCLAIMER = """# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。"""
def get_file_relative_path(file_path: str, project_root: str) -> str:
return os.path.relpath(file_path, project_root)
def generate_copyright_header(relative_path: str) -> str:
file_url = f"{REPO_URL}/blob/main/{relative_path}"
header = f"""# Copyright (c) {COPYRIGHT_YEAR} {EMAIL}
#
# This file is part of MediaCrawler project.
# Repository: {file_url}
# GitHub: {GITHUB_PROFILE}
# Licensed under {LICENSE_TYPE}
#"""
return header
def has_copyright_header(content: str) -> bool:
# Check if contains Copyright keyword
return "Copyright (c)" in content and "MediaCrawler project" in content
def has_disclaimer(content: str) -> bool:
return "声明:本代码仅供学习和研究目的使用" in content
def find_insert_position(lines: List[str]) -> Tuple[int, bool]:
insert_pos = 0
has_encoding = False
# Check if first line is shebang
if lines and lines[0].startswith('#!'):
insert_pos = 1
# Check encoding declaration (usually on line 1 or 2)
for i in range(insert_pos, min(insert_pos + 2, len(lines))):
if i < len(lines):
line = lines[i].strip()
# Match # -*- coding: utf-8 -*- or # coding: utf-8 etc.
if re.match(r'#.*coding[:=]\s*([-\w.]+)', line):
has_encoding = True
insert_pos = i + 1
break
return insert_pos, has_encoding
def process_file(file_path: str, project_root: str, dry_run: bool = False) -> Tuple[bool, str]:
try:
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
lines = content.splitlines(keepends=True)
# Skip if already has copyright header
if has_copyright_header(content):
return False, f"✓ Already has copyright header: {file_path}"
# Get relative path
relative_path = get_file_relative_path(file_path, project_root)
# Generate copyright header
copyright_header = generate_copyright_header(relative_path)
# Find insert position
insert_pos, has_encoding = find_insert_position(lines)
# Build new file content
new_lines = []
# Add encoding declaration if not present
if not has_encoding:
new_lines.append("# -*- coding: utf-8 -*-\n")
# Add front part (shebang and encoding declaration)
new_lines.extend(lines[:insert_pos])
# Add copyright header
new_lines.append(copyright_header + "\n")
# Add disclaimer if file doesn't have one
if not has_disclaimer(content):
new_lines.append(DISCLAIMER + "\n")
# Add empty line (if next line is not empty)
if insert_pos < len(lines) and lines[insert_pos].strip():
new_lines.append("\n")
# Add remaining content
new_lines.extend(lines[insert_pos:])
# Write to file if not dry run
if not dry_run:
with open(file_path, 'w', encoding='utf-8') as f:
f.writelines(new_lines)
return True, f"✓ Updated: {file_path}"
else:
return True, f"→ Would update: {file_path}"
except Exception as e:
return False, f"✗ Error processing {file_path}: {str(e)}"
def find_python_files(root_dir: str, exclude_patterns: List[str] = None) -> List[str]:
if exclude_patterns is None:
exclude_patterns = ['venv', '.venv', 'node_modules', '__pycache__', '.git', 'build', 'dist', '.eggs']
python_files = []
for root, dirs, files in os.walk(root_dir):
# Exclude specific directories
dirs[:] = [d for d in dirs if d not in exclude_patterns and not d.startswith('.')]
for file in files:
if file.endswith('.py'):
python_files.append(os.path.join(root, file))
return sorted(python_files)
def main():
import argparse
parser = argparse.ArgumentParser(description='Python file header copyright declaration management tool')
parser.add_argument('files', nargs='*', help='File paths to process (optional, defaults to all .py files)')
parser.add_argument('--dry-run', action='store_true', help='Check only without modifying files')
parser.add_argument('--project-root', default=None, help='Project root directory (defaults to current directory)')
parser.add_argument('--check', action='store_true', help='Check mode, return non-zero exit code if files missing copyright declaration')
args = parser.parse_args()
# Determine project root directory
if args.project_root:
project_root = os.path.abspath(args.project_root)
else:
# Assume this script is in tools/ directory
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print(f"Project root: {project_root}")
print(f"Mode: {'DRY RUN' if args.dry_run else 'UPDATE'}")
print("-" * 60)
# Get list of files to process
if args.files:
# Process specified files
files_to_process = [os.path.abspath(f) for f in args.files if f.endswith('.py')]
else:
# Process all Python files
files_to_process = find_python_files(project_root)
print(f"Found {len(files_to_process)} Python files to process\n")
# Process files
updated_count = 0
skipped_count = 0
error_count = 0
for file_path in files_to_process:
modified, message = process_file(file_path, project_root, args.dry_run or args.check)
print(message)
if "Error" in message:
error_count += 1
elif modified:
updated_count += 1
else:
skipped_count += 1
# Print summary
print("\n" + "=" * 60)
print(f"Summary:")
print(f" Total files: {len(files_to_process)}")
print(f" Updated/Need update: {updated_count}")
print(f" Already compliant: {skipped_count}")
print(f" Errors: {error_count}")
print("=" * 60)
# Return non-zero exit code in check mode if files need update
if args.check and updated_count > 0:
sys.exit(1)
elif error_count > 0:
sys.exit(1)
else:
sys.exit(0)
if __name__ == '__main__':
main() | --- +++ @@ -16,6 +16,15 @@ # 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
+"""
+File header copyright declaration management tool
+
+Features:
+- Automatically add standardized copyright declaration and disclaimer to Python files
+- Intelligently detect existing file headers (encoding declaration, author info, disclaimer, etc.)
+- Insert copyright info at appropriate position without breaking existing content
+- Support batch processing and single file check mode
+"""
import os
import re
@@ -42,10 +51,29 @@
def get_file_relative_path(file_path: str, project_root: str) -> str:
+ """
+ Get file path relative to project root
+
+ Args:
+ file_path: File absolute path
+ project_root: Project root directory
+
+ Returns:
+ Relative path string
+ """
return os.path.relpath(file_path, project_root)
def generate_copyright_header(relative_path: str) -> str:
+ """
+ Generate copyright declaration header
+
+ Args:
+ relative_path: File path relative to project root
+
+ Returns:
+ Formatted copyright declaration string
+ """
file_url = f"{REPO_URL}/blob/main/{relative_path}"
header = f"""# Copyright (c) {COPYRIGHT_YEAR} {EMAIL}
@@ -60,15 +88,42 @@
def has_copyright_header(content: str) -> bool:
+ """
+ Check if file already contains copyright declaration
+
+ Args:
+ content: File content
+
+ Returns:
+ True if already contains copyright declaration
+ """
# Check if contains Copyright keyword
return "Copyright (c)" in content and "MediaCrawler project" in content
def has_disclaimer(content: str) -> bool:
+ """
+ Check if file already contains disclaimer
+
+ Args:
+ content: File content
+
+ Returns:
+ True if already contains disclaimer
+ """
return "声明:本代码仅供学习和研究目的使用" in content
def find_insert_position(lines: List[str]) -> Tuple[int, bool]:
+ """
+ Find position to insert copyright declaration
+
+ Args:
+ lines: List of file content lines
+
+ Returns:
+ (insert line number, whether encoding declaration needs to be added)
+ """
insert_pos = 0
has_encoding = False
@@ -90,6 +145,17 @@
def process_file(file_path: str, project_root: str, dry_run: bool = False) -> Tuple[bool, str]:
+ """
+ Process single Python file
+
+ Args:
+ file_path: File path
+ project_root: Project root directory
+ dry_run: Check only without modification
+
+ Returns:
+ (whether modification needed, status message)
+ """
try:
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
@@ -145,6 +211,16 @@
def find_python_files(root_dir: str, exclude_patterns: List[str] = None) -> List[str]:
+ """
+ Find all Python files
+
+ Args:
+ root_dir: Root directory
+ exclude_patterns: Directory patterns to exclude
+
+ Returns:
+ List of Python file paths
+ """
if exclude_patterns is None:
exclude_patterns = ['venv', '.venv', 'node_modules', '__pycache__', '.git', 'build', 'dist', '.eggs']
@@ -162,6 +238,7 @@
def main():
+ """Main function"""
import argparse
parser = argparse.ArgumentParser(description='Python file header copyright declaration management tool')
@@ -228,4 +305,4 @@
if __name__ == '__main__':
- main()+ main()
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/tools/file_header_manager.py |
Generate NumPy-style docstrings | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/tools/time_util.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : relakkes@gmail.com
# @Time : 2023/12/2 12:52
# @Desc : Time utility functions
import time
from datetime import datetime, timedelta, timezone
def get_current_timestamp() -> int:
return int(time.time() * 1000)
def get_current_time() -> str:
return time.strftime('%Y-%m-%d %X', time.localtime())
def get_current_time_hour() -> str:
return time.strftime('%Y-%m-%d-%H', time.localtime())
def get_current_date() -> str:
return time.strftime('%Y-%m-%d', time.localtime())
def get_time_str_from_unix_time(unixtime):
if int(unixtime) > 1000000000000:
unixtime = int(unixtime) / 1000
return time.strftime('%Y-%m-%d %X', time.localtime(unixtime))
def get_date_str_from_unix_time(unixtime):
if int(unixtime) > 1000000000000:
unixtime = int(unixtime) / 1000
return time.strftime('%Y-%m-%d', time.localtime(unixtime))
def get_unix_time_from_time_str(time_str):
try:
format_str = "%Y-%m-%d %H:%M:%S"
tm_object = time.strptime(str(time_str), format_str)
return int(time.mktime(tm_object))
except Exception as e:
return 0
pass
def get_unix_timestamp():
return int(time.time())
def rfc2822_to_china_datetime(rfc2822_time):
# Define RFC 2822 format
rfc2822_format = "%a %b %d %H:%M:%S %z %Y"
# Convert RFC 2822 time string to datetime object
dt_object = datetime.strptime(rfc2822_time, rfc2822_format)
# Convert datetime object timezone to China timezone
dt_object_china = dt_object.astimezone(timezone(timedelta(hours=8)))
return dt_object_china
def rfc2822_to_timestamp(rfc2822_time):
# Define RFC 2822 format
rfc2822_format = "%a %b %d %H:%M:%S %z %Y"
# Convert RFC 2822 time string to datetime object
dt_object = datetime.strptime(rfc2822_time, rfc2822_format)
# Convert datetime object to UTC time
dt_utc = dt_object.replace(tzinfo=timezone.utc)
# Calculate Unix timestamp from UTC time
timestamp = int(dt_utc.timestamp())
return timestamp
if __name__ == '__main__':
# Example usage
_rfc2822_time = "Sat Dec 23 17:12:54 +0800 2023"
print(rfc2822_to_china_datetime(_rfc2822_time)) | --- +++ @@ -28,32 +28,63 @@
def get_current_timestamp() -> int:
+ """
+ Get current timestamp (13 digits): 1701493264496
+ :return:
+ """
return int(time.time() * 1000)
def get_current_time() -> str:
+ """
+ Get current time: '2023-12-02 13:01:23'
+ :return:
+ """
return time.strftime('%Y-%m-%d %X', time.localtime())
def get_current_time_hour() -> str:
+ """
+ Get current time with hour: '2023-12-02-13'
+ :return:
+ """
return time.strftime('%Y-%m-%d-%H', time.localtime())
def get_current_date() -> str:
+ """
+ Get current date: '2023-12-02'
+ :return:
+ """
return time.strftime('%Y-%m-%d', time.localtime())
def get_time_str_from_unix_time(unixtime):
+ """
+ Unix integer timestamp ==> datetime string
+ :param unixtime:
+ :return:
+ """
if int(unixtime) > 1000000000000:
unixtime = int(unixtime) / 1000
return time.strftime('%Y-%m-%d %X', time.localtime(unixtime))
def get_date_str_from_unix_time(unixtime):
+ """
+ Unix integer timestamp ==> date string
+ :param unixtime:
+ :return:
+ """
if int(unixtime) > 1000000000000:
unixtime = int(unixtime) / 1000
return time.strftime('%Y-%m-%d', time.localtime(unixtime))
def get_unix_time_from_time_str(time_str):
+ """
+ Time string ==> Unix integer timestamp, precise to seconds
+ :param time_str:
+ :return:
+ """
try:
format_str = "%Y-%m-%d %H:%M:%S"
tm_object = time.strptime(str(time_str), format_str)
@@ -98,4 +129,4 @@ if __name__ == '__main__':
# Example usage
_rfc2822_time = "Sat Dec 23 17:12:54 +0800 2023"
- print(rfc2822_to_china_datetime(_rfc2822_time))+ print(rfc2822_to_china_datetime(_rfc2822_time))
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/tools/time_util.py |
Generate NumPy-style docstrings | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/store/kuaishou/_store_impl.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : persist1@126.com
# @Time : 2025/9/5 19:34
# @Desc : Kuaishou storage implementation class
import asyncio
import csv
import json
import os
import pathlib
from typing import Dict
from tools.async_file_writer import AsyncFileWriter
import aiofiles
from sqlalchemy import select
import config
from base.base_crawler import AbstractStore
from database.db_session import get_session
from database.models import KuaishouVideo, KuaishouVideoComment
from tools import utils, words
from var import crawler_type_var
from database.mongodb_store_base import MongoDBStoreBase
def calculate_number_of_files(file_store_path: str) -> int:
if not os.path.exists(file_store_path):
return 1
try:
return max([int(file_name.split("_")[0]) for file_name in os.listdir(file_store_path)]) + 1
except ValueError:
return 1
class KuaishouCsvStoreImplement(AbstractStore):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.writer = AsyncFileWriter(platform="kuaishou", crawler_type=crawler_type_var.get())
async def store_content(self, content_item: Dict):
await self.writer.write_to_csv(item_type="contents", item=content_item)
async def store_comment(self, comment_item: Dict):
await self.writer.write_to_csv(item_type="comments", item=comment_item)
async def store_creator(self, creator: Dict):
pass
class KuaishouDbStoreImplement(AbstractStore):
async def store_creator(self, creator: Dict):
pass
async def store_content(self, content_item: Dict):
video_id = content_item.get("video_id")
async with get_session() as session:
result = await session.execute(select(KuaishouVideo).where(KuaishouVideo.video_id == video_id))
video_detail = result.scalar_one_or_none()
if not video_detail:
content_item["add_ts"] = utils.get_current_timestamp()
new_content = KuaishouVideo(**content_item)
session.add(new_content)
else:
for key, value in content_item.items():
if hasattr(video_detail, key):
setattr(video_detail, key, value)
await session.commit()
async def store_comment(self, comment_item: Dict):
comment_id = comment_item.get("comment_id")
async with get_session() as session:
result = await session.execute(
select(KuaishouVideoComment).where(KuaishouVideoComment.comment_id == comment_id))
comment_detail = result.scalar_one_or_none()
if not comment_detail:
comment_item["add_ts"] = utils.get_current_timestamp()
new_comment = KuaishouVideoComment(**comment_item)
session.add(new_comment)
else:
for key, value in comment_item.items():
if hasattr(comment_detail, key):
setattr(comment_detail, key, value)
await session.commit()
class KuaishouJsonStoreImplement(AbstractStore):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.writer = AsyncFileWriter(platform="kuaishou", crawler_type=crawler_type_var.get())
async def store_content(self, content_item: Dict):
await self.writer.write_single_item_to_json(item_type="contents", item=content_item)
async def store_comment(self, comment_item: Dict):
await self.writer.write_single_item_to_json(item_type="comments", item=comment_item)
async def store_creator(self, creator: Dict):
pass
class KuaishouJsonlStoreImplement(AbstractStore):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.writer = AsyncFileWriter(platform="kuaishou", crawler_type=crawler_type_var.get())
async def store_content(self, content_item: Dict):
await self.writer.write_to_jsonl(item_type="contents", item=content_item)
async def store_comment(self, comment_item: Dict):
await self.writer.write_to_jsonl(item_type="comments", item=comment_item)
async def store_creator(self, creator: Dict):
pass
class KuaishouSqliteStoreImplement(KuaishouDbStoreImplement):
async def store_creator(self, creator: Dict):
pass
class KuaishouMongoStoreImplement(AbstractStore):
def __init__(self):
self.mongo_store = MongoDBStoreBase(collection_prefix="kuaishou")
async def store_content(self, content_item: Dict):
video_id = content_item.get("video_id")
if not video_id:
return
await self.mongo_store.save_or_update(
collection_suffix="contents",
query={"video_id": video_id},
data=content_item
)
utils.logger.info(f"[KuaishouMongoStoreImplement.store_content] Saved video {video_id} to MongoDB")
async def store_comment(self, comment_item: Dict):
comment_id = comment_item.get("comment_id")
if not comment_id:
return
await self.mongo_store.save_or_update(
collection_suffix="comments",
query={"comment_id": comment_id},
data=comment_item
)
utils.logger.info(f"[KuaishouMongoStoreImplement.store_comment] Saved comment {comment_id} to MongoDB")
async def store_creator(self, creator_item: Dict):
user_id = creator_item.get("user_id")
if not user_id:
return
await self.mongo_store.save_or_update(
collection_suffix="creators",
query={"user_id": user_id},
data=creator_item
)
utils.logger.info(f"[KuaishouMongoStoreImplement.store_creator] Saved creator {user_id} to MongoDB")
class KuaishouExcelStoreImplement:
def __new__(cls, *args, **kwargs):
from store.excel_store_base import ExcelStoreBase
return ExcelStoreBase.get_instance(
platform="kuaishou",
crawler_type=crawler_type_var.get()
) | --- +++ @@ -43,6 +43,12 @@
def calculate_number_of_files(file_store_path: str) -> int:
+ """Calculate the prefix sorting number for data save files, supporting writing to different files for each run
+ Args:
+ file_store_path;
+ Returns:
+ file nums
+ """
if not os.path.exists(file_store_path):
return 1
try:
@@ -57,9 +63,25 @@ self.writer = AsyncFileWriter(platform="kuaishou", crawler_type=crawler_type_var.get())
async def store_content(self, content_item: Dict):
+ """
+ Kuaishou content CSV storage implementation
+ Args:
+ content_item: note item dict
+
+ Returns:
+
+ """
await self.writer.write_to_csv(item_type="contents", item=content_item)
async def store_comment(self, comment_item: Dict):
+ """
+ Kuaishou comment CSV storage implementation
+ Args:
+ comment_item: comment item dict
+
+ Returns:
+
+ """
await self.writer.write_to_csv(item_type="comments", item=comment_item)
async def store_creator(self, creator: Dict):
@@ -71,6 +93,11 @@ pass
async def store_content(self, content_item: Dict):
+ """
+ Kuaishou content DB storage implementation
+ Args:
+ content_item: content item dict
+ """
video_id = content_item.get("video_id")
async with get_session() as session:
result = await session.execute(select(KuaishouVideo).where(KuaishouVideo.video_id == video_id))
@@ -87,6 +114,11 @@ await session.commit()
async def store_comment(self, comment_item: Dict):
+ """
+ Kuaishou comment DB storage implementation
+ Args:
+ comment_item: comment item dict
+ """
comment_id = comment_item.get("comment_id")
async with get_session() as session:
result = await session.execute(
@@ -110,9 +142,25 @@ self.writer = AsyncFileWriter(platform="kuaishou", crawler_type=crawler_type_var.get())
async def store_content(self, content_item: Dict):
+ """
+ content JSON storage implementation
+ Args:
+ content_item:
+
+ Returns:
+
+ """
await self.writer.write_single_item_to_json(item_type="contents", item=content_item)
async def store_comment(self, comment_item: Dict):
+ """
+ comment JSON storage implementation
+ Args:
+ comment_item:
+
+ Returns:
+
+ """
await self.writer.write_single_item_to_json(item_type="comments", item=comment_item)
async def store_creator(self, creator: Dict):
@@ -140,11 +188,17 @@
class KuaishouMongoStoreImplement(AbstractStore):
+ """Kuaishou MongoDB storage implementation"""
def __init__(self):
self.mongo_store = MongoDBStoreBase(collection_prefix="kuaishou")
async def store_content(self, content_item: Dict):
+ """
+ Store video content to MongoDB
+ Args:
+ content_item: Video content data
+ """
video_id = content_item.get("video_id")
if not video_id:
return
@@ -157,6 +211,11 @@ utils.logger.info(f"[KuaishouMongoStoreImplement.store_content] Saved video {video_id} to MongoDB")
async def store_comment(self, comment_item: Dict):
+ """
+ Store comment to MongoDB
+ Args:
+ comment_item: Comment data
+ """
comment_id = comment_item.get("comment_id")
if not comment_id:
return
@@ -169,6 +228,11 @@ utils.logger.info(f"[KuaishouMongoStoreImplement.store_comment] Saved comment {comment_id} to MongoDB")
async def store_creator(self, creator_item: Dict):
+ """
+ Store creator information to MongoDB
+ Args:
+ creator_item: Creator data
+ """
user_id = creator_item.get("user_id")
if not user_id:
return
@@ -182,10 +246,11 @@
class KuaishouExcelStoreImplement:
+ """Kuaishou Excel storage implementation - Global singleton"""
def __new__(cls, *args, **kwargs):
from store.excel_store_base import ExcelStoreBase
return ExcelStoreBase.get_instance(
platform="kuaishou",
crawler_type=crawler_type_var.get()
- )+ )
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/store/kuaishou/_store_impl.py |
Help me write clear docstrings | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/store/douyin/_store_impl.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : persist1@126.com
# @Time : 2025/9/5 19:34
# @Desc : Douyin storage implementation class
import asyncio
import json
import os
import pathlib
from typing import Dict
from sqlalchemy import select
import config
from base.base_crawler import AbstractStore
from database.db_session import get_session
from database.models import DouyinAweme, DouyinAwemeComment, DyCreator
from tools import utils, words
from tools.async_file_writer import AsyncFileWriter
from var import crawler_type_var
from database.mongodb_store_base import MongoDBStoreBase
class DouyinCsvStoreImplement(AbstractStore):
def __init__(self):
self.file_writer = AsyncFileWriter(
crawler_type=crawler_type_var.get(),
platform="douyin"
)
async def store_content(self, content_item: Dict):
await self.file_writer.write_to_csv(
item=content_item,
item_type="contents"
)
async def store_comment(self, comment_item: Dict):
await self.file_writer.write_to_csv(
item=comment_item,
item_type="comments"
)
async def store_creator(self, creator: Dict):
await self.file_writer.write_to_csv(
item=creator,
item_type="creators"
)
class DouyinDbStoreImplement(AbstractStore):
async def store_content(self, content_item: Dict):
aweme_id = int(content_item.get("aweme_id"))
async with get_session() as session:
result = await session.execute(select(DouyinAweme).where(DouyinAweme.aweme_id == aweme_id))
aweme_detail = result.scalar_one_or_none()
if not aweme_detail:
content_item["add_ts"] = utils.get_current_timestamp()
if content_item.get("title"):
new_content = DouyinAweme(**content_item)
session.add(new_content)
else:
for key, value in content_item.items():
setattr(aweme_detail, key, value)
await session.commit()
async def store_comment(self, comment_item: Dict):
comment_id = int(comment_item.get("comment_id"))
async with get_session() as session:
result = await session.execute(select(DouyinAwemeComment).where(DouyinAwemeComment.comment_id == comment_id))
comment_detail = result.scalar_one_or_none()
if not comment_detail:
comment_item["add_ts"] = utils.get_current_timestamp()
new_comment = DouyinAwemeComment(**comment_item)
session.add(new_comment)
else:
for key, value in comment_item.items():
setattr(comment_detail, key, value)
await session.commit()
async def store_creator(self, creator: Dict):
user_id = creator.get("user_id")
async with get_session() as session:
result = await session.execute(select(DyCreator).where(DyCreator.user_id == user_id))
user_detail = result.scalar_one_or_none()
if not user_detail:
creator["add_ts"] = utils.get_current_timestamp()
new_creator = DyCreator(**creator)
session.add(new_creator)
else:
for key, value in creator.items():
setattr(user_detail, key, value)
await session.commit()
class DouyinJsonStoreImplement(AbstractStore):
def __init__(self):
self.file_writer = AsyncFileWriter(
crawler_type=crawler_type_var.get(),
platform="douyin"
)
async def store_content(self, content_item: Dict):
await self.file_writer.write_single_item_to_json(
item=content_item,
item_type="contents"
)
async def store_comment(self, comment_item: Dict):
await self.file_writer.write_single_item_to_json(
item=comment_item,
item_type="comments"
)
async def store_creator(self, creator: Dict):
await self.file_writer.write_single_item_to_json(
item=creator,
item_type="creators"
)
class DouyinJsonlStoreImplement(AbstractStore):
def __init__(self):
self.file_writer = AsyncFileWriter(
crawler_type=crawler_type_var.get(),
platform="douyin"
)
async def store_content(self, content_item: Dict):
await self.file_writer.write_to_jsonl(
item=content_item,
item_type="contents"
)
async def store_comment(self, comment_item: Dict):
await self.file_writer.write_to_jsonl(
item=comment_item,
item_type="comments"
)
async def store_creator(self, creator: Dict):
await self.file_writer.write_to_jsonl(
item=creator,
item_type="creators"
)
class DouyinSqliteStoreImplement(DouyinDbStoreImplement):
pass
class DouyinMongoStoreImplement(AbstractStore):
def __init__(self):
self.mongo_store = MongoDBStoreBase(collection_prefix="douyin")
async def store_content(self, content_item: Dict):
aweme_id = content_item.get("aweme_id")
if not aweme_id:
return
await self.mongo_store.save_or_update(
collection_suffix="contents",
query={"aweme_id": aweme_id},
data=content_item
)
utils.logger.info(f"[DouyinMongoStoreImplement.store_content] Saved aweme {aweme_id} to MongoDB")
async def store_comment(self, comment_item: Dict):
comment_id = comment_item.get("comment_id")
if not comment_id:
return
await self.mongo_store.save_or_update(
collection_suffix="comments",
query={"comment_id": comment_id},
data=comment_item
)
utils.logger.info(f"[DouyinMongoStoreImplement.store_comment] Saved comment {comment_id} to MongoDB")
async def store_creator(self, creator_item: Dict):
user_id = creator_item.get("user_id")
if not user_id:
return
await self.mongo_store.save_or_update(
collection_suffix="creators",
query={"user_id": user_id},
data=creator_item
)
utils.logger.info(f"[DouyinMongoStoreImplement.store_creator] Saved creator {user_id} to MongoDB")
class DouyinExcelStoreImplement:
def __new__(cls, *args, **kwargs):
from store.excel_store_base import ExcelStoreBase
return ExcelStoreBase.get_instance(
platform="douyin",
crawler_type=crawler_type_var.get()
) | --- +++ @@ -48,18 +48,42 @@ )
async def store_content(self, content_item: Dict):
+ """
+ Douyin content CSV storage implementation
+ Args:
+ content_item: note item dict
+
+ Returns:
+
+ """
await self.file_writer.write_to_csv(
item=content_item,
item_type="contents"
)
async def store_comment(self, comment_item: Dict):
+ """
+ Douyin comment CSV storage implementation
+ Args:
+ comment_item: comment item dict
+
+ Returns:
+
+ """
await self.file_writer.write_to_csv(
item=comment_item,
item_type="comments"
)
async def store_creator(self, creator: Dict):
+ """
+ Douyin creator CSV storage implementation
+ Args:
+ creator: creator item dict
+
+ Returns:
+
+ """
await self.file_writer.write_to_csv(
item=creator,
item_type="creators"
@@ -68,6 +92,11 @@
class DouyinDbStoreImplement(AbstractStore):
async def store_content(self, content_item: Dict):
+ """
+ Douyin content DB storage implementation
+ Args:
+ content_item: content item dict
+ """
aweme_id = int(content_item.get("aweme_id"))
async with get_session() as session:
result = await session.execute(select(DouyinAweme).where(DouyinAweme.aweme_id == aweme_id))
@@ -84,6 +113,11 @@ await session.commit()
async def store_comment(self, comment_item: Dict):
+ """
+ Douyin comment DB storage implementation
+ Args:
+ comment_item: comment item dict
+ """
comment_id = int(comment_item.get("comment_id"))
async with get_session() as session:
result = await session.execute(select(DouyinAwemeComment).where(DouyinAwemeComment.comment_id == comment_id))
@@ -99,6 +133,11 @@ await session.commit()
async def store_creator(self, creator: Dict):
+ """
+ Douyin creator DB storage implementation
+ Args:
+ creator: creator dict
+ """
user_id = creator.get("user_id")
async with get_session() as session:
result = await session.execute(select(DyCreator).where(DyCreator.user_id == user_id))
@@ -122,18 +161,42 @@ )
async def store_content(self, content_item: Dict):
+ """
+ content JSON storage implementation
+ Args:
+ content_item:
+
+ Returns:
+
+ """
await self.file_writer.write_single_item_to_json(
item=content_item,
item_type="contents"
)
async def store_comment(self, comment_item: Dict):
+ """
+ comment JSON storage implementation
+ Args:
+ comment_item:
+
+ Returns:
+
+ """
await self.file_writer.write_single_item_to_json(
item=comment_item,
item_type="comments"
)
async def store_creator(self, creator: Dict):
+ """
+ creator JSON storage implementation
+ Args:
+ creator:
+
+ Returns:
+
+ """
await self.file_writer.write_single_item_to_json(
item=creator,
item_type="creators"
@@ -172,11 +235,17 @@
class DouyinMongoStoreImplement(AbstractStore):
+ """Douyin MongoDB storage implementation"""
def __init__(self):
self.mongo_store = MongoDBStoreBase(collection_prefix="douyin")
async def store_content(self, content_item: Dict):
+ """
+ Store video content to MongoDB
+ Args:
+ content_item: Video content data
+ """
aweme_id = content_item.get("aweme_id")
if not aweme_id:
return
@@ -189,6 +258,11 @@ utils.logger.info(f"[DouyinMongoStoreImplement.store_content] Saved aweme {aweme_id} to MongoDB")
async def store_comment(self, comment_item: Dict):
+ """
+ Store comment to MongoDB
+ Args:
+ comment_item: Comment data
+ """
comment_id = comment_item.get("comment_id")
if not comment_id:
return
@@ -201,6 +275,11 @@ utils.logger.info(f"[DouyinMongoStoreImplement.store_comment] Saved comment {comment_id} to MongoDB")
async def store_creator(self, creator_item: Dict):
+ """
+ Store creator information to MongoDB
+ Args:
+ creator_item: Creator data
+ """
user_id = creator_item.get("user_id")
if not user_id:
return
@@ -214,10 +293,11 @@
class DouyinExcelStoreImplement:
+ """Douyin Excel storage implementation - Global singleton"""
def __new__(cls, *args, **kwargs):
from store.excel_store_base import ExcelStoreBase
return ExcelStoreBase.get_instance(
platform="douyin",
crawler_type=crawler_type_var.get()
- )+ )
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/store/douyin/_store_impl.py |
Add docstrings that explain inputs and outputs | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/store/tieba/__init__.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
from typing import List
from model.m_baidu_tieba import TiebaComment, TiebaCreator, TiebaNote
from var import source_keyword_var
from ._store_impl import *
class TieBaStoreFactory:
STORES = {
"csv": TieBaCsvStoreImplement,
"db": TieBaDbStoreImplement,
"postgres": TieBaDbStoreImplement,
"json": TieBaJsonStoreImplement,
"jsonl": TieBaJsonlStoreImplement,
"sqlite": TieBaSqliteStoreImplement,
"mongodb": TieBaMongoStoreImplement,
"excel": TieBaExcelStoreImplement,
}
@staticmethod
def create_store() -> AbstractStore:
store_class = TieBaStoreFactory.STORES.get(config.SAVE_DATA_OPTION)
if not store_class:
raise ValueError(
"[TieBaStoreFactory.create_store] Invalid save option only supported csv or db or json or sqlite or mongodb or excel ...")
return store_class()
async def batch_update_tieba_notes(note_list: List[TiebaNote]):
if not note_list:
return
for note_item in note_list:
await update_tieba_note(note_item)
async def update_tieba_note(note_item: TiebaNote):
note_item.source_keyword = source_keyword_var.get()
save_note_item = note_item.model_dump()
save_note_item.update({"last_modify_ts": utils.get_current_timestamp()})
utils.logger.info(f"[store.tieba.update_tieba_note] tieba note: {save_note_item}")
await TieBaStoreFactory.create_store().store_content(save_note_item)
async def batch_update_tieba_note_comments(note_id: str, comments: List[TiebaComment]):
if not comments:
return
for comment_item in comments:
await update_tieba_note_comment(note_id, comment_item)
async def update_tieba_note_comment(note_id: str, comment_item: TiebaComment):
save_comment_item = comment_item.model_dump()
save_comment_item.update({"last_modify_ts": utils.get_current_timestamp()})
utils.logger.info(f"[store.tieba.update_tieba_note_comment] tieba note id: {note_id} comment:{save_comment_item}")
await TieBaStoreFactory.create_store().store_comment(save_comment_item)
async def save_creator(user_info: TiebaCreator):
local_db_item = user_info.model_dump()
local_db_item["last_modify_ts"] = utils.get_current_timestamp()
utils.logger.info(f"[store.tieba.save_creator] creator:{local_db_item}")
await TieBaStoreFactory.create_store().store_creator(local_db_item) | --- +++ @@ -49,6 +49,14 @@
async def batch_update_tieba_notes(note_list: List[TiebaNote]):
+ """
+ Batch update tieba notes
+ Args:
+ note_list:
+
+ Returns:
+
+ """
if not note_list:
return
for note_item in note_list:
@@ -56,6 +64,14 @@
async def update_tieba_note(note_item: TiebaNote):
+ """
+ Add or Update tieba note
+ Args:
+ note_item:
+
+ Returns:
+
+ """
note_item.source_keyword = source_keyword_var.get()
save_note_item = note_item.model_dump()
save_note_item.update({"last_modify_ts": utils.get_current_timestamp()})
@@ -65,6 +81,15 @@
async def batch_update_tieba_note_comments(note_id: str, comments: List[TiebaComment]):
+ """
+ Batch update tieba note comments
+ Args:
+ note_id:
+ comments:
+
+ Returns:
+
+ """
if not comments:
return
for comment_item in comments:
@@ -72,6 +97,15 @@
async def update_tieba_note_comment(note_id: str, comment_item: TiebaComment):
+ """
+ Update tieba note comment
+ Args:
+ note_id:
+ comment_item:
+
+ Returns:
+
+ """
save_comment_item = comment_item.model_dump()
save_comment_item.update({"last_modify_ts": utils.get_current_timestamp()})
utils.logger.info(f"[store.tieba.update_tieba_note_comment] tieba note id: {note_id} comment:{save_comment_item}")
@@ -79,7 +113,15 @@
async def save_creator(user_info: TiebaCreator):
+ """
+ Save creator information to local
+ Args:
+ user_info:
+
+ Returns:
+
+ """
local_db_item = user_info.model_dump()
local_db_item["last_modify_ts"] = utils.get_current_timestamp()
utils.logger.info(f"[store.tieba.save_creator] creator:{local_db_item}")
- await TieBaStoreFactory.create_store().store_creator(local_db_item)+ await TieBaStoreFactory.create_store().store_creator(local_db_item)
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/store/tieba/__init__.py |
Help me write clear docstrings | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/tools/browser_launcher.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
import os
import platform
import subprocess
import time
import socket
import signal
from typing import Optional, List, Tuple
import asyncio
from pathlib import Path
from tools import utils
class BrowserLauncher:
def __init__(self):
self.system = platform.system()
self.browser_process = None
self.debug_port = None
def detect_browser_paths(self) -> List[str]:
paths = []
if self.system == "Windows":
# Common Chrome/Edge installation paths on Windows
possible_paths = [
# Chrome paths
os.path.expandvars(r"%PROGRAMFILES%\Google\Chrome\Application\chrome.exe"),
os.path.expandvars(r"%PROGRAMFILES(X86)%\Google\Chrome\Application\chrome.exe"),
os.path.expandvars(r"%LOCALAPPDATA%\Google\Chrome\Application\chrome.exe"),
# Edge paths
os.path.expandvars(r"%PROGRAMFILES%\Microsoft\Edge\Application\msedge.exe"),
os.path.expandvars(r"%PROGRAMFILES(X86)%\Microsoft\Edge\Application\msedge.exe"),
# Chrome Beta/Dev/Canary
os.path.expandvars(r"%LOCALAPPDATA%\Google\Chrome Beta\Application\chrome.exe"),
os.path.expandvars(r"%LOCALAPPDATA%\Google\Chrome Dev\Application\chrome.exe"),
os.path.expandvars(r"%LOCALAPPDATA%\Google\Chrome SxS\Application\chrome.exe"),
]
elif self.system == "Darwin": # macOS
# Common Chrome/Edge installation paths on macOS
possible_paths = [
# Chrome paths
"/Applications/Google Chrome.app/Contents/MacOS/Google Chrome",
"/Applications/Google Chrome Beta.app/Contents/MacOS/Google Chrome Beta",
"/Applications/Google Chrome Dev.app/Contents/MacOS/Google Chrome Dev",
"/Applications/Google Chrome Canary.app/Contents/MacOS/Google Chrome Canary",
# Edge paths
"/Applications/Microsoft Edge.app/Contents/MacOS/Microsoft Edge",
"/Applications/Microsoft Edge Beta.app/Contents/MacOS/Microsoft Edge Beta",
"/Applications/Microsoft Edge Dev.app/Contents/MacOS/Microsoft Edge Dev",
"/Applications/Microsoft Edge Canary.app/Contents/MacOS/Microsoft Edge Canary",
]
else:
# Linux and other systems
possible_paths = [
"/usr/bin/google-chrome",
"/usr/bin/google-chrome-stable",
"/usr/bin/google-chrome-beta",
"/usr/bin/google-chrome-unstable",
"/usr/bin/chromium-browser",
"/usr/bin/chromium",
"/snap/bin/chromium",
"/usr/bin/microsoft-edge",
"/usr/bin/microsoft-edge-stable",
"/usr/bin/microsoft-edge-beta",
"/usr/bin/microsoft-edge-dev",
]
# Check if path exists and is executable
for path in possible_paths:
if os.path.isfile(path) and os.access(path, os.X_OK):
paths.append(path)
return paths
def find_available_port(self, start_port: int = 9222) -> int:
port = start_port
while port < start_port + 100: # Try up to 100 ports
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(('localhost', port))
return port
except OSError:
port += 1
raise RuntimeError(f"Cannot find available port, tried {start_port} to {port-1}")
def launch_browser(self, browser_path: str, debug_port: int, headless: bool = False,
user_data_dir: Optional[str] = None) -> subprocess.Popen:
# Basic launch arguments
args = [
browser_path,
f"--remote-debugging-port={debug_port}",
"--remote-debugging-address=0.0.0.0", # Allow remote access
"--no-first-run",
"--no-default-browser-check",
"--disable-background-timer-throttling",
"--disable-backgrounding-occluded-windows",
"--disable-renderer-backgrounding",
"--disable-features=TranslateUI",
"--disable-ipc-flooding-protection",
"--disable-hang-monitor",
"--disable-prompt-on-repost",
"--disable-sync",
"--disable-dev-shm-usage", # Avoid shared memory issues
"--no-sandbox", # Disable sandbox in CDP mode
# Key anti-detection arguments
"--disable-blink-features=AutomationControlled", # Disable automation control flag
"--exclude-switches=enable-automation", # Exclude automation switch
"--disable-infobars", # Disable info bars
]
# Headless mode
if headless:
args.extend([
"--headless=new", # Use new headless mode
"--disable-gpu",
])
else:
# Extra arguments for non-headless mode
args.extend([
"--start-maximized", # Maximize window, more like real user
])
# User data directory
if user_data_dir:
args.append(f"--user-data-dir={user_data_dir}")
utils.logger.info(f"[BrowserLauncher] Launching browser: {browser_path}")
utils.logger.info(f"[BrowserLauncher] Debug port: {debug_port}")
utils.logger.info(f"[BrowserLauncher] Headless mode: {headless}")
try:
# On Windows, use CREATE_NEW_PROCESS_GROUP to prevent Ctrl+C from affecting subprocess
if self.system == "Windows":
process = subprocess.Popen(
args,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP
)
else:
process = subprocess.Popen(
args,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
preexec_fn=os.setsid # Create new process group
)
self.browser_process = process
return process
except Exception as e:
utils.logger.error(f"[BrowserLauncher] Failed to launch browser: {e}")
raise
def wait_for_browser_ready(self, debug_port: int, timeout: int = 30) -> bool:
utils.logger.info(f"[BrowserLauncher] Waiting for browser to be ready on port {debug_port}...")
start_time = time.time()
while time.time() - start_time < timeout:
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.settimeout(1)
result = s.connect_ex(('localhost', debug_port))
if result == 0:
utils.logger.info(f"[BrowserLauncher] Browser is ready on port {debug_port}")
return True
except Exception:
pass
time.sleep(0.5)
utils.logger.error(f"[BrowserLauncher] Browser failed to be ready within {timeout} seconds")
return False
def get_browser_info(self, browser_path: str) -> Tuple[str, str]:
try:
if "chrome" in browser_path.lower():
name = "Google Chrome"
elif "edge" in browser_path.lower() or "msedge" in browser_path.lower():
name = "Microsoft Edge"
elif "chromium" in browser_path.lower():
name = "Chromium"
else:
name = "Unknown Browser"
# Try to get version info
try:
result = subprocess.run([browser_path, "--version"],
capture_output=True, text=True, encoding='utf-8', errors='ignore', timeout=5)
version = result.stdout.strip() if result.stdout else "Unknown Version"
except:
version = "Unknown Version"
return name, version
except Exception:
return "Unknown Browser", "Unknown Version"
def cleanup(self):
if not self.browser_process:
return
process = self.browser_process
if process.poll() is not None:
utils.logger.info("[BrowserLauncher] Browser process already exited, no cleanup needed")
self.browser_process = None
return
utils.logger.info("[BrowserLauncher] Closing browser process...")
try:
if self.system == "Windows":
# First try normal termination
process.terminate()
try:
process.wait(timeout=5)
except subprocess.TimeoutExpired:
utils.logger.warning("[BrowserLauncher] Normal termination timeout, using taskkill to force kill")
subprocess.run(
["taskkill", "/F", "/T", "/PID", str(process.pid)],
capture_output=True,
check=False,
encoding='utf-8',
errors='ignore'
)
process.wait(timeout=5)
else:
pgid = os.getpgid(process.pid)
try:
os.killpg(pgid, signal.SIGTERM)
except ProcessLookupError:
utils.logger.info("[BrowserLauncher] Browser process group does not exist, may have exited")
else:
try:
process.wait(timeout=5)
except subprocess.TimeoutExpired:
utils.logger.warning("[BrowserLauncher] Graceful shutdown timeout, sending SIGKILL")
os.killpg(pgid, signal.SIGKILL)
process.wait(timeout=5)
utils.logger.info("[BrowserLauncher] Browser process closed")
except Exception as e:
utils.logger.warning(f"[BrowserLauncher] Error closing browser process: {e}")
finally:
self.browser_process = None | --- +++ @@ -32,6 +32,10 @@
class BrowserLauncher:
+ """
+ Browser launcher for detecting and launching user's Chrome/Edge browser
+ Supports Windows and macOS systems
+ """
def __init__(self):
self.system = platform.system()
@@ -39,6 +43,10 @@ self.debug_port = None
def detect_browser_paths(self) -> List[str]:
+ """
+ Detect available browser paths in system
+ Returns list of browser paths sorted by priority
+ """
paths = []
if self.system == "Windows":
@@ -94,6 +102,9 @@ return paths
def find_available_port(self, start_port: int = 9222) -> int:
+ """
+ Find available port
+ """
port = start_port
while port < start_port + 100: # Try up to 100 ports
try:
@@ -107,6 +118,9 @@
def launch_browser(self, browser_path: str, debug_port: int, headless: bool = False,
user_data_dir: Optional[str] = None) -> subprocess.Popen:
+ """
+ Launch browser process
+ """
# Basic launch arguments
args = [
browser_path,
@@ -175,6 +189,9 @@ raise
def wait_for_browser_ready(self, debug_port: int, timeout: int = 30) -> bool:
+ """
+ Wait for browser to be ready
+ """
utils.logger.info(f"[BrowserLauncher] Waiting for browser to be ready on port {debug_port}...")
start_time = time.time()
@@ -195,6 +212,9 @@ return False
def get_browser_info(self, browser_path: str) -> Tuple[str, str]:
+ """
+ Get browser info (name and version)
+ """
try:
if "chrome" in browser_path.lower():
name = "Google Chrome"
@@ -219,6 +239,9 @@ return "Unknown Browser", "Unknown Version"
def cleanup(self):
+ """
+ Cleanup resources, close browser process
+ """
if not self.browser_process:
return
@@ -265,4 +288,4 @@ except Exception as e:
utils.logger.warning(f"[BrowserLauncher] Error closing browser process: {e}")
finally:
- self.browser_process = None+ self.browser_process = None
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/tools/browser_launcher.py |
Create docstrings for API functions | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/store/bilibili/bilibilli_store_media.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : helloteemo
# @Time : 2024/7/12 20:01
# @Desc : Bilibili media storage
import pathlib
from typing import Dict
import aiofiles
from base.base_crawler import AbstractStoreImage, AbstractStoreVideo
from tools import utils
import config
class BilibiliVideo(AbstractStoreVideo):
def __init__(self):
if config.SAVE_DATA_PATH:
self.video_store_path = f"{config.SAVE_DATA_PATH}/bili/videos"
else:
self.video_store_path = "data/bili/videos"
async def store_video(self, video_content_item: Dict):
await self.save_video(video_content_item.get("aid"), video_content_item.get("video_content"), video_content_item.get("extension_file_name"))
def make_save_file_name(self, aid: str, extension_file_name: str) -> str:
return f"{self.video_store_path}/{aid}/{extension_file_name}"
async def save_video(self, aid: int, video_content: str, extension_file_name="mp4"):
pathlib.Path(self.video_store_path + "/" + str(aid)).mkdir(parents=True, exist_ok=True)
save_file_name = self.make_save_file_name(str(aid), extension_file_name)
async with aiofiles.open(save_file_name, 'wb') as f:
await f.write(video_content)
utils.logger.info(f"[BilibiliVideoImplement.save_video] save save_video {save_file_name} success ...") | --- +++ @@ -39,14 +39,44 @@ self.video_store_path = "data/bili/videos"
async def store_video(self, video_content_item: Dict):
+ """
+ store content
+
+ Args:
+ video_content_item:
+
+ Returns:
+
+ """
await self.save_video(video_content_item.get("aid"), video_content_item.get("video_content"), video_content_item.get("extension_file_name"))
def make_save_file_name(self, aid: str, extension_file_name: str) -> str:
+ """
+ make save file name by store type
+
+ Args:
+ aid: aid
+ extension_file_name: video filename with extension
+
+ Returns:
+
+ """
return f"{self.video_store_path}/{aid}/{extension_file_name}"
async def save_video(self, aid: int, video_content: str, extension_file_name="mp4"):
+ """
+ save video to local
+
+ Args:
+ aid: aid
+ video_content: video content
+ extension_file_name: video filename with extension
+
+ Returns:
+
+ """
pathlib.Path(self.video_store_path + "/" + str(aid)).mkdir(parents=True, exist_ok=True)
save_file_name = self.make_save_file_name(str(aid), extension_file_name)
async with aiofiles.open(save_file_name, 'wb') as f:
await f.write(video_content)
- utils.logger.info(f"[BilibiliVideoImplement.save_video] save save_video {save_file_name} success ...")+ utils.logger.info(f"[BilibiliVideoImplement.save_video] save save_video {save_file_name} success ...")
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/store/bilibili/bilibilli_store_media.py |
Add well-formatted docstrings | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/tools/crawler_util.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : relakkes@gmail.com
# @Time : 2023/12/2 12:53
# @Desc : Crawler utility functions
import base64
import json
import random
import re
import urllib
import urllib.parse
from io import BytesIO
from typing import Dict, List, Optional, Tuple, cast
import httpx
from PIL import Image, ImageDraw, ImageShow
from playwright.async_api import Cookie, Page
from . import utils
async def find_login_qrcode(page: Page, selector: str) -> str:
try:
elements = await page.wait_for_selector(
selector=selector,
)
login_qrcode_img = str(await elements.get_property("src")) # type: ignore
if "http://" in login_qrcode_img or "https://" in login_qrcode_img:
async with httpx.AsyncClient(follow_redirects=True) as client:
utils.logger.info(f"[find_login_qrcode] get qrcode by url:{login_qrcode_img}")
resp = await client.get(login_qrcode_img, headers={"User-Agent": get_user_agent()})
if resp.status_code == 200:
image_data = resp.content
base64_image = base64.b64encode(image_data).decode('utf-8')
return base64_image
raise Exception(f"fetch login image url failed, response message:{resp.text}")
return login_qrcode_img
except Exception as e:
print(e)
return ""
async def find_qrcode_img_from_canvas(page: Page, canvas_selector: str) -> str:
# Wait for Canvas element to load
canvas = await page.wait_for_selector(canvas_selector)
# Take screenshot of Canvas element
screenshot = await canvas.screenshot()
# Convert screenshot to base64 format
base64_image = base64.b64encode(screenshot).decode('utf-8')
return base64_image
def show_qrcode(qr_code) -> None: # type: ignore
if "," in qr_code:
qr_code = qr_code.split(",")[1]
qr_code = base64.b64decode(qr_code)
image = Image.open(BytesIO(qr_code))
# Add a square border around the QR code and display it within the border to improve scanning accuracy.
width, height = image.size
new_image = Image.new('RGB', (width + 20, height + 20), color=(255, 255, 255))
new_image.paste(image, (10, 10))
draw = ImageDraw.Draw(new_image)
draw.rectangle((0, 0, width + 19, height + 19), outline=(0, 0, 0), width=1)
del ImageShow.UnixViewer.options["save_all"]
new_image.show()
def get_user_agent() -> str:
ua_list = [
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.79 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.5060.53 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.84 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.5112.79 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.5060.53 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.4844.84 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.5112.79 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5060.53 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.4844.84 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.5112.79 Safari/537.36"
]
return random.choice(ua_list)
def get_mobile_user_agent() -> str:
ua_list = [
"Mozilla/5.0 (iPhone; CPU iPhone OS 18_5 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/18.5 Mobile/15E148 Safari/604.1"
]
return random.choice(ua_list)
def convert_cookies(cookies: Optional[List[Cookie]]) -> Tuple[str, Dict]:
if not cookies:
return "", {}
cookies_str = ";".join([f"{cookie.get('name')}={cookie.get('value')}" for cookie in cookies])
cookie_dict = dict()
for cookie in cookies:
cookie_dict[cookie.get('name')] = cookie.get('value')
return cookies_str, cookie_dict
def convert_str_cookie_to_dict(cookie_str: str) -> Dict:
cookie_dict: Dict[str, str] = dict()
if not cookie_str:
return cookie_dict
for cookie in cookie_str.split(";"):
cookie = cookie.strip()
if not cookie:
continue
cookie_list = cookie.split("=")
if len(cookie_list) != 2:
continue
cookie_value = cookie_list[1]
if isinstance(cookie_value, list):
cookie_value = "".join(cookie_value)
cookie_dict[cookie_list[0]] = cookie_value
return cookie_dict
def match_interact_info_count(count_str: str) -> int:
if not count_str:
return 0
match = re.search(r'\d+', count_str)
if match:
number = match.group()
return int(number)
else:
return 0
def format_proxy_info(ip_proxy_info) -> Tuple[Optional[Dict], Optional[str]]:
# fix circular import issue
from proxy.proxy_ip_pool import IpInfoModel
ip_proxy_info = cast(IpInfoModel, ip_proxy_info)
# Playwright proxy server should be in format "host:port" without protocol prefix
server = f"{ip_proxy_info.ip}:{ip_proxy_info.port}"
playwright_proxy = {
"server": server,
}
# Only add username and password if they are not empty
if ip_proxy_info.user and ip_proxy_info.password:
playwright_proxy["username"] = ip_proxy_info.user
playwright_proxy["password"] = ip_proxy_info.password
# httpx 0.28.1 requires passing proxy URL string directly, not a dictionary
if ip_proxy_info.user and ip_proxy_info.password:
httpx_proxy = f"http://{ip_proxy_info.user}:{ip_proxy_info.password}@{ip_proxy_info.ip}:{ip_proxy_info.port}"
else:
httpx_proxy = f"http://{ip_proxy_info.ip}:{ip_proxy_info.port}"
return playwright_proxy, httpx_proxy
def extract_text_from_html(html: str) -> str:
if not html:
return ""
# Remove script and style elements
clean_html = re.sub(r'<(script|style)[^>]*>.*?</\1>', '', html, flags=re.DOTALL)
# Remove all other tags
clean_text = re.sub(r'<[^>]+>', '', clean_html).strip()
return clean_text
def extract_url_params_to_dict(url: str) -> Dict:
url_params_dict = dict()
if not url:
return url_params_dict
parsed_url = urllib.parse.urlparse(url)
url_params_dict = dict(urllib.parse.parse_qsl(parsed_url.query))
return url_params_dict | --- +++ @@ -40,6 +40,7 @@
async def find_login_qrcode(page: Page, selector: str) -> str:
+ """find login qrcode image from target selector"""
try:
elements = await page.wait_for_selector(
selector=selector,
@@ -62,6 +63,15 @@
async def find_qrcode_img_from_canvas(page: Page, canvas_selector: str) -> str:
+ """
+ find qrcode image from canvas element
+ Args:
+ page:
+ canvas_selector:
+
+ Returns:
+
+ """
# Wait for Canvas element to load
canvas = await page.wait_for_selector(canvas_selector)
@@ -75,6 +85,7 @@
def show_qrcode(qr_code) -> None: # type: ignore
+ """parse base64 encode qrcode image and show it"""
if "," in qr_code:
qr_code = qr_code.split(",")[1]
qr_code = base64.b64decode(qr_code)
@@ -164,6 +175,7 @@
def format_proxy_info(ip_proxy_info) -> Tuple[Optional[Dict], Optional[str]]:
+ """format proxy info for playwright and httpx"""
# fix circular import issue
from proxy.proxy_ip_pool import IpInfoModel
ip_proxy_info = cast(IpInfoModel, ip_proxy_info)
@@ -189,6 +201,7 @@
def extract_text_from_html(html: str) -> str:
+ """Extract text from HTML, removing all tags."""
if not html:
return ""
@@ -199,9 +212,10 @@ return clean_text
def extract_url_params_to_dict(url: str) -> Dict:
+ """Extract URL parameters to dict"""
url_params_dict = dict()
if not url:
return url_params_dict
parsed_url = urllib.parse.urlparse(url)
url_params_dict = dict(urllib.parse.parse_qsl(parsed_url.query))
- return url_params_dict+ return url_params_dict
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/tools/crawler_util.py |
Add docstrings to improve code quality | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/tools/cdp_browser.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
import os
import asyncio
import socket
import httpx
import signal
import atexit
from typing import Optional, Dict, Any
from playwright.async_api import Browser, BrowserContext, Playwright
import config
from tools.browser_launcher import BrowserLauncher
from tools import utils
class CDPBrowserManager:
def __init__(self):
self.launcher = BrowserLauncher()
self.browser: Optional[Browser] = None
self.browser_context: Optional[BrowserContext] = None
self.debug_port: Optional[int] = None
self._cleanup_registered = False
def _register_cleanup_handlers(self):
if self._cleanup_registered:
return
def sync_cleanup():
if self.launcher and self.launcher.browser_process:
utils.logger.info("[CDPBrowserManager] atexit: Cleaning up browser process")
self.launcher.cleanup()
# Register atexit cleanup
atexit.register(sync_cleanup)
# Register signal handlers (only when no custom handlers exist, to avoid overriding main entry signal handling logic)
prev_sigint = signal.getsignal(signal.SIGINT)
prev_sigterm = signal.getsignal(signal.SIGTERM)
def signal_handler(signum, frame):
utils.logger.info(f"[CDPBrowserManager] Received signal {signum}, cleaning up browser process")
if self.launcher and self.launcher.browser_process:
self.launcher.cleanup()
if signum == signal.SIGINT:
if prev_sigint == signal.default_int_handler:
return prev_sigint(signum, frame)
raise KeyboardInterrupt
raise SystemExit(0)
install_sigint = prev_sigint in (signal.default_int_handler, signal.SIG_DFL)
install_sigterm = prev_sigterm == signal.SIG_DFL
# Register SIGINT (Ctrl+C) and SIGTERM
if install_sigint:
signal.signal(signal.SIGINT, signal_handler)
else:
utils.logger.info("[CDPBrowserManager] SIGINT handler already exists, skipping registration to avoid override")
if install_sigterm:
signal.signal(signal.SIGTERM, signal_handler)
else:
utils.logger.info("[CDPBrowserManager] SIGTERM handler already exists, skipping registration to avoid override")
self._cleanup_registered = True
utils.logger.info("[CDPBrowserManager] Cleanup handlers registered")
async def launch_and_connect(
self,
playwright: Playwright,
playwright_proxy: Optional[Dict] = None,
user_agent: Optional[str] = None,
headless: bool = False,
) -> BrowserContext:
try:
# 1. Detect browser path
browser_path = await self._get_browser_path()
# 2. Get available port
self.debug_port = self.launcher.find_available_port(config.CDP_DEBUG_PORT)
# 3. Launch browser
await self._launch_browser(browser_path, headless)
# 4. Register cleanup handlers (ensure cleanup on abnormal exit)
self._register_cleanup_handlers()
# 5. Connect via CDP
await self._connect_via_cdp(playwright)
# 6. Create browser context
browser_context = await self._create_browser_context(
playwright_proxy, user_agent
)
self.browser_context = browser_context
return browser_context
except Exception as e:
utils.logger.error(f"[CDPBrowserManager] CDP browser launch failed: {e}")
await self.cleanup()
raise
async def _get_browser_path(self) -> str:
# Prefer user-defined path
if config.CUSTOM_BROWSER_PATH and os.path.isfile(config.CUSTOM_BROWSER_PATH):
utils.logger.info(
f"[CDPBrowserManager] Using custom browser path: {config.CUSTOM_BROWSER_PATH}"
)
return config.CUSTOM_BROWSER_PATH
# Auto-detect browser path
browser_paths = self.launcher.detect_browser_paths()
if not browser_paths:
raise RuntimeError(
"No available browser found. Please ensure Chrome or Edge browser is installed, "
"or set CUSTOM_BROWSER_PATH in config file to specify browser path."
)
browser_path = browser_paths[0] # Use the first browser found
browser_name, browser_version = self.launcher.get_browser_info(browser_path)
utils.logger.info(
f"[CDPBrowserManager] Detected browser: {browser_name} ({browser_version})"
)
utils.logger.info(f"[CDPBrowserManager] Browser path: {browser_path}")
return browser_path
async def _test_cdp_connection(self, debug_port: int) -> bool:
try:
# Simple socket connection test
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.settimeout(5)
result = s.connect_ex(("localhost", debug_port))
if result == 0:
utils.logger.info(
f"[CDPBrowserManager] CDP port {debug_port} is accessible"
)
return True
else:
utils.logger.warning(
f"[CDPBrowserManager] CDP port {debug_port} is not accessible"
)
return False
except Exception as e:
utils.logger.warning(f"[CDPBrowserManager] CDP connection test failed: {e}")
return False
async def _launch_browser(self, browser_path: str, headless: bool):
# Set user data directory (if save login state is enabled)
user_data_dir = None
if config.SAVE_LOGIN_STATE:
user_data_dir = os.path.join(
os.getcwd(),
"browser_data",
f"cdp_{config.USER_DATA_DIR % config.PLATFORM}",
)
os.makedirs(user_data_dir, exist_ok=True)
utils.logger.info(f"[CDPBrowserManager] User data directory: {user_data_dir}")
# Launch browser
self.launcher.browser_process = self.launcher.launch_browser(
browser_path=browser_path,
debug_port=self.debug_port,
headless=headless,
user_data_dir=user_data_dir,
)
# Wait for browser to be ready
if not self.launcher.wait_for_browser_ready(
self.debug_port, config.BROWSER_LAUNCH_TIMEOUT
):
raise RuntimeError(f"Browser failed to start within {config.BROWSER_LAUNCH_TIMEOUT} seconds")
# Extra wait for CDP service to fully start
await asyncio.sleep(1)
# Test CDP connection
if not await self._test_cdp_connection(self.debug_port):
utils.logger.warning(
"[CDPBrowserManager] CDP connection test failed, but will continue to try connecting"
)
async def _get_browser_websocket_url(self, debug_port: int) -> str:
try:
async with httpx.AsyncClient() as client:
response = await client.get(
f"http://localhost:{debug_port}/json/version", timeout=10
)
if response.status_code == 200:
data = response.json()
ws_url = data.get("webSocketDebuggerUrl")
if ws_url:
utils.logger.info(
f"[CDPBrowserManager] Got browser WebSocket URL: {ws_url}"
)
return ws_url
else:
raise RuntimeError("webSocketDebuggerUrl not found")
else:
raise RuntimeError(f"HTTP {response.status_code}: {response.text}")
except Exception as e:
utils.logger.error(f"[CDPBrowserManager] Failed to get WebSocket URL: {e}")
raise
async def _connect_via_cdp(self, playwright: Playwright):
try:
# Get correct WebSocket URL
ws_url = await self._get_browser_websocket_url(self.debug_port)
utils.logger.info(f"[CDPBrowserManager] Connecting to browser via CDP: {ws_url}")
# Use Playwright's connectOverCDP method to connect
self.browser = await playwright.chromium.connect_over_cdp(ws_url)
if self.browser.is_connected():
utils.logger.info("[CDPBrowserManager] Successfully connected to browser")
utils.logger.info(
f"[CDPBrowserManager] Browser contexts count: {len(self.browser.contexts)}"
)
else:
raise RuntimeError("CDP connection failed")
except Exception as e:
utils.logger.error(f"[CDPBrowserManager] CDP connection failed: {e}")
raise
async def _create_browser_context(
self, playwright_proxy: Optional[Dict] = None, user_agent: Optional[str] = None
) -> BrowserContext:
if not self.browser:
raise RuntimeError("Browser not connected")
# Get existing context or create new context
contexts = self.browser.contexts
if contexts:
# Use existing first context
browser_context = contexts[0]
utils.logger.info("[CDPBrowserManager] Using existing browser context")
else:
# Create new context
context_options = {
"viewport": {"width": 1920, "height": 1080},
"accept_downloads": True,
}
# Set user agent
if user_agent:
context_options["user_agent"] = user_agent
utils.logger.info(f"[CDPBrowserManager] Setting user agent: {user_agent}")
# Note: Proxy settings may not work in CDP mode since browser is already launched
if playwright_proxy:
utils.logger.warning(
"[CDPBrowserManager] Warning: Proxy settings may not work in CDP mode, "
"recommend configuring system proxy or browser proxy extension before launching browser"
)
browser_context = await self.browser.new_context(**context_options)
utils.logger.info("[CDPBrowserManager] Created new browser context")
return browser_context
async def add_stealth_script(self, script_path: str = "libs/stealth.min.js"):
if self.browser_context and os.path.exists(script_path):
try:
await self.browser_context.add_init_script(path=script_path)
utils.logger.info(
f"[CDPBrowserManager] Added anti-detection script: {script_path}"
)
except Exception as e:
utils.logger.warning(f"[CDPBrowserManager] Failed to add anti-detection script: {e}")
async def add_cookies(self, cookies: list):
if self.browser_context:
try:
await self.browser_context.add_cookies(cookies)
utils.logger.info(f"[CDPBrowserManager] Added {len(cookies)} cookies")
except Exception as e:
utils.logger.warning(f"[CDPBrowserManager] Failed to add cookies: {e}")
async def get_cookies(self) -> list:
if self.browser_context:
try:
cookies = await self.browser_context.cookies()
return cookies
except Exception as e:
utils.logger.warning(f"[CDPBrowserManager] Failed to get cookies: {e}")
return []
return []
async def cleanup(self, force: bool = False):
try:
# Close browser context
if self.browser_context:
try:
# Check if context is already closed
# Try to get page list, if fails means already closed
try:
pages = self.browser_context.pages
if pages is not None:
await self.browser_context.close()
utils.logger.info("[CDPBrowserManager] Browser context closed")
except:
utils.logger.debug("[CDPBrowserManager] Browser context already closed")
except Exception as context_error:
# Only log warning if error is not due to already being closed
error_msg = str(context_error).lower()
if "closed" not in error_msg and "disconnected" not in error_msg:
utils.logger.warning(
f"[CDPBrowserManager] Failed to close browser context: {context_error}"
)
else:
utils.logger.debug(f"[CDPBrowserManager] Browser context already closed: {context_error}")
finally:
self.browser_context = None
# Disconnect browser
if self.browser:
try:
# Check if browser is still connected
if self.browser.is_connected():
await self.browser.close()
utils.logger.info("[CDPBrowserManager] Browser connection disconnected")
else:
utils.logger.debug("[CDPBrowserManager] Browser connection already disconnected")
except Exception as browser_error:
# Only log warning if error is not due to already being closed
error_msg = str(browser_error).lower()
if "closed" not in error_msg and "disconnected" not in error_msg:
utils.logger.warning(
f"[CDPBrowserManager] Failed to close browser connection: {browser_error}"
)
else:
utils.logger.debug(f"[CDPBrowserManager] Browser connection already closed: {browser_error}")
finally:
self.browser = None
# Close browser process
# force=True means force close, ignoring AUTO_CLOSE_BROWSER config
# Used for handling abnormal exit or manual cleanup
if force or config.AUTO_CLOSE_BROWSER:
if self.launcher and self.launcher.browser_process:
self.launcher.cleanup()
else:
utils.logger.debug("[CDPBrowserManager] No browser process to cleanup")
else:
utils.logger.info(
"[CDPBrowserManager] Browser process kept running (AUTO_CLOSE_BROWSER=False)"
)
except Exception as e:
utils.logger.error(f"[CDPBrowserManager] Error during resource cleanup: {e}")
def is_connected(self) -> bool:
return self.browser is not None and self.browser.is_connected()
async def get_browser_info(self) -> Dict[str, Any]:
if not self.browser:
return {}
try:
version = self.browser.version
contexts_count = len(self.browser.contexts)
return {
"version": version,
"contexts_count": contexts_count,
"debug_port": self.debug_port,
"is_connected": self.is_connected(),
}
except Exception as e:
utils.logger.warning(f"[CDPBrowserManager] Failed to get browser info: {e}")
return {} | --- +++ @@ -33,6 +33,9 @@
class CDPBrowserManager:
+ """
+ CDP browser manager, responsible for launching and managing browsers connected via CDP
+ """
def __init__(self):
self.launcher = BrowserLauncher()
@@ -42,10 +45,14 @@ self._cleanup_registered = False
def _register_cleanup_handlers(self):
+ """
+ Register cleanup handlers to ensure browser process cleanup on program exit
+ """
if self._cleanup_registered:
return
def sync_cleanup():
+ """Synchronous cleanup function for atexit"""
if self.launcher and self.launcher.browser_process:
utils.logger.info("[CDPBrowserManager] atexit: Cleaning up browser process")
self.launcher.cleanup()
@@ -58,6 +65,7 @@ prev_sigterm = signal.getsignal(signal.SIGTERM)
def signal_handler(signum, frame):
+ """Signal handler"""
utils.logger.info(f"[CDPBrowserManager] Received signal {signum}, cleaning up browser process")
if self.launcher and self.launcher.browser_process:
self.launcher.cleanup()
@@ -93,6 +101,9 @@ user_agent: Optional[str] = None,
headless: bool = False,
) -> BrowserContext:
+ """
+ Launch browser and connect via CDP
+ """
try:
# 1. Detect browser path
browser_path = await self._get_browser_path()
@@ -123,6 +134,9 @@ raise
async def _get_browser_path(self) -> str:
+ """
+ Get browser path
+ """
# Prefer user-defined path
if config.CUSTOM_BROWSER_PATH and os.path.isfile(config.CUSTOM_BROWSER_PATH):
utils.logger.info(
@@ -150,6 +164,9 @@ return browser_path
async def _test_cdp_connection(self, debug_port: int) -> bool:
+ """
+ Test if CDP connection is available
+ """
try:
# Simple socket connection test
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
@@ -170,6 +187,9 @@ return False
async def _launch_browser(self, browser_path: str, headless: bool):
+ """
+ Launch browser process
+ """
# Set user data directory (if save login state is enabled)
user_data_dir = None
if config.SAVE_LOGIN_STATE:
@@ -205,6 +225,9 @@ )
async def _get_browser_websocket_url(self, debug_port: int) -> str:
+ """
+ Get browser WebSocket connection URL
+ """
try:
async with httpx.AsyncClient() as client:
response = await client.get(
@@ -227,6 +250,9 @@ raise
async def _connect_via_cdp(self, playwright: Playwright):
+ """
+ Connect to browser via CDP
+ """
try:
# Get correct WebSocket URL
ws_url = await self._get_browser_websocket_url(self.debug_port)
@@ -250,6 +276,9 @@ async def _create_browser_context(
self, playwright_proxy: Optional[Dict] = None, user_agent: Optional[str] = None
) -> BrowserContext:
+ """
+ Create or get browser context
+ """
if not self.browser:
raise RuntimeError("Browser not connected")
@@ -285,6 +314,9 @@ return browser_context
async def add_stealth_script(self, script_path: str = "libs/stealth.min.js"):
+ """
+ Add anti-detection script
+ """
if self.browser_context and os.path.exists(script_path):
try:
await self.browser_context.add_init_script(path=script_path)
@@ -295,6 +327,9 @@ utils.logger.warning(f"[CDPBrowserManager] Failed to add anti-detection script: {e}")
async def add_cookies(self, cookies: list):
+ """
+ Add cookies
+ """
if self.browser_context:
try:
await self.browser_context.add_cookies(cookies)
@@ -303,6 +338,9 @@ utils.logger.warning(f"[CDPBrowserManager] Failed to add cookies: {e}")
async def get_cookies(self) -> list:
+ """
+ Get current cookies
+ """
if self.browser_context:
try:
cookies = await self.browser_context.cookies()
@@ -313,6 +351,12 @@ return []
async def cleanup(self, force: bool = False):
+ """
+ Cleanup resources
+
+ Args:
+ force: Whether to force cleanup browser process (ignoring AUTO_CLOSE_BROWSER config)
+ """
try:
# Close browser context
if self.browser_context:
@@ -376,9 +420,15 @@ utils.logger.error(f"[CDPBrowserManager] Error during resource cleanup: {e}")
def is_connected(self) -> bool:
+ """
+ Check if connected to browser
+ """
return self.browser is not None and self.browser.is_connected()
async def get_browser_info(self) -> Dict[str, Any]:
+ """
+ Get browser info
+ """
if not self.browser:
return {}
@@ -394,4 +444,4 @@ }
except Exception as e:
utils.logger.warning(f"[CDPBrowserManager] Failed to get browser info: {e}")
- return {}+ return {}
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/tools/cdp_browser.py |
Document functions with clear intent | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/store/douyin/__init__.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : relakkes@gmail.com
# @Time : 2024/1/14 18:46
# @Desc :
from typing import List
import config
from var import source_keyword_var
from ._store_impl import *
from .douyin_store_media import *
class DouyinStoreFactory:
STORES = {
"csv": DouyinCsvStoreImplement,
"db": DouyinDbStoreImplement,
"postgres": DouyinDbStoreImplement,
"json": DouyinJsonStoreImplement,
"jsonl": DouyinJsonlStoreImplement,
"sqlite": DouyinSqliteStoreImplement,
"mongodb": DouyinMongoStoreImplement,
"excel": DouyinExcelStoreImplement,
}
@staticmethod
def create_store() -> AbstractStore:
store_class = DouyinStoreFactory.STORES.get(config.SAVE_DATA_OPTION)
if not store_class:
raise ValueError("[DouyinStoreFactory.create_store] Invalid save option only supported csv or db or json or sqlite or mongodb or excel ...")
return store_class()
def _extract_note_image_list(aweme_detail: Dict) -> List[str]:
images_res: List[str] = []
images: List[Dict] = aweme_detail.get("images", [])
if not images:
return []
for image in images:
image_url_list = image.get("url_list", []) # download_url_list has watermarked images, url_list has non-watermarked images
if image_url_list:
images_res.append(image_url_list[-1])
return images_res
def _extract_comment_image_list(comment_item: Dict) -> List[str]:
images_res: List[str] = []
image_list: List[Dict] = comment_item.get("image_list", [])
if not image_list:
return []
for image in image_list:
image_url_list = image.get("origin_url", {}).get("url_list", [])
if image_url_list and len(image_url_list) > 1:
images_res.append(image_url_list[1])
return images_res
def _extract_content_cover_url(aweme_detail: Dict) -> str:
res_cover_url = ""
video_item = aweme_detail.get("video", {})
raw_cover_url_list = (video_item.get("raw_cover", {}) or video_item.get("origin_cover", {})).get("url_list", [])
if raw_cover_url_list and len(raw_cover_url_list) > 1:
res_cover_url = raw_cover_url_list[1]
return res_cover_url
def _extract_video_download_url(aweme_detail: Dict) -> str:
video_item = aweme_detail.get("video", {})
url_h264_list = video_item.get("play_addr_h264", {}).get("url_list", [])
url_256_list = video_item.get("play_addr_256", {}).get("url_list", [])
url_list = video_item.get("play_addr", {}).get("url_list", [])
actual_url_list = url_h264_list or url_256_list or url_list
if not actual_url_list or len(actual_url_list) < 2:
return ""
return actual_url_list[-1]
def _extract_music_download_url(aweme_detail: Dict) -> str:
music_item = aweme_detail.get("music", {})
play_url = music_item.get("play_url", {})
music_url = play_url.get("uri", "")
return music_url
async def update_douyin_aweme(aweme_item: Dict):
aweme_id = aweme_item.get("aweme_id")
user_info = aweme_item.get("author", {})
interact_info = aweme_item.get("statistics", {})
save_content_item = {
"aweme_id": aweme_id,
"aweme_type": str(aweme_item.get("aweme_type")),
"title": aweme_item.get("desc", ""),
"desc": aweme_item.get("desc", ""),
"create_time": aweme_item.get("create_time"),
"user_id": user_info.get("uid"),
"sec_uid": user_info.get("sec_uid"),
"short_user_id": user_info.get("short_id"),
"user_unique_id": user_info.get("unique_id"),
"user_signature": user_info.get("signature"),
"nickname": user_info.get("nickname"),
"avatar": user_info.get("avatar_thumb", {}).get("url_list", [""])[0],
"liked_count": str(interact_info.get("digg_count")),
"collected_count": str(interact_info.get("collect_count")),
"comment_count": str(interact_info.get("comment_count")),
"share_count": str(interact_info.get("share_count")),
"ip_location": aweme_item.get("ip_label", ""),
"last_modify_ts": utils.get_current_timestamp(),
"aweme_url": f"https://www.douyin.com/video/{aweme_id}",
"cover_url": _extract_content_cover_url(aweme_item),
"video_download_url": _extract_video_download_url(aweme_item),
"music_download_url": _extract_music_download_url(aweme_item),
"note_download_url": ",".join(_extract_note_image_list(aweme_item)),
"source_keyword": source_keyword_var.get(),
}
utils.logger.info(f"[store.douyin.update_douyin_aweme] douyin aweme id:{aweme_id}, title:{save_content_item.get('title')}")
await DouyinStoreFactory.create_store().store_content(content_item=save_content_item)
async def batch_update_dy_aweme_comments(aweme_id: str, comments: List[Dict]):
if not comments:
return
for comment_item in comments:
await update_dy_aweme_comment(aweme_id, comment_item)
async def update_dy_aweme_comment(aweme_id: str, comment_item: Dict):
comment_aweme_id = comment_item.get("aweme_id")
if aweme_id != comment_aweme_id:
utils.logger.error(f"[store.douyin.update_dy_aweme_comment] comment_aweme_id: {comment_aweme_id} != aweme_id: {aweme_id}")
return
user_info = comment_item.get("user", {})
comment_id = comment_item.get("cid")
parent_comment_id = comment_item.get("reply_id", "0")
avatar_info = (user_info.get("avatar_medium", {}) or user_info.get("avatar_300x300", {}) or user_info.get("avatar_168x168", {}) or user_info.get("avatar_thumb", {}) or {})
save_comment_item = {
"comment_id": comment_id,
"create_time": comment_item.get("create_time"),
"ip_location": comment_item.get("ip_label", ""),
"aweme_id": aweme_id,
"content": comment_item.get("text"),
"user_id": user_info.get("uid"),
"sec_uid": user_info.get("sec_uid"),
"short_user_id": user_info.get("short_id"),
"user_unique_id": user_info.get("unique_id"),
"user_signature": user_info.get("signature"),
"nickname": user_info.get("nickname"),
"avatar": avatar_info.get("url_list", [""])[0],
"sub_comment_count": str(comment_item.get("reply_comment_total", 0)),
"like_count": (comment_item.get("digg_count") if comment_item.get("digg_count") else 0),
"last_modify_ts": utils.get_current_timestamp(),
"parent_comment_id": parent_comment_id,
"pictures": ",".join(_extract_comment_image_list(comment_item)),
}
utils.logger.info(f"[store.douyin.update_dy_aweme_comment] douyin aweme comment: {comment_id}, content: {save_comment_item.get('content')}")
await DouyinStoreFactory.create_store().store_comment(comment_item=save_comment_item)
async def save_creator(user_id: str, creator: Dict):
user_info = creator.get("user", {})
gender_map = {0: "Unknown", 1: "Male", 2: "Female"}
avatar_uri = user_info.get("avatar_300x300", {}).get("uri")
local_db_item = {
"user_id": user_id,
"nickname": user_info.get("nickname"),
"gender": gender_map.get(user_info.get("gender"), "Unknown"),
"avatar": f"https://p3-pc.douyinpic.com/img/{avatar_uri}" + r"~c5_300x300.jpeg?from=2956013662",
"desc": user_info.get("signature"),
"ip_location": user_info.get("ip_location"),
"follows": user_info.get("following_count", 0),
"fans": user_info.get("max_follower_count", 0),
"interaction": user_info.get("total_favorited", 0),
"videos_count": user_info.get("aweme_count", 0),
"last_modify_ts": utils.get_current_timestamp(),
}
utils.logger.info(f"[store.douyin.save_creator] creator:{local_db_item}")
await DouyinStoreFactory.create_store().store_creator(local_db_item)
async def update_dy_aweme_image(aweme_id, pic_content, extension_file_name):
await DouYinImage().store_image({"aweme_id": aweme_id, "pic_content": pic_content, "extension_file_name": extension_file_name})
async def update_dy_aweme_video(aweme_id, video_content, extension_file_name):
await DouYinVideo().store_video({"aweme_id": aweme_id, "video_content": video_content, "extension_file_name": extension_file_name}) | --- +++ @@ -51,6 +51,15 @@
def _extract_note_image_list(aweme_detail: Dict) -> List[str]:
+ """
+ Extract note image list
+
+ Args:
+ aweme_detail (Dict): Douyin content details
+
+ Returns:
+ List[str]: Note image list
+ """
images_res: List[str] = []
images: List[Dict] = aweme_detail.get("images", [])
@@ -66,6 +75,15 @@
def _extract_comment_image_list(comment_item: Dict) -> List[str]:
+ """
+ Extract comment image list
+
+ Args:
+ comment_item (Dict): Douyin comment
+
+ Returns:
+ List[str]: Comment image list
+ """
images_res: List[str] = []
image_list: List[Dict] = comment_item.get("image_list", [])
@@ -81,6 +99,15 @@
def _extract_content_cover_url(aweme_detail: Dict) -> str:
+ """
+ Extract video cover URL
+
+ Args:
+ aweme_detail (Dict): Douyin content details
+
+ Returns:
+ str: Video cover URL
+ """
res_cover_url = ""
video_item = aweme_detail.get("video", {})
@@ -92,6 +119,15 @@
def _extract_video_download_url(aweme_detail: Dict) -> str:
+ """
+ Extract video download URL
+
+ Args:
+ aweme_detail (Dict): Douyin video
+
+ Returns:
+ str: Video download URL
+ """
video_item = aweme_detail.get("video", {})
url_h264_list = video_item.get("play_addr_h264", {}).get("url_list", [])
url_256_list = video_item.get("play_addr_256", {}).get("url_list", [])
@@ -103,6 +139,15 @@
def _extract_music_download_url(aweme_detail: Dict) -> str:
+ """
+ Extract music download URL
+
+ Args:
+ aweme_detail (Dict): Douyin video
+
+ Returns:
+ str: Music download URL
+ """
music_item = aweme_detail.get("music", {})
play_url = music_item.get("play_url", {})
music_url = play_url.get("uri", "")
@@ -205,10 +250,30 @@
async def update_dy_aweme_image(aweme_id, pic_content, extension_file_name):
+ """
+ Update Douyin note image
+ Args:
+ aweme_id:
+ pic_content:
+ extension_file_name:
+
+ Returns:
+
+ """
await DouYinImage().store_image({"aweme_id": aweme_id, "pic_content": pic_content, "extension_file_name": extension_file_name})
async def update_dy_aweme_video(aweme_id, video_content, extension_file_name):
-
- await DouYinVideo().store_video({"aweme_id": aweme_id, "video_content": video_content, "extension_file_name": extension_file_name})+ """
+ Update Douyin short video
+ Args:
+ aweme_id:
+ video_content:
+ extension_file_name:
+
+ Returns:
+
+ """
+
+ await DouYinVideo().store_video({"aweme_id": aweme_id, "video_content": video_content, "extension_file_name": extension_file_name})
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/store/douyin/__init__.py |
Generate consistent docstrings | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/store/bilibili/_store_impl.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : persist1@126.com
# @Time : 2025/9/5 19:34
# @Desc : Bilibili storage implementation class
import asyncio
import csv
import json
import os
import pathlib
from typing import Dict
import aiofiles
from sqlalchemy import select
from sqlalchemy.orm import sessionmaker
import config
from base.base_crawler import AbstractStore
from database.db_session import get_session
from database.models import BilibiliVideoComment, BilibiliVideo, BilibiliUpInfo, BilibiliUpDynamic, BilibiliContactInfo
from tools.async_file_writer import AsyncFileWriter
from tools import utils, words
from var import crawler_type_var
from database.mongodb_store_base import MongoDBStoreBase
class BiliCsvStoreImplement(AbstractStore):
def __init__(self):
self.file_writer = AsyncFileWriter(
crawler_type=crawler_type_var.get(),
platform="bili"
)
async def store_content(self, content_item: Dict):
await self.file_writer.write_to_csv(
item=content_item,
item_type="videos"
)
async def store_comment(self, comment_item: Dict):
await self.file_writer.write_to_csv(
item=comment_item,
item_type="comments"
)
async def store_creator(self, creator: Dict):
await self.file_writer.write_to_csv(
item=creator,
item_type="creators"
)
async def store_contact(self, contact_item: Dict):
await self.file_writer.write_to_csv(
item=contact_item,
item_type="contacts"
)
async def store_dynamic(self, dynamic_item: Dict):
await self.file_writer.write_to_csv(
item=dynamic_item,
item_type="dynamics"
)
class BiliDbStoreImplement(AbstractStore):
async def store_content(self, content_item: Dict):
video_id = int(content_item.get("video_id"))
content_item["video_id"] = video_id
content_item["user_id"] = int(content_item.get("user_id", 0) or 0)
content_item["liked_count"] = int(content_item.get("liked_count", 0) or 0)
content_item["create_time"] = int(content_item.get("create_time", 0) or 0)
async with get_session() as session:
result = await session.execute(select(BilibiliVideo).where(BilibiliVideo.video_id == video_id))
video_detail = result.scalar_one_or_none()
if not video_detail:
content_item["add_ts"] = utils.get_current_timestamp()
content_item["last_modify_ts"] = utils.get_current_timestamp()
new_content = BilibiliVideo(**content_item)
session.add(new_content)
else:
content_item["last_modify_ts"] = utils.get_current_timestamp()
for key, value in content_item.items():
setattr(video_detail, key, value)
await session.commit()
async def store_comment(self, comment_item: Dict):
comment_id = int(comment_item.get("comment_id"))
comment_item["comment_id"] = comment_id
comment_item["video_id"] = int(comment_item.get("video_id", 0) or 0)
comment_item["create_time"] = int(comment_item.get("create_time", 0) or 0)
comment_item["like_count"] = str(comment_item.get("like_count", "0"))
comment_item["sub_comment_count"] = str(comment_item.get("sub_comment_count", "0"))
comment_item["parent_comment_id"] = str(comment_item.get("parent_comment_id", "0"))
async with get_session() as session:
result = await session.execute(select(BilibiliVideoComment).where(BilibiliVideoComment.comment_id == comment_id))
comment_detail = result.scalar_one_or_none()
if not comment_detail:
comment_item["add_ts"] = utils.get_current_timestamp()
comment_item["last_modify_ts"] = utils.get_current_timestamp()
new_comment = BilibiliVideoComment(**comment_item)
session.add(new_comment)
else:
comment_item["last_modify_ts"] = utils.get_current_timestamp()
for key, value in comment_item.items():
setattr(comment_detail, key, value)
await session.commit()
async def store_creator(self, creator: Dict):
creator_id = int(creator.get("user_id"))
creator["user_id"] = creator_id
creator["total_fans"] = int(creator.get("total_fans", 0) or 0)
creator["total_liked"] = int(creator.get("total_liked", 0) or 0)
creator["user_rank"] = int(creator.get("user_rank", 0) or 0)
creator["is_official"] = int(creator.get("is_official", 0) or 0)
async with get_session() as session:
result = await session.execute(select(BilibiliUpInfo).where(BilibiliUpInfo.user_id == creator_id))
creator_detail = result.scalar_one_or_none()
if not creator_detail:
creator["add_ts"] = utils.get_current_timestamp()
creator["last_modify_ts"] = utils.get_current_timestamp()
new_creator = BilibiliUpInfo(**creator)
session.add(new_creator)
else:
creator["last_modify_ts"] = utils.get_current_timestamp()
for key, value in creator.items():
setattr(creator_detail, key, value)
await session.commit()
async def store_contact(self, contact_item: Dict):
up_id = int(contact_item.get("up_id"))
fan_id = int(contact_item.get("fan_id"))
contact_item["up_id"] = up_id
contact_item["fan_id"] = fan_id
async with get_session() as session:
result = await session.execute(
select(BilibiliContactInfo).where(BilibiliContactInfo.up_id == up_id, BilibiliContactInfo.fan_id == fan_id)
)
contact_detail = result.scalar_one_or_none()
if not contact_detail:
contact_item["add_ts"] = utils.get_current_timestamp()
contact_item["last_modify_ts"] = utils.get_current_timestamp()
new_contact = BilibiliContactInfo(**contact_item)
session.add(new_contact)
else:
contact_item["last_modify_ts"] = utils.get_current_timestamp()
for key, value in contact_item.items():
setattr(contact_detail, key, value)
await session.commit()
async def store_dynamic(self, dynamic_item):
dynamic_id = int(dynamic_item.get("dynamic_id"))
dynamic_item["dynamic_id"] = dynamic_id
async with get_session() as session:
result = await session.execute(select(BilibiliUpDynamic).where(BilibiliUpDynamic.dynamic_id == dynamic_id))
dynamic_detail = result.scalar_one_or_none()
if not dynamic_detail:
dynamic_item["add_ts"] = utils.get_current_timestamp()
dynamic_item["last_modify_ts"] = utils.get_current_timestamp()
new_dynamic = BilibiliUpDynamic(**dynamic_item)
session.add(new_dynamic)
else:
dynamic_item["last_modify_ts"] = utils.get_current_timestamp()
for key, value in dynamic_item.items():
setattr(dynamic_detail, key, value)
await session.commit()
class BiliJsonStoreImplement(AbstractStore):
def __init__(self):
self.file_writer = AsyncFileWriter(
crawler_type=crawler_type_var.get(),
platform="bili"
)
async def store_content(self, content_item: Dict):
await self.file_writer.write_single_item_to_json(
item=content_item,
item_type="contents"
)
async def store_comment(self, comment_item: Dict):
await self.file_writer.write_single_item_to_json(
item=comment_item,
item_type="comments"
)
async def store_creator(self, creator: Dict):
await self.file_writer.write_single_item_to_json(
item=creator,
item_type="creators"
)
async def store_contact(self, contact_item: Dict):
await self.file_writer.write_single_item_to_json(
item=contact_item,
item_type="contacts"
)
async def store_dynamic(self, dynamic_item: Dict):
await self.file_writer.write_single_item_to_json(
item=dynamic_item,
item_type="dynamics"
)
class BiliJsonlStoreImplement(AbstractStore):
def __init__(self):
self.file_writer = AsyncFileWriter(
crawler_type=crawler_type_var.get(),
platform="bili"
)
async def store_content(self, content_item: Dict):
await self.file_writer.write_to_jsonl(
item=content_item,
item_type="contents"
)
async def store_comment(self, comment_item: Dict):
await self.file_writer.write_to_jsonl(
item=comment_item,
item_type="comments"
)
async def store_creator(self, creator: Dict):
await self.file_writer.write_to_jsonl(
item=creator,
item_type="creators"
)
async def store_contact(self, contact_item: Dict):
await self.file_writer.write_to_jsonl(
item=contact_item,
item_type="contacts"
)
async def store_dynamic(self, dynamic_item: Dict):
await self.file_writer.write_to_jsonl(
item=dynamic_item,
item_type="dynamics"
)
class BiliSqliteStoreImplement(BiliDbStoreImplement):
pass
class BiliMongoStoreImplement(AbstractStore):
def __init__(self):
self.mongo_store = MongoDBStoreBase(collection_prefix="bilibili")
async def store_content(self, content_item: Dict):
video_id = content_item.get("video_id")
if not video_id:
return
await self.mongo_store.save_or_update(
collection_suffix="contents",
query={"video_id": video_id},
data=content_item
)
utils.logger.info(f"[BiliMongoStoreImplement.store_content] Saved video {video_id} to MongoDB")
async def store_comment(self, comment_item: Dict):
comment_id = comment_item.get("comment_id")
if not comment_id:
return
await self.mongo_store.save_or_update(
collection_suffix="comments",
query={"comment_id": comment_id},
data=comment_item
)
utils.logger.info(f"[BiliMongoStoreImplement.store_comment] Saved comment {comment_id} to MongoDB")
async def store_creator(self, creator_item: Dict):
user_id = creator_item.get("user_id")
if not user_id:
return
await self.mongo_store.save_or_update(
collection_suffix="creators",
query={"user_id": user_id},
data=creator_item
)
utils.logger.info(f"[BiliMongoStoreImplement.store_creator] Saved creator {user_id} to MongoDB")
class BiliExcelStoreImplement:
def __new__(cls, *args, **kwargs):
from store.excel_store_base import ExcelStoreBase
return ExcelStoreBase.get_instance(
platform="bilibili",
crawler_type=crawler_type_var.get()
) | --- +++ @@ -51,30 +51,70 @@ )
async def store_content(self, content_item: Dict):
+ """
+ content CSV storage implementation
+ Args:
+ content_item:
+
+ Returns:
+
+ """
await self.file_writer.write_to_csv(
item=content_item,
item_type="videos"
)
async def store_comment(self, comment_item: Dict):
+ """
+ comment CSV storage implementation
+ Args:
+ comment_item:
+
+ Returns:
+
+ """
await self.file_writer.write_to_csv(
item=comment_item,
item_type="comments"
)
async def store_creator(self, creator: Dict):
+ """
+ creator CSV storage implementation
+ Args:
+ creator:
+
+ Returns:
+
+ """
await self.file_writer.write_to_csv(
item=creator,
item_type="creators"
)
async def store_contact(self, contact_item: Dict):
+ """
+ creator contact CSV storage implementation
+ Args:
+ contact_item: creator's contact item dict
+
+ Returns:
+
+ """
await self.file_writer.write_to_csv(
item=contact_item,
item_type="contacts"
)
async def store_dynamic(self, dynamic_item: Dict):
+ """
+ creator dynamic CSV storage implementation
+ Args:
+ dynamic_item: creator's contact item dict
+
+ Returns:
+
+ """
await self.file_writer.write_to_csv(
item=dynamic_item,
item_type="dynamics"
@@ -83,6 +123,11 @@
class BiliDbStoreImplement(AbstractStore):
async def store_content(self, content_item: Dict):
+ """
+ Bilibili content DB storage implementation
+ Args:
+ content_item: content item dict
+ """
video_id = int(content_item.get("video_id"))
content_item["video_id"] = video_id
content_item["user_id"] = int(content_item.get("user_id", 0) or 0)
@@ -105,6 +150,11 @@ await session.commit()
async def store_comment(self, comment_item: Dict):
+ """
+ Bilibili comment DB storage implementation
+ Args:
+ comment_item: comment item dict
+ """
comment_id = int(comment_item.get("comment_id"))
comment_item["comment_id"] = comment_id
comment_item["video_id"] = int(comment_item.get("video_id", 0) or 0)
@@ -129,6 +179,11 @@ await session.commit()
async def store_creator(self, creator: Dict):
+ """
+ Bilibili creator DB storage implementation
+ Args:
+ creator: creator item dict
+ """
creator_id = int(creator.get("user_id"))
creator["user_id"] = creator_id
creator["total_fans"] = int(creator.get("total_fans", 0) or 0)
@@ -152,6 +207,11 @@ await session.commit()
async def store_contact(self, contact_item: Dict):
+ """
+ Bilibili contact DB storage implementation
+ Args:
+ contact_item: contact item dict
+ """
up_id = int(contact_item.get("up_id"))
fan_id = int(contact_item.get("fan_id"))
contact_item["up_id"] = up_id
@@ -175,6 +235,11 @@ await session.commit()
async def store_dynamic(self, dynamic_item):
+ """
+ Bilibili dynamic DB storage implementation
+ Args:
+ dynamic_item: dynamic item dict
+ """
dynamic_id = int(dynamic_item.get("dynamic_id"))
dynamic_item["dynamic_id"] = dynamic_id
@@ -202,30 +267,70 @@ )
async def store_content(self, content_item: Dict):
+ """
+ content JSON storage implementation
+ Args:
+ content_item:
+
+ Returns:
+
+ """
await self.file_writer.write_single_item_to_json(
item=content_item,
item_type="contents"
)
async def store_comment(self, comment_item: Dict):
+ """
+ comment JSON storage implementation
+ Args:
+ comment_item:
+
+ Returns:
+
+ """
await self.file_writer.write_single_item_to_json(
item=comment_item,
item_type="comments"
)
async def store_creator(self, creator: Dict):
+ """
+ creator JSON storage implementation
+ Args:
+ creator:
+
+ Returns:
+
+ """
await self.file_writer.write_single_item_to_json(
item=creator,
item_type="creators"
)
async def store_contact(self, contact_item: Dict):
+ """
+ creator contact JSON storage implementation
+ Args:
+ contact_item: creator's contact item dict
+
+ Returns:
+
+ """
await self.file_writer.write_single_item_to_json(
item=contact_item,
item_type="contacts"
)
async def store_dynamic(self, dynamic_item: Dict):
+ """
+ creator dynamic JSON storage implementation
+ Args:
+ dynamic_item: creator's contact item dict
+
+ Returns:
+
+ """
await self.file_writer.write_single_item_to_json(
item=dynamic_item,
item_type="dynamics"
@@ -276,11 +381,17 @@
class BiliMongoStoreImplement(AbstractStore):
+ """Bilibili MongoDB storage implementation"""
def __init__(self):
self.mongo_store = MongoDBStoreBase(collection_prefix="bilibili")
async def store_content(self, content_item: Dict):
+ """
+ Store video content to MongoDB
+ Args:
+ content_item: Video content data
+ """
video_id = content_item.get("video_id")
if not video_id:
return
@@ -293,6 +404,11 @@ utils.logger.info(f"[BiliMongoStoreImplement.store_content] Saved video {video_id} to MongoDB")
async def store_comment(self, comment_item: Dict):
+ """
+ Store comment to MongoDB
+ Args:
+ comment_item: Comment data
+ """
comment_id = comment_item.get("comment_id")
if not comment_id:
return
@@ -305,6 +421,11 @@ utils.logger.info(f"[BiliMongoStoreImplement.store_comment] Saved comment {comment_id} to MongoDB")
async def store_creator(self, creator_item: Dict):
+ """
+ Store UP master information to MongoDB
+ Args:
+ creator_item: UP master data
+ """
user_id = creator_item.get("user_id")
if not user_id:
return
@@ -318,10 +439,11 @@
class BiliExcelStoreImplement:
+ """Bilibili Excel storage implementation - Global singleton"""
def __new__(cls, *args, **kwargs):
from store.excel_store_base import ExcelStoreBase
return ExcelStoreBase.get_instance(
platform="bilibili",
crawler_type=crawler_type_var.get()
- )+ )
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/store/bilibili/_store_impl.py |
Add docstrings to existing functions | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/store/excel_store_base.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
import threading
from datetime import datetime
from typing import Dict, List, Any
from pathlib import Path
try:
import openpyxl
from openpyxl.styles import Font, PatternFill, Alignment, Border, Side
from openpyxl.utils import get_column_letter
EXCEL_AVAILABLE = True
except ImportError:
EXCEL_AVAILABLE = False
from base.base_crawler import AbstractStore
from tools import utils
import config
class ExcelStoreBase(AbstractStore):
# Class-level singleton management
_instances: Dict[str, "ExcelStoreBase"] = {}
_lock = threading.Lock()
@classmethod
def get_instance(cls, platform: str, crawler_type: str) -> "ExcelStoreBase":
key = f"{platform}_{crawler_type}"
with cls._lock:
if key not in cls._instances:
cls._instances[key] = cls(platform, crawler_type)
return cls._instances[key]
@classmethod
def flush_all(cls):
with cls._lock:
for key, instance in cls._instances.items():
try:
instance.flush()
utils.logger.info(f"[ExcelStoreBase] Flushed instance: {key}")
except Exception as e:
utils.logger.error(f"[ExcelStoreBase] Error flushing {key}: {e}")
cls._instances.clear()
def __init__(self, platform: str, crawler_type: str = "search"):
if not EXCEL_AVAILABLE:
raise ImportError(
"openpyxl is required for Excel export. "
"Install it with: pip install openpyxl"
)
super().__init__()
self.platform = platform
self.crawler_type = crawler_type
# Create data directory
if config.SAVE_DATA_PATH:
self.data_dir = Path(config.SAVE_DATA_PATH) / platform
else:
self.data_dir = Path("data") / platform
self.data_dir.mkdir(parents=True, exist_ok=True)
# Initialize workbook
self.workbook = openpyxl.Workbook()
self.workbook.remove(self.workbook.active) # Remove default sheet
# Create sheets
self.contents_sheet = self.workbook.create_sheet("Contents")
self.comments_sheet = self.workbook.create_sheet("Comments")
self.creators_sheet = self.workbook.create_sheet("Creators")
# Track if headers are written
self.contents_headers_written = False
self.comments_headers_written = False
self.creators_headers_written = False
self.contacts_headers_written = False
self.dynamics_headers_written = False
# Optional sheets for platforms that need them (e.g., Bilibili)
self.contacts_sheet = None
self.dynamics_sheet = None
# Generate filename
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
self.filename = self.data_dir / f"{platform}_{crawler_type}_{timestamp}.xlsx"
utils.logger.info(f"[ExcelStoreBase] Initialized Excel export to: {self.filename}")
def _apply_header_style(self, sheet, row_num: int = 1):
header_fill = PatternFill(start_color="366092", end_color="366092", fill_type="solid")
header_font = Font(bold=True, color="FFFFFF", size=11)
header_alignment = Alignment(horizontal="center", vertical="center", wrap_text=True)
border = Border(
left=Side(style='thin'),
right=Side(style='thin'),
top=Side(style='thin'),
bottom=Side(style='thin')
)
for cell in sheet[row_num]:
cell.fill = header_fill
cell.font = header_font
cell.alignment = header_alignment
cell.border = border
def _auto_adjust_column_width(self, sheet):
for column in sheet.columns:
max_length = 0
column_letter = get_column_letter(column[0].column)
for cell in column:
try:
if cell.value:
max_length = max(max_length, len(str(cell.value)))
except (TypeError, AttributeError):
pass
# Set width with min/max constraints
adjusted_width = min(max(max_length + 2, 10), 50)
sheet.column_dimensions[column_letter].width = adjusted_width
def _write_headers(self, sheet, headers: List[str]):
for col_num, header in enumerate(headers, 1):
sheet.cell(row=1, column=col_num, value=header)
self._apply_header_style(sheet)
def _write_row(self, sheet, data: Dict[str, Any], headers: List[str]):
row_num = sheet.max_row + 1
for col_num, header in enumerate(headers, 1):
value = data.get(header, "")
# Handle different data types
if isinstance(value, (list, dict)):
value = str(value)
elif value is None:
value = ""
cell = sheet.cell(row=row_num, column=col_num, value=value)
# Apply basic formatting
cell.alignment = Alignment(vertical="top", wrap_text=True)
cell.border = Border(
left=Side(style='thin'),
right=Side(style='thin'),
top=Side(style='thin'),
bottom=Side(style='thin')
)
async def store_content(self, content_item: Dict):
# Define headers (customize based on platform)
headers = list(content_item.keys())
# Write headers if first time
if not self.contents_headers_written:
self._write_headers(self.contents_sheet, headers)
self.contents_headers_written = True
# Write data row
self._write_row(self.contents_sheet, content_item, headers)
# Get ID from various possible field names
content_id = content_item.get('note_id') or content_item.get('aweme_id') or content_item.get('video_id') or content_item.get('content_id') or 'N/A'
utils.logger.info(f"[ExcelStoreBase] Stored content to Excel: {content_id}")
async def store_comment(self, comment_item: Dict):
# Define headers
headers = list(comment_item.keys())
# Write headers if first time
if not self.comments_headers_written:
self._write_headers(self.comments_sheet, headers)
self.comments_headers_written = True
# Write data row
self._write_row(self.comments_sheet, comment_item, headers)
utils.logger.info(f"[ExcelStoreBase] Stored comment to Excel: {comment_item.get('comment_id', 'N/A')}")
async def store_creator(self, creator: Dict):
# Define headers
headers = list(creator.keys())
# Write headers if first time
if not self.creators_headers_written:
self._write_headers(self.creators_sheet, headers)
self.creators_headers_written = True
# Write data row
self._write_row(self.creators_sheet, creator, headers)
utils.logger.info(f"[ExcelStoreBase] Stored creator to Excel: {creator.get('user_id', 'N/A')}")
async def store_contact(self, contact_item: Dict):
# Create contacts sheet if not exists
if self.contacts_sheet is None:
self.contacts_sheet = self.workbook.create_sheet("Contacts")
# Define headers
headers = list(contact_item.keys())
# Write headers if first time
if not self.contacts_headers_written:
self._write_headers(self.contacts_sheet, headers)
self.contacts_headers_written = True
# Write data row
self._write_row(self.contacts_sheet, contact_item, headers)
utils.logger.info(f"[ExcelStoreBase] Stored contact to Excel: up_id={contact_item.get('up_id', 'N/A')}, fan_id={contact_item.get('fan_id', 'N/A')}")
async def store_dynamic(self, dynamic_item: Dict):
# Create dynamics sheet if not exists
if self.dynamics_sheet is None:
self.dynamics_sheet = self.workbook.create_sheet("Dynamics")
# Define headers
headers = list(dynamic_item.keys())
# Write headers if first time
if not self.dynamics_headers_written:
self._write_headers(self.dynamics_sheet, headers)
self.dynamics_headers_written = True
# Write data row
self._write_row(self.dynamics_sheet, dynamic_item, headers)
utils.logger.info(f"[ExcelStoreBase] Stored dynamic to Excel: {dynamic_item.get('dynamic_id', 'N/A')}")
def flush(self):
try:
# Auto-adjust column widths for all sheets
self._auto_adjust_column_width(self.contents_sheet)
self._auto_adjust_column_width(self.comments_sheet)
self._auto_adjust_column_width(self.creators_sheet)
if self.contacts_sheet is not None:
self._auto_adjust_column_width(self.contacts_sheet)
if self.dynamics_sheet is not None:
self._auto_adjust_column_width(self.dynamics_sheet)
# Remove empty sheets (only header row)
if self.contents_sheet.max_row == 1:
self.workbook.remove(self.contents_sheet)
if self.comments_sheet.max_row == 1:
self.workbook.remove(self.comments_sheet)
if self.creators_sheet.max_row == 1:
self.workbook.remove(self.creators_sheet)
if self.contacts_sheet is not None and self.contacts_sheet.max_row == 1:
self.workbook.remove(self.contacts_sheet)
if self.dynamics_sheet is not None and self.dynamics_sheet.max_row == 1:
self.workbook.remove(self.dynamics_sheet)
# Check if there are any sheets left
if len(self.workbook.sheetnames) == 0:
utils.logger.info(f"[ExcelStoreBase] No data to save, skipping file creation: {self.filename}")
return
# Save workbook
self.workbook.save(self.filename)
utils.logger.info(f"[ExcelStoreBase] Excel file saved successfully: {self.filename}")
except Exception as e:
utils.logger.error(f"[ExcelStoreBase] Error saving Excel file: {e}")
raise | --- +++ @@ -26,6 +26,10 @@ # 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
+"""
+Excel Store Base Implementation
+Provides Excel export functionality for crawled data with formatted sheets
+"""
import threading
from datetime import datetime
@@ -46,6 +50,11 @@
class ExcelStoreBase(AbstractStore):
+ """
+ Base class for Excel storage implementation
+ Provides formatted Excel export with multiple sheets for contents, comments, and creators
+ Uses singleton pattern to maintain state across multiple store calls
+ """
# Class-level singleton management
_instances: Dict[str, "ExcelStoreBase"] = {}
@@ -53,6 +62,16 @@
@classmethod
def get_instance(cls, platform: str, crawler_type: str) -> "ExcelStoreBase":
+ """
+ Get or create a singleton instance for the given platform and crawler type
+
+ Args:
+ platform: Platform name (xhs, dy, ks, etc.)
+ crawler_type: Type of crawler (search, detail, creator)
+
+ Returns:
+ ExcelStoreBase instance
+ """
key = f"{platform}_{crawler_type}"
with cls._lock:
if key not in cls._instances:
@@ -61,6 +80,10 @@
@classmethod
def flush_all(cls):
+ """
+ Flush all Excel store instances and save to files
+ Should be called at the end of crawler execution
+ """
with cls._lock:
for key, instance in cls._instances.items():
try:
@@ -71,6 +94,13 @@ cls._instances.clear()
def __init__(self, platform: str, crawler_type: str = "search"):
+ """
+ Initialize Excel store
+
+ Args:
+ platform: Platform name (xhs, dy, ks, etc.)
+ crawler_type: Type of crawler (search, detail, creator)
+ """
if not EXCEL_AVAILABLE:
raise ImportError(
"openpyxl is required for Excel export. "
@@ -115,6 +145,13 @@ utils.logger.info(f"[ExcelStoreBase] Initialized Excel export to: {self.filename}")
def _apply_header_style(self, sheet, row_num: int = 1):
+ """
+ Apply formatting to header row
+
+ Args:
+ sheet: Worksheet object
+ row_num: Row number for headers (default: 1)
+ """
header_fill = PatternFill(start_color="366092", end_color="366092", fill_type="solid")
header_font = Font(bold=True, color="FFFFFF", size=11)
header_alignment = Alignment(horizontal="center", vertical="center", wrap_text=True)
@@ -132,6 +169,12 @@ cell.border = border
def _auto_adjust_column_width(self, sheet):
+ """
+ Auto-adjust column widths based on content
+
+ Args:
+ sheet: Worksheet object
+ """
for column in sheet.columns:
max_length = 0
column_letter = get_column_letter(column[0].column)
@@ -148,12 +191,27 @@ sheet.column_dimensions[column_letter].width = adjusted_width
def _write_headers(self, sheet, headers: List[str]):
+ """
+ Write headers to sheet
+
+ Args:
+ sheet: Worksheet object
+ headers: List of header names
+ """
for col_num, header in enumerate(headers, 1):
sheet.cell(row=1, column=col_num, value=header)
self._apply_header_style(sheet)
def _write_row(self, sheet, data: Dict[str, Any], headers: List[str]):
+ """
+ Write data row to sheet
+
+ Args:
+ sheet: Worksheet object
+ data: Data dictionary
+ headers: List of header names (defines column order)
+ """
row_num = sheet.max_row + 1
for col_num, header in enumerate(headers, 1):
@@ -177,6 +235,12 @@ )
async def store_content(self, content_item: Dict):
+ """
+ Store content data to Excel
+
+ Args:
+ content_item: Content data dictionary
+ """
# Define headers (customize based on platform)
headers = list(content_item.keys())
@@ -193,6 +257,12 @@ utils.logger.info(f"[ExcelStoreBase] Stored content to Excel: {content_id}")
async def store_comment(self, comment_item: Dict):
+ """
+ Store comment data to Excel
+
+ Args:
+ comment_item: Comment data dictionary
+ """
# Define headers
headers = list(comment_item.keys())
@@ -207,6 +277,12 @@ utils.logger.info(f"[ExcelStoreBase] Stored comment to Excel: {comment_item.get('comment_id', 'N/A')}")
async def store_creator(self, creator: Dict):
+ """
+ Store creator data to Excel
+
+ Args:
+ creator: Creator data dictionary
+ """
# Define headers
headers = list(creator.keys())
@@ -221,6 +297,12 @@ utils.logger.info(f"[ExcelStoreBase] Stored creator to Excel: {creator.get('user_id', 'N/A')}")
async def store_contact(self, contact_item: Dict):
+ """
+ Store contact data to Excel (for platforms like Bilibili)
+
+ Args:
+ contact_item: Contact data dictionary
+ """
# Create contacts sheet if not exists
if self.contacts_sheet is None:
self.contacts_sheet = self.workbook.create_sheet("Contacts")
@@ -239,6 +321,12 @@ utils.logger.info(f"[ExcelStoreBase] Stored contact to Excel: up_id={contact_item.get('up_id', 'N/A')}, fan_id={contact_item.get('fan_id', 'N/A')}")
async def store_dynamic(self, dynamic_item: Dict):
+ """
+ Store dynamic data to Excel (for platforms like Bilibili)
+
+ Args:
+ dynamic_item: Dynamic data dictionary
+ """
# Create dynamics sheet if not exists
if self.dynamics_sheet is None:
self.dynamics_sheet = self.workbook.create_sheet("Dynamics")
@@ -257,6 +345,9 @@ utils.logger.info(f"[ExcelStoreBase] Stored dynamic to Excel: {dynamic_item.get('dynamic_id', 'N/A')}")
def flush(self):
+ """
+ Save workbook to file
+ """
try:
# Auto-adjust column widths for all sheets
self._auto_adjust_column_width(self.contents_sheet)
@@ -290,4 +381,4 @@
except Exception as e:
utils.logger.error(f"[ExcelStoreBase] Error saving Excel file: {e}")
- raise+ raise
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/store/excel_store_base.py |
Add docstrings to existing functions | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/store/douyin/douyin_store_media.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
import pathlib
from typing import Dict
import aiofiles
from base.base_crawler import AbstractStoreImage, AbstractStoreVideo
from tools import utils
import config
class DouYinImage(AbstractStoreImage):
def __init__(self):
if config.SAVE_DATA_PATH:
self.image_store_path = f"{config.SAVE_DATA_PATH}/douyin/images"
else:
self.image_store_path = "data/douyin/images"
async def store_image(self, image_content_item: Dict):
await self.save_image(image_content_item.get("aweme_id"), image_content_item.get("pic_content"), image_content_item.get("extension_file_name"))
def make_save_file_name(self, aweme_id: str, extension_file_name: str) -> str:
return f"{self.image_store_path}/{aweme_id}/{extension_file_name}"
async def save_image(self, aweme_id: str, pic_content: str, extension_file_name):
pathlib.Path(self.image_store_path + "/" + aweme_id).mkdir(parents=True, exist_ok=True)
save_file_name = self.make_save_file_name(aweme_id, extension_file_name)
async with aiofiles.open(save_file_name, 'wb') as f:
await f.write(pic_content)
utils.logger.info(f"[DouYinImageStoreImplement.save_image] save image {save_file_name} success ...")
class DouYinVideo(AbstractStoreVideo):
def __init__(self):
if config.SAVE_DATA_PATH:
self.video_store_path = f"{config.SAVE_DATA_PATH}/douyin/videos"
else:
self.video_store_path = "data/douyin/videos"
async def store_video(self, video_content_item: Dict):
await self.save_video(video_content_item.get("aweme_id"), video_content_item.get("video_content"), video_content_item.get("extension_file_name"))
def make_save_file_name(self, aweme_id: str, extension_file_name: str) -> str:
return f"{self.video_store_path}/{aweme_id}/{extension_file_name}"
async def save_video(self, aweme_id: str, video_content: str, extension_file_name):
pathlib.Path(self.video_store_path + "/" + aweme_id).mkdir(parents=True, exist_ok=True)
save_file_name = self.make_save_file_name(aweme_id, extension_file_name)
async with aiofiles.open(save_file_name, 'wb') as f:
await f.write(video_content)
utils.logger.info(f"[DouYinVideoStoreImplement.save_video] save video {save_file_name} success ...") | --- +++ @@ -35,12 +35,42 @@ self.image_store_path = "data/douyin/images"
async def store_image(self, image_content_item: Dict):
+ """
+ store content
+
+ Args:
+ image_content_item:
+
+ Returns:
+
+ """
await self.save_image(image_content_item.get("aweme_id"), image_content_item.get("pic_content"), image_content_item.get("extension_file_name"))
def make_save_file_name(self, aweme_id: str, extension_file_name: str) -> str:
+ """
+ make save file name by store type
+
+ Args:
+ aweme_id: aweme id
+ extension_file_name: image filename with extension
+
+ Returns:
+
+ """
return f"{self.image_store_path}/{aweme_id}/{extension_file_name}"
async def save_image(self, aweme_id: str, pic_content: str, extension_file_name):
+ """
+ save image to local
+
+ Args:
+ aweme_id: aweme id
+ pic_content: image content
+ extension_file_name: image filename with extension
+
+ Returns:
+
+ """
pathlib.Path(self.image_store_path + "/" + aweme_id).mkdir(parents=True, exist_ok=True)
save_file_name = self.make_save_file_name(aweme_id, extension_file_name)
async with aiofiles.open(save_file_name, 'wb') as f:
@@ -56,14 +86,44 @@ self.video_store_path = "data/douyin/videos"
async def store_video(self, video_content_item: Dict):
+ """
+ store content
+
+ Args:
+ video_content_item:
+
+ Returns:
+
+ """
await self.save_video(video_content_item.get("aweme_id"), video_content_item.get("video_content"), video_content_item.get("extension_file_name"))
def make_save_file_name(self, aweme_id: str, extension_file_name: str) -> str:
+ """
+ make save file name by store type
+
+ Args:
+ aweme_id: aweme id
+ extension_file_name: video filename with extension
+
+ Returns:
+
+ """
return f"{self.video_store_path}/{aweme_id}/{extension_file_name}"
async def save_video(self, aweme_id: str, video_content: str, extension_file_name):
+ """
+ save video to local
+
+ Args:
+ aweme_id: aweme id
+ video_content: video content
+ extension_file_name: video filename with extension
+
+ Returns:
+
+ """
pathlib.Path(self.video_store_path + "/" + aweme_id).mkdir(parents=True, exist_ok=True)
save_file_name = self.make_save_file_name(aweme_id, extension_file_name)
async with aiofiles.open(save_file_name, 'wb') as f:
await f.write(video_content)
- utils.logger.info(f"[DouYinVideoStoreImplement.save_video] save video {save_file_name} success ...")+ utils.logger.info(f"[DouYinVideoStoreImplement.save_video] save video {save_file_name} success ...")
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/store/douyin/douyin_store_media.py |
Can you add docstrings to this Python file? | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/tools/slider_util.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# -*- coding: utf-8 -*-
# @Author : relakkes@gmail.com
# @Time : 2023/12/2 12:55
# @Desc : Slider verification utility package
import os
from typing import List
from urllib.parse import urlparse
import cv2
import httpx
import numpy as np
class Slide:
def __init__(self, gap, bg, gap_size=None, bg_size=None, out=None):
self.img_dir = os.path.join(os.getcwd(), 'temp_image')
if not os.path.exists(self.img_dir):
os.makedirs(self.img_dir)
bg_resize = bg_size if bg_size else (340, 212)
gap_size = gap_size if gap_size else (68, 68)
self.bg = self.check_is_img_path(bg, 'bg', resize=bg_resize)
self.gap = self.check_is_img_path(gap, 'gap', resize=gap_size)
self.out = out if out else os.path.join(self.img_dir, 'out.jpg')
@staticmethod
def check_is_img_path(img, img_type, resize):
if img.startswith('http'):
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;"
"q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh-CN,zh;q=0.9,en-GB;q=0.8,en;q=0.7,ja;q=0.6",
"AbstractCache-Control": "max-age=0",
"Connection": "keep-alive",
"Host": urlparse(img).hostname,
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/91.0.4472.164 Safari/537.36",
}
img_res = httpx.get(img, headers=headers)
if img_res.status_code == 200:
img_path = f'./temp_image/{img_type}.jpg'
image = np.asarray(bytearray(img_res.content), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
if resize:
image = cv2.resize(image, dsize=resize)
cv2.imwrite(img_path, image)
return img_path
else:
raise Exception(f"Failed to save {img_type} image")
else:
return img
@staticmethod
def clear_white(img):
img = cv2.imread(img)
rows, cols, channel = img.shape
min_x = 255
min_y = 255
max_x = 0
max_y = 0
for x in range(1, rows):
for y in range(1, cols):
t = set(img[x, y])
if len(t) >= 2:
if x <= min_x:
min_x = x
elif x >= max_x:
max_x = x
if y <= min_y:
min_y = y
elif y >= max_y:
max_y = y
img1 = img[min_x:max_x, min_y: max_y]
return img1
def template_match(self, tpl, target):
th, tw = tpl.shape[:2]
result = cv2.matchTemplate(target, tpl, cv2.TM_CCOEFF_NORMED)
# Find min and max value positions in matrix
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
tl = max_loc
br = (tl[0] + tw, tl[1] + th)
# Draw rectangle border to mark the matched area
# target: target image
# tl: rectangle top-left corner
# br: rectangle width and height
# (0,0,255): rectangle border color
# 1: rectangle border size
cv2.rectangle(target, tl, br, (0, 0, 255), 2)
cv2.imwrite(self.out, target)
return tl[0]
@staticmethod
def image_edge_detection(img):
edges = cv2.Canny(img, 100, 200)
return edges
def discern(self):
img1 = self.clear_white(self.gap)
img1 = cv2.cvtColor(img1, cv2.COLOR_RGB2GRAY)
slide = self.image_edge_detection(img1)
back = cv2.imread(self.bg, cv2.COLOR_RGB2GRAY)
back = self.image_edge_detection(back)
slide_pic = cv2.cvtColor(slide, cv2.COLOR_GRAY2RGB)
back_pic = cv2.cvtColor(back, cv2.COLOR_GRAY2RGB)
x = self.template_match(slide_pic, back_pic)
# Output x-coordinate, i.e., slider position on image
return x
def get_track_simple(distance) -> List[int]:
# Some detection checks movement speed - constant speed will be detected, so use gradual acceleration
# distance is the total distance to move
# Movement track
track: List[int] = []
# Current displacement
current = 0
# Deceleration threshold
mid = distance * 4 / 5
# Time interval
t = 0.2
# Initial velocity
v = 1
while current < distance:
if current < mid:
# Acceleration = 4
a = 4
else:
# Acceleration = -3
a = -3
v0 = v
# Current velocity
v = v0 + a * t # type: ignore
# Movement distance
move = v0 * t + 1 / 2 * a * t * t
# Current displacement
current += move # type: ignore
# Add to track
track.append(round(move))
return track
def get_tracks(distance: int, level: str = "easy") -> List[int]:
if level == "easy":
return get_track_simple(distance)
else:
from . import easing
_, tricks = easing.get_tracks(distance, seconds=2, ease_func="ease_out_expo")
return tricks | --- +++ @@ -32,7 +32,15 @@
class Slide:
+ """
+ copy from https://blog.csdn.net/weixin_43582101 thanks for author
+ update: relakkes
+ """
def __init__(self, gap, bg, gap_size=None, bg_size=None, out=None):
+ """
+ :param gap: Gap image path or url
+ :param bg: Background image with gap path or url
+ """
self.img_dir = os.path.join(os.getcwd(), 'temp_image')
if not os.path.exists(self.img_dir):
os.makedirs(self.img_dir)
@@ -74,6 +82,7 @@
@staticmethod
def clear_white(img):
+ """Clear whitespace from image, mainly clearing slider whitespace"""
img = cv2.imread(img)
rows, cols, channel = img.shape
min_x = 255
@@ -172,4 +181,4 @@ else:
from . import easing
_, tricks = easing.get_tracks(distance, seconds=2, ease_func="ease_out_expo")
- return tricks+ return tricks
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/tools/slider_util.py |
Add docstrings to existing functions | # -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/recv_sms.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
import re
from typing import List
import uvicorn
from fastapi import FastAPI, HTTPException, status
from pydantic import BaseModel
import config
from cache.abs_cache import AbstractCache
from cache.cache_factory import CacheFactory
from tools import utils
app = FastAPI()
cache_client : AbstractCache = CacheFactory.create_cache(cache_type=config.CACHE_TYPE_MEMORY)
class SmsNotification(BaseModel):
platform: str
current_number: str
from_number: str
sms_content: str
timestamp: str
def extract_verification_code(message: str) -> str:
pattern = re.compile(r'\b[0-9]{6}\b')
codes: List[str] = pattern.findall(message)
return codes[0] if codes else ""
@app.post("/")
def receive_sms_notification(sms: SmsNotification):
utils.logger.info(f"Received SMS notification: {sms.platform}, {sms.current_number}")
sms_code = extract_verification_code(sms.sms_content)
if sms_code:
# Save the verification code in Redis and set the expiration time to 3 minutes.
key = f"{sms.platform}_{sms.current_number}"
cache_client.set(key, sms_code, expire_time=60 * 3)
return {"status": "ok"}
@app.get("/", status_code=status.HTTP_404_NOT_FOUND)
async def not_found():
raise HTTPException(status_code=404, detail="Not Found")
if __name__ == '__main__':
uvicorn.run(app, port=8000, host='0.0.0.0') | --- +++ @@ -44,6 +44,9 @@
def extract_verification_code(message: str) -> str:
+ """
+ Extract verification code of 6 digits from the SMS.
+ """
pattern = re.compile(r'\b[0-9]{6}\b')
codes: List[str] = pattern.findall(message)
return codes[0] if codes else ""
@@ -51,6 +54,21 @@
@app.post("/")
def receive_sms_notification(sms: SmsNotification):
+ """
+ Receive SMS notification and send it to Redis.
+ Args:
+ sms:
+ {
+ "platform": "xhs",
+ "from_number": "1069421xxx134",
+ "sms_content": "【小红书】您的验证码是: 171959, 3分钟内有效。请勿向他人泄漏。如非本人操作,可忽略本消息。",
+ "timestamp": "1686720601614",
+ "current_number": "13152442222"
+ }
+
+ Returns:
+
+ """
utils.logger.info(f"Received SMS notification: {sms.platform}, {sms.current_number}")
sms_code = extract_verification_code(sms.sms_content)
if sms_code:
@@ -67,4 +85,4 @@
if __name__ == '__main__':
- uvicorn.run(app, port=8000, host='0.0.0.0')+ uvicorn.run(app, port=8000, host='0.0.0.0')
| https://raw.githubusercontent.com/NanmiCoder/MediaCrawler/HEAD/recv_sms.py |
Can you add docstrings to this Python file? |
import re
import tempfile
import zipfile
import lxml.etree
from .base import BaseSchemaValidator
class DOCXSchemaValidator(BaseSchemaValidator):
# Word-specific namespace
WORD_2006_NAMESPACE = "http://schemas.openxmlformats.org/wordprocessingml/2006/main"
# Word-specific element to relationship type mappings
# Start with empty mapping - add specific cases as we discover them
ELEMENT_RELATIONSHIP_TYPES = {}
def validate(self):
# Test 0: XML well-formedness
if not self.validate_xml():
return False
# Test 1: Namespace declarations
all_valid = True
if not self.validate_namespaces():
all_valid = False
# Test 2: Unique IDs
if not self.validate_unique_ids():
all_valid = False
# Test 3: Relationship and file reference validation
if not self.validate_file_references():
all_valid = False
# Test 4: Content type declarations
if not self.validate_content_types():
all_valid = False
# Test 5: XSD schema validation
if not self.validate_against_xsd():
all_valid = False
# Test 6: Whitespace preservation
if not self.validate_whitespace_preservation():
all_valid = False
# Test 7: Deletion validation
if not self.validate_deletions():
all_valid = False
# Test 8: Insertion validation
if not self.validate_insertions():
all_valid = False
# Test 9: Relationship ID reference validation
if not self.validate_all_relationship_ids():
all_valid = False
# Count and compare paragraphs
self.compare_paragraph_counts()
return all_valid
def validate_whitespace_preservation(self):
errors = []
for xml_file in self.xml_files:
# Only check document.xml files
if xml_file.name != "document.xml":
continue
try:
root = lxml.etree.parse(str(xml_file)).getroot()
# Find all w:t elements
for elem in root.iter(f"{{{self.WORD_2006_NAMESPACE}}}t"):
if elem.text:
text = elem.text
# Check if text starts or ends with whitespace
if re.match(r"^\s.*", text) or re.match(r".*\s$", text):
# Check if xml:space="preserve" attribute exists
xml_space_attr = f"{{{self.XML_NAMESPACE}}}space"
if (
xml_space_attr not in elem.attrib
or elem.attrib[xml_space_attr] != "preserve"
):
# Show a preview of the text
text_preview = (
repr(text)[:50] + "..."
if len(repr(text)) > 50
else repr(text)
)
errors.append(
f" {xml_file.relative_to(self.unpacked_dir)}: "
f"Line {elem.sourceline}: w:t element with whitespace missing xml:space='preserve': {text_preview}"
)
except (lxml.etree.XMLSyntaxError, Exception) as e:
errors.append(
f" {xml_file.relative_to(self.unpacked_dir)}: Error: {e}"
)
if errors:
print(f"FAILED - Found {len(errors)} whitespace preservation violations:")
for error in errors:
print(error)
return False
else:
if self.verbose:
print("PASSED - All whitespace is properly preserved")
return True
def validate_deletions(self):
errors = []
for xml_file in self.xml_files:
# Only check document.xml files
if xml_file.name != "document.xml":
continue
try:
root = lxml.etree.parse(str(xml_file)).getroot()
# Find all w:t elements that are descendants of w:del elements
namespaces = {"w": self.WORD_2006_NAMESPACE}
xpath_expression = ".//w:del//w:t"
problematic_t_elements = root.xpath(
xpath_expression, namespaces=namespaces
)
for t_elem in problematic_t_elements:
if t_elem.text:
# Show a preview of the text
text_preview = (
repr(t_elem.text)[:50] + "..."
if len(repr(t_elem.text)) > 50
else repr(t_elem.text)
)
errors.append(
f" {xml_file.relative_to(self.unpacked_dir)}: "
f"Line {t_elem.sourceline}: <w:t> found within <w:del>: {text_preview}"
)
except (lxml.etree.XMLSyntaxError, Exception) as e:
errors.append(
f" {xml_file.relative_to(self.unpacked_dir)}: Error: {e}"
)
if errors:
print(f"FAILED - Found {len(errors)} deletion validation violations:")
for error in errors:
print(error)
return False
else:
if self.verbose:
print("PASSED - No w:t elements found within w:del elements")
return True
def count_paragraphs_in_unpacked(self):
count = 0
for xml_file in self.xml_files:
# Only check document.xml files
if xml_file.name != "document.xml":
continue
try:
root = lxml.etree.parse(str(xml_file)).getroot()
# Count all w:p elements
paragraphs = root.findall(f".//{{{self.WORD_2006_NAMESPACE}}}p")
count = len(paragraphs)
except Exception as e:
print(f"Error counting paragraphs in unpacked document: {e}")
return count
def count_paragraphs_in_original(self):
count = 0
try:
# Create temporary directory to unpack original
with tempfile.TemporaryDirectory() as temp_dir:
# Unpack original docx
with zipfile.ZipFile(self.original_file, "r") as zip_ref:
zip_ref.extractall(temp_dir)
# Parse document.xml
doc_xml_path = temp_dir + "/word/document.xml"
root = lxml.etree.parse(doc_xml_path).getroot()
# Count all w:p elements
paragraphs = root.findall(f".//{{{self.WORD_2006_NAMESPACE}}}p")
count = len(paragraphs)
except Exception as e:
print(f"Error counting paragraphs in original document: {e}")
return count
def validate_insertions(self):
errors = []
for xml_file in self.xml_files:
if xml_file.name != "document.xml":
continue
try:
root = lxml.etree.parse(str(xml_file)).getroot()
namespaces = {"w": self.WORD_2006_NAMESPACE}
# Find w:delText in w:ins that are NOT within w:del
invalid_elements = root.xpath(
".//w:ins//w:delText[not(ancestor::w:del)]",
namespaces=namespaces
)
for elem in invalid_elements:
text_preview = (
repr(elem.text or "")[:50] + "..."
if len(repr(elem.text or "")) > 50
else repr(elem.text or "")
)
errors.append(
f" {xml_file.relative_to(self.unpacked_dir)}: "
f"Line {elem.sourceline}: <w:delText> within <w:ins>: {text_preview}"
)
except (lxml.etree.XMLSyntaxError, Exception) as e:
errors.append(
f" {xml_file.relative_to(self.unpacked_dir)}: Error: {e}"
)
if errors:
print(f"FAILED - Found {len(errors)} insertion validation violations:")
for error in errors:
print(error)
return False
else:
if self.verbose:
print("PASSED - No w:delText elements within w:ins elements")
return True
def compare_paragraph_counts(self):
original_count = self.count_paragraphs_in_original()
new_count = self.count_paragraphs_in_unpacked()
diff = new_count - original_count
diff_str = f"+{diff}" if diff > 0 else str(diff)
print(f"\nParagraphs: {original_count} → {new_count} ({diff_str})")
if __name__ == "__main__":
raise RuntimeError("This module should not be run directly.") | --- +++ @@ -1,3 +1,6 @@+"""
+Validator for Word document XML files against XSD schemas.
+"""
import re
import tempfile
@@ -9,6 +12,7 @@
class DOCXSchemaValidator(BaseSchemaValidator):
+ """Validator for Word document XML files against XSD schemas."""
# Word-specific namespace
WORD_2006_NAMESPACE = "http://schemas.openxmlformats.org/wordprocessingml/2006/main"
@@ -18,6 +22,7 @@ ELEMENT_RELATIONSHIP_TYPES = {}
def validate(self):
+ """Run all validation checks and return True if all pass."""
# Test 0: XML well-formedness
if not self.validate_xml():
return False
@@ -65,6 +70,9 @@ return all_valid
def validate_whitespace_preservation(self):
+ """
+ Validate that w:t elements with whitespace have xml:space='preserve'.
+ """
errors = []
for xml_file in self.xml_files:
@@ -114,6 +122,10 @@ return True
def validate_deletions(self):
+ """
+ Validate that w:t elements are not within w:del elements.
+ For some reason, XSD validation does not catch this, so we do it manually.
+ """
errors = []
for xml_file in self.xml_files:
@@ -159,6 +171,7 @@ return True
def count_paragraphs_in_unpacked(self):
+ """Count the number of paragraphs in the unpacked document."""
count = 0
for xml_file in self.xml_files:
@@ -177,6 +190,7 @@ return count
def count_paragraphs_in_original(self):
+ """Count the number of paragraphs in the original docx file."""
count = 0
try:
@@ -200,6 +214,10 @@ return count
def validate_insertions(self):
+ """
+ Validate that w:delText elements are not within w:ins elements.
+ w:delText is only allowed in w:ins if nested within a w:del.
+ """
errors = []
for xml_file in self.xml_files:
@@ -243,6 +261,7 @@ return True
def compare_paragraph_counts(self):
+ """Compare paragraph counts between original and new document."""
original_count = self.count_paragraphs_in_original()
new_count = self.count_paragraphs_in_unpacked()
@@ -252,4 +271,4 @@
if __name__ == "__main__":
- raise RuntimeError("This module should not be run directly.")+ raise RuntimeError("This module should not be run directly.")
| https://raw.githubusercontent.com/ComposioHQ/awesome-claude-skills/HEAD/document-skills/pptx/ooxml/scripts/validation/docx.py |
Can you add docstrings to this Python file? | #!/usr/bin/env python3
from PIL import Image, ImageDraw, ImageFont
import numpy as np
from typing import Optional
def create_blank_frame(width: int, height: int, color: tuple[int, int, int] = (255, 255, 255)) -> Image.Image:
return Image.new('RGB', (width, height), color)
def draw_circle(frame: Image.Image, center: tuple[int, int], radius: int,
fill_color: Optional[tuple[int, int, int]] = None,
outline_color: Optional[tuple[int, int, int]] = None,
outline_width: int = 1) -> Image.Image:
draw = ImageDraw.Draw(frame)
x, y = center
bbox = [x - radius, y - radius, x + radius, y + radius]
draw.ellipse(bbox, fill=fill_color, outline=outline_color, width=outline_width)
return frame
def draw_rectangle(frame: Image.Image, top_left: tuple[int, int], bottom_right: tuple[int, int],
fill_color: Optional[tuple[int, int, int]] = None,
outline_color: Optional[tuple[int, int, int]] = None,
outline_width: int = 1) -> Image.Image:
draw = ImageDraw.Draw(frame)
draw.rectangle([top_left, bottom_right], fill=fill_color, outline=outline_color, width=outline_width)
return frame
def draw_line(frame: Image.Image, start: tuple[int, int], end: tuple[int, int],
color: tuple[int, int, int] = (0, 0, 0), width: int = 2) -> Image.Image:
draw = ImageDraw.Draw(frame)
draw.line([start, end], fill=color, width=width)
return frame
def draw_text(frame: Image.Image, text: str, position: tuple[int, int],
font_size: int = 40, color: tuple[int, int, int] = (0, 0, 0),
centered: bool = False) -> Image.Image:
draw = ImageDraw.Draw(frame)
# Try to use default font, fall back to basic if not available
try:
font = ImageFont.truetype("/System/Library/Fonts/Helvetica.ttc", font_size)
except:
font = ImageFont.load_default()
if centered:
bbox = draw.textbbox((0, 0), text, font=font)
text_width = bbox[2] - bbox[0]
text_height = bbox[3] - bbox[1]
x = position[0] - text_width // 2
y = position[1] - text_height // 2
position = (x, y)
draw.text(position, text, fill=color, font=font)
return frame
def draw_emoji(frame: Image.Image, emoji: str, position: tuple[int, int], size: int = 60) -> Image.Image:
draw = ImageDraw.Draw(frame)
# Use Apple Color Emoji font on macOS
try:
font = ImageFont.truetype("/System/Library/Fonts/Apple Color Emoji.ttc", size)
except:
# Fallback to text-based emoji
font = ImageFont.truetype("/System/Library/Fonts/Helvetica.ttc", size)
draw.text(position, emoji, font=font, embedded_color=True)
return frame
def composite_layers(base: Image.Image, overlay: Image.Image,
position: tuple[int, int] = (0, 0), alpha: float = 1.0) -> Image.Image:
# Convert to RGBA for transparency support
base_rgba = base.convert('RGBA')
overlay_rgba = overlay.convert('RGBA')
# Apply alpha
if alpha < 1.0:
overlay_rgba = overlay_rgba.copy()
overlay_rgba.putalpha(int(255 * alpha))
# Paste overlay onto base
base_rgba.paste(overlay_rgba, position, overlay_rgba)
# Convert back to RGB
return base_rgba.convert('RGB')
def draw_stick_figure(frame: Image.Image, position: tuple[int, int], scale: float = 1.0,
color: tuple[int, int, int] = (0, 0, 0), line_width: int = 3) -> Image.Image:
draw = ImageDraw.Draw(frame)
x, y = position
# Scale dimensions
head_radius = int(15 * scale)
body_length = int(40 * scale)
arm_length = int(25 * scale)
leg_length = int(35 * scale)
leg_spread = int(15 * scale)
# Head
draw.ellipse([x - head_radius, y - head_radius, x + head_radius, y + head_radius],
outline=color, width=line_width)
# Body
body_start = y + head_radius
body_end = body_start + body_length
draw.line([(x, body_start), (x, body_end)], fill=color, width=line_width)
# Arms
arm_y = body_start + int(body_length * 0.3)
draw.line([(x - arm_length, arm_y), (x + arm_length, arm_y)], fill=color, width=line_width)
# Legs
draw.line([(x, body_end), (x - leg_spread, body_end + leg_length)], fill=color, width=line_width)
draw.line([(x, body_end), (x + leg_spread, body_end + leg_length)], fill=color, width=line_width)
return frame
def create_gradient_background(width: int, height: int,
top_color: tuple[int, int, int],
bottom_color: tuple[int, int, int]) -> Image.Image:
frame = Image.new('RGB', (width, height))
draw = ImageDraw.Draw(frame)
# Calculate color step for each row
r1, g1, b1 = top_color
r2, g2, b2 = bottom_color
for y in range(height):
# Interpolate color
ratio = y / height
r = int(r1 * (1 - ratio) + r2 * ratio)
g = int(g1 * (1 - ratio) + g2 * ratio)
b = int(b1 * (1 - ratio) + b2 * ratio)
# Draw horizontal line
draw.line([(0, y), (width, y)], fill=(r, g, b))
return frame
def draw_emoji_enhanced(frame: Image.Image, emoji: str, position: tuple[int, int],
size: int = 60, shadow: bool = True,
shadow_offset: tuple[int, int] = (2, 2)) -> Image.Image:
draw = ImageDraw.Draw(frame)
# Ensure minimum size to avoid font rendering errors
size = max(12, size)
# Use Apple Color Emoji font on macOS
try:
font = ImageFont.truetype("/System/Library/Fonts/Apple Color Emoji.ttc", size)
except:
# Fallback to text-based emoji
try:
font = ImageFont.truetype("/System/Library/Fonts/Helvetica.ttc", size)
except:
font = ImageFont.load_default()
# Draw shadow first if enabled
if shadow and size >= 20: # Only draw shadow for larger emojis
shadow_pos = (position[0] + shadow_offset[0], position[1] + shadow_offset[1])
# Draw semi-transparent shadow (simulated by drawing multiple times)
for offset in range(1, 3):
try:
draw.text((shadow_pos[0] + offset, shadow_pos[1] + offset),
emoji, font=font, embedded_color=True, fill=(0, 0, 0, 100))
except:
pass # Skip shadow if it fails
# Draw main emoji
try:
draw.text(position, emoji, font=font, embedded_color=True)
except:
# Fallback to basic drawing if embedded color fails
draw.text(position, emoji, font=font, fill=(0, 0, 0))
return frame
def draw_circle_with_shadow(frame: Image.Image, center: tuple[int, int], radius: int,
fill_color: tuple[int, int, int],
shadow_offset: tuple[int, int] = (3, 3),
shadow_color: tuple[int, int, int] = (0, 0, 0)) -> Image.Image:
draw = ImageDraw.Draw(frame)
x, y = center
# Draw shadow
shadow_center = (x + shadow_offset[0], y + shadow_offset[1])
shadow_bbox = [
shadow_center[0] - radius,
shadow_center[1] - radius,
shadow_center[0] + radius,
shadow_center[1] + radius
]
draw.ellipse(shadow_bbox, fill=shadow_color)
# Draw main circle
bbox = [x - radius, y - radius, x + radius, y + radius]
draw.ellipse(bbox, fill=fill_color)
return frame
def draw_rounded_rectangle(frame: Image.Image, top_left: tuple[int, int],
bottom_right: tuple[int, int], radius: int,
fill_color: Optional[tuple[int, int, int]] = None,
outline_color: Optional[tuple[int, int, int]] = None,
outline_width: int = 1) -> Image.Image:
draw = ImageDraw.Draw(frame)
x1, y1 = top_left
x2, y2 = bottom_right
# Draw rounded rectangle using PIL's built-in method
draw.rounded_rectangle([x1, y1, x2, y2], radius=radius,
fill=fill_color, outline=outline_color, width=outline_width)
return frame
def add_vignette(frame: Image.Image, strength: float = 0.5) -> Image.Image:
width, height = frame.size
# Create radial gradient mask
center_x, center_y = width // 2, height // 2
max_dist = ((width / 2) ** 2 + (height / 2) ** 2) ** 0.5
# Create overlay
overlay = Image.new('RGB', (width, height), (0, 0, 0))
pixels = overlay.load()
for y in range(height):
for x in range(width):
# Calculate distance from center
dx = x - center_x
dy = y - center_y
dist = (dx ** 2 + dy ** 2) ** 0.5
# Calculate vignette value
vignette = min(1, (dist / max_dist) * strength)
value = int(255 * (1 - vignette))
pixels[x, y] = (value, value, value)
# Blend with original using multiply
frame_array = np.array(frame, dtype=np.float32) / 255
overlay_array = np.array(overlay, dtype=np.float32) / 255
result = frame_array * overlay_array
result = (result * 255).astype(np.uint8)
return Image.fromarray(result)
def draw_star(frame: Image.Image, center: tuple[int, int], size: int,
fill_color: tuple[int, int, int],
outline_color: Optional[tuple[int, int, int]] = None,
outline_width: int = 1) -> Image.Image:
import math
draw = ImageDraw.Draw(frame)
x, y = center
# Calculate star points
points = []
for i in range(10):
angle = (i * 36 - 90) * math.pi / 180 # 36 degrees per point, start at top
radius = size if i % 2 == 0 else size * 0.4 # Alternate between outer and inner
px = x + radius * math.cos(angle)
py = y + radius * math.sin(angle)
points.append((px, py))
# Draw star
draw.polygon(points, fill=fill_color, outline=outline_color, width=outline_width)
return frame | --- +++ @@ -1,4 +1,10 @@ #!/usr/bin/env python3
+"""
+Frame Composer - Utilities for composing visual elements into frames.
+
+Provides functions for drawing shapes, text, emojis, and compositing elements
+together to create animation frames.
+"""
from PIL import Image, ImageDraw, ImageFont
import numpy as np
@@ -6,6 +12,17 @@
def create_blank_frame(width: int, height: int, color: tuple[int, int, int] = (255, 255, 255)) -> Image.Image:
+ """
+ Create a blank frame with solid color background.
+
+ Args:
+ width: Frame width
+ height: Frame height
+ color: RGB color tuple (default: white)
+
+ Returns:
+ PIL Image
+ """
return Image.new('RGB', (width, height), color)
@@ -13,6 +30,20 @@ fill_color: Optional[tuple[int, int, int]] = None,
outline_color: Optional[tuple[int, int, int]] = None,
outline_width: int = 1) -> Image.Image:
+ """
+ Draw a circle on a frame.
+
+ Args:
+ frame: PIL Image to draw on
+ center: (x, y) center position
+ radius: Circle radius
+ fill_color: RGB fill color (None for no fill)
+ outline_color: RGB outline color (None for no outline)
+ outline_width: Outline width in pixels
+
+ Returns:
+ Modified frame
+ """
draw = ImageDraw.Draw(frame)
x, y = center
bbox = [x - radius, y - radius, x + radius, y + radius]
@@ -24,6 +55,20 @@ fill_color: Optional[tuple[int, int, int]] = None,
outline_color: Optional[tuple[int, int, int]] = None,
outline_width: int = 1) -> Image.Image:
+ """
+ Draw a rectangle on a frame.
+
+ Args:
+ frame: PIL Image to draw on
+ top_left: (x, y) top-left corner
+ bottom_right: (x, y) bottom-right corner
+ fill_color: RGB fill color (None for no fill)
+ outline_color: RGB outline color (None for no outline)
+ outline_width: Outline width in pixels
+
+ Returns:
+ Modified frame
+ """
draw = ImageDraw.Draw(frame)
draw.rectangle([top_left, bottom_right], fill=fill_color, outline=outline_color, width=outline_width)
return frame
@@ -31,6 +76,19 @@
def draw_line(frame: Image.Image, start: tuple[int, int], end: tuple[int, int],
color: tuple[int, int, int] = (0, 0, 0), width: int = 2) -> Image.Image:
+ """
+ Draw a line on a frame.
+
+ Args:
+ frame: PIL Image to draw on
+ start: (x, y) start position
+ end: (x, y) end position
+ color: RGB line color
+ width: Line width in pixels
+
+ Returns:
+ Modified frame
+ """
draw = ImageDraw.Draw(frame)
draw.line([start, end], fill=color, width=width)
return frame
@@ -39,6 +97,20 @@ def draw_text(frame: Image.Image, text: str, position: tuple[int, int],
font_size: int = 40, color: tuple[int, int, int] = (0, 0, 0),
centered: bool = False) -> Image.Image:
+ """
+ Draw text on a frame.
+
+ Args:
+ frame: PIL Image to draw on
+ text: Text to draw
+ position: (x, y) position (top-left unless centered=True)
+ font_size: Font size in pixels
+ color: RGB text color
+ centered: If True, center text at position
+
+ Returns:
+ Modified frame
+ """
draw = ImageDraw.Draw(frame)
# Try to use default font, fall back to basic if not available
@@ -60,6 +132,18 @@
def draw_emoji(frame: Image.Image, emoji: str, position: tuple[int, int], size: int = 60) -> Image.Image:
+ """
+ Draw emoji text on a frame (requires system emoji support).
+
+ Args:
+ frame: PIL Image to draw on
+ emoji: Emoji character(s)
+ position: (x, y) position
+ size: Emoji size in pixels
+
+ Returns:
+ Modified frame
+ """
draw = ImageDraw.Draw(frame)
# Use Apple Color Emoji font on macOS
@@ -75,6 +159,18 @@
def composite_layers(base: Image.Image, overlay: Image.Image,
position: tuple[int, int] = (0, 0), alpha: float = 1.0) -> Image.Image:
+ """
+ Composite one image on top of another.
+
+ Args:
+ base: Base image
+ overlay: Image to overlay on top
+ position: (x, y) position to place overlay
+ alpha: Opacity of overlay (0.0 = transparent, 1.0 = opaque)
+
+ Returns:
+ Composite image
+ """
# Convert to RGBA for transparency support
base_rgba = base.convert('RGBA')
overlay_rgba = overlay.convert('RGBA')
@@ -93,6 +189,19 @@
def draw_stick_figure(frame: Image.Image, position: tuple[int, int], scale: float = 1.0,
color: tuple[int, int, int] = (0, 0, 0), line_width: int = 3) -> Image.Image:
+ """
+ Draw a simple stick figure.
+
+ Args:
+ frame: PIL Image to draw on
+ position: (x, y) center position of head
+ scale: Size multiplier
+ color: RGB line color
+ line_width: Line width in pixels
+
+ Returns:
+ Modified frame
+ """
draw = ImageDraw.Draw(frame)
x, y = position
@@ -126,6 +235,18 @@ def create_gradient_background(width: int, height: int,
top_color: tuple[int, int, int],
bottom_color: tuple[int, int, int]) -> Image.Image:
+ """
+ Create a vertical gradient background.
+
+ Args:
+ width: Frame width
+ height: Frame height
+ top_color: RGB color at top
+ bottom_color: RGB color at bottom
+
+ Returns:
+ PIL Image with gradient
+ """
frame = Image.new('RGB', (width, height))
draw = ImageDraw.Draw(frame)
@@ -149,6 +270,20 @@ def draw_emoji_enhanced(frame: Image.Image, emoji: str, position: tuple[int, int],
size: int = 60, shadow: bool = True,
shadow_offset: tuple[int, int] = (2, 2)) -> Image.Image:
+ """
+ Draw emoji with optional shadow for better visual quality.
+
+ Args:
+ frame: PIL Image to draw on
+ emoji: Emoji character(s)
+ position: (x, y) position
+ size: Emoji size in pixels (minimum 12)
+ shadow: Whether to add drop shadow
+ shadow_offset: Shadow offset
+
+ Returns:
+ Modified frame
+ """
draw = ImageDraw.Draw(frame)
# Ensure minimum size to avoid font rendering errors
@@ -189,6 +324,20 @@ fill_color: tuple[int, int, int],
shadow_offset: tuple[int, int] = (3, 3),
shadow_color: tuple[int, int, int] = (0, 0, 0)) -> Image.Image:
+ """
+ Draw a circle with drop shadow.
+
+ Args:
+ frame: PIL Image to draw on
+ center: (x, y) center position
+ radius: Circle radius
+ fill_color: RGB fill color
+ shadow_offset: (x, y) shadow offset
+ shadow_color: RGB shadow color
+
+ Returns:
+ Modified frame
+ """
draw = ImageDraw.Draw(frame)
x, y = center
@@ -214,6 +363,21 @@ fill_color: Optional[tuple[int, int, int]] = None,
outline_color: Optional[tuple[int, int, int]] = None,
outline_width: int = 1) -> Image.Image:
+ """
+ Draw a rectangle with rounded corners.
+
+ Args:
+ frame: PIL Image to draw on
+ top_left: (x, y) top-left corner
+ bottom_right: (x, y) bottom-right corner
+ radius: Corner radius
+ fill_color: RGB fill color (None for no fill)
+ outline_color: RGB outline color (None for no outline)
+ outline_width: Outline width
+
+ Returns:
+ Modified frame
+ """
draw = ImageDraw.Draw(frame)
x1, y1 = top_left
x2, y2 = bottom_right
@@ -226,6 +390,16 @@
def add_vignette(frame: Image.Image, strength: float = 0.5) -> Image.Image:
+ """
+ Add a vignette effect (darkened edges) to frame.
+
+ Args:
+ frame: PIL Image
+ strength: Vignette strength (0.0-1.0)
+
+ Returns:
+ Frame with vignette
+ """
width, height = frame.size
# Create radial gradient mask
@@ -262,6 +436,20 @@ fill_color: tuple[int, int, int],
outline_color: Optional[tuple[int, int, int]] = None,
outline_width: int = 1) -> Image.Image:
+ """
+ Draw a 5-pointed star.
+
+ Args:
+ frame: PIL Image to draw on
+ center: (x, y) center position
+ size: Star size (outer radius)
+ fill_color: RGB fill color
+ outline_color: RGB outline color (None for no outline)
+ outline_width: Outline width
+
+ Returns:
+ Modified frame
+ """
import math
draw = ImageDraw.Draw(frame)
x, y = center
| https://raw.githubusercontent.com/ComposioHQ/awesome-claude-skills/HEAD/slack-gif-creator/core/frame_composer.py |
Add docstrings that explain inputs and outputs | #!/usr/bin/env python3
import sys
import math
from pathlib import Path
sys.path.append(str(Path(__file__).parent.parent))
from core.gif_builder import GIFBuilder
from core.frame_composer import create_blank_frame, draw_circle, draw_emoji, draw_text
from core.easing import ease_out_quad
def create_shake_animation(
object_type: str = 'emoji',
object_data: dict = None,
num_frames: int = 20,
shake_intensity: int = 15,
center_x: int = 240,
center_y: int = 240,
direction: str = 'horizontal', # 'horizontal', 'vertical', or 'both'
frame_width: int = 480,
frame_height: int = 480,
bg_color: tuple[int, int, int] = (255, 255, 255)
) -> list:
frames = []
# Default object data
if object_data is None:
if object_type == 'emoji':
object_data = {'emoji': '😱', 'size': 80}
elif object_type == 'text':
object_data = {'text': 'SHAKE!', 'font_size': 50, 'color': (255, 0, 0)}
for i in range(num_frames):
frame = create_blank_frame(frame_width, frame_height, bg_color)
# Calculate progress
t = i / (num_frames - 1) if num_frames > 1 else 0
# Decay shake intensity over time
intensity = shake_intensity * (1 - ease_out_quad(t))
# Calculate shake offset using sine wave for smooth oscillation
freq = 3 # Oscillation frequency
offset_x = 0
offset_y = 0
if direction in ['horizontal', 'both']:
offset_x = int(math.sin(t * freq * 2 * math.pi) * intensity)
if direction in ['vertical', 'both']:
offset_y = int(math.cos(t * freq * 2 * math.pi) * intensity)
# Apply offset
x = center_x + offset_x
y = center_y + offset_y
# Draw object
if object_type == 'emoji':
draw_emoji(
frame,
emoji=object_data['emoji'],
position=(x - object_data['size'] // 2, y - object_data['size'] // 2),
size=object_data['size']
)
elif object_type == 'text':
draw_text(
frame,
text=object_data['text'],
position=(x, y),
font_size=object_data['font_size'],
color=object_data['color'],
centered=True
)
elif object_type == 'circle':
draw_circle(
frame,
center=(x, y),
radius=object_data.get('radius', 30),
fill_color=object_data.get('color', (100, 100, 255))
)
frames.append(frame)
return frames
# Example usage
if __name__ == '__main__':
print("Creating shake GIF...")
builder = GIFBuilder(width=480, height=480, fps=24)
frames = create_shake_animation(
object_type='emoji',
object_data={'emoji': '😱', 'size': 100},
num_frames=30,
shake_intensity=20,
direction='both'
)
builder.add_frames(frames)
builder.save('shake_test.gif', num_colors=128) | --- +++ @@ -1,4 +1,9 @@ #!/usr/bin/env python3
+"""
+Shake Animation Template - Creates shaking/vibrating motion.
+
+Use this for impact effects, emphasis, or nervous/excited reactions.
+"""
import sys
import math
@@ -23,6 +28,24 @@ frame_height: int = 480,
bg_color: tuple[int, int, int] = (255, 255, 255)
) -> list:
+ """
+ Create frames for a shaking animation.
+
+ Args:
+ object_type: 'circle', 'emoji', 'text', or 'custom'
+ object_data: Data for the object
+ num_frames: Number of frames
+ shake_intensity: Maximum shake displacement in pixels
+ center_x: Center X position
+ center_y: Center Y position
+ direction: 'horizontal', 'vertical', or 'both'
+ frame_width: Frame width
+ frame_height: Frame height
+ bg_color: Background color
+
+ Returns:
+ List of frames
+ """
frames = []
# Default object data
| https://raw.githubusercontent.com/ComposioHQ/awesome-claude-skills/HEAD/slack-gif-creator/templates/shake.py |
Add docstrings to improve code quality | #!/usr/bin/env python3
import json
import sys
from pathlib import Path
from typing import Any, Dict, List
from inventory import InventoryData, extract_text_inventory
from pptx import Presentation
from pptx.dml.color import RGBColor
from pptx.enum.dml import MSO_THEME_COLOR
from pptx.enum.text import PP_ALIGN
from pptx.oxml.xmlchemy import OxmlElement
from pptx.util import Pt
def clear_paragraph_bullets(paragraph):
pPr = paragraph._element.get_or_add_pPr()
# Remove existing bullet elements
for child in list(pPr):
if (
child.tag.endswith("buChar")
or child.tag.endswith("buNone")
or child.tag.endswith("buAutoNum")
or child.tag.endswith("buFont")
):
pPr.remove(child)
return pPr
def apply_paragraph_properties(paragraph, para_data: Dict[str, Any]):
# Get the text but don't set it on paragraph directly yet
text = para_data.get("text", "")
# Get or create paragraph properties
pPr = clear_paragraph_bullets(paragraph)
# Handle bullet formatting
if para_data.get("bullet", False):
level = para_data.get("level", 0)
paragraph.level = level
# Calculate font-proportional indentation
font_size = para_data.get("font_size", 18.0)
level_indent_emu = int((font_size * (1.6 + level * 1.6)) * 12700)
hanging_indent_emu = int(-font_size * 0.8 * 12700)
# Set indentation
pPr.attrib["marL"] = str(level_indent_emu)
pPr.attrib["indent"] = str(hanging_indent_emu)
# Add bullet character
buChar = OxmlElement("a:buChar")
buChar.set("char", "•")
pPr.append(buChar)
# Default to left alignment for bullets if not specified
if "alignment" not in para_data:
paragraph.alignment = PP_ALIGN.LEFT
else:
# Remove indentation for non-bullet text
pPr.attrib["marL"] = "0"
pPr.attrib["indent"] = "0"
# Add buNone element
buNone = OxmlElement("a:buNone")
pPr.insert(0, buNone)
# Apply alignment
if "alignment" in para_data:
alignment_map = {
"LEFT": PP_ALIGN.LEFT,
"CENTER": PP_ALIGN.CENTER,
"RIGHT": PP_ALIGN.RIGHT,
"JUSTIFY": PP_ALIGN.JUSTIFY,
}
if para_data["alignment"] in alignment_map:
paragraph.alignment = alignment_map[para_data["alignment"]]
# Apply spacing
if "space_before" in para_data:
paragraph.space_before = Pt(para_data["space_before"])
if "space_after" in para_data:
paragraph.space_after = Pt(para_data["space_after"])
if "line_spacing" in para_data:
paragraph.line_spacing = Pt(para_data["line_spacing"])
# Apply run-level formatting
if not paragraph.runs:
run = paragraph.add_run()
run.text = text
else:
run = paragraph.runs[0]
run.text = text
# Apply font properties
apply_font_properties(run, para_data)
def apply_font_properties(run, para_data: Dict[str, Any]):
if "bold" in para_data:
run.font.bold = para_data["bold"]
if "italic" in para_data:
run.font.italic = para_data["italic"]
if "underline" in para_data:
run.font.underline = para_data["underline"]
if "font_size" in para_data:
run.font.size = Pt(para_data["font_size"])
if "font_name" in para_data:
run.font.name = para_data["font_name"]
# Apply color - prefer RGB, fall back to theme_color
if "color" in para_data:
color_hex = para_data["color"].lstrip("#")
if len(color_hex) == 6:
r = int(color_hex[0:2], 16)
g = int(color_hex[2:4], 16)
b = int(color_hex[4:6], 16)
run.font.color.rgb = RGBColor(r, g, b)
elif "theme_color" in para_data:
# Get theme color by name (e.g., "DARK_1", "ACCENT_1")
theme_name = para_data["theme_color"]
try:
run.font.color.theme_color = getattr(MSO_THEME_COLOR, theme_name)
except AttributeError:
print(f" WARNING: Unknown theme color name '{theme_name}'")
def detect_frame_overflow(inventory: InventoryData) -> Dict[str, Dict[str, float]]:
overflow_map = {}
for slide_key, shapes_dict in inventory.items():
for shape_key, shape_data in shapes_dict.items():
# Check for frame overflow (text exceeding shape bounds)
if shape_data.frame_overflow_bottom is not None:
if slide_key not in overflow_map:
overflow_map[slide_key] = {}
overflow_map[slide_key][shape_key] = shape_data.frame_overflow_bottom
return overflow_map
def validate_replacements(inventory: InventoryData, replacements: Dict) -> List[str]:
errors = []
for slide_key, shapes_data in replacements.items():
if not slide_key.startswith("slide-"):
continue
# Check if slide exists
if slide_key not in inventory:
errors.append(f"Slide '{slide_key}' not found in inventory")
continue
# Check each shape
for shape_key in shapes_data.keys():
if shape_key not in inventory[slide_key]:
# Find shapes without replacements defined and show their content
unused_with_content = []
for k in inventory[slide_key].keys():
if k not in shapes_data:
shape_data = inventory[slide_key][k]
# Get text from paragraphs as preview
paragraphs = shape_data.paragraphs
if paragraphs and paragraphs[0].text:
first_text = paragraphs[0].text[:50]
if len(paragraphs[0].text) > 50:
first_text += "..."
unused_with_content.append(f"{k} ('{first_text}')")
else:
unused_with_content.append(k)
errors.append(
f"Shape '{shape_key}' not found on '{slide_key}'. "
f"Shapes without replacements: {', '.join(sorted(unused_with_content)) if unused_with_content else 'none'}"
)
return errors
def check_duplicate_keys(pairs):
result = {}
for key, value in pairs:
if key in result:
raise ValueError(f"Duplicate key found in JSON: '{key}'")
result[key] = value
return result
def apply_replacements(pptx_file: str, json_file: str, output_file: str):
# Load presentation
prs = Presentation(pptx_file)
# Get inventory of all text shapes (returns ShapeData objects)
# Pass prs to use same Presentation instance
inventory = extract_text_inventory(Path(pptx_file), prs)
# Detect text overflow in original presentation
original_overflow = detect_frame_overflow(inventory)
# Load replacement data with duplicate key detection
with open(json_file, "r") as f:
replacements = json.load(f, object_pairs_hook=check_duplicate_keys)
# Validate replacements
errors = validate_replacements(inventory, replacements)
if errors:
print("ERROR: Invalid shapes in replacement JSON:")
for error in errors:
print(f" - {error}")
print("\nPlease check the inventory and update your replacement JSON.")
print(
"You can regenerate the inventory with: python inventory.py <input.pptx> <output.json>"
)
raise ValueError(f"Found {len(errors)} validation error(s)")
# Track statistics
shapes_processed = 0
shapes_cleared = 0
shapes_replaced = 0
# Process each slide from inventory
for slide_key, shapes_dict in inventory.items():
if not slide_key.startswith("slide-"):
continue
slide_index = int(slide_key.split("-")[1])
if slide_index >= len(prs.slides):
print(f"Warning: Slide {slide_index} not found")
continue
# Process each shape from inventory
for shape_key, shape_data in shapes_dict.items():
shapes_processed += 1
# Get the shape directly from ShapeData
shape = shape_data.shape
if not shape:
print(f"Warning: {shape_key} has no shape reference")
continue
# ShapeData already validates text_frame in __init__
text_frame = shape.text_frame # type: ignore
text_frame.clear() # type: ignore
shapes_cleared += 1
# Check for replacement paragraphs
replacement_shape_data = replacements.get(slide_key, {}).get(shape_key, {})
if "paragraphs" not in replacement_shape_data:
continue
shapes_replaced += 1
# Add replacement paragraphs
for i, para_data in enumerate(replacement_shape_data["paragraphs"]):
if i == 0:
p = text_frame.paragraphs[0] # type: ignore
else:
p = text_frame.add_paragraph() # type: ignore
apply_paragraph_properties(p, para_data)
# Check for issues after replacements
# Save to a temporary file and reload to avoid modifying the presentation during inventory
# (extract_text_inventory accesses font.color which adds empty <a:solidFill/> elements)
import tempfile
with tempfile.NamedTemporaryFile(suffix=".pptx", delete=False) as tmp:
tmp_path = Path(tmp.name)
prs.save(str(tmp_path))
try:
updated_inventory = extract_text_inventory(tmp_path)
updated_overflow = detect_frame_overflow(updated_inventory)
finally:
tmp_path.unlink() # Clean up temp file
# Check if any text overflow got worse
overflow_errors = []
for slide_key, shape_overflows in updated_overflow.items():
for shape_key, new_overflow in shape_overflows.items():
# Get original overflow (0 if there was no overflow before)
original = original_overflow.get(slide_key, {}).get(shape_key, 0.0)
# Error if overflow increased
if new_overflow > original + 0.01: # Small tolerance for rounding
increase = new_overflow - original
overflow_errors.append(
f'{slide_key}/{shape_key}: overflow worsened by {increase:.2f}" '
f'(was {original:.2f}", now {new_overflow:.2f}")'
)
# Collect warnings from updated shapes
warnings = []
for slide_key, shapes_dict in updated_inventory.items():
for shape_key, shape_data in shapes_dict.items():
if shape_data.warnings:
for warning in shape_data.warnings:
warnings.append(f"{slide_key}/{shape_key}: {warning}")
# Fail if there are any issues
if overflow_errors or warnings:
print("\nERROR: Issues detected in replacement output:")
if overflow_errors:
print("\nText overflow worsened:")
for error in overflow_errors:
print(f" - {error}")
if warnings:
print("\nFormatting warnings:")
for warning in warnings:
print(f" - {warning}")
print("\nPlease fix these issues before saving.")
raise ValueError(
f"Found {len(overflow_errors)} overflow error(s) and {len(warnings)} warning(s)"
)
# Save the presentation
prs.save(output_file)
# Report results
print(f"Saved updated presentation to: {output_file}")
print(f"Processed {len(prs.slides)} slides")
print(f" - Shapes processed: {shapes_processed}")
print(f" - Shapes cleared: {shapes_cleared}")
print(f" - Shapes replaced: {shapes_replaced}")
def main():
if len(sys.argv) != 4:
print(__doc__)
sys.exit(1)
input_pptx = Path(sys.argv[1])
replacements_json = Path(sys.argv[2])
output_pptx = Path(sys.argv[3])
if not input_pptx.exists():
print(f"Error: Input file '{input_pptx}' not found")
sys.exit(1)
if not replacements_json.exists():
print(f"Error: Replacements JSON file '{replacements_json}' not found")
sys.exit(1)
try:
apply_replacements(str(input_pptx), str(replacements_json), str(output_pptx))
except Exception as e:
print(f"Error applying replacements: {e}")
import traceback
traceback.print_exc()
sys.exit(1)
if __name__ == "__main__":
main() | --- +++ @@ -1,4 +1,13 @@ #!/usr/bin/env python3
+"""Apply text replacements to PowerPoint presentation.
+
+Usage:
+ python replace.py <input.pptx> <replacements.json> <output.pptx>
+
+The replacements JSON should have the structure output by inventory.py.
+ALL text shapes identified by inventory.py will have their text cleared
+unless "paragraphs" is specified in the replacements for that shape.
+"""
import json
import sys
@@ -15,6 +24,7 @@
def clear_paragraph_bullets(paragraph):
+ """Clear bullet formatting from a paragraph."""
pPr = paragraph._element.get_or_add_pPr()
# Remove existing bullet elements
@@ -31,6 +41,7 @@
def apply_paragraph_properties(paragraph, para_data: Dict[str, Any]):
+ """Apply formatting properties to a paragraph."""
# Get the text but don't set it on paragraph directly yet
text = para_data.get("text", "")
@@ -100,6 +111,7 @@
def apply_font_properties(run, para_data: Dict[str, Any]):
+ """Apply font properties to a text run."""
if "bold" in para_data:
run.font.bold = para_data["bold"]
if "italic" in para_data:
@@ -129,6 +141,11 @@
def detect_frame_overflow(inventory: InventoryData) -> Dict[str, Dict[str, float]]:
+ """Detect text overflow in shapes (text exceeding shape bounds).
+
+ Returns dict of slide_key -> shape_key -> overflow_inches.
+ Only includes shapes that have text overflow.
+ """
overflow_map = {}
for slide_key, shapes_dict in inventory.items():
@@ -143,6 +160,10 @@
def validate_replacements(inventory: InventoryData, replacements: Dict) -> List[str]:
+ """Validate that all shapes in replacements exist in inventory.
+
+ Returns list of error messages.
+ """
errors = []
for slide_key, shapes_data in replacements.items():
@@ -181,6 +202,7 @@
def check_duplicate_keys(pairs):
+ """Check for duplicate keys when loading JSON."""
result = {}
for key, value in pairs:
if key in result:
@@ -190,6 +212,7 @@
def apply_replacements(pptx_file: str, json_file: str, output_file: str):
+ """Apply text replacements from JSON to PowerPoint presentation."""
# Load presentation
prs = Presentation(pptx_file)
@@ -331,6 +354,7 @@
def main():
+ """Main entry point for command-line usage."""
if len(sys.argv) != 4:
print(__doc__)
sys.exit(1)
@@ -358,4 +382,4 @@
if __name__ == "__main__":
- main()+ main()
| https://raw.githubusercontent.com/ComposioHQ/awesome-claude-skills/HEAD/document-skills/pptx/scripts/replace.py |
Add docstrings to improve readability |
import subprocess
import tempfile
import zipfile
from pathlib import Path
class RedliningValidator:
def __init__(self, unpacked_dir, original_docx, verbose=False):
self.unpacked_dir = Path(unpacked_dir)
self.original_docx = Path(original_docx)
self.verbose = verbose
self.namespaces = {
"w": "http://schemas.openxmlformats.org/wordprocessingml/2006/main"
}
def validate(self):
# Verify unpacked directory exists and has correct structure
modified_file = self.unpacked_dir / "word" / "document.xml"
if not modified_file.exists():
print(f"FAILED - Modified document.xml not found at {modified_file}")
return False
# First, check if there are any tracked changes by Claude to validate
try:
import xml.etree.ElementTree as ET
tree = ET.parse(modified_file)
root = tree.getroot()
# Check for w:del or w:ins tags authored by Claude
del_elements = root.findall(".//w:del", self.namespaces)
ins_elements = root.findall(".//w:ins", self.namespaces)
# Filter to only include changes by Claude
claude_del_elements = [
elem
for elem in del_elements
if elem.get(f"{{{self.namespaces['w']}}}author") == "Claude"
]
claude_ins_elements = [
elem
for elem in ins_elements
if elem.get(f"{{{self.namespaces['w']}}}author") == "Claude"
]
# Redlining validation is only needed if tracked changes by Claude have been used.
if not claude_del_elements and not claude_ins_elements:
if self.verbose:
print("PASSED - No tracked changes by Claude found.")
return True
except Exception:
# If we can't parse the XML, continue with full validation
pass
# Create temporary directory for unpacking original docx
with tempfile.TemporaryDirectory() as temp_dir:
temp_path = Path(temp_dir)
# Unpack original docx
try:
with zipfile.ZipFile(self.original_docx, "r") as zip_ref:
zip_ref.extractall(temp_path)
except Exception as e:
print(f"FAILED - Error unpacking original docx: {e}")
return False
original_file = temp_path / "word" / "document.xml"
if not original_file.exists():
print(
f"FAILED - Original document.xml not found in {self.original_docx}"
)
return False
# Parse both XML files using xml.etree.ElementTree for redlining validation
try:
import xml.etree.ElementTree as ET
modified_tree = ET.parse(modified_file)
modified_root = modified_tree.getroot()
original_tree = ET.parse(original_file)
original_root = original_tree.getroot()
except ET.ParseError as e:
print(f"FAILED - Error parsing XML files: {e}")
return False
# Remove Claude's tracked changes from both documents
self._remove_claude_tracked_changes(original_root)
self._remove_claude_tracked_changes(modified_root)
# Extract and compare text content
modified_text = self._extract_text_content(modified_root)
original_text = self._extract_text_content(original_root)
if modified_text != original_text:
# Show detailed character-level differences for each paragraph
error_message = self._generate_detailed_diff(
original_text, modified_text
)
print(error_message)
return False
if self.verbose:
print("PASSED - All changes by Claude are properly tracked")
return True
def _generate_detailed_diff(self, original_text, modified_text):
error_parts = [
"FAILED - Document text doesn't match after removing Claude's tracked changes",
"",
"Likely causes:",
" 1. Modified text inside another author's <w:ins> or <w:del> tags",
" 2. Made edits without proper tracked changes",
" 3. Didn't nest <w:del> inside <w:ins> when deleting another's insertion",
"",
"For pre-redlined documents, use correct patterns:",
" - To reject another's INSERTION: Nest <w:del> inside their <w:ins>",
" - To restore another's DELETION: Add new <w:ins> AFTER their <w:del>",
"",
]
# Show git word diff
git_diff = self._get_git_word_diff(original_text, modified_text)
if git_diff:
error_parts.extend(["Differences:", "============", git_diff])
else:
error_parts.append("Unable to generate word diff (git not available)")
return "\n".join(error_parts)
def _get_git_word_diff(self, original_text, modified_text):
try:
with tempfile.TemporaryDirectory() as temp_dir:
temp_path = Path(temp_dir)
# Create two files
original_file = temp_path / "original.txt"
modified_file = temp_path / "modified.txt"
original_file.write_text(original_text, encoding="utf-8")
modified_file.write_text(modified_text, encoding="utf-8")
# Try character-level diff first for precise differences
result = subprocess.run(
[
"git",
"diff",
"--word-diff=plain",
"--word-diff-regex=.", # Character-by-character diff
"-U0", # Zero lines of context - show only changed lines
"--no-index",
str(original_file),
str(modified_file),
],
capture_output=True,
text=True,
)
if result.stdout.strip():
# Clean up the output - remove git diff header lines
lines = result.stdout.split("\n")
# Skip the header lines (diff --git, index, +++, ---, @@)
content_lines = []
in_content = False
for line in lines:
if line.startswith("@@"):
in_content = True
continue
if in_content and line.strip():
content_lines.append(line)
if content_lines:
return "\n".join(content_lines)
# Fallback to word-level diff if character-level is too verbose
result = subprocess.run(
[
"git",
"diff",
"--word-diff=plain",
"-U0", # Zero lines of context
"--no-index",
str(original_file),
str(modified_file),
],
capture_output=True,
text=True,
)
if result.stdout.strip():
lines = result.stdout.split("\n")
content_lines = []
in_content = False
for line in lines:
if line.startswith("@@"):
in_content = True
continue
if in_content and line.strip():
content_lines.append(line)
return "\n".join(content_lines)
except (subprocess.CalledProcessError, FileNotFoundError, Exception):
# Git not available or other error, return None to use fallback
pass
return None
def _remove_claude_tracked_changes(self, root):
ins_tag = f"{{{self.namespaces['w']}}}ins"
del_tag = f"{{{self.namespaces['w']}}}del"
author_attr = f"{{{self.namespaces['w']}}}author"
# Remove w:ins elements
for parent in root.iter():
to_remove = []
for child in parent:
if child.tag == ins_tag and child.get(author_attr) == "Claude":
to_remove.append(child)
for elem in to_remove:
parent.remove(elem)
# Unwrap content in w:del elements where author is "Claude"
deltext_tag = f"{{{self.namespaces['w']}}}delText"
t_tag = f"{{{self.namespaces['w']}}}t"
for parent in root.iter():
to_process = []
for child in parent:
if child.tag == del_tag and child.get(author_attr) == "Claude":
to_process.append((child, list(parent).index(child)))
# Process in reverse order to maintain indices
for del_elem, del_index in reversed(to_process):
# Convert w:delText to w:t before moving
for elem in del_elem.iter():
if elem.tag == deltext_tag:
elem.tag = t_tag
# Move all children of w:del to its parent before removing w:del
for child in reversed(list(del_elem)):
parent.insert(del_index, child)
parent.remove(del_elem)
def _extract_text_content(self, root):
p_tag = f"{{{self.namespaces['w']}}}p"
t_tag = f"{{{self.namespaces['w']}}}t"
paragraphs = []
for p_elem in root.findall(f".//{p_tag}"):
# Get all text elements within this paragraph
text_parts = []
for t_elem in p_elem.findall(f".//{t_tag}"):
if t_elem.text:
text_parts.append(t_elem.text)
paragraph_text = "".join(text_parts)
# Skip empty paragraphs - they don't affect content validation
if paragraph_text:
paragraphs.append(paragraph_text)
return "\n".join(paragraphs)
if __name__ == "__main__":
raise RuntimeError("This module should not be run directly.") | --- +++ @@ -1,3 +1,6 @@+"""
+Validator for tracked changes in Word documents.
+"""
import subprocess
import tempfile
@@ -6,6 +9,7 @@
class RedliningValidator:
+ """Validator for tracked changes in Word documents."""
def __init__(self, unpacked_dir, original_docx, verbose=False):
self.unpacked_dir = Path(unpacked_dir)
@@ -16,6 +20,7 @@ }
def validate(self):
+ """Main validation method that returns True if valid, False otherwise."""
# Verify unpacked directory exists and has correct structure
modified_file = self.unpacked_dir / "word" / "document.xml"
if not modified_file.exists():
@@ -107,6 +112,7 @@ return True
def _generate_detailed_diff(self, original_text, modified_text):
+ """Generate detailed word-level differences using git word diff."""
error_parts = [
"FAILED - Document text doesn't match after removing Claude's tracked changes",
"",
@@ -131,6 +137,7 @@ return "\n".join(error_parts)
def _get_git_word_diff(self, original_text, modified_text):
+ """Generate word diff using git with character-level precision."""
try:
with tempfile.TemporaryDirectory() as temp_dir:
temp_path = Path(temp_dir)
@@ -208,6 +215,7 @@ return None
def _remove_claude_tracked_changes(self, root):
+ """Remove tracked changes authored by Claude from the XML root."""
ins_tag = f"{{{self.namespaces['w']}}}ins"
del_tag = f"{{{self.namespaces['w']}}}del"
author_attr = f"{{{self.namespaces['w']}}}author"
@@ -244,6 +252,11 @@ parent.remove(del_elem)
def _extract_text_content(self, root):
+ """Extract text content from Word XML, preserving paragraph structure.
+
+ Empty paragraphs are skipped to avoid false positives when tracked
+ insertions add only structural elements without text content.
+ """
p_tag = f"{{{self.namespaces['w']}}}p"
t_tag = f"{{{self.namespaces['w']}}}t"
@@ -263,4 +276,4 @@
if __name__ == "__main__":
- raise RuntimeError("This module should not be run directly.")+ raise RuntimeError("This module should not be run directly.")
| https://raw.githubusercontent.com/ComposioHQ/awesome-claude-skills/HEAD/document-skills/pptx/ooxml/scripts/validation/redlining.py |
Generate consistent documentation across files | #!/usr/bin/env python3
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parent.parent))
from PIL import Image, ImageDraw
import numpy as np
from core.gif_builder import GIFBuilder
from core.frame_composer import create_blank_frame, draw_emoji_enhanced
from core.easing import interpolate
def create_fade_animation(
    object_type: str = 'emoji',
    object_data: dict | None = None,
    num_frames: int = 30,
    fade_type: str = 'in',  # 'in', 'out', 'in_out', 'blink'
    easing: str = 'ease_in_out',
    center_pos: tuple[int, int] = (240, 240),
    frame_width: int = 480,
    frame_height: int = 480,
    bg_color: tuple[int, int, int] = (255, 255, 255)
) -> list[Image.Image]:
    """
    Build a list of frames that fade an object in and/or out over a solid background.

    Args:
        object_type: Kind of object to render: 'emoji' or 'text'; any other
            value produces plain background frames.
        object_data: Configuration for the object. For 'emoji':
            {'emoji': str, 'size': int, 'shadow': bool}. For 'text':
            {'text': str, 'font_size': int, 'text_color': tuple,
            'outline_color': tuple}. When None and object_type is 'emoji',
            defaults to a sparkle emoji of size 100.
        num_frames: Total number of frames to generate.
        fade_type: 'in', 'out', 'in_out', or 'blink'; unknown values fall
            back to a plain fade-in.
        easing: Easing function name forwarded to interpolate().
        center_pos: (x, y) pixel position the object is centered on.
        frame_width: Frame width in pixels.
        frame_height: Frame height in pixels.
        bg_color: RGB background color.

    Returns:
        List of RGB PIL Image frames, one per animation step.
    """
    frames = []
    # Default object data
    # NOTE(review): only the 'emoji' type gets a default here; calling with
    # object_type='text' and object_data=None would raise AttributeError
    # below — confirm callers always pass object_data for text.
    if object_data is None:
        if object_type == 'emoji':
            object_data = {'emoji': '✨', 'size': 100}
    for i in range(num_frames):
        # Normalized progress in [0, 1]; guarded so a single-frame
        # animation does not divide by zero.
        t = i / (num_frames - 1) if num_frames > 1 else 0
        # Calculate opacity based on fade type
        if fade_type == 'in':
            opacity = interpolate(0, 1, t, easing)
        elif fade_type == 'out':
            opacity = interpolate(1, 0, t, easing)
        elif fade_type == 'in_out':
            # First half fades in, second half fades back out.
            if t < 0.5:
                opacity = interpolate(0, 1, t * 2, easing)
            else:
                opacity = interpolate(1, 0, (t - 0.5) * 2, easing)
        elif fade_type == 'blink':
            # Quick fade out and back in
            if t < 0.2:
                opacity = interpolate(1, 0, t / 0.2, 'ease_in')
            elif t < 0.4:
                opacity = interpolate(0, 1, (t - 0.2) / 0.2, 'ease_out')
            else:
                opacity = 1.0
        else:
            # Unknown fade_type: behave like a plain fade-in.
            opacity = interpolate(0, 1, t, easing)
        # Create background
        frame_bg = create_blank_frame(frame_width, frame_height, bg_color)
        # Create object layer with transparency
        if object_type == 'emoji':
            # Create RGBA canvas for emoji
            emoji_canvas = Image.new('RGBA', (frame_width, frame_height), (0, 0, 0, 0))
            emoji_size = object_data['size']
            draw_emoji_enhanced(
                emoji_canvas,
                emoji=object_data['emoji'],
                position=(center_pos[0] - emoji_size // 2, center_pos[1] - emoji_size // 2),
                size=emoji_size,
                shadow=object_data.get('shadow', False)
            )
            # Apply opacity
            emoji_canvas = apply_opacity(emoji_canvas, opacity)
            # Composite onto background
            frame_bg_rgba = frame_bg.convert('RGBA')
            frame = Image.alpha_composite(frame_bg_rgba, emoji_canvas)
            frame = frame.convert('RGB')
        elif object_type == 'text':
            from core.typography import draw_text_with_outline
            # Create text on separate layer
            text_canvas = Image.new('RGBA', (frame_width, frame_height), (0, 0, 0, 0))
            text_canvas_rgb = text_canvas.convert('RGB')
            text_canvas_rgb.paste(bg_color, (0, 0, frame_width, frame_height))
            draw_text_with_outline(
                text_canvas_rgb,
                text=object_data.get('text', 'FADE'),
                position=center_pos,
                font_size=object_data.get('font_size', 60),
                text_color=object_data.get('text_color', (0, 0, 0)),
                outline_color=object_data.get('outline_color', (255, 255, 255)),
                outline_width=3,
                centered=True
            )
            # Convert to RGBA and make background transparent
            # (pixels that exactly match bg_color become fully transparent,
            # so only the drawn text survives the composite).
            text_canvas = text_canvas_rgb.convert('RGBA')
            data = text_canvas.getdata()
            new_data = []
            for item in data:
                if item[:3] == bg_color:
                    new_data.append((255, 255, 255, 0))
                else:
                    new_data.append(item)
            text_canvas.putdata(new_data)
            # Apply opacity
            text_canvas = apply_opacity(text_canvas, opacity)
            # Composite
            frame_bg_rgba = frame_bg.convert('RGBA')
            frame = Image.alpha_composite(frame_bg_rgba, text_canvas)
            frame = frame.convert('RGB')
        else:
            # Unsupported object types just emit the plain background.
            frame = frame_bg
        frames.append(frame)
    return frames
def apply_opacity(image: Image.Image, opacity: float) -> Image.Image:
    """
    Apply a uniform opacity multiplier to an image's alpha channel.

    Args:
        image: Source image; converted to RGBA if it is not already.
        opacity: Opacity multiplier, expected in [0.0, 1.0].

    Returns:
        New RGBA image whose alpha channel is scaled by ``opacity``; the
        RGB channels are untouched.
    """
    if image.mode != 'RGBA':
        image = image.convert('RGBA')
    # Split out the alpha channel so only it gets rescaled.
    r, g, b, a = image.split()
    # Scale alpha in float space, then clamp to the valid byte range so an
    # out-of-range opacity (e.g. 1.2) cannot wrap around on the uint8 cast
    # (255 * 1.2 = 306 would otherwise become 50).
    a_array = np.array(a, dtype=np.float32) * opacity
    a_array = np.clip(a_array, 0, 255)
    a = Image.fromarray(a_array.astype(np.uint8))
    # Merge back
    return Image.merge('RGBA', (r, g, b, a))
def create_crossfade(
    object1_data: dict,
    object2_data: dict,
    num_frames: int = 30,
    easing: str = 'ease_in_out',
    object_type: str = 'emoji',
    center_pos: tuple[int, int] = (240, 240),
    frame_width: int = 480,
    frame_height: int = 480,
    bg_color: tuple[int, int, int] = (255, 255, 255)
) -> list[Image.Image]:
    """
    Crossfade between two objects: the first fades out while the second fades in.

    Args:
        object1_data: Configuration of the outgoing object ({'emoji': str, 'size': int}).
        object2_data: Configuration of the incoming object (same shape).
        num_frames: Total number of frames to generate.
        easing: Easing function name forwarded to interpolate().
        object_type: Type of objects to render; only 'emoji' draws anything,
            other values yield plain background frames.
        center_pos: (x, y) pixel position both objects are centered on.
        frame_width: Frame width in pixels.
        frame_height: Frame height in pixels.
        bg_color: RGB background color.

    Returns:
        List of RGB PIL Image frames.
    """
    def _emoji_layer(data: dict, alpha: float) -> Image.Image:
        # Render one emoji on its own transparent canvas at the given opacity.
        canvas = Image.new('RGBA', (frame_width, frame_height), (0, 0, 0, 0))
        size = data['size']
        draw_emoji_enhanced(
            canvas,
            emoji=data['emoji'],
            position=(center_pos[0] - size // 2, center_pos[1] - size // 2),
            size=size,
            shadow=False
        )
        return apply_opacity(canvas, alpha)

    frames = []
    for frame_idx in range(num_frames):
        progress = frame_idx / (num_frames - 1) if num_frames > 1 else 0
        # Complementary opacity ramps: object 1 goes 1→0, object 2 goes 0→1.
        fade_out = interpolate(1, 0, progress, easing)
        fade_in = interpolate(0, 1, progress, easing)
        frame = create_blank_frame(frame_width, frame_height, bg_color)
        if object_type == 'emoji':
            # Stack both layers over the background, then drop alpha.
            composite = frame.convert('RGBA')
            composite = Image.alpha_composite(composite, _emoji_layer(object1_data, fade_out))
            composite = Image.alpha_composite(composite, _emoji_layer(object2_data, fade_in))
            frame = composite.convert('RGB')
        frames.append(frame)
    return frames
def create_fade_to_color(
    start_color: tuple[int, int, int],
    end_color: tuple[int, int, int],
    num_frames: int = 20,
    easing: str = 'linear',
    frame_width: int = 480,
    frame_height: int = 480
) -> list[Image.Image]:
    """
    Generate solid-color frames blending from start_color to end_color.

    Args:
        start_color: Starting RGB color.
        end_color: Ending RGB color.
        num_frames: Total number of frames to generate.
        easing: Easing function name forwarded to interpolate().
        frame_width: Frame width in pixels.
        frame_height: Frame height in pixels.

    Returns:
        List of uniformly-colored PIL Image frames.
    """
    frames = []
    for step in range(num_frames):
        progress = step / (num_frames - 1) if num_frames > 1 else 0
        # Blend each RGB channel independently along the easing curve.
        blended = tuple(
            int(interpolate(start, end, progress, easing))
            for start, end in zip(start_color, end_color)
        )
        frames.append(create_blank_frame(frame_width, frame_height, blended))
    return frames
# Example usage
if __name__ == '__main__':
    print("Creating fade animations...")
    # One shared builder; cleared between examples so frames don't accumulate.
    builder = GIFBuilder(width=480, height=480, fps=20)
    # Example 1: Fade in
    frames = create_fade_animation(
        object_type='emoji',
        object_data={'emoji': '✨', 'size': 120},
        num_frames=30,
        fade_type='in',
        easing='ease_out'
    )
    builder.add_frames(frames)
    builder.save('fade_in.gif', num_colors=128)
    # Example 2: Crossfade
    builder.clear()
    frames = create_crossfade(
        object1_data={'emoji': '😊', 'size': 100},
        object2_data={'emoji': '😂', 'size': 100},
        num_frames=30,
        object_type='emoji'
    )
    builder.add_frames(frames)
    builder.save('fade_crossfade.gif', num_colors=128)
    # Example 3: Blink
    builder.clear()
    frames = create_fade_animation(
        object_type='emoji',
        object_data={'emoji': '👀', 'size': 100},
        num_frames=20,
        fade_type='blink'
    )
    builder.add_frames(frames)
    builder.save('fade_blink.gif', num_colors=128)
    print("Created fade animations!")
+"""
+Fade Animation - Fade in, fade out, and crossfade effects.
+
+Creates smooth opacity transitions for appearing, disappearing, and transitioning.
+"""
import sys
from pathlib import Path
@@ -23,6 +28,23 @@ frame_height: int = 480,
bg_color: tuple[int, int, int] = (255, 255, 255)
) -> list[Image.Image]:
+ """
+ Create fade animation.
+
+ Args:
+ object_type: 'emoji', 'text', 'image'
+ object_data: Object configuration
+ num_frames: Number of frames
+ fade_type: Type of fade effect
+ easing: Easing function
+ center_pos: Center position
+ frame_width: Frame width
+ frame_height: Frame height
+ bg_color: Background color
+
+ Returns:
+ List of frames
+ """
frames = []
# Default object data
@@ -125,6 +147,16 @@
def apply_opacity(image: Image.Image, opacity: float) -> Image.Image:
+ """
+ Apply opacity to an RGBA image.
+
+ Args:
+ image: RGBA image
+ opacity: Opacity value (0.0 to 1.0)
+
+ Returns:
+ Image with adjusted opacity
+ """
if image.mode != 'RGBA':
image = image.convert('RGBA')
@@ -151,6 +183,23 @@ frame_height: int = 480,
bg_color: tuple[int, int, int] = (255, 255, 255)
) -> list[Image.Image]:
+ """
+ Crossfade between two objects.
+
+ Args:
+ object1_data: First object configuration
+ object2_data: Second object configuration
+ num_frames: Number of frames
+ easing: Easing function
+ object_type: Type of objects
+ center_pos: Center position
+ frame_width: Frame width
+ frame_height: Frame height
+ bg_color: Background color
+
+ Returns:
+ List of frames
+ """
frames = []
for i in range(num_frames):
@@ -207,6 +256,20 @@ frame_width: int = 480,
frame_height: int = 480
) -> list[Image.Image]:
+ """
+ Fade from one solid color to another.
+
+ Args:
+ start_color: Starting RGB color
+ end_color: Ending RGB color
+ num_frames: Number of frames
+ easing: Easing function
+ frame_width: Frame width
+ frame_height: Frame height
+
+ Returns:
+ List of frames
+ """
frames = []
for i in range(num_frames):
@@ -263,4 +326,4 @@ builder.add_frames(frames)
builder.save('fade_blink.gif', num_colors=128)
- print("Created fade animations!")+ print("Created fade animations!")
| https://raw.githubusercontent.com/ComposioHQ/awesome-claude-skills/HEAD/slack-gif-creator/templates/fade.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.