| instruction | code | response | file |
|---|---|---|---|
Improve documentation using docstrings |
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# WARNING ⚠️ wandb is deprecated and will be removed in future release.
# See supported integrations at https://github.com/ultralytics/yolov5#integrations
import logging
import os
import sys
from contextlib import contextmanager
from pathlib import Path
from utils.general import LOGGER, colorstr
FILE = Path(__file__).resolve()
ROOT = FILE.parents[3] # YOLOv5 root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
RANK = int(os.getenv("RANK", -1))
DEPRECATION_WARNING = (
f"{colorstr('wandb')}: WARNING ⚠️ wandb is deprecated and will be removed in a future release. "
f"See supported integrations at https://github.com/ultralytics/yolov5#integrations."
)
try:
import wandb
assert hasattr(wandb, "__version__") # verify package import not local dir
LOGGER.warning(DEPRECATION_WARNING)
except (ImportError, AssertionError):
wandb = None
class WandbLogger:
def __init__(self, opt, run_id=None, job_type="Training"):
# Pre-training routine
self.job_type = job_type
self.wandb, self.wandb_run = wandb, wandb.run if wandb else None
self.val_artifact, self.train_artifact = None, None
self.train_artifact_path, self.val_artifact_path = None, None
self.result_artifact = None
self.val_table, self.result_table = None, None
self.max_imgs_to_log = 16
self.data_dict = None
if self.wandb:
self.wandb_run = wandb.run or wandb.init(
config=opt,
resume="allow",
project="YOLOv5" if opt.project == "runs/train" else Path(opt.project).stem,
entity=opt.entity,
name=opt.name if opt.name != "exp" else None,
job_type=job_type,
id=run_id,
allow_val_change=True,
)
if self.wandb_run and self.job_type == "Training":
if isinstance(opt.data, dict):
# This means another dataset manager has already processed the dataset info (e.g. ClearML)
# and they will have stored the already processed dict in opt.data
self.data_dict = opt.data
self.setup_training(opt)
def setup_training(self, opt):
self.log_dict, self.current_epoch = {}, 0
self.bbox_interval = opt.bbox_interval
if isinstance(opt.resume, str):
model_dir, _ = self.download_model_artifact(opt)
if model_dir:
self.weights = Path(model_dir) / "last.pt"
config = self.wandb_run.config
opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp, opt.imgsz = (
str(self.weights),
config.save_period,
config.batch_size,
config.bbox_interval,
config.epochs,
config.hyp,
config.imgsz,
)
if opt.bbox_interval == -1:
self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1
if opt.evolve or opt.noplots:
self.bbox_interval = opt.bbox_interval = opt.epochs + 1 # disable bbox_interval
def log_model(self, path, opt, epoch, fitness_score, best_model=False):
model_artifact = wandb.Artifact(
f"run_{wandb.run.id}_model",
type="model",
metadata={
"original_url": str(path),
"epochs_trained": epoch + 1,
"save period": opt.save_period,
"project": opt.project,
"total_epochs": opt.epochs,
"fitness_score": fitness_score,
},
)
model_artifact.add_file(str(path / "last.pt"), name="last.pt")
wandb.log_artifact(
model_artifact,
aliases=[
"latest",
"last",
f"epoch {self.current_epoch!s}",
"best" if best_model else "",
],
)
LOGGER.info(f"Saving model artifact on epoch {epoch + 1}")
def val_one_image(self, pred, predn, path, names, im):
pass
def log(self, log_dict):
if self.wandb_run:
for key, value in log_dict.items():
self.log_dict[key] = value
def end_epoch(self):
if self.wandb_run:
with all_logging_disabled():
try:
wandb.log(self.log_dict)
except BaseException as e:
LOGGER.info(
f"An error occurred in wandb logger. The training will proceed without interruption. More info\n{e}"
)
self.wandb_run.finish()
self.wandb_run = None
self.log_dict = {}
def finish_run(self):
if self.wandb_run:
if self.log_dict:
with all_logging_disabled():
wandb.log(self.log_dict)
wandb.run.finish()
LOGGER.warning(DEPRECATION_WARNING)
@contextmanager
def all_logging_disabled(highest_level=logging.CRITICAL):
previous_level = logging.root.manager.disable
logging.disable(highest_level)
try:
yield
finally:
logging.disable(previous_level) |
---
+++
@@ -31,8 +31,26 @@
class WandbLogger:
+ """Log training runs, datasets, models, and predictions to Weights & Biases.
+
+ This logger sends information to W&B at wandb.ai. By default, this information includes hyperparameters, system
+ configuration and metrics, model metrics, and basic data metrics and analyses.
+
+ By providing additional command line arguments to train.py, datasets, models and predictions can also be logged.
+
+ For more on how this logger is used, see the Weights & Biases documentation:
+ https://docs.wandb.com/guides/integrations/yolov5
+ """
def __init__(self, opt, run_id=None, job_type="Training"):
+ """- Initialize WandbLogger instance - Upload dataset if opt.upload_dataset is True - Setup training processes
+ if job_type is 'Training'.
+
+ Args:
+ opt (namespace): Commandline arguments for this run:
+ run_id (str): Run ID of W&B run to be resumed
+ job_type (str): To set the job_type for this run
+ """
# Pre-training routine
self.job_type = job_type
self.wandb, self.wandb_run = wandb, wandb.run if wandb else None
@@ -62,6 +80,14 @@
self.setup_training(opt)
def setup_training(self, opt):
+ """Setup the necessary processes for training YOLO models: - Attempt to download model checkpoint and dataset
+ artifacts if opt.resume stats with WANDB_ARTIFACT_PREFIX - Update data_dict, to contain info of previous
+ run if resumed and the paths of dataset artifact if downloaded - Setup log_dict,
+ initialize bbox_interval.
+
+ Args:
+ opt (namespace): commandline arguments for this run
+ """
self.log_dict, self.current_epoch = {}, 0
self.bbox_interval = opt.bbox_interval
if isinstance(opt.resume, str):
@@ -85,6 +111,15 @@
self.bbox_interval = opt.bbox_interval = opt.epochs + 1 # disable bbox_interval
def log_model(self, path, opt, epoch, fitness_score, best_model=False):
+ """Log the model checkpoint as W&B artifact.
+
+ Args:
+ path (Path): Path of directory containing the checkpoints
+ opt (namespace): Command line arguments for this run
+ epoch (int): Current epoch number
+ fitness_score (float): fitness score for current epoch
+ best_model (boolean): Boolean representing if the current checkpoint is the best yet.
+ """
model_artifact = wandb.Artifact(
f"run_{wandb.run.id}_model",
type="model",
@@ -110,14 +145,25 @@
LOGGER.info(f"Saving model artifact on epoch {epoch + 1}")
def val_one_image(self, pred, predn, path, names, im):
+ """Evaluates model prediction for a single image, returning metrics and visualizations."""
pass
def log(self, log_dict):
+ """Save the metrics to the logging dictionary.
+
+ Args:
+ log_dict (Dict): metrics/media to be logged in current step
+ """
if self.wandb_run:
for key, value in log_dict.items():
self.log_dict[key] = value
def end_epoch(self):
+ """Commit the log_dict, model artifacts and Tables to W&B and flush the log_dict.
+
+ Args:
+ best_result (boolean): Boolean representing if the result of this evaluation is best or not
+ """
if self.wandb_run:
with all_logging_disabled():
try:
@@ -131,6 +177,7 @@
self.log_dict = {}
def finish_run(self):
+ """Log metrics if any and finish the current W&B run."""
if self.wandb_run:
if self.log_dict:
with all_logging_disabled():
@@ -141,9 +188,14 @@
@contextmanager
def all_logging_disabled(highest_level=logging.CRITICAL):
+ """Source - https://gist.github.com/simon-weber/7853144
+ A context manager that will prevent any logging messages triggered during the body from being processed.
+ :param highest_level: the maximum logging level in use.
+ This would only need to be changed if a custom level greater than CRITICAL is defined.
+ """
previous_level = logging.root.manager.disable
logging.disable(highest_level)
try:
yield
finally:
- logging.disable(previous_level)
+ logging.disable(previous_level)
| https://raw.githubusercontent.com/ultralytics/yolov5/HEAD/utils/loggers/wandb/wandb_utils.py |
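The docstrings added in this row describe a buffered logging flow: `log()` accumulates metrics, `end_epoch()` commits them, and `finish_run()` closes the run. The sketch below is illustrative only and not part of the dataset row; the `opt` namespace fields are assumptions inferred from the attributes that `__init__` and `setup_training` read, and the flow degrades to a no-op when wandb is not installed.

```python
# Hypothetical usage sketch for the WandbLogger documented above.
# Assumptions: opt fields mirror what __init__/setup_training read; wandb may be absent.
from argparse import Namespace

from utils.loggers.wandb.wandb_utils import WandbLogger

opt = Namespace(
    project="runs/train",  # default project maps to the W&B project "YOLOv5"
    entity=None,
    name="exp",
    data="data/coco128.yaml",
    resume=False,
    bbox_interval=-1,  # -1 is rederived from epochs in setup_training
    epochs=3,
    evolve=False,
    noplots=False,
)

logger = WandbLogger(opt)  # starts or attaches to a W&B run; logger.wandb_run is None without wandb
for _ in range(opt.epochs):
    logger.log({"train/box_loss": 0.05})  # buffer metrics for the current step
    logger.end_epoch()  # commit buffered metrics to W&B
logger.finish_run()  # flush any remaining metrics and close the run
```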
Write docstrings including parameters and return values |
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import contextlib
import glob
import inspect
import logging
import logging.config
import math
import os
import platform
import random
import re
import signal
import subprocess
import sys
import time
import urllib
from copy import deepcopy
from datetime import datetime
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from subprocess import check_output
from tarfile import is_tarfile
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import packaging
import pandas as pd
import torch
import torchvision
import yaml
# Import 'ultralytics' package or install if missing
try:
import ultralytics
assert hasattr(ultralytics, "__version__") # verify package is not directory
except (ImportError, AssertionError):
os.system("pip install -U ultralytics")
import ultralytics
from ultralytics.utils.checks import check_requirements
from ultralytics.utils.patches import torch_load
from utils import TryExcept, emojis
from utils.downloads import curl_download, gsutil_getsize
from utils.metrics import box_iou, fitness
FILE = Path(__file__).resolve()
ROOT = FILE.parents[1] # YOLOv5 root directory
RANK = int(os.getenv("RANK", -1))
# Settings
NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads
DATASETS_DIR = Path(os.getenv("YOLOv5_DATASETS_DIR", ROOT.parent / "datasets")) # global datasets directory
AUTOINSTALL = str(os.getenv("YOLOv5_AUTOINSTALL", True)).lower() == "true" # global auto-install mode
VERBOSE = str(os.getenv("YOLOv5_VERBOSE", True)).lower() == "true" # global verbose mode
TQDM_BAR_FORMAT = "{l_bar}{bar:10}{r_bar}" # tqdm bar format
FONT = "Arial.ttf" # https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf
torch.set_printoptions(linewidth=320, precision=5, profile="long")
np.set_printoptions(linewidth=320, formatter={"float_kind": "{:11.5g}".format}) # format short g, %precision=5
pd.options.display.max_columns = 10
cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
os.environ["NUMEXPR_MAX_THREADS"] = str(NUM_THREADS) # NumExpr max threads
os.environ["OMP_NUM_THREADS"] = "1" if platform.system() == "darwin" else str(NUM_THREADS) # OpenMP (PyTorch and SciPy)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" # suppress verbose TF compiler warnings in Colab
os.environ["TORCH_CPP_LOG_LEVEL"] = "ERROR" # suppress "NNPACK.cpp could not initialize NNPACK" warnings
os.environ["KINETO_LOG_LEVEL"] = "5" # suppress verbose PyTorch profiler output when computing FLOPs
def is_ascii(s=""):
s = str(s) # convert list, tuple, None, etc. to str
return len(s.encode().decode("ascii", "ignore")) == len(s)
def is_chinese(s="人工智能"):
return bool(re.search("[\u4e00-\u9fff]", str(s)))
def is_colab():
return "google.colab" in sys.modules
def is_jupyter():
with contextlib.suppress(Exception):
from IPython import get_ipython
return get_ipython() is not None
return False
def is_kaggle():
return os.environ.get("PWD") == "/kaggle/working" and os.environ.get("KAGGLE_URL_BASE") == "https://www.kaggle.com"
def is_docker() -> bool:
if Path("/.dockerenv").exists():
return True
try: # check if docker is in control groups
with open("/proc/self/cgroup") as file:
return any("docker" in line for line in file)
except OSError:
return False
def is_writeable(dir, test=False):
if not test:
return os.access(dir, os.W_OK) # possible issues on Windows
file = Path(dir) / "tmp.txt"
try:
with open(file, "w"): # open file with write permissions
pass
file.unlink() # remove file
return True
except OSError:
return False
LOGGING_NAME = "yolov5"
def set_logging(name=LOGGING_NAME, verbose=True):
rank = int(os.getenv("RANK", -1)) # rank in world for Multi-GPU trainings
level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR
logging.config.dictConfig(
{
"version": 1,
"disable_existing_loggers": False,
"formatters": {name: {"format": "%(message)s"}},
"handlers": {
name: {
"class": "logging.StreamHandler",
"formatter": name,
"level": level,
}
},
"loggers": {
name: {
"level": level,
"handlers": [name],
"propagate": False,
}
},
}
)
set_logging(LOGGING_NAME) # run before defining LOGGER
LOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.)
if platform.system() == "Windows":
for fn in LOGGER.info, LOGGER.warning:
setattr(LOGGER, fn.__name__, lambda x, fn=fn: fn(emojis(x))) # emoji safe logging (bind fn per iteration)
def user_config_dir(dir="Ultralytics", env_var="YOLOV5_CONFIG_DIR"):
if env := os.getenv(env_var):
path = Path(env) # use environment variable
else:
cfg = {"Windows": "AppData/Roaming", "Linux": ".config", "Darwin": "Library/Application Support"} # 3 OS dirs
path = Path.home() / cfg.get(platform.system(), "") # OS-specific config dir
path = (path if is_writeable(path) else Path("/tmp")) / dir # GCP and AWS lambda fix, only /tmp is writeable
path.mkdir(exist_ok=True) # make if required
return path
CONFIG_DIR = user_config_dir() # Ultralytics settings dir
class Profile(contextlib.ContextDecorator):
def __init__(self, t=0.0, device: torch.device = None):
self.t = t
self.device = device
self.cuda = bool(device and str(device).startswith("cuda"))
def __enter__(self):
self.start = self.time()
return self
def __exit__(self, type, value, traceback):
self.dt = self.time() - self.start # delta-time
self.t += self.dt # accumulate dt
def time(self):
if self.cuda:
torch.cuda.synchronize(self.device)
return time.time()
class Timeout(contextlib.ContextDecorator):
def __init__(self, seconds, *, timeout_msg="", suppress_timeout_errors=True):
self.seconds = int(seconds)
self.timeout_message = timeout_msg
self.suppress = bool(suppress_timeout_errors)
def _timeout_handler(self, signum, frame):
raise TimeoutError(self.timeout_message)
def __enter__(self):
if platform.system() != "Windows": # not supported on Windows
signal.signal(signal.SIGALRM, self._timeout_handler) # Set handler for SIGALRM
signal.alarm(self.seconds) # start countdown for SIGALRM to be raised
def __exit__(self, exc_type, exc_val, exc_tb):
if platform.system() != "Windows":
signal.alarm(0) # Cancel SIGALRM if it's scheduled
if self.suppress and exc_type is TimeoutError: # Suppress TimeoutError
return True
class WorkingDirectory(contextlib.ContextDecorator):
def __init__(self, new_dir):
self.dir = new_dir # new dir
self.cwd = Path.cwd().resolve() # current dir
def __enter__(self):
os.chdir(self.dir)
def __exit__(self, exc_type, exc_val, exc_tb):
os.chdir(self.cwd)
def methods(instance):
return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")]
def print_args(args: dict | None = None, show_file=True, show_func=False):
x = inspect.currentframe().f_back # previous frame
file, _, func, _, _ = inspect.getframeinfo(x)
if args is None: # get args automatically
args, _, _, frm = inspect.getargvalues(x)
args = {k: v for k, v in frm.items() if k in args}
try:
file = Path(file).resolve().relative_to(ROOT).with_suffix("")
except ValueError:
file = Path(file).stem
s = (f"{file}: " if show_file else "") + (f"{func}: " if show_func else "")
LOGGER.info(colorstr(s) + ", ".join(f"{k}={v}" for k, v in args.items()))
def init_seeds(seed=0, deterministic=False):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # for Multi-GPU, exception safe
# torch.backends.cudnn.benchmark = True # AutoBatch problem https://github.com/ultralytics/yolov5/issues/9287
if deterministic and check_version(torch.__version__, "1.12.0"): # https://github.com/ultralytics/yolov5/pull/8213
torch.use_deterministic_algorithms(True)
torch.backends.cudnn.deterministic = True
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
os.environ["PYTHONHASHSEED"] = str(seed)
def intersect_dicts(da, db, exclude=()):
return {k: v for k, v in da.items() if k in db and all(x not in k for x in exclude) and v.shape == db[k].shape}
def get_default_args(func):
signature = inspect.signature(func)
return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty}
def get_latest_run(search_dir="."):
last_list = glob.glob(f"{search_dir}/**/last*.pt", recursive=True)
return max(last_list, key=os.path.getctime) if last_list else ""
def file_age(path=__file__):
dt = datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime) # delta
return dt.days # + dt.seconds / 86400 # fractional days
def file_date(path=__file__):
t = datetime.fromtimestamp(Path(path).stat().st_mtime)
return f"{t.year}-{t.month}-{t.day}"
def file_size(path):
mb = 1 << 20 # bytes to MiB (1024 ** 2)
path = Path(path)
if path.is_file():
return path.stat().st_size / mb
elif path.is_dir():
return sum(f.stat().st_size for f in path.glob("**/*") if f.is_file()) / mb
else:
return 0.0
def check_online():
import socket
def run_once():
try:
socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility
return True
except OSError:
return False
return run_once() or run_once() # check twice to increase robustness to intermittent connectivity issues
def git_describe(path=ROOT):
try:
assert (Path(path) / ".git").is_dir()
return check_output(f"git -C {path} describe --tags --long --always", shell=True).decode()[:-1]
except Exception:
return ""
@TryExcept()
@WorkingDirectory(ROOT)
def check_git_status(repo="ultralytics/yolov5", branch="master"):
url = f"https://github.com/{repo}"
msg = f", for updates see {url}"
s = colorstr("github: ") # string
assert Path(".git").exists(), s + "skipping check (not a git repository)" + msg
assert check_online(), s + "skipping check (offline)" + msg
splits = re.split(pattern=r"\s", string=check_output("git remote -v", shell=True).decode())
matches = [repo in s for s in splits]
if any(matches):
remote = splits[matches.index(True) - 1]
else:
remote = "ultralytics"
check_output(f"git remote add {remote} {url}", shell=True)
check_output(f"git fetch {remote}", shell=True, timeout=5) # git fetch
local_branch = check_output("git rev-parse --abbrev-ref HEAD", shell=True).decode().strip() # checked out
n = int(check_output(f"git rev-list {local_branch}..{remote}/{branch} --count", shell=True)) # commits behind
if n > 0:
pull = "git pull" if remote == "origin" else f"git pull {remote} {branch}"
s += f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use '{pull}' or 'git clone {url}' to update."
else:
s += f"up to date with {url} ✅"
LOGGER.info(s)
@WorkingDirectory(ROOT)
def check_git_info(path="."):
check_requirements("gitpython")
import git
try:
repo = git.Repo(path)
remote = repo.remotes.origin.url.replace(".git", "") # i.e. 'https://github.com/ultralytics/yolov5'
commit = repo.head.commit.hexsha # i.e. '3134699c73af83aac2a481435550b968d5792c0d'
try:
branch = repo.active_branch.name # i.e. 'main'
except TypeError: # not on any branch
branch = None # i.e. 'detached HEAD' state
return {"remote": remote, "branch": branch, "commit": commit}
except git.exc.InvalidGitRepositoryError: # path is not a git dir
return {"remote": None, "branch": None, "commit": None}
def check_python(minimum="3.8.0"):
check_version(platform.python_version(), minimum, name="Python ", hard=True)
def check_version(current="0.0.0", minimum="0.0.0", name="version ", pinned=False, hard=False, verbose=False):
current, minimum = (packaging.version.parse(x) for x in (current, minimum))
result = (current == minimum) if pinned else (current >= minimum) # bool
s = f"WARNING ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed" # string
if hard:
assert result, emojis(s) # assert min requirements met
if verbose and not result:
LOGGER.warning(s)
return result
def check_img_size(imgsz, s=32, floor=0):
if isinstance(imgsz, int): # integer i.e. img_size=640
new_size = max(make_divisible(imgsz, int(s)), floor)
else: # list i.e. img_size=[640, 480]
imgsz = list(imgsz) # convert to list if tuple
new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz]
if new_size != imgsz:
LOGGER.warning(f"WARNING ⚠️ --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}")
return new_size
def check_imshow(warn=False):
try:
assert not is_jupyter()
assert not is_docker()
cv2.imshow("test", np.zeros((1, 1, 3)))
cv2.waitKey(1)
cv2.destroyAllWindows()
cv2.waitKey(1)
return True
except Exception as e:
if warn:
LOGGER.warning(f"WARNING ⚠️ Environment does not support cv2.imshow() or PIL Image.show()\n{e}")
return False
def check_suffix(file="yolov5s.pt", suffix=(".pt",), msg=""):
if file and suffix:
if isinstance(suffix, str):
suffix = [suffix]
for f in file if isinstance(file, (list, tuple)) else [file]:
s = Path(f).suffix.lower() # file suffix
if len(s):
assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}"
def check_yaml(file, suffix=(".yaml", ".yml")):
return check_file(file, suffix)
def check_file(file, suffix=""):
check_suffix(file, suffix) # optional
file = str(file) # convert to str()
if os.path.isfile(file) or not file: # exists
return file
elif file.startswith(("http:/", "https:/")): # download
url = file # warning: Pathlib turns :// -> :/
file = Path(urllib.parse.unquote(file).split("?")[0]).name # '%2F' to '/', split https://url.com/file.txt?auth
if os.path.isfile(file):
LOGGER.info(f"Found {url} locally at {file}") # file already exists
else:
LOGGER.info(f"Downloading {url} to {file}...")
torch.hub.download_url_to_file(url, file)
assert Path(file).exists() and Path(file).stat().st_size > 0, f"File download failed: {url}" # check
return file
elif file.startswith("clearml://"): # ClearML Dataset ID
assert "clearml" in sys.modules, (
"ClearML is not installed, so cannot use ClearML dataset. Try running 'pip install clearml'."
)
return file
else: # search
files = []
for d in "data", "models", "utils": # search directories
files.extend(glob.glob(str(ROOT / d / "**" / file), recursive=True)) # find file
assert len(files), f"File not found: {file}" # assert file was found
assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique
return files[0] # return file
def check_font(font=FONT, progress=False):
font = Path(font)
file = CONFIG_DIR / font.name
if not font.exists() and not file.exists():
url = f"https://github.com/ultralytics/assets/releases/download/v0.0.0/{font.name}"
LOGGER.info(f"Downloading {url} to {file}...")
torch.hub.download_url_to_file(url, str(file), progress=progress)
def check_dataset(data, autodownload=True):
# Download (optional)
extract_dir = ""
if isinstance(data, (str, Path)) and (is_zipfile(data) or is_tarfile(data)):
download(data, dir=f"{DATASETS_DIR}/{Path(data).stem}", unzip=True, delete=False, curl=False, threads=1)
data = next((DATASETS_DIR / Path(data).stem).rglob("*.yaml"))
extract_dir, autodownload = data.parent, False
# Read yaml (optional)
if isinstance(data, (str, Path)):
data = yaml_load(data) # dictionary
# Checks
for k in "train", "val", "names":
assert k in data, emojis(f"data.yaml '{k}:' field missing ❌")
if isinstance(data["names"], (list, tuple)): # old array format
data["names"] = dict(enumerate(data["names"])) # convert to dict
assert all(isinstance(k, int) for k in data["names"].keys()), "data.yaml names keys must be integers, i.e. 2: car"
data["nc"] = len(data["names"])
# Resolve paths
path = Path(extract_dir or data.get("path") or "") # optional 'path' default to '.'
if not path.is_absolute():
path = (ROOT / path).resolve()
data["path"] = path # download scripts
for k in "train", "val", "test":
if data.get(k): # prepend path
if isinstance(data[k], str):
x = (path / data[k]).resolve()
if not x.exists() and data[k].startswith("../"):
x = (path / data[k][3:]).resolve()
data[k] = str(x)
else:
data[k] = [str((path / x).resolve()) for x in data[k]]
# Parse yaml
_train, val, _test, s = (data.get(x) for x in ("train", "val", "test", "download"))
if val:
val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path
if not all(x.exists() for x in val):
LOGGER.info("\nDataset not found ⚠️, missing paths %s" % [str(x) for x in val if not x.exists()])
if not s or not autodownload:
raise Exception("Dataset not found ❌")
t = time.time()
if s.startswith("http") and s.endswith(".zip"): # URL
f = Path(s).name # filename
LOGGER.info(f"Downloading {s} to {f}...")
torch.hub.download_url_to_file(s, f)
Path(DATASETS_DIR).mkdir(parents=True, exist_ok=True) # create root
unzip_file(f, path=DATASETS_DIR) # unzip
Path(f).unlink() # remove zip
r = None # success
elif s.startswith("bash "): # bash script
LOGGER.info(f"Running {s} ...")
r = subprocess.run(s, shell=True)
else: # python script
r = exec(s, {"yaml": data}) # return None
dt = f"({round(time.time() - t, 1)}s)"
s = f"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}" if r in (0, None) else f"failure {dt} ❌"
LOGGER.info(f"Dataset download {s}")
check_font("Arial.ttf" if is_ascii(data["names"]) else "Arial.Unicode.ttf", progress=True) # download fonts
return data # dictionary
def check_amp(model):
from models.common import AutoShape, DetectMultiBackend
def amp_allclose(model, im):
m = AutoShape(model, verbose=False) # model
a = m(im).xywhn[0] # FP32 inference
m.amp = True
b = m(im).xywhn[0] # AMP inference
return a.shape == b.shape and torch.allclose(a, b, atol=0.1) # close to 10% absolute tolerance
prefix = colorstr("AMP: ")
device = next(model.parameters()).device # get model device
if device.type in ("cpu", "mps"):
return False # AMP only used on CUDA devices
f = ROOT / "data" / "images" / "bus.jpg" # image to check
im = f if f.exists() else "https://ultralytics.com/images/bus.jpg" if check_online() else np.ones((640, 640, 3))
try:
assert amp_allclose(deepcopy(model), im) or amp_allclose(DetectMultiBackend("yolov5n.pt", device), im)
LOGGER.info(f"{prefix}checks passed ✅")
return True
except Exception:
help_url = "https://github.com/ultralytics/yolov5/issues/7908"
LOGGER.warning(f"{prefix}checks failed ❌, disabling Automatic Mixed Precision. See {help_url}")
return False
def yaml_load(file="data.yaml"):
with open(file, errors="ignore") as f:
return yaml.safe_load(f)
def yaml_save(file="data.yaml", data=None):
if data is None:
data = {}
with open(file, "w") as f:
yaml.safe_dump({k: str(v) if isinstance(v, Path) else v for k, v in data.items()}, f, sort_keys=False)
def unzip_file(file, path=None, exclude=(".DS_Store", "__MACOSX")):
if path is None:
path = Path(file).parent # default path
with ZipFile(file) as zipObj:
for f in zipObj.namelist(): # list all archived filenames in the zip
if all(x not in f for x in exclude):
zipObj.extract(f, path=path)
def url2file(url):
url = str(Path(url)).replace(":/", "://") # Pathlib turns :// -> :/
return Path(urllib.parse.unquote(url)).name.split("?")[0] # '%2F' to '/', split https://url.com/file.txt?auth
def download(url, dir=".", unzip=True, delete=True, curl=False, threads=1, retry=3):
def download_one(url, dir):
success = True
if os.path.isfile(url):
f = Path(url) # filename
else: # does not exist
f = dir / Path(url).name
LOGGER.info(f"Downloading {url} to {f}...")
for i in range(retry + 1):
if curl:
success = curl_download(url, f, silent=(threads > 1))
else:
torch.hub.download_url_to_file(url, f, progress=threads == 1) # torch download
success = f.is_file()
if success:
break
elif i < retry:
LOGGER.warning(f"⚠️ Download failure, retrying {i + 1}/{retry} {url}...")
else:
LOGGER.warning(f"❌ Failed to download {url}...")
if unzip and success and (f.suffix == ".gz" or is_zipfile(f) or is_tarfile(f)):
LOGGER.info(f"Unzipping {f}...")
if is_zipfile(f):
unzip_file(f, dir) # unzip
elif is_tarfile(f):
subprocess.run(["tar", "xf", f, "--directory", f.parent], check=True) # unzip
elif f.suffix == ".gz":
subprocess.run(["tar", "xfz", f, "--directory", f.parent], check=True) # unzip
if delete:
f.unlink() # remove zip
dir = Path(dir)
dir.mkdir(parents=True, exist_ok=True) # make directory
if threads > 1:
pool = ThreadPool(threads)
pool.imap(lambda x: download_one(*x), zip(url, repeat(dir))) # multithreaded
pool.close()
pool.join()
else:
for u in [url] if isinstance(url, (str, Path)) else url:
download_one(u, dir)
def make_divisible(x, divisor):
if isinstance(divisor, torch.Tensor):
divisor = int(divisor.max()) # to int
return math.ceil(x / divisor) * divisor
def clean_str(s):
return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)
def one_cycle(y1=0.0, y2=1.0, steps=100):
return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1
def colorstr(*input):
*args, string = input if len(input) > 1 else ("blue", "bold", input[0]) # color arguments, string
colors = {
"black": "\033[30m", # basic colors
"red": "\033[31m",
"green": "\033[32m",
"yellow": "\033[33m",
"blue": "\033[34m",
"magenta": "\033[35m",
"cyan": "\033[36m",
"white": "\033[37m",
"bright_black": "\033[90m", # bright colors
"bright_red": "\033[91m",
"bright_green": "\033[92m",
"bright_yellow": "\033[93m",
"bright_blue": "\033[94m",
"bright_magenta": "\033[95m",
"bright_cyan": "\033[96m",
"bright_white": "\033[97m",
"end": "\033[0m", # misc
"bold": "\033[1m",
"underline": "\033[4m",
}
return "".join(colors[x] for x in args) + f"{string}" + colors["end"]
def labels_to_class_weights(labels, nc=80):
if labels[0] is None: # no labels loaded
return torch.Tensor()
labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO
classes = labels[:, 0].astype(int) # labels = [class xywh]
weights = np.bincount(classes, minlength=nc) # occurrences per class
# Prepend gridpoint count (for uCE training)
# gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image
# weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start
weights[weights == 0] = 1 # replace empty bins with 1
weights = 1 / weights # number of targets per class
weights /= weights.sum() # normalize
return torch.from_numpy(weights).float()
def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
# Usage: index = random.choices(range(n), weights=image_weights, k=1) # weighted image sample
class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels])
return (class_weights.reshape(1, nc) * class_counts).sum(1)
def coco80_to_coco91_class():
# a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
# b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
# x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco
# x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet
return [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
27,
28,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65,
67,
70,
72,
73,
74,
75,
76,
77,
78,
79,
80,
81,
82,
84,
85,
86,
87,
88,
89,
90,
]
def xyxy2xywh(x):
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[..., 0] = (x[..., 0] + x[..., 2]) / 2 # x center
y[..., 1] = (x[..., 1] + x[..., 3]) / 2 # y center
y[..., 2] = x[..., 2] - x[..., 0] # width
y[..., 3] = x[..., 3] - x[..., 1] # height
return y
def xywh2xyxy(x):
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[..., 0] = x[..., 0] - x[..., 2] / 2 # top left x
y[..., 1] = x[..., 1] - x[..., 3] / 2 # top left y
y[..., 2] = x[..., 0] + x[..., 2] / 2 # bottom right x
y[..., 3] = x[..., 1] + x[..., 3] / 2 # bottom right y
return y
def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw # top left x
y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh # top left y
y[..., 2] = w * (x[..., 0] + x[..., 2] / 2) + padw # bottom right x
y[..., 3] = h * (x[..., 1] + x[..., 3] / 2) + padh # bottom right y
return y
def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
if clip:
clip_boxes(x, (h - eps, w - eps)) # warning: inplace clip
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[..., 0] = ((x[..., 0] + x[..., 2]) / 2) / w # x center
y[..., 1] = ((x[..., 1] + x[..., 3]) / 2) / h # y center
y[..., 2] = (x[..., 2] - x[..., 0]) / w # width
y[..., 3] = (x[..., 3] - x[..., 1]) / h # height
return y
def xyn2xy(x, w=640, h=640, padw=0, padh=0):
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[..., 0] = w * x[..., 0] + padw # top left x
y[..., 1] = h * x[..., 1] + padh # top left y
return y
def segment2box(segment, width=640, height=640):
x, y = segment.T # segment xy
inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
x, y = x[inside], y[inside]
return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy
def segments2boxes(segments):
boxes = []
for s in segments:
x, y = s.T # segment xy
boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy
return xyxy2xywh(np.array(boxes)) # cls, xywh
def resample_segments(segments, n=1000):
for i, s in enumerate(segments):
s = np.concatenate((s, s[0:1, :]), axis=0)
x = np.linspace(0, len(s) - 1, n)
xp = np.arange(len(s))
segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy
return segments
def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None):
if ratio_pad is None: # calculate from img0_shape
gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new
pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
else:
gain = ratio_pad[0][0]
pad = ratio_pad[1]
boxes[..., [0, 2]] -= pad[0] # x padding
boxes[..., [1, 3]] -= pad[1] # y padding
boxes[..., :4] /= gain
clip_boxes(boxes, img0_shape)
return boxes
def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, normalize=False):
if ratio_pad is None: # calculate from img0_shape
gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new
pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
else:
gain = ratio_pad[0][0]
pad = ratio_pad[1]
segments[:, 0] -= pad[0] # x padding
segments[:, 1] -= pad[1] # y padding
segments /= gain
clip_segments(segments, img0_shape)
if normalize:
segments[:, 0] /= img0_shape[1] # width
segments[:, 1] /= img0_shape[0] # height
return segments
def clip_boxes(boxes, shape):
if isinstance(boxes, torch.Tensor): # faster individually
boxes[..., 0].clamp_(0, shape[1]) # x1
boxes[..., 1].clamp_(0, shape[0]) # y1
boxes[..., 2].clamp_(0, shape[1]) # x2
boxes[..., 3].clamp_(0, shape[0]) # y2
else: # np.array (faster grouped)
boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1]) # x1, x2
boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0]) # y1, y2
def clip_segments(segments, shape):
if isinstance(segments, torch.Tensor): # faster individually
segments[:, 0].clamp_(0, shape[1]) # x
segments[:, 1].clamp_(0, shape[0]) # y
else: # np.array (faster grouped)
segments[:, 0] = segments[:, 0].clip(0, shape[1]) # x
segments[:, 1] = segments[:, 1].clip(0, shape[0]) # y
def non_max_suppression(
prediction,
conf_thres=0.25,
iou_thres=0.45,
classes=None,
agnostic=False,
multi_label=False,
labels=(),
max_det=300,
nm=0, # number of masks
):
# Checks
assert 0 <= conf_thres <= 1, f"Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0"
assert 0 <= iou_thres <= 1, f"Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0"
if isinstance(prediction, (list, tuple)): # YOLOv5 model in validation mode, output = (inference_out, loss_out)
prediction = prediction[0] # select only inference output
device = prediction.device
mps = "mps" in device.type # Apple MPS
if mps: # MPS not fully supported yet, convert tensors to CPU before NMS
prediction = prediction.cpu()
bs = prediction.shape[0] # batch size
nc = prediction.shape[2] - nm - 5 # number of classes
xc = prediction[..., 4] > conf_thres # candidates
# Settings
# min_wh = 2 # (pixels) minimum box width and height
max_wh = 7680 # (pixels) maximum box width and height
max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()
time_limit = 0.5 + 0.05 * bs # seconds to quit after
redundant = True # require redundant detections
multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)
merge = False # use merge-NMS
t = time.time()
mi = 5 + nc # mask start index
output = [torch.zeros((0, 6 + nm), device=prediction.device)] * bs
for xi, x in enumerate(prediction): # image index, image inference
# Apply constraints
# x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height
x = x[xc[xi]] # confidence
# Cat apriori labels if autolabelling
if labels and len(labels[xi]):
lb = labels[xi]
v = torch.zeros((len(lb), nc + nm + 5), device=x.device)
v[:, :4] = lb[:, 1:5] # box
v[:, 4] = 1.0 # conf
v[range(len(lb)), lb[:, 0].long() + 5] = 1.0 # cls
x = torch.cat((x, v), 0)
# If none remain process next image
if not x.shape[0]:
continue
# Compute conf
x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf
# Box/Mask
box = xywh2xyxy(x[:, :4]) # center_x, center_y, width, height) to (x1, y1, x2, y2)
mask = x[:, mi:] # zero columns if no masks
# Detections matrix nx6 (xyxy, conf, cls)
if multi_label:
i, j = (x[:, 5:mi] > conf_thres).nonzero(as_tuple=False).T
x = torch.cat((box[i], x[i, 5 + j, None], j[:, None].float(), mask[i]), 1)
else: # best class only
conf, j = x[:, 5:mi].max(1, keepdim=True)
x = torch.cat((box, conf, j.float(), mask), 1)[conf.view(-1) > conf_thres]
# Filter by class
if classes is not None:
x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
# Apply finite constraint
# if not torch.isfinite(x).all():
# x = x[torch.isfinite(x).all(1)]
# Check shape
n = x.shape[0] # number of boxes
if not n: # no boxes
continue
x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence and remove excess boxes
# Batched NMS
c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores
i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS
i = i[:max_det] # limit detections
if merge and (1 < n < 3e3): # Merge NMS (boxes merged using weighted mean)
# update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix
weights = iou * scores[None] # box weights
x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes
if redundant:
i = i[iou.sum(1) > 1] # require redundancy
output[xi] = x[i]
if mps:
output[xi] = output[xi].to(device)
if (time.time() - t) > time_limit:
LOGGER.warning(f"WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded")
break # time limit exceeded
return output
def strip_optimizer(f="best.pt", s=""):
x = torch_load(f, map_location=torch.device("cpu"))
if x.get("ema"):
x["model"] = x["ema"] # replace model with ema
for k in "optimizer", "best_fitness", "ema", "updates": # keys
x[k] = None
x["epoch"] = -1
x["model"].half() # to FP16
for p in x["model"].parameters():
p.requires_grad = False
torch.save(x, s or f)
mb = os.path.getsize(s or f) / 1e6 # filesize
LOGGER.info(f"Optimizer stripped from {f},{f' saved as {s},' if s else ''} {mb:.1f}MB")
def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr("evolve: ")):
evolve_csv = save_dir / "evolve.csv"
evolve_yaml = save_dir / "hyp_evolve.yaml"
keys = tuple(keys) + tuple(hyp.keys()) # [results + hyps]
keys = tuple(x.strip() for x in keys)
vals = results + tuple(hyp.values())
n = len(keys)
# Download (optional)
if bucket:
url = f"gs://{bucket}/evolve.csv"
if gsutil_getsize(url) > (evolve_csv.stat().st_size if evolve_csv.exists() else 0):
subprocess.run(["gsutil", "cp", f"{url}", f"{save_dir}"]) # download evolve.csv if larger than local
# Log to evolve.csv
s = "" if evolve_csv.exists() else (("%20s," * n % keys).rstrip(",") + "\n") # add header
with open(evolve_csv, "a") as f:
f.write(s + ("%20.5g," * n % vals).rstrip(",") + "\n")
# Save yaml
with open(evolve_yaml, "w") as f:
data = pd.read_csv(evolve_csv, skipinitialspace=True)
data = data.rename(columns=lambda x: x.strip()) # strip keys
i = np.argmax(fitness(data.values[:, :4])) #
generations = len(data)
f.write(
"# YOLOv5 Hyperparameter Evolution Results\n"
+ f"# Best generation: {i}\n"
+ f"# Last generation: {generations - 1}\n"
+ "# "
+ ", ".join(f"{x.strip():>20s}" for x in keys[:7])
+ "\n"
+ "# "
+ ", ".join(f"{x:>20.5g}" for x in data.values[i, :7])
+ "\n\n"
)
yaml.safe_dump(data.loc[i][7:].to_dict(), f, sort_keys=False)
# Print to screen
LOGGER.info(
prefix
+ f"{generations} generations finished, current result:\n"
+ prefix
+ ", ".join(f"{x.strip():>20s}" for x in keys)
+ "\n"
+ prefix
+ ", ".join(f"{x:20.5g}" for x in vals)
+ "\n\n"
)
if bucket:
subprocess.run(["gsutil", "cp", f"{evolve_csv}", f"{evolve_yaml}", f"gs://{bucket}"]) # upload
def apply_classifier(x, model, img, im0):
# Example model = torchvision.models.__dict__['efficientnet_b0'](pretrained=True).to(device).eval()
im0 = [im0] if isinstance(im0, np.ndarray) else im0
for i, d in enumerate(x): # per image
if d is not None and len(d):
d = d.clone()
# Reshape and pad cutouts
b = xyxy2xywh(d[:, :4]) # boxes
b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square
b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad
d[:, :4] = xywh2xyxy(b).long()
# Rescale boxes from img_size to im0 size
scale_boxes(img.shape[2:], d[:, :4], im0[i].shape)
# Classes
pred_cls1 = d[:, 5].long()
ims = []
for a in d:
cutout = im0[i][int(a[1]) : int(a[3]), int(a[0]) : int(a[2])]
im = cv2.resize(cutout, (224, 224)) # BGR
im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32
im /= 255 # 0 - 255 to 0.0 - 1.0
ims.append(im)
pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction
x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections
return x
def increment_path(path, exist_ok=False, sep="", mkdir=False):
path = Path(path) # os-agnostic
if path.exists() and not exist_ok:
path, suffix = (path.with_suffix(""), path.suffix) if path.is_file() else (path, "")
# Method 1
for n in range(2, 9999):
p = f"{path}{sep}{n}{suffix}" # increment path
if not os.path.exists(p): #
break
path = Path(p)
# Method 2 (deprecated)
# dirs = glob.glob(f"{path}{sep}*") # similar paths
# matches = [re.search(rf"{path.stem}{sep}(\d+)", d) for d in dirs]
# i = [int(m.groups()[0]) for m in matches if m] # indices
# n = max(i) + 1 if i else 2 # increment number
# path = Path(f"{path}{sep}{n}{suffix}") # increment path
if mkdir:
path.mkdir(parents=True, exist_ok=True) # make directory
return path
# OpenCV Multilanguage-friendly functions ------------------------------------------------------------------------------------
imshow_ = cv2.imshow # copy to avoid recursion errors
def imread(filename, flags=cv2.IMREAD_COLOR):
return cv2.imdecode(np.fromfile(filename, np.uint8), flags)
def imwrite(filename, img):
try:
cv2.imencode(Path(filename).suffix, img)[1].tofile(filename)
return True
except Exception:
return False
def imshow(path, im):
imshow_(path.encode("unicode_escape").decode(), im)
if Path(inspect.stack()[0].filename).parent.parent.as_posix() in inspect.stack()[-1].filename:
cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow # redefine
# Variables ------------------------------------------------------------------------------------------------------------ |
---
+++
@@ -1,4 +1,5 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""General utils."""
from __future__ import annotations
@@ -74,19 +75,28 @@
def is_ascii(s=""):
+ """Checks if input string `s` contains only ASCII characters; returns `True` if so, otherwise `False`."""
s = str(s) # convert list, tuple, None, etc. to str
return len(s.encode().decode("ascii", "ignore")) == len(s)
def is_chinese(s="人工智能"):
+ """Determines if a string `s` contains any Chinese characters; returns `True` if so, otherwise `False`."""
return bool(re.search("[\u4e00-\u9fff]", str(s)))
def is_colab():
+ """Checks if the current environment is a Google Colab instance; returns `True` for Colab, otherwise `False`."""
return "google.colab" in sys.modules
def is_jupyter():
+ """Check if the current script is running inside a Jupyter Notebook. Verified on Colab, Jupyterlab, Kaggle,
+ Paperspace.
+
+ Returns:
+ bool: True if running inside a Jupyter Notebook, False otherwise.
+ """
with contextlib.suppress(Exception):
from IPython import get_ipython
@@ -95,10 +105,12 @@
def is_kaggle():
+ """Checks if the current environment is a Kaggle Notebook by validating environment variables."""
return os.environ.get("PWD") == "/kaggle/working" and os.environ.get("KAGGLE_URL_BASE") == "https://www.kaggle.com"
def is_docker() -> bool:
+ """Check if the process runs inside a docker container."""
if Path("/.dockerenv").exists():
return True
try: # check if docker is in control groups
@@ -109,6 +121,7 @@
def is_writeable(dir, test=False):
+ """Checks if a directory is writable, optionally testing by creating a temporary file if `test=True`."""
if not test:
return os.access(dir, os.W_OK) # possible issues on Windows
file = Path(dir) / "tmp.txt"
@@ -125,6 +138,7 @@
def set_logging(name=LOGGING_NAME, verbose=True):
+ """Configures logging with specified verbosity; `name` sets the logger's name, `verbose` controls logging level."""
rank = int(os.getenv("RANK", -1)) # rank in world for Multi-GPU trainings
level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR
logging.config.dictConfig(
@@ -158,6 +172,9 @@
def user_config_dir(dir="Ultralytics", env_var="YOLOV5_CONFIG_DIR"):
+ """Returns user configuration directory path, preferring environment variable `YOLOV5_CONFIG_DIR` if set, else OS-
+ specific.
+ """
if env := os.getenv(env_var):
path = Path(env) # use environment variable
else:
@@ -172,42 +189,52 @@
class Profile(contextlib.ContextDecorator):
+ """Context manager and decorator for profiling code execution time, with optional CUDA synchronization."""
def __init__(self, t=0.0, device: torch.device = None):
+ """Initializes a profiling context for YOLOv5 with optional timing threshold and device specification."""
self.t = t
self.device = device
self.cuda = bool(device and str(device).startswith("cuda"))
def __enter__(self):
+ """Initializes timing at the start of a profiling context block for performance measurement."""
self.start = self.time()
return self
def __exit__(self, type, value, traceback):
+ """Concludes timing, updating duration for profiling upon exiting a context block."""
self.dt = self.time() - self.start # delta-time
self.t += self.dt # accumulate dt
def time(self):
+ """Measures and returns the current time, synchronizing CUDA operations if `cuda` is True."""
if self.cuda:
torch.cuda.synchronize(self.device)
return time.time()
class Timeout(contextlib.ContextDecorator):
+ """Enforces a timeout on code execution, raising TimeoutError if the specified duration is exceeded."""
def __init__(self, seconds, *, timeout_msg="", suppress_timeout_errors=True):
+ """Initializes a timeout context/decorator with defined seconds, optional message, and error suppression."""
self.seconds = int(seconds)
self.timeout_message = timeout_msg
self.suppress = bool(suppress_timeout_errors)
def _timeout_handler(self, signum, frame):
+ """Raises a TimeoutError with a custom message when a timeout event occurs."""
raise TimeoutError(self.timeout_message)
def __enter__(self):
+ """Initializes timeout mechanism on non-Windows platforms, starting a countdown to raise TimeoutError."""
if platform.system() != "Windows": # not supported on Windows
signal.signal(signal.SIGALRM, self._timeout_handler) # Set handler for SIGALRM
signal.alarm(self.seconds) # start countdown for SIGALRM to be raised
def __exit__(self, exc_type, exc_val, exc_tb):
+ """Disables active alarm on non-Windows systems and optionally suppresses TimeoutError if set."""
if platform.system() != "Windows":
signal.alarm(0) # Cancel SIGALRM if it's scheduled
if self.suppress and exc_type is TimeoutError: # Suppress TimeoutError
@@ -215,23 +242,29 @@
class WorkingDirectory(contextlib.ContextDecorator):
+ """Context manager/decorator to temporarily change the working directory within a 'with' statement or decorator."""
def __init__(self, new_dir):
+ """Initializes a context manager/decorator to temporarily change the working directory."""
self.dir = new_dir # new dir
self.cwd = Path.cwd().resolve() # current dir
def __enter__(self):
+ """Temporarily changes the working directory within a 'with' statement context."""
os.chdir(self.dir)
def __exit__(self, exc_type, exc_val, exc_tb):
+ """Restores the original working directory upon exiting a 'with' statement context."""
os.chdir(self.cwd)
def methods(instance):
+ """Returns list of method names for a class/instance excluding dunder methods."""
return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")]
def print_args(args: dict | None = None, show_file=True, show_func=False):
+ """Logs the arguments of the calling function, with options to include the filename and function name."""
x = inspect.currentframe().f_back # previous frame
file, _, func, _, _ = inspect.getframeinfo(x)
if args is None: # get args automatically
@@ -246,6 +279,10 @@
def init_seeds(seed=0, deterministic=False):
+ """Initializes RNG seeds and sets deterministic options if specified.
+
+ See https://pytorch.org/docs/stable/notes/randomness.html
+ """
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
@@ -260,30 +297,38 @@
def intersect_dicts(da, db, exclude=()):
+ """Returns intersection of `da` and `db` dicts with matching keys and shapes, excluding `exclude` keys; uses `da`
+ values.
+ """
return {k: v for k, v in da.items() if k in db and all(x not in k for x in exclude) and v.shape == db[k].shape}
def get_default_args(func):
+ """Returns a dict of `func` default arguments by inspecting its signature."""
signature = inspect.signature(func)
return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty}
def get_latest_run(search_dir="."):
+ """Returns the path to the most recent 'last.pt' file in /runs to resume from, searches in `search_dir`."""
last_list = glob.glob(f"{search_dir}/**/last*.pt", recursive=True)
return max(last_list, key=os.path.getctime) if last_list else ""
def file_age(path=__file__):
+ """Calculates and returns the age of a file in days based on its last modification time."""
dt = datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime) # delta
return dt.days # + dt.seconds / 86400 # fractional days
def file_date(path=__file__):
+ """Returns a human-readable file modification date in 'YYYY-M-D' format, given a file path."""
t = datetime.fromtimestamp(Path(path).stat().st_mtime)
return f"{t.year}-{t.month}-{t.day}"
def file_size(path):
+ """Returns file or directory size in megabytes (MB) for a given path, where directories are recursively summed."""
mb = 1 << 20 # bytes to MiB (1024 ** 2)
path = Path(path)
if path.is_file():
@@ -295,9 +340,13 @@
def check_online():
+ """Checks internet connectivity by attempting to create a connection to "1.1.1.1" on port 443, retries once if the
+ first attempt fails.
+ """
import socket
def run_once():
+ """Checks internet connectivity by attempting to create a connection to "1.1.1.1" on port 443."""
try:
socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility
return True
@@ -308,6 +357,10 @@
def git_describe(path=ROOT):
+ """Returns a human-readable git description of the repository at `path`, or an empty string on failure.
+
+ Example output is 'v5.0-5-g3e25f1e'. See https://git-scm.com/docs/git-describe.
+ """
try:
assert (Path(path) / ".git").is_dir()
return check_output(f"git -C {path} describe --tags --long --always", shell=True).decode()[:-1]
@@ -318,6 +371,9 @@
@TryExcept()
@WorkingDirectory(ROOT)
def check_git_status(repo="ultralytics/yolov5", branch="master"):
+ """Checks if YOLOv5 code is up-to-date with the repository, advising 'git pull' if behind; errors return informative
+ messages.
+ """
url = f"https://github.com/{repo}"
msg = f", for updates see {url}"
s = colorstr("github: ") # string
@@ -344,6 +400,7 @@
@WorkingDirectory(ROOT)
def check_git_info(path="."):
+ """Checks YOLOv5 git info, returning a dict with remote URL, branch name, and commit hash."""
check_requirements("gitpython")
import git
@@ -361,10 +418,12 @@
def check_python(minimum="3.8.0"):
+ """Checks if current Python version meets the minimum required version, exits if not."""
check_version(platform.python_version(), minimum, name="Python ", hard=True)
def check_version(current="0.0.0", minimum="0.0.0", name="version ", pinned=False, hard=False, verbose=False):
+ """Checks if the current version meets the minimum required version, exits or warns based on parameters."""
current, minimum = (packaging.version.parse(x) for x in (current, minimum))
result = (current == minimum) if pinned else (current >= minimum) # bool
s = f"WARNING ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed" # string
@@ -376,6 +435,7 @@
def check_img_size(imgsz, s=32, floor=0):
+ """Adjusts image size to be divisible by stride `s`, supports int or list/tuple input, returns adjusted size."""
if isinstance(imgsz, int): # integer i.e. img_size=640
new_size = max(make_divisible(imgsz, int(s)), floor)
else: # list i.e. img_size=[640, 480]
@@ -387,6 +447,7 @@
def check_imshow(warn=False):
+ """Checks environment support for image display; warns on failure if `warn=True`."""
try:
assert not is_jupyter()
assert not is_docker()
@@ -402,6 +463,7 @@
def check_suffix(file="yolov5s.pt", suffix=(".pt",), msg=""):
+ """Validates if a file or files have an acceptable suffix, raising an error if not."""
if file and suffix:
if isinstance(suffix, str):
suffix = [suffix]
@@ -412,10 +474,12 @@
def check_yaml(file, suffix=(".yaml", ".yml")):
+ """Searches/downloads a YAML file, verifies its suffix (.yaml or .yml), and returns the file path."""
return check_file(file, suffix)
def check_file(file, suffix=""):
+ """Searches/downloads a file, checks its suffix (if provided), and returns the file path."""
check_suffix(file, suffix) # optional
file = str(file) # convert to str()
if os.path.isfile(file) or not file: # exists
@@ -445,6 +509,7 @@
def check_font(font=FONT, progress=False):
+ """Ensures specified font exists or downloads it from Ultralytics assets, optionally displaying progress."""
font = Path(font)
file = CONFIG_DIR / font.name
if not font.exists() and not file.exists():
@@ -454,6 +519,7 @@
def check_dataset(data, autodownload=True):
+ """Validates and/or auto-downloads a dataset, returning its configuration as a dictionary."""
# Download (optional)
extract_dir = ""
if isinstance(data, (str, Path)) and (is_zipfile(data) or is_tarfile(data)):
@@ -518,9 +584,11 @@
def check_amp(model):
+ """Checks PyTorch AMP functionality for a model, returns True if AMP operates correctly, otherwise False."""
from models.common import AutoShape, DetectMultiBackend
def amp_allclose(model, im):
+ """Compares FP32 and AMP model inference outputs, ensuring they are close within a 10% absolute tolerance."""
m = AutoShape(model, verbose=False) # model
a = m(im).xywhn[0] # FP32 inference
m.amp = True
@@ -544,11 +612,15 @@
def yaml_load(file="data.yaml"):
+ """Safely loads and returns the contents of a YAML file specified by `file` argument."""
with open(file, errors="ignore") as f:
return yaml.safe_load(f)
def yaml_save(file="data.yaml", data=None):
+ """Safely saves `data` to a YAML file specified by `file`, converting `Path` objects to strings; `data` is a
+ dictionary.
+ """
if data is None:
data = {}
with open(file, "w") as f:
@@ -556,6 +628,9 @@
def unzip_file(file, path=None, exclude=(".DS_Store", "__MACOSX")):
+ """Unzips `file` to `path` (default: file's parent), excluding filenames containing any in `exclude` (`.DS_Store`,
+ `__MACOSX`).
+ """
if path is None:
path = Path(file).parent # default path
with ZipFile(file) as zipObj:
@@ -565,13 +640,19 @@
def url2file(url):
+ """Converts a URL string to a valid filename by stripping protocol, domain, and any query parameters.
+
+ Example https://url.com/file.txt?auth -> file.txt
+ """
url = str(Path(url)).replace(":/", "://") # Pathlib turns :// -> :/
return Path(urllib.parse.unquote(url)).name.split("?")[0] # '%2F' to '/', split https://url.com/file.txt?auth
def download(url, dir=".", unzip=True, delete=True, curl=False, threads=1, retry=3):
+ """Downloads and optionally unzips files concurrently, supporting retries and curl fallback."""
def download_one(url, dir):
+ """Downloads a single file from `url` to `dir`, with retry support and optional curl fallback."""
success = True
if os.path.isfile(url):
f = Path(url) # filename
@@ -615,20 +696,32 @@
def make_divisible(x, divisor):
+ """Adjusts `x` to be divisible by `divisor`, returning the nearest greater or equal value."""
if isinstance(divisor, torch.Tensor):
divisor = int(divisor.max()) # to int
return math.ceil(x / divisor) * divisor
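As a quick illustration (editorial sketch, not part of the dataset row), the scalar behavior of make_divisible restated standalone:
import math

def make_divisible(x, divisor):  # scalar form of the function above, restated for a runnable check
    return math.ceil(x / divisor) * divisor

assert make_divisible(641, 32) == 672  # rounded up to the next multiple of 32
assert make_divisible(640, 32) == 640  # already divisible, returned unchanged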
def clean_str(s):
+ """Cleans a string by replacing special characters with underscore, e.g., `clean_str('#example!')` returns
+ '_example_'.
+ """
return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)
def one_cycle(y1=0.0, y2=1.0, steps=100):
+ """Generates a lambda for a sinusoidal ramp from y1 to y2 over 'steps'.
+
+ See https://arxiv.org/pdf/1812.01187.pdf for details.
+ """
return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1
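To make the ramp concrete, an editorial sketch evaluating the lambda at its endpoints and midpoint:
import math

def one_cycle(y1=0.0, y2=1.0, steps=100):  # as defined above
    return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1

lf = one_cycle(1.0, 0.1, steps=100)  # e.g. a learning-rate lambda decaying 1.0 -> 0.1
assert abs(lf(0) - 1.0) < 1e-9    # starts at y1
assert abs(lf(50) - 0.55) < 1e-9  # midpoint is the mean of y1 and y2
assert abs(lf(100) - 0.1) < 1e-9  # ends at y2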
def colorstr(*input):
+ """Colors a string using ANSI escape codes, e.g., colorstr('blue', 'hello world').
+
+ See https://en.wikipedia.org/wiki/ANSI_escape_code.
+ """
*args, string = input if len(input) > 1 else ("blue", "bold", input[0]) # color arguments, string
colors = {
"black": "\033[30m", # basic colors
@@ -655,6 +748,7 @@
def labels_to_class_weights(labels, nc=80):
+ """Calculates class weights from labels to handle class imbalance in training; input shape: (n, 5)."""
if labels[0] is None: # no labels loaded
return torch.Tensor()
@@ -673,12 +767,17 @@
def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
+ """Calculates image weights from labels using class weights for weighted sampling."""
# Usage: index = random.choices(range(n), weights=image_weights, k=1) # weighted image sample
class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels])
return (class_weights.reshape(1, nc) * class_counts).sum(1)
def coco80_to_coco91_class():
+ """Converts COCO 80-class index to COCO 91-class index used in the paper.
+
+ Reference: https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
+ """
# a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
# b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
# x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco
@@ -768,6 +867,7 @@
def xyxy2xywh(x):
+ """Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right."""
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[..., 0] = (x[..., 0] + x[..., 2]) / 2 # x center
y[..., 1] = (x[..., 1] + x[..., 3]) / 2 # y center
@@ -777,6 +877,7 @@
def xywh2xyxy(x):
+ """Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right."""
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[..., 0] = x[..., 0] - x[..., 2] / 2 # top left x
y[..., 1] = x[..., 1] - x[..., 3] / 2 # top left y
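The two box conversions are inverses; an editorial NumPy sketch (bodies completed from the standard YOLOv5 definitions, since the hunks above elide their tails) demonstrating the round trip:
import numpy as np

def xyxy2xywh(x):
    y = np.copy(x)
    y[..., 0] = (x[..., 0] + x[..., 2]) / 2  # x center
    y[..., 1] = (x[..., 1] + x[..., 3]) / 2  # y center
    y[..., 2] = x[..., 2] - x[..., 0]        # width
    y[..., 3] = x[..., 3] - x[..., 1]        # height
    return y

def xywh2xyxy(x):
    y = np.copy(x)
    y[..., 0] = x[..., 0] - x[..., 2] / 2  # top-left x
    y[..., 1] = x[..., 1] - x[..., 3] / 2  # top-left y
    y[..., 2] = x[..., 0] + x[..., 2] / 2  # bottom-right x
    y[..., 3] = x[..., 1] + x[..., 3] / 2  # bottom-right y
    return y

boxes = np.array([[10.0, 20.0, 50.0, 80.0]])              # one xyxy box
assert np.allclose(xyxy2xywh(boxes), [[30, 50, 40, 60]])  # center x/y, width, height
assert np.allclose(xywh2xyxy(xyxy2xywh(boxes)), boxes)    # round trip is lossless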
@@ -786,6 +887,7 @@
def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
+ """Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right."""
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw # top left x
y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh # top left y
@@ -795,6 +897,7 @@
def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
+ """Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right."""
if clip:
clip_boxes(x, (h - eps, w - eps)) # warning: inplace clip
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
@@ -806,6 +909,7 @@
def xyn2xy(x, w=640, h=640, padw=0, padh=0):
+ """Convert normalized segments into pixel segments, shape (n,2)."""
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[..., 0] = w * x[..., 0] + padw # top left x
y[..., 1] = h * x[..., 1] + padh # top left y
@@ -813,6 +917,7 @@
def segment2box(segment, width=640, height=640):
+ """Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy)."""
x, y = segment.T # segment xy
inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
(
@@ -823,6 +928,7 @@
def segments2boxes(segments):
+ """Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh)."""
boxes = []
for s in segments:
x, y = s.T # segment xy
@@ -831,6 +937,7 @@
def resample_segments(segments, n=1000):
+ """Resamples an (n,2) segment to a fixed number of points for consistent representation."""
for i, s in enumerate(segments):
s = np.concatenate((s, s[0:1, :]), axis=0)
x = np.linspace(0, len(s) - 1, n)
@@ -840,6 +947,7 @@
def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None):
+ """Rescales (xyxy) bounding boxes from img1_shape to img0_shape, optionally using provided `ratio_pad`."""
if ratio_pad is None: # calculate from img0_shape
gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new
pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
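A worked example of the gain/pad arithmetic above (editorial sketch): mapping a 640x640 letterboxed inference shape back to a 480x640 original:
img1_shape, img0_shape = (640, 640), (480, 640)  # (h, w) inference and original shapes
gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2
assert gain == 1.0 and pad == (0.0, 80.0)  # 80 px of letterbox padding top and bottom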
@@ -855,6 +963,7 @@
def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, normalize=False):
+ """Rescales segment coordinates from img1_shape to img0_shape, optionally normalizing them with custom padding."""
if ratio_pad is None: # calculate from img0_shape
gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new
pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
@@ -873,6 +982,7 @@
def clip_boxes(boxes, shape):
+ """Clips bounding box coordinates (xyxy) to fit within the specified image shape (height, width)."""
if isinstance(boxes, torch.Tensor): # faster individually
boxes[..., 0].clamp_(0, shape[1]) # x1
boxes[..., 1].clamp_(0, shape[0]) # y1
@@ -884,6 +994,7 @@
def clip_segments(segments, shape):
+ """Clips segment coordinates (xy1, xy2, ...) to an image's boundaries given its shape (height, width)."""
if isinstance(segments, torch.Tensor): # faster individually
segments[:, 0].clamp_(0, shape[1]) # x
segments[:, 1].clamp_(0, shape[0]) # y
@@ -903,6 +1014,11 @@ max_det=300,
nm=0, # number of masks
):
+ """Non-Maximum Suppression (NMS) on inference results to reject overlapping detections.
+
+ Returns:
+ list of detections, on (n,6) tensor per image [xyxy, conf, cls]
+ """
# Checks
assert 0 <= conf_thres <= 1, f"Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0"
assert 0 <= iou_thres <= 1, f"Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0"
@@ -1000,6 +1116,11 @@
def strip_optimizer(f="best.pt", s=""):
+ """Strips optimizer and optionally saves checkpoint to finalize training; arguments are file path 'f' and save path
+ 's'.
+
+ Example: from utils.general import *; strip_optimizer()
+ """
x = torch_load(f, map_location=torch.device("cpu"))
if x.get("ema"):
x["model"] = x["ema"] # replace model with ema
@@ -1015,6 +1136,7 @@
def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr("evolve: ")):
+ """Logs evolution results and saves to CSV and YAML in `save_dir`, optionally syncs with `bucket`."""
evolve_csv = save_dir / "evolve.csv"
evolve_yaml = save_dir / "hyp_evolve.yaml"
keys = tuple(keys) + tuple(hyp.keys()) # [results + hyps]
@@ -1069,6 +1191,7 @@
def apply_classifier(x, model, img, im0):
+ """Applies second-stage classifier to YOLO outputs, filtering detections by class match."""
# Example model = torchvision.models.__dict__['efficientnet_b0'](pretrained=True).to(device).eval()
im0 = [im0] if isinstance(im0, np.ndarray) else im0
for i, d in enumerate(x): # per image
@@ -1103,6 +1226,11 @@
def increment_path(path, exist_ok=False, sep="", mkdir=False):
+ """Generates an incremented file or directory path if it exists, with optional mkdir; args: path, exist_ok=False,
+ sep="", mkdir=False.
+
+ Example: runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc
+ """
path = Path(path) # os-agnostic
if path.exists() and not exist_ok:
path, suffix = (path.with_suffix(""), path.suffix) if path.is_file() else (path, "")
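A minimal editorial sketch of the incrementing behavior; increment_path_sketch is a hypothetical stand-in covering only the directory case (no mkdir, no file-suffix handling):
from pathlib import Path

def increment_path_sketch(path, sep=""):
    # Hypothetical helper mirroring the directory case of increment_path above.
    path = Path(path)
    if not path.exists():
        return path
    for n in range(2, 9999):  # append the first free integer suffix
        p = Path(f"{path}{sep}{n}")
        if not p.exists():
            return p  # e.g. runs/exp -> runs/exp2 -> runs/exp3 ...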
@@ -1132,10 +1260,14 @@
def imread(filename, flags=cv2.IMREAD_COLOR):
+ """Reads an image from a file and returns it as a numpy array, using OpenCV's imdecode to support multilanguage
+ paths.
+ """
return cv2.imdecode(np.fromfile(filename, np.uint8), flags)
def imwrite(filename, img):
+ """Writes an image to a file, returns True on success and False on failure, supports multilanguage paths."""
try:
cv2.imencode(Path(filename).suffix, img)[1].tofile(filename)
return True
@@ -1144,10 +1276,11 @@
def imshow(path, im):
+ """Displays an image using Unicode path, requires encoded path and image matrix as input."""
imshow_(path.encode("unicode_escape").decode(), im)
if Path(inspect.stack()[0].filename).parent.parent.as_posix() in inspect.stack()[-1].filename:
cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow # redefine
-# Variables ------------------------------------------------------------------------------------------------------------
+# Variables ------------------------------------------------------------------------------------------------------------
| https://raw.githubusercontent.com/ultralytics/yolov5/HEAD/utils/general.py |
Generate consistent docstrings | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
import numpy as np
from ..metrics import ap_per_class
def fitness(x):
w = [0.0, 0.0, 0.1, 0.9, 0.0, 0.0, 0.1, 0.9]
return (x[:, :8] * w).sum(1)
def ap_per_class_box_and_mask(
tp_m,
tp_b,
conf,
pred_cls,
target_cls,
plot=False,
save_dir=".",
names=(),
):
results_boxes = ap_per_class(
tp_b, conf, pred_cls, target_cls, plot=plot, save_dir=save_dir, names=names, prefix="Box"
)[2:]
results_masks = ap_per_class(
tp_m, conf, pred_cls, target_cls, plot=plot, save_dir=save_dir, names=names, prefix="Mask"
)[2:]
return {
"boxes": {
"p": results_boxes[0],
"r": results_boxes[1],
"ap": results_boxes[3],
"f1": results_boxes[2],
"ap_class": results_boxes[4],
},
"masks": {
"p": results_masks[0],
"r": results_masks[1],
"ap": results_masks[3],
"f1": results_masks[2],
"ap_class": results_masks[4],
},
}
class Metric:
def __init__(self) -> None:
self.p = [] # (nc, )
self.r = [] # (nc, )
self.f1 = [] # (nc, )
self.all_ap = [] # (nc, 10)
self.ap_class_index = [] # (nc, )
@property
def ap50(self):
return self.all_ap[:, 0] if len(self.all_ap) else []
@property
def ap(self):
return self.all_ap.mean(1) if len(self.all_ap) else []
@property
def mp(self):
return self.p.mean() if len(self.p) else 0.0
@property
def mr(self):
return self.r.mean() if len(self.r) else 0.0
@property
def map50(self):
return self.all_ap[:, 0].mean() if len(self.all_ap) else 0.0
@property
def map(self):
return self.all_ap.mean() if len(self.all_ap) else 0.0
def mean_results(self):
return (self.mp, self.mr, self.map50, self.map)
def class_result(self, i):
return (self.p[i], self.r[i], self.ap50[i], self.ap[i])
def get_maps(self, nc):
maps = np.zeros(nc) + self.map
for i, c in enumerate(self.ap_class_index):
maps[c] = self.ap[i]
return maps
def update(self, results):
p, r, all_ap, f1, ap_class_index = results
self.p = p
self.r = r
self.all_ap = all_ap
self.f1 = f1
self.ap_class_index = ap_class_index
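For orientation, a tiny editorial usage sketch with fabricated two-class results (run alongside the Metric class above):
import numpy as np

m = Metric()
p, r = np.array([0.8, 0.6]), np.array([0.7, 0.5])
all_ap = np.tile([[0.9], [0.5]], 10)  # (nc=2, 10 IoU thresholds)
m.update((p, r, all_ap, np.array([0.74, 0.55]), [0, 1]))
assert abs(m.map50 - 0.7) < 1e-9  # mean AP@0.5 over both classes
assert abs(m.map - 0.7) < 1e-9    # mean AP@0.5:0.95 over both classes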
class Metrics:
def __init__(self) -> None:
self.metric_box = Metric()
self.metric_mask = Metric()
def update(self, results):
self.metric_box.update(list(results["boxes"].values()))
self.metric_mask.update(list(results["masks"].values()))
def mean_results(self):
return self.metric_box.mean_results() + self.metric_mask.mean_results()
def class_result(self, i):
return self.metric_box.class_result(i) + self.metric_mask.class_result(i)
def get_maps(self, nc):
return self.metric_box.get_maps(nc) + self.metric_mask.get_maps(nc)
@property
def ap_class_index(self):
return self.metric_box.ap_class_index
KEYS = [
"train/box_loss",
"train/seg_loss", # train loss
"train/obj_loss",
"train/cls_loss",
"metrics/precision(B)",
"metrics/recall(B)",
"metrics/mAP_0.5(B)",
"metrics/mAP_0.5:0.95(B)", # metrics
"metrics/precision(M)",
"metrics/recall(M)",
"metrics/mAP_0.5(M)",
"metrics/mAP_0.5:0.95(M)", # metrics
"val/box_loss",
"val/seg_loss", # val loss
"val/obj_loss",
"val/cls_loss",
"x/lr0",
"x/lr1",
"x/lr2",
]
BEST_KEYS = [
"best/epoch",
"best/precision(B)",
"best/recall(B)",
"best/mAP_0.5(B)",
"best/mAP_0.5:0.95(B)",
"best/precision(M)",
"best/recall(M)",
"best/mAP_0.5(M)",
"best/mAP_0.5:0.95(M)",
] | ---
+++
@@ -1,4 +1,5 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""Model validation metrics."""
import numpy as np
@@ -6,6 +7,7 @@
def fitness(x):
+ """Evaluates model fitness by a weighted sum of 8 metrics, `x`: [N,8] array, weights: [0.1, 0.9] for mAP and F1."""
w = [0.0, 0.0, 0.1, 0.9, 0.0, 0.0, 0.1, 0.9]
return (x[:, :8] * w).sum(1)
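A worked example of the weighting with fabricated metric values (editorial sketch):
import numpy as np

w = [0.0, 0.0, 0.1, 0.9, 0.0, 0.0, 0.1, 0.9]
x = np.array([[0.7, 0.6, 0.65, 0.45, 0.7, 0.6, 0.6, 0.4]])  # box P,R,mAP50,mAP then mask P,R,mAP50,mAP
f = (x[:, :8] * w).sum(1)
assert abs(f[0] - 0.89) < 1e-9  # 0.1*0.65 + 0.9*0.45 + 0.1*0.6 + 0.9*0.4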
@@ -20,6 +22,12 @@ save_dir=".",
names=(),
):
+ """
+ Args:
+ tp_b: true positives for boxes.
+ tp_m: true positives for masks.
+ For other arguments, see `ap_per_class`.
+ """
results_boxes = ap_per_class(
tp_b, conf, pred_cls, target_cls, plot=plot, save_dir=save_dir, names=names, prefix="Box"
)[2:]
@@ -46,8 +54,12 @@
class Metric:
+ """Computes performance metrics like precision, recall, F1 score, and average precision for model evaluation."""
def __init__(self) -> None:
+ """Initializes performance metric attributes for precision, recall, F1 score, average precision, and class
+ indices.
+ """
self.p = [] # (nc, )
self.r = [] # (nc, )
self.f1 = [] # (nc, )
@@ -56,41 +68,78 @@
@property
def ap50(self):
+ """AP@0.5 of all classes.
+
+ Returns:
+ (nc, ) or [].
+ """
return self.all_ap[:, 0] if len(self.all_ap) else []
@property
def ap(self):
+ """AP@0.5:0.95.
+
+ Returns:
+ (nc, ) or []
+ """
return self.all_ap.mean(1) if len(self.all_ap) else []
@property
def mp(self):
+ """Mean precision of all classes.
+
+ Returns:
+ float.
+ """
return self.p.mean() if len(self.p) else 0.0
@property
def mr(self):
+ """Mean recall of all classes.
+
+ Returns:
+ float.
+ """
return self.r.mean() if len(self.r) else 0.0
@property
def map50(self):
+ """Mean AP@0.5 of all classes.
+
+ Returns:
+ float.
+ """
return self.all_ap[:, 0].mean() if len(self.all_ap) else 0.0
@property
def map(self):
+ """Mean AP@0.5:0.95 of all classes.
+
+ Returns:
+ float.
+ """
return self.all_ap.mean() if len(self.all_ap) else 0.0
def mean_results(self):
+ """Mean of results, return mp, mr, map50, map."""
return (self.mp, self.mr, self.map50, self.map)
def class_result(self, i):
+ """Class-aware result, return p[i], r[i], ap50[i], ap[i]."""
return (self.p[i], self.r[i], self.ap50[i], self.ap[i])
def get_maps(self, nc):
+ """Calculates and returns mean Average Precision (mAP) for each class given number of classes `nc`."""
maps = np.zeros(nc) + self.map
for i, c in enumerate(self.ap_class_index):
maps[c] = self.ap[i]
return maps
def update(self, results):
+ """
+ Args:
+ results: tuple(p, r, ap, f1, ap_class).
+ """
p, r, all_ap, f1, ap_class_index = results
self.p = p
self.r = r
@@ -100,26 +149,38 @@
class Metrics:
+ """Metric for boxes and masks."""
def __init__(self) -> None:
+ """Initialize Metric objects for bounding boxes and masks to compute performance metrics."""
self.metric_box = Metric()
self.metric_mask = Metric()
def update(self, results):
+ """
+ Args:
+ results: Dict{'boxes': Dict{}, 'masks': Dict{}}.
+ """
self.metric_box.update(list(results["boxes"].values()))
self.metric_mask.update(list(results["masks"].values()))
def mean_results(self):
+ """Computes and returns the mean results for both box and mask metrics by summing their individual means."""
return self.metric_box.mean_results() + self.metric_mask.mean_results()
def class_result(self, i):
+ """Returns the sum of box and mask metric results for a specified class index `i`."""
return self.metric_box.class_result(i) + self.metric_mask.class_result(i)
def get_maps(self, nc):
+ """Calculates and returns the sum of mean average precisions (mAPs) for both box and mask metrics for `nc`
+ classes.
+ """
return self.metric_box.get_maps(nc) + self.metric_mask.get_maps(nc)
@property
def ap_class_index(self):
+ """Returns the class index for average precision, shared by both box and mask metrics."""
return self.metric_box.ap_class_index
@@ -155,4 +216,4 @@ "best/recall(M)",
"best/mAP_0.5(M)",
"best/mAP_0.5:0.95(M)",
-]
+]
| https://raw.githubusercontent.com/ultralytics/yolov5/HEAD/utils/segment/metrics.py |
Add concise docstrings to each method | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
import ast
import contextlib
import json
import math
import platform
import warnings
import zipfile
from collections import OrderedDict, namedtuple
from copy import copy
from pathlib import Path
from urllib.parse import urlparse
import cv2
import numpy as np
import pandas as pd
import requests
import torch
import torch.nn as nn
from PIL import Image
from torch.cuda import amp
# Import 'ultralytics' package or install if missing
try:
import ultralytics
assert hasattr(ultralytics, "__version__") # verify package is not directory
except (ImportError, AssertionError):
import os
os.system("pip install -U ultralytics")
import ultralytics
from ultralytics.utils.plotting import Annotator, colors, save_one_box
from utils import TryExcept
from utils.dataloaders import exif_transpose, letterbox
from utils.general import (
LOGGER,
ROOT,
Profile,
check_requirements,
check_suffix,
check_version,
colorstr,
increment_path,
is_jupyter,
make_divisible,
non_max_suppression,
scale_boxes,
xywh2xyxy,
xyxy2xywh,
yaml_load,
)
from utils.torch_utils import copy_attr, smart_inference_mode
def autopad(k, p=None, d=1):
if d > 1:
k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k] # actual kernel-size
if p is None:
p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
return p
class Conv(nn.Module):
default_act = nn.SiLU() # default activation
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True):
super().__init__()
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False)
self.bn = nn.BatchNorm2d(c2)
self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity()
def forward(self, x):
return self.act(self.bn(self.conv(x)))
def forward_fuse(self, x):
return self.act(self.conv(x))
class DWConv(Conv):
def __init__(self, c1, c2, k=1, s=1, d=1, act=True):
super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), d=d, act=act)
class DWConvTranspose2d(nn.ConvTranspose2d):
def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0):
super().__init__(c1, c2, k, s, p1, p2, groups=math.gcd(c1, c2))
class TransformerLayer(nn.Module):
def __init__(self, c, num_heads):
super().__init__()
self.q = nn.Linear(c, c, bias=False)
self.k = nn.Linear(c, c, bias=False)
self.v = nn.Linear(c, c, bias=False)
self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
self.fc1 = nn.Linear(c, c, bias=False)
self.fc2 = nn.Linear(c, c, bias=False)
def forward(self, x):
x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
x = self.fc2(self.fc1(x)) + x
return x
class TransformerBlock(nn.Module):
def __init__(self, c1, c2, num_heads, num_layers):
super().__init__()
self.conv = None
if c1 != c2:
self.conv = Conv(c1, c2)
self.linear = nn.Linear(c2, c2) # learnable position embedding
self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers)))
self.c2 = c2
def forward(self, x):
if self.conv is not None:
x = self.conv(x)
b, _, w, h = x.shape
p = x.flatten(2).permute(2, 0, 1)
return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, w, h)
class Bottleneck(nn.Module):
def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):
super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c_, c2, 3, 1, g=g)
self.add = shortcut and c1 == c2
def forward(self, x):
return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
class BottleneckCSP(nn.Module):
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
self.cv4 = Conv(2 * c_, c2, 1, 1)
self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
self.act = nn.SiLU()
self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
def forward(self, x):
y1 = self.cv3(self.m(self.cv1(x)))
y2 = self.cv2(x)
return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1))))
class CrossConv(nn.Module):
def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, (1, k), (1, s))
self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
self.add = shortcut and c1 == c2
def forward(self, x):
return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
class C3(nn.Module):
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c1, c_, 1, 1)
self.cv3 = Conv(2 * c_, c2, 1) # optional act=FReLU(c2)
self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
def forward(self, x):
return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1))
class C3x(C3):
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
super().__init__(c1, c2, n, shortcut, g, e)
c_ = int(c2 * e)
self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)))
class C3TR(C3):
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
super().__init__(c1, c2, n, shortcut, g, e)
c_ = int(c2 * e)
self.m = TransformerBlock(c_, c_, 4, n)
class C3SPP(C3):
def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5):
super().__init__(c1, c2, n, shortcut, g, e)
c_ = int(c2 * e)
self.m = SPP(c_, c_, k)
class C3Ghost(C3):
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
super().__init__(c1, c2, n, shortcut, g, e)
c_ = int(c2 * e) # hidden channels
self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n)))
class SPP(nn.Module):
def __init__(self, c1, c2, k=(5, 9, 13)):
super().__init__()
c_ = c1 // 2 # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
def forward(self, x):
x = self.cv1(x)
with warnings.catch_warnings():
warnings.simplefilter("ignore") # suppress torch 1.9.0 max_pool2d() warning
return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
class SPPF(nn.Module):
def __init__(self, c1, c2, k=5):
super().__init__()
c_ = c1 // 2 # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c_ * 4, c2, 1, 1)
self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
def forward(self, x):
x = self.cv1(x)
with warnings.catch_warnings():
warnings.simplefilter("ignore") # suppress torch 1.9.0 max_pool2d() warning
y1 = self.m(x)
y2 = self.m(y1)
return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
class Focus(nn.Module):
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
super().__init__()
self.conv = Conv(c1 * 4, c2, k, s, p, g, act=act)
# self.contract = Contract(gain=2)
def forward(self, x):
return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1))
# return self.conv(self.contract(x))
class GhostConv(nn.Module):
def __init__(self, c1, c2, k=1, s=1, g=1, act=True):
super().__init__()
c_ = c2 // 2 # hidden channels
self.cv1 = Conv(c1, c_, k, s, None, g, act=act)
self.cv2 = Conv(c_, c_, 5, 1, None, c_, act=act)
def forward(self, x):
y = self.cv1(x)
return torch.cat((y, self.cv2(y)), 1)
class GhostBottleneck(nn.Module):
def __init__(self, c1, c2, k=3, s=1):
super().__init__()
c_ = c2 // 2
self.conv = nn.Sequential(
GhostConv(c1, c_, 1, 1), # pw
DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw
GhostConv(c_, c2, 1, 1, act=False),
) # pw-linear
self.shortcut = (
nn.Sequential(DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()
)
def forward(self, x):
return self.conv(x) + self.shortcut(x)
class Contract(nn.Module):
def __init__(self, gain=2):
super().__init__()
self.gain = gain
def forward(self, x):
b, c, h, w = x.size() # assert (h / s == 0) and (W / s == 0), 'Indivisible gain'
s = self.gain
x = x.view(b, c, h // s, s, w // s, s) # x(1,64,40,2,40,2)
x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40)
return x.view(b, c * s * s, h // s, w // s) # x(1,256,40,40)
class Expand(nn.Module):
def __init__(self, gain=2):
super().__init__()
self.gain = gain
def forward(self, x):
b, c, h, w = x.size() # assert C / s ** 2 == 0, 'Indivisible gain'
s = self.gain
x = x.view(b, s, s, c // s**2, h, w) # x(1,2,2,16,80,80)
x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2)
return x.view(b, c // s**2, h * s, w * s) # x(1,16,160,160)
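A shape check (editorial sketch, run alongside the Contract and Expand definitions above) showing the two modules invert each other:
import torch

x = torch.zeros(1, 64, 80, 80)
c = Contract(gain=2)(x)  # (1, 256, 40, 40): 2x2 spatial blocks folded into channels
e = Expand(gain=2)(c)    # (1, 64, 80, 80): channels unfolded back into space
assert c.shape == (1, 256, 40, 40) and e.shape == x.shape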
class Concat(nn.Module):
def __init__(self, dimension=1):
super().__init__()
self.d = dimension
def forward(self, x):
return torch.cat(x, self.d)
class DetectMultiBackend(nn.Module):
def __init__(self, weights="yolov5s.pt", device=torch.device("cpu"), dnn=False, data=None, fp16=False, fuse=True):
# PyTorch: weights = *.pt
# TorchScript: *.torchscript
# ONNX Runtime: *.onnx
# ONNX OpenCV DNN: *.onnx --dnn
# OpenVINO: *_openvino_model
# CoreML: *.mlpackage
# TensorRT: *.engine
# TensorFlow SavedModel: *_saved_model
# TensorFlow GraphDef: *.pb
# TensorFlow Lite: *.tflite
# TensorFlow Edge TPU: *_edgetpu.tflite
# PaddlePaddle: *_paddle_model
from models.experimental import attempt_download, attempt_load # scoped to avoid circular import
super().__init__()
w = str(weights[0] if isinstance(weights, list) else weights)
pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w)
fp16 &= pt or jit or onnx or engine or triton # FP16
nhwc = coreml or saved_model or pb or tflite or edgetpu # BHWC formats (vs torch BCWH)
stride = 32 # default stride
cuda = torch.cuda.is_available() and device.type != "cpu" # use CUDA
if not (pt or triton):
w = attempt_download(w) # download if not local
if pt: # PyTorch
model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse)
stride = max(int(model.stride.max()), 32) # model stride
names = model.module.names if hasattr(model, "module") else model.names # get class names
model.half() if fp16 else model.float()
self.model = model # explicitly assign for to(), cpu(), cuda(), half()
elif jit: # TorchScript
LOGGER.info(f"Loading {w} for TorchScript inference...")
extra_files = {"config.txt": ""} # model metadata
model = torch.jit.load(w, _extra_files=extra_files, map_location=device)
model.half() if fp16 else model.float()
if extra_files["config.txt"]: # load metadata dict
d = json.loads(
extra_files["config.txt"],
object_hook=lambda d: {int(k) if k.isdigit() else k: v for k, v in d.items()},
)
stride, names = int(d["stride"]), d["names"]
elif dnn: # ONNX OpenCV DNN
LOGGER.info(f"Loading {w} for ONNX OpenCV DNN inference...")
check_requirements("opencv-python>=4.5.4")
net = cv2.dnn.readNetFromONNX(w)
elif onnx: # ONNX Runtime
LOGGER.info(f"Loading {w} for ONNX Runtime inference...")
check_requirements(("onnx", "onnxruntime-gpu" if cuda else "onnxruntime"))
import onnxruntime
providers = ["CUDAExecutionProvider", "CPUExecutionProvider"] if cuda else ["CPUExecutionProvider"]
session = onnxruntime.InferenceSession(w, providers=providers)
output_names = [x.name for x in session.get_outputs()]
meta = session.get_modelmeta().custom_metadata_map # metadata
if "stride" in meta:
stride, names = int(meta["stride"]), eval(meta["names"])
elif xml: # OpenVINO
LOGGER.info(f"Loading {w} for OpenVINO inference...")
check_requirements("openvino>=2023.0") # requires openvino-dev: https://pypi.org/project/openvino-dev/
from openvino.runtime import Core, Layout, get_batch
core = Core()
if not Path(w).is_file(): # if not *.xml
w = next(Path(w).glob("*.xml")) # get *.xml file from *_openvino_model dir
ov_model = core.read_model(model=w, weights=Path(w).with_suffix(".bin"))
if ov_model.get_parameters()[0].get_layout().empty:
ov_model.get_parameters()[0].set_layout(Layout("NCHW"))
batch_dim = get_batch(ov_model)
if batch_dim.is_static:
batch_size = batch_dim.get_length()
ov_compiled_model = core.compile_model(ov_model, device_name="AUTO") # AUTO selects best available device
stride, names = self._load_metadata(Path(w).with_suffix(".yaml")) # load metadata
elif engine: # TensorRT
LOGGER.info(f"Loading {w} for TensorRT inference...")
import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download
check_version(trt.__version__, "7.0.0", hard=True) # require tensorrt>=7.0.0
if device.type == "cpu":
device = torch.device("cuda:0")
Binding = namedtuple("Binding", ("name", "dtype", "shape", "data", "ptr"))
logger = trt.Logger(trt.Logger.INFO)
with open(w, "rb") as f, trt.Runtime(logger) as runtime:
model = runtime.deserialize_cuda_engine(f.read())
context = model.create_execution_context()
bindings = OrderedDict()
output_names = []
fp16 = False # default updated below
dynamic = False
is_trt10 = not hasattr(model, "num_bindings")
num = range(model.num_io_tensors) if is_trt10 else range(model.num_bindings)
for i in num:
if is_trt10:
name = model.get_tensor_name(i)
dtype = trt.nptype(model.get_tensor_dtype(name))
is_input = model.get_tensor_mode(name) == trt.TensorIOMode.INPUT
if is_input:
if -1 in tuple(model.get_tensor_shape(name)): # dynamic
dynamic = True
context.set_input_shape(name, tuple(model.get_profile_shape(name, 0)[2]))
if dtype == np.float16:
fp16 = True
else: # output
output_names.append(name)
shape = tuple(context.get_tensor_shape(name))
else:
name = model.get_binding_name(i)
dtype = trt.nptype(model.get_binding_dtype(i))
if model.binding_is_input(i):
if -1 in tuple(model.get_binding_shape(i)): # dynamic
dynamic = True
context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2]))
if dtype == np.float16:
fp16 = True
else: # output
output_names.append(name)
shape = tuple(context.get_binding_shape(i))
im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)
bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))
binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())
batch_size = bindings["images"].shape[0] # if dynamic, this is instead max batch size
elif coreml: # CoreML
LOGGER.info(f"Loading {w} for CoreML inference...")
import coremltools as ct
model = ct.models.MLModel(w)
elif saved_model: # TF SavedModel
LOGGER.info(f"Loading {w} for TensorFlow SavedModel inference...")
import tensorflow as tf
keras = False # assume TF1 saved_model
model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w)
elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt
LOGGER.info(f"Loading {w} for TensorFlow GraphDef inference...")
import tensorflow as tf
def wrap_frozen_graph(gd, inputs, outputs):
x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped
ge = x.graph.as_graph_element
return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs))
def gd_outputs(gd):
name_list, input_list = [], []
for node in gd.node: # tensorflow.core.framework.node_def_pb2.NodeDef
name_list.append(node.name)
input_list.extend(node.input)
return sorted(f"{x}:0" for x in list(set(name_list) - set(input_list)) if not x.startswith("NoOp"))
gd = tf.Graph().as_graph_def() # TF GraphDef
with open(w, "rb") as f:
gd.ParseFromString(f.read())
frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs=gd_outputs(gd))
elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python
try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu
from tflite_runtime.interpreter import Interpreter, load_delegate
except ImportError:
import tensorflow as tf
Interpreter, load_delegate = (
tf.lite.Interpreter,
tf.lite.experimental.load_delegate,
)
if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime
LOGGER.info(f"Loading {w} for TensorFlow Lite Edge TPU inference...")
delegate = {"Linux": "libedgetpu.so.1", "Darwin": "libedgetpu.1.dylib", "Windows": "edgetpu.dll"}[
platform.system()
]
interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)])
else: # TFLite
LOGGER.info(f"Loading {w} for TensorFlow Lite inference...")
interpreter = Interpreter(model_path=w) # load TFLite model
interpreter.allocate_tensors() # allocate
input_details = interpreter.get_input_details() # inputs
output_details = interpreter.get_output_details() # outputs
# load metadata
with contextlib.suppress(zipfile.BadZipFile):
with zipfile.ZipFile(w, "r") as model:
meta_file = model.namelist()[0]
meta = ast.literal_eval(model.read(meta_file).decode("utf-8"))
stride, names = int(meta["stride"]), meta["names"]
elif tfjs: # TF.js
raise NotImplementedError("ERROR: YOLOv5 TF.js inference is not supported")
# PaddlePaddle
elif paddle:
LOGGER.info(f"Loading {w} for PaddlePaddle inference...")
check_requirements("paddlepaddle-gpu" if cuda else "paddlepaddle>=3.0.0")
import paddle.inference as pdi
w = Path(w)
if w.is_dir():
model_file = next(w.rglob("*.json"), None)
params_file = next(w.rglob("*.pdiparams"), None)
elif w.suffix == ".pdiparams":
model_file = w.with_name("model.json")
params_file = w
else:
raise ValueError(f"Invalid model path {w}. Provide model directory or a .pdiparams file.")
if not (model_file and params_file and model_file.is_file() and params_file.is_file()):
raise FileNotFoundError(f"Model files not found in {w}. Both .json and .pdiparams files are required.")
config = pdi.Config(str(model_file), str(params_file))
if cuda:
config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0)
config.disable_mkldnn() # disable MKL-DNN for PIR compatibility
predictor = pdi.create_predictor(config)
input_handle = predictor.get_input_handle(predictor.get_input_names()[0])
output_names = predictor.get_output_names()
elif triton: # NVIDIA Triton Inference Server
LOGGER.info(f"Using {w} as Triton Inference Server...")
check_requirements("tritonclient[all]")
from utils.triton import TritonRemoteModel
model = TritonRemoteModel(url=w)
nhwc = model.runtime.startswith("tensorflow")
else:
raise NotImplementedError(f"ERROR: {w} is not a supported format")
# class names
if "names" not in locals():
names = yaml_load(data)["names"] if data else {i: f"class{i}" for i in range(999)}
if names[0] == "n01440764" and len(names) == 1000: # ImageNet
names = yaml_load(ROOT / "data/ImageNet.yaml")["names"] # human-readable names
self.__dict__.update(locals()) # assign all variables to self
def forward(self, im, augment=False, visualize=False):
_b, _ch, h, w = im.shape # batch, channel, height, width
if self.fp16 and im.dtype != torch.float16:
im = im.half() # to FP16
if self.nhwc:
im = im.permute(0, 2, 3, 1) # torch BCHW to numpy BHWC shape(1,320,192,3)
if self.pt: # PyTorch
y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im)
elif self.jit: # TorchScript
y = self.model(im)
elif self.dnn: # ONNX OpenCV DNN
im = im.cpu().numpy() # torch to numpy
self.net.setInput(im)
y = self.net.forward()
elif self.onnx: # ONNX Runtime
im = im.cpu().numpy() # torch to numpy
y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})
elif self.xml: # OpenVINO
im = im.cpu().numpy() # FP32
y = list(self.ov_compiled_model(im).values())
elif self.engine: # TensorRT
if self.dynamic and im.shape != self.bindings["images"].shape:
i = self.model.get_binding_index("images")
self.context.set_binding_shape(i, im.shape) # reshape if dynamic
self.bindings["images"] = self.bindings["images"]._replace(shape=im.shape)
for name in self.output_names:
i = self.model.get_binding_index(name)
self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i)))
s = self.bindings["images"].shape
assert im.shape == s, f"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}"
self.binding_addrs["images"] = int(im.data_ptr())
self.context.execute_v2(list(self.binding_addrs.values()))
y = [self.bindings[x].data for x in sorted(self.output_names)]
elif self.coreml: # CoreML
im = im.cpu().numpy()
im = Image.fromarray((im[0] * 255).astype("uint8"))
# im = im.resize((192, 320), Image.BILINEAR)
y = self.model.predict({"image": im}) # coordinates are xywh normalized
if "confidence" in y:
box = xywh2xyxy(y["coordinates"] * [[w, h, w, h]]) # xyxy pixels
conf, cls = y["confidence"].max(1), y["confidence"].argmax(1).astype(float)
y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)
else:
y = list(reversed(y.values())) # reversed for segmentation models (pred, proto)
elif self.paddle: # PaddlePaddle
im = im.cpu().numpy().astype(np.float32)
self.input_handle.copy_from_cpu(im)
self.predictor.run()
y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names]
elif self.triton: # NVIDIA Triton Inference Server
y = self.model(im)
else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)
im = im.cpu().numpy()
if self.saved_model: # SavedModel
y = self.model(im, training=False) if self.keras else self.model(im)
elif self.pb: # GraphDef
y = self.frozen_func(x=self.tf.constant(im))
else: # Lite or Edge TPU
input = self.input_details[0]
int8 = input["dtype"] == np.uint8 # is TFLite quantized uint8 model
if int8:
scale, zero_point = input["quantization"]
im = (im / scale + zero_point).astype(np.uint8) # de-scale
self.interpreter.set_tensor(input["index"], im)
self.interpreter.invoke()
y = []
for output in self.output_details:
x = self.interpreter.get_tensor(output["index"])
if int8:
scale, zero_point = output["quantization"]
x = (x.astype(np.float32) - zero_point) * scale # re-scale
y.append(x)
if len(y) == 2 and len(y[1].shape) != 4:
y = list(reversed(y))
y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]
y[0][..., :4] *= [w, h, w, h] # xywh normalized to pixels
if isinstance(y, (list, tuple)):
return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]
else:
return self.from_numpy(y)
def from_numpy(self, x):
return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x
def warmup(self, imgsz=(1, 3, 640, 640)):
warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton
if any(warmup_types) and (self.device.type != "cpu" or self.triton):
im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input
for _ in range(2 if self.jit else 1): #
self.forward(im) # warmup
@staticmethod
def _model_type(p="path/to/model.pt"):
# types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle]
from export import export_formats
from utils.downloads import is_url
sf = list(export_formats().Suffix) # export suffixes
if not is_url(p, check=False):
check_suffix(p, sf) # checks
url = urlparse(p) # if url may be Triton inference server
types = [s in Path(p).name for s in sf]
types[8] &= not types[9] # tflite &= not edgetpu
triton = not any(types) and all([any(s in url.scheme for s in ["http", "grpc"]), url.netloc])
return [*types, triton]
@staticmethod
def _load_metadata(f=Path("path/to/meta.yaml")):
if f.exists():
d = yaml_load(f)
return d["stride"], d["names"] # assign stride, names
return None, None
class AutoShape(nn.Module):
conf = 0.25 # NMS confidence threshold
iou = 0.45 # NMS IoU threshold
agnostic = False # NMS class-agnostic
multi_label = False # NMS multiple labels per box
classes = None # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs
max_det = 1000 # maximum number of detections per image
amp = False # Automatic Mixed Precision (AMP) inference
def __init__(self, model, verbose=True):
super().__init__()
if verbose:
LOGGER.info("Adding AutoShape... ")
copy_attr(self, model, include=("yaml", "nc", "hyp", "names", "stride", "abc"), exclude=()) # copy attributes
self.dmb = isinstance(model, DetectMultiBackend) # DetectMultiBackend() instance
self.pt = not self.dmb or model.pt # PyTorch model
self.model = model.eval()
if self.pt:
m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect()
m.inplace = False # Detect.inplace=False for safe multithread inference
m.export = True # do not output loss values
def _apply(self, fn):
self = super()._apply(fn)
if self.pt:
m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect()
m.stride = fn(m.stride)
m.grid = list(map(fn, m.grid))
if isinstance(m.anchor_grid, list):
m.anchor_grid = list(map(fn, m.anchor_grid))
return self
@smart_inference_mode()
def forward(self, ims, size=640, augment=False, profile=False):
# For size(height=640, width=1280), RGB images example inputs are:
# file: ims = 'data/images/zidane.jpg' # str or PosixPath
# URI: = 'https://ultralytics.com/images/zidane.jpg'
# OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3)
# PIL: = Image.open('image.jpg') or ImageGrab.grab() # HWC x(640,1280,3)
# numpy: = np.zeros((640,1280,3)) # HWC
# torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values)
# multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images
dt = (Profile(), Profile(), Profile())
with dt[0]:
if isinstance(size, int): # expand
size = (size, size)
p = next(self.model.parameters()) if self.pt else torch.empty(1, device=self.model.device) # param
autocast = self.amp and (p.device.type != "cpu") # Automatic Mixed Precision (AMP) inference
if isinstance(ims, torch.Tensor): # torch
with amp.autocast(autocast):
return self.model(ims.to(p.device).type_as(p), augment=augment) # inference
# Pre-process
n, ims = (len(ims), list(ims)) if isinstance(ims, (list, tuple)) else (1, [ims]) # number, list of images
shape0, shape1, files = [], [], [] # image and inference shapes, filenames
for i, im in enumerate(ims):
f = f"image{i}" # filename
if isinstance(im, (str, Path)): # filename or uri
im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith("http") else im), im
im = np.asarray(exif_transpose(im))
elif isinstance(im, Image.Image): # PIL Image
im, f = np.asarray(exif_transpose(im)), getattr(im, "filename", f) or f
files.append(Path(f).with_suffix(".jpg").name)
if im.shape[0] < 5: # image in CHW
im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)
im = im[..., :3] if im.ndim == 3 else cv2.cvtColor(im, cv2.COLOR_GRAY2BGR) # enforce 3ch input
s = im.shape[:2] # HWC
shape0.append(s) # image shape
g = max(size) / max(s) # gain
shape1.append([int(y * g) for y in s])
ims[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update
shape1 = [make_divisible(x, self.stride) for x in np.array(shape1).max(0)] # inf shape
x = [letterbox(im, shape1, auto=False)[0] for im in ims] # pad
x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW
x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32
with amp.autocast(autocast):
# Inference
with dt[1]:
y = self.model(x, augment=augment) # forward
# Post-process
with dt[2]:
y = non_max_suppression(
y if self.dmb else y[0],
self.conf,
self.iou,
self.classes,
self.agnostic,
self.multi_label,
max_det=self.max_det,
) # NMS
for i in range(n):
scale_boxes(shape1, y[i][:, :4], shape0[i])
return Detections(ims, y, files, dt, self.names, x.shape)
class Detections:
def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None):
super().__init__()
d = pred[0].device # device
gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in ims] # normalizations
self.ims = ims # list of images as numpy arrays
self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)
self.names = names # class names
self.files = files # image filenames
self.times = times # profiling times
self.xyxy = pred # xyxy pixels
self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels
self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized
self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized
self.n = len(self.pred) # number of images (batch size)
self.t = tuple(x.t / self.n * 1e3 for x in times) # timestamps (ms)
self.s = tuple(shape) # inference BCHW shape
def _run(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path("")):
s, crops = "", []
for i, (im, pred) in enumerate(zip(self.ims, self.pred)):
s += f"\nimage {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} " # string
if pred.shape[0]:
for c in pred[:, -1].unique():
n = (pred[:, -1] == c).sum() # detections per class
s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string
s = s.rstrip(", ")
if show or save or render or crop:
annotator = Annotator(im, example=str(self.names))
for *box, conf, cls in reversed(pred): # xyxy, confidence, class
label = f"{self.names[int(cls)]} {conf:.2f}"
if crop:
file = save_dir / "crops" / self.names[int(cls)] / self.files[i] if save else None
crops.append(
{
"box": box,
"conf": conf,
"cls": cls,
"label": label,
"im": save_one_box(box, im, file=file, save=save),
}
)
else: # all others
annotator.box_label(box, label if labels else "", color=colors(cls))
im = annotator.im
else:
s += "(no detections)"
im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np
if show:
if is_jupyter():
from IPython.display import display
display(im)
else:
im.show(self.files[i])
if save:
f = self.files[i]
im.save(save_dir / f) # save
if i == self.n - 1:
LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}")
if render:
self.ims[i] = np.asarray(im)
if pprint:
s = s.lstrip("\n")
return f"{s}\nSpeed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {self.s}" % self.t
if crop:
if save:
LOGGER.info(f"Saved results to {save_dir}\n")
return crops
@TryExcept("Showing images is not supported in this environment")
def show(self, labels=True):
self._run(show=True, labels=labels) # show results
def save(self, labels=True, save_dir="runs/detect/exp", exist_ok=False):
save_dir = increment_path(save_dir, exist_ok, mkdir=True) # increment save_dir
self._run(save=True, labels=labels, save_dir=save_dir) # save results
def crop(self, save=True, save_dir="runs/detect/exp", exist_ok=False):
save_dir = increment_path(save_dir, exist_ok, mkdir=True) if save else None
return self._run(crop=True, save=save, save_dir=save_dir) # crop results
def render(self, labels=True):
self._run(render=True, labels=labels) # render results
return self.ims
def pandas(self):
new = copy(self) # return copy
ca = "xmin", "ymin", "xmax", "ymax", "confidence", "class", "name" # xyxy columns
cb = "xcenter", "ycenter", "width", "height", "confidence", "class", "name" # xywh columns
for k, c in zip(["xyxy", "xyxyn", "xywh", "xywhn"], [ca, ca, cb, cb]):
a = [[[*x[:5], int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update
setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])
return new
def tolist(self):
r = range(self.n) # iterable
return [
Detections(
[self.ims[i]],
[self.pred[i]],
[self.files[i]],
self.times,
self.names,
self.s,
)
for i in r
]
def print(self):
LOGGER.info(self.__str__())
def __len__(self):
return self.n
def __str__(self):
return self._run(pprint=True) # print results
def __repr__(self):
return f"YOLOv5 {self.__class__} instance\n" + self.__str__()
class Proto(nn.Module):
def __init__(self, c1, c_=256, c2=32):
super().__init__()
self.cv1 = Conv(c1, c_, k=3)
self.upsample = nn.Upsample(scale_factor=2, mode="nearest")
self.cv2 = Conv(c_, c_, k=3)
self.cv3 = Conv(c_, c2)
def forward(self, x):
return self.cv3(self.cv2(self.upsample(self.cv1(x))))
class Classify(nn.Module):
def __init__(
self, c1, c2, k=1, s=1, p=None, g=1, dropout_p=0.0
): # ch_in, ch_out, kernel, stride, padding, groups, dropout probability
super().__init__()
c_ = 1280 # efficientnet_b0 size
self.conv = Conv(c1, c_, k, s, autopad(k, p), g)
self.pool = nn.AdaptiveAvgPool2d(1) # to x(b,c_,1,1)
self.drop = nn.Dropout(p=dropout_p, inplace=True)
self.linear = nn.Linear(c_, c2) # to x(b,c2)
def forward(self, x):
if isinstance(x, list):
x = torch.cat(x, 1)
return self.linear(self.drop(self.pool(self.conv(x)).flatten(1))) | ---
+++
@@ -1,4 +1,5 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""Common modules."""
import ast
import contextlib
@@ -57,6 +58,10 @@
def autopad(k, p=None, d=1):
+ """Pads kernel to 'same' output shape, adjusting for optional dilation; returns padding size.
+
+ `k`: kernel, `p`: padding, `d`: dilation.
+ """
if d > 1:
k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k] # actual kernel-size
if p is None:
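A self-contained check of the 'same' padding rule, restating autopad from the full definition shown earlier in this row (editorial sketch):
def autopad(k, p=None, d=1):
    if d > 1:
        k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k]  # actual kernel-size
    if p is None:
        p = k // 2 if isinstance(k, int) else [x // 2 for x in k]  # auto-pad
    return p

assert autopad(3) == 1       # 3x3 kernel -> pad 1 keeps spatial size
assert autopad(5) == 2       # 5x5 kernel -> pad 2
assert autopad(3, d=2) == 2  # dilation 2 makes an effective 5x5 kernel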
@@ -65,37 +70,54 @@
class Conv(nn.Module):
+ """Applies a convolution, batch normalization, and activation function to an input tensor in a neural network."""
default_act = nn.SiLU() # default activation
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True):
+ """Initializes a standard convolution layer with optional batch normalization and activation."""
super().__init__()
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False)
self.bn = nn.BatchNorm2d(c2)
self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity()
def forward(self, x):
+ """Applies a convolution followed by batch normalization and an activation function to the input tensor `x`."""
return self.act(self.bn(self.conv(x)))
def forward_fuse(self, x):
+ """Applies a fused convolution and activation function to the input tensor `x`."""
return self.act(self.conv(x))
class DWConv(Conv):
+ """Implements a depth-wise convolution layer with optional activation for efficient spatial filtering."""
def __init__(self, c1, c2, k=1, s=1, d=1, act=True):
+ """Initializes a depth-wise convolution layer with optional activation; args: input channels (c1), output
+ channels (c2), kernel size (k), stride (s), dilation (d), and activation flag (act).
+ """
super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), d=d, act=act)
class DWConvTranspose2d(nn.ConvTranspose2d):
+ """A depth-wise transpose convolutional layer for upsampling in neural networks, particularly in YOLOv5 models."""
def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0):
+ """Initializes a depth-wise transpose convolutional layer for YOLOv5; args: input channels (c1), output channels
+ (c2), kernel size (k), stride (s), input padding (p1), output padding (p2).
+ """
super().__init__(c1, c2, k, s, p1, p2, groups=math.gcd(c1, c2))
class TransformerLayer(nn.Module):
+ """Transformer layer with multihead attention and linear layers, optimized by removing LayerNorm."""
def __init__(self, c, num_heads):
+ """Initializes a transformer layer, sans LayerNorm for performance, with multihead attention and linear layers.
+
+ As described in https://arxiv.org/abs/2010.11929.
+ """
super().__init__()
self.q = nn.Linear(c, c, bias=False)
self.k = nn.Linear(c, c, bias=False)
@@ -105,14 +127,19 @@ self.fc2 = nn.Linear(c, c, bias=False)
def forward(self, x):
+ """Performs forward pass using MultiheadAttention and two linear transformations with residual connections."""
x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
x = self.fc2(self.fc1(x)) + x
return x
class TransformerBlock(nn.Module):
+ """A Transformer block for vision tasks with convolution, position embeddings, and Transformer layers."""
def __init__(self, c1, c2, num_heads, num_layers):
+ """Initializes a Transformer block for vision tasks, adapting dimensions if necessary and stacking specified
+ layers.
+ """
super().__init__()
self.conv = None
if c1 != c2:
@@ -122,6 +149,9 @@ self.c2 = c2
def forward(self, x):
+ """Processes input through an optional convolution, followed by Transformer layers and position embeddings for
+ object detection.
+ """
if self.conv is not None:
x = self.conv(x)
b, _, w, h = x.shape
@@ -130,8 +160,12 @@
class Bottleneck(nn.Module):
+ """A bottleneck layer with optional shortcut and group convolution for efficient feature extraction."""
def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):
+ """Initializes a standard bottleneck layer with optional shortcut and group convolution, supporting channel
+ expansion.
+ """
super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
@@ -139,12 +173,19 @@ self.add = shortcut and c1 == c2
def forward(self, x):
+ """Processes input through two convolutions, optionally adds shortcut if channel dimensions match; input is a
+ tensor.
+ """
return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
class BottleneckCSP(nn.Module):
+ """CSP bottleneck layer for feature extraction with cross-stage partial connections and optional shortcuts."""
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
+ """Initializes CSP bottleneck with optional shortcuts; args: ch_in, ch_out, number of repeats, shortcut bool,
+ groups, expansion.
+ """
super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
@@ -156,14 +197,23 @@ self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
def forward(self, x):
+ """Performs forward pass by applying layers, activation, and concatenation on input x, returning feature-
+ enhanced output.
+ """
y1 = self.cv3(self.m(self.cv1(x)))
y2 = self.cv2(x)
return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1))))
class CrossConv(nn.Module):
+ """Implements a cross convolution layer with downsampling, expansion, and optional shortcut."""
def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
+ """Initializes CrossConv with downsampling, expanding, and optionally shortcutting; `c1` input, `c2` output
+ channels.
+
+ Inputs are ch_in, ch_out, kernel, stride, groups, expansion, shortcut.
+ """
super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, (1, k), (1, s))
@@ -171,12 +221,17 @@ self.add = shortcut and c1 == c2
def forward(self, x):
+ """Performs feature sampling, expanding, and applies shortcut if channels match; expects `x` input tensor."""
return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
class C3(nn.Module):
+ """Implements a CSP Bottleneck module with three convolutions for enhanced feature extraction in neural networks."""
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
+ """Initializes C3 module with options for channel count, bottleneck repetition, shortcut usage, group
+ convolutions, and expansion.
+ """
super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
@@ -185,44 +240,63 @@ self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
def forward(self, x):
+ """Performs forward propagation using concatenated outputs from two convolutions and a Bottleneck sequence."""
return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1))
class C3x(C3):
+ """Extends the C3 module with cross-convolutions for enhanced feature extraction in neural networks."""
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
+ """Initializes C3x module with cross-convolutions, extending C3 with customizable channel dimensions, groups,
+ and expansion.
+ """
super().__init__(c1, c2, n, shortcut, g, e)
c_ = int(c2 * e)
self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)))
class C3TR(C3):
+ """C3 module with TransformerBlock for enhanced feature extraction in object detection models."""
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
+ """Initializes C3 module with TransformerBlock for enhanced feature extraction, accepts channel sizes, shortcut
+ config, group, and expansion.
+ """
super().__init__(c1, c2, n, shortcut, g, e)
c_ = int(c2 * e)
self.m = TransformerBlock(c_, c_, 4, n)
class C3SPP(C3):
+ """Extends the C3 module with an SPP layer for enhanced spatial feature extraction and customizable channels."""
def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5):
+ """Initializes a C3 module with SPP layer for advanced spatial feature extraction, given channel sizes, kernel
+ sizes, shortcut, group, and expansion ratio.
+ """
super().__init__(c1, c2, n, shortcut, g, e)
c_ = int(c2 * e)
self.m = SPP(c_, c_, k)
class C3Ghost(C3):
+ """Implements a C3 module with Ghost Bottlenecks for efficient feature extraction in YOLOv5."""
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
+ """Initializes YOLOv5's C3 module with Ghost Bottlenecks for efficient feature extraction."""
super().__init__(c1, c2, n, shortcut, g, e)
c_ = int(c2 * e) # hidden channels
self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n)))
class SPP(nn.Module):
+ """Implements Spatial Pyramid Pooling (SPP) for feature extraction, ref: https://arxiv.org/abs/1406.4729."""
def __init__(self, c1, c2, k=(5, 9, 13)):
+ """Initializes SPP layer with Spatial Pyramid Pooling, ref: https://arxiv.org/abs/1406.4729, args: c1 (input
+ channels), c2 (output channels), k (kernel sizes).
+ """
super().__init__()
c_ = c1 // 2 # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
@@ -230,6 +304,9 @@ self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
def forward(self, x):
+ """Applies convolution and max pooling layers to the input tensor `x`, concatenates results, and returns output
+ tensor.
+ """
x = self.cv1(x)
with warnings.catch_warnings():
warnings.simplefilter("ignore") # suppress torch 1.9.0 max_pool2d() warning
@@ -237,8 +314,14 @@
class SPPF(nn.Module):
+ """Implements a fast Spatial Pyramid Pooling (SPPF) layer for efficient feature extraction in YOLOv5 models."""
def __init__(self, c1, c2, k=5):
+ """Initializes YOLOv5 SPPF layer with given channels and kernel size for YOLOv5 model, combining convolution and
+ max pooling.
+
+ Equivalent to SPP(k=(5, 9, 13)).
+ """
super().__init__()
c_ = c1 // 2 # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
@@ -246,6 +329,7 @@ self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
def forward(self, x):
+ """Processes input through a series of convolutions and max pooling operations for feature extraction."""
x = self.cv1(x)
with warnings.catch_warnings():
warnings.simplefilter("ignore") # suppress torch 1.9.0 max_pool2d() warning
@@ -255,33 +339,47 @@
class Focus(nn.Module):
+ """Focuses spatial information into channel space using slicing and convolution for efficient feature extraction."""
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
+ """Initializes Focus module to concentrate width-height info into channel space with configurable convolution
+ parameters.
+ """
super().__init__()
self.conv = Conv(c1 * 4, c2, k, s, p, g, act=act)
# self.contract = Contract(gain=2)
def forward(self, x):
+ """Processes input through Focus mechanism, reshaping (b,c,w,h) to (b,4c,w/2,h/2) then applies convolution."""
return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1))
# return self.conv(self.contract(x))
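# The Focus slicing trick in isolation, as a sketch that assumes only torch: the four strided
# slices interleave neighboring pixels so a (b, c, h, w) tensor becomes (b, 4c, h/2, w/2)
# before the convolution runs.
import torch

x = torch.arange(16.0).view(1, 1, 4, 4)  # (b=1, c=1, h=4, w=4)
y = torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1)
assert y.shape == (1, 4, 2, 2)  # channels grow 4x while each spatial dim halves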
class GhostConv(nn.Module):
+ """Implements Ghost Convolution for efficient feature extraction, see https://github.com/huawei-noah/ghostnet."""
def __init__(self, c1, c2, k=1, s=1, g=1, act=True):
+ """Initializes GhostConv with in/out channels, kernel size, stride, groups, and activation; halves out channels
+ for efficiency.
+ """
super().__init__()
c_ = c2 // 2 # hidden channels
self.cv1 = Conv(c1, c_, k, s, None, g, act=act)
self.cv2 = Conv(c_, c_, 5, 1, None, c_, act=act)
def forward(self, x):
+ """Performs forward pass, concatenating outputs of two convolutions on input `x`: shape (B,C,H,W)."""
y = self.cv1(x)
return torch.cat((y, self.cv2(y)), 1)
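# Channel bookkeeping for GhostConv, as a sketch assuming a YOLOv5 checkout on PYTHONPATH:
# cv1 produces c2 // 2 primary channels, cv2 derives the other c2 // 2 "ghost" channels from
# them with a cheap 5x5 depthwise convolution, and the concatenation yields c2 channels total.
import torch
from models.common import GhostConv

m = GhostConv(c1=32, c2=64, k=1, s=1)
x = torch.randn(1, 32, 16, 16)
assert m(x).shape == (1, 64, 16, 16)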
class GhostBottleneck(nn.Module):
+ """Efficient bottleneck layer using Ghost Convolutions, see https://github.com/huawei-noah/ghostnet."""
def __init__(self, c1, c2, k=3, s=1):
+ """Initializes GhostBottleneck with ch_in `c1`, ch_out `c2`, kernel size `k`, stride `s`; see
+ https://github.com/huawei-noah/ghostnet.
+ """
super().__init__()
c_ = c2 // 2
self.conv = nn.Sequential(
@@ -294,16 +392,24 @@ )
def forward(self, x):
+ """Processes input through conv and shortcut layers, returning their summed output."""
return self.conv(x) + self.shortcut(x)
class Contract(nn.Module):
+ """Contracts spatial dimensions into channel dimensions for efficient processing in neural networks."""
def __init__(self, gain=2):
+ """Initializes a layer to contract spatial dimensions (width-height) into channels, e.g., input shape
+ (1,64,80,80) to (1,256,40,40).
+ """
super().__init__()
self.gain = gain
def forward(self, x):
+ """Processes input tensor to expand channel dimensions by contracting spatial dimensions, yielding output shape
+ `(b, c*s*s, h//s, w//s)`.
+ """
b, c, h, w = x.size() # assert (h / s == 0) and (W / s == 0), 'Indivisible gain'
s = self.gain
x = x.view(b, c, h // s, s, w // s, s) # x(1,64,40,2,40,2)
@@ -312,12 +418,19 @@
class Expand(nn.Module):
+ """Expands spatial dimensions by redistributing channels, e.g., from (1,64,80,80) to (1,16,160,160)."""
def __init__(self, gain=2):
+ """Initializes the Expand module to increase spatial dimensions by redistributing channels, with an optional
+ gain factor.
+
+ Example: x(1,64,80,80) to x(1,16,160,160).
+ """
super().__init__()
self.gain = gain
def forward(self, x):
+ """Processes input tensor x to expand spatial dims by redistributing channels, requiring C / gain^2 == 0."""
b, c, h, w = x.size() # assert C / s ** 2 == 0, 'Indivisible gain'
s = self.gain
x = x.view(b, s, s, c // s**2, h, w) # x(1,2,2,16,80,80)
@@ -326,18 +439,23 @@
class Concat(nn.Module):
+ """Concatenates tensors along a specified dimension for efficient tensor manipulation in neural networks."""
def __init__(self, dimension=1):
+ """Initializes a Concat module to concatenate tensors along a specified dimension."""
super().__init__()
self.d = dimension
def forward(self, x):
+ """Concatenates a list of tensors along a specified dims; `x` is a list of tensors, `dimension` is an int."""
return torch.cat(x, self.d)
class DetectMultiBackend(nn.Module):
+ """YOLOv5 MultiBackend class for inference on various backends including PyTorch, ONNX, TensorRT, and more."""
def __init__(self, weights="yolov5s.pt", device=torch.device("cpu"), dnn=False, data=None, fp16=False, fuse=True):
+ """Initializes DetectMultiBackend with support for various inference backends, including PyTorch and ONNX."""
# PyTorch: weights = *.pt
# TorchScript: *.torchscript
# ONNX Runtime: *.onnx
@@ -474,11 +592,13 @@ import tensorflow as tf
def wrap_frozen_graph(gd, inputs, outputs):
+ """Wraps a TensorFlow GraphDef for inference, returning a pruned function."""
x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped
ge = x.graph.as_graph_element
return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs))
def gd_outputs(gd):
+ """Generates a sorted list of graph outputs excluding NoOp nodes and inputs, formatted as '<name>:0'."""
name_list, input_list = [], []
for node in gd.node: # tensorflow.core.framework.node_def_pb2.NodeDef
name_list.append(node.name)
@@ -565,6 +685,7 @@ self.__dict__.update(locals()) # assign all variables to self
def forward(self, im, augment=False, visualize=False):
+ """Performs YOLOv5 inference on input images with options for augmentation and visualization."""
_b, _ch, h, w = im.shape # batch, channel, height, width
if self.fp16 and im.dtype != torch.float16:
im = im.half() # to FP16
@@ -648,9 +769,11 @@ return self.from_numpy(y)
def from_numpy(self, x):
+ """Converts a NumPy array to a torch tensor, maintaining device compatibility."""
return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x
def warmup(self, imgsz=(1, 3, 640, 640)):
+ """Performs a single inference warmup to initialize model weights, accepting an `imgsz` tuple for image size."""
warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton
if any(warmup_types) and (self.device.type != "cpu" or self.triton):
im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input
@@ -659,6 +782,10 @@
@staticmethod
def _model_type(p="path/to/model.pt"):
+ """Determines model type from file path or URL, supporting various export formats.
+
+ Example: path='path/to/model.onnx' -> type=onnx
+ """
# types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle]
from export import export_formats
from utils.downloads import is_url
@@ -674,6 +801,7 @@
@staticmethod
def _load_metadata(f=Path("path/to/meta.yaml")):
+ """Loads metadata from a YAML file, returning strides and names if the file exists, otherwise `None`."""
if f.exists():
d = yaml_load(f)
return d["stride"], d["names"] # assign stride, names
@@ -681,6 +809,7 @@
class AutoShape(nn.Module):
+ """AutoShape class for robust YOLOv5 inference with preprocessing, NMS, and support for various input formats."""
conf = 0.25 # NMS confidence threshold
iou = 0.45 # NMS IoU threshold
@@ -691,6 +820,7 @@ amp = False # Automatic Mixed Precision (AMP) inference
def __init__(self, model, verbose=True):
+ """Initializes YOLOv5 model for inference, setting up attributes and preparing model for evaluation."""
super().__init__()
if verbose:
LOGGER.info("Adding AutoShape... ")
@@ -704,6 +834,10 @@ m.export = True # do not output loss values
def _apply(self, fn):
+ """Applies to(), cpu(), cuda(), half() etc.
+
+ to model tensors excluding parameters or registered buffers.
+ """
self = super()._apply(fn)
if self.pt:
m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect()
@@ -715,6 +849,10 @@
@smart_inference_mode()
def forward(self, ims, size=640, augment=False, profile=False):
+ """Performs inference on inputs with optional augment & profiling.
+
+ Supports various formats including file, URI, OpenCV, PIL, numpy, torch.
+ """
# For size(height=640, width=1280), RGB images example inputs are:
# file: ims = 'data/images/zidane.jpg' # str or PosixPath
# URI: = 'https://ultralytics.com/images/zidane.jpg'
@@ -781,8 +919,10 @@
class Detections:
+ """Manages YOLOv5 detection results with methods for visualization, saving, cropping, and exporting detections."""
def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None):
+ """Initializes the YOLOv5 Detections class with image info, predictions, filenames, timing and normalization."""
super().__init__()
d = pred[0].device # device
gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in ims] # normalizations
@@ -800,6 +940,7 @@ self.s = tuple(shape) # inference BCHW shape
def _run(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path("")):
+ """Executes model predictions, displaying and/or saving outputs with optional crops and labels."""
s, crops = "", []
for i, (im, pred) in enumerate(zip(self.ims, self.pred)):
s += f"\nimage {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} " # string
@@ -854,21 +995,38 @@
@TryExcept("Showing images is not supported in this environment")
def show(self, labels=True):
+ """Displays detection results with optional labels.
+
+ Usage: show(labels=True)
+ """
self._run(show=True, labels=labels) # show results
def save(self, labels=True, save_dir="runs/detect/exp", exist_ok=False):
+ """Saves detection results with optional labels to a specified directory.
+
+ Usage: save(labels=True, save_dir='runs/detect/exp', exist_ok=False)
+ """
save_dir = increment_path(save_dir, exist_ok, mkdir=True) # increment save_dir
self._run(save=True, labels=labels, save_dir=save_dir) # save results
def crop(self, save=True, save_dir="runs/detect/exp", exist_ok=False):
+ """Crops detection results, optionally saves them to a directory.
+
+ Args: save (bool), save_dir (str), exist_ok (bool).
+ """
save_dir = increment_path(save_dir, exist_ok, mkdir=True) if save else None
return self._run(crop=True, save=save, save_dir=save_dir) # crop results
def render(self, labels=True):
+ """Renders detection results with optional labels on images; args: labels (bool) indicating label inclusion."""
self._run(render=True, labels=labels) # render results
return self.ims
def pandas(self):
+ """Returns detections as pandas DataFrames for various box formats (xyxy, xyxyn, xywh, xywhn).
+
+ Example: print(results.pandas().xyxy[0]).
+ """
new = copy(self) # return copy
ca = "xmin", "ymin", "xmax", "ymax", "confidence", "class", "name" # xyxy columns
cb = "xcenter", "ycenter", "width", "height", "confidence", "class", "name" # xywh columns
@@ -878,6 +1036,10 @@ return new
def tolist(self):
+ """Converts a Detections object into a list of individual detection results for iteration.
+
+ Example: for result in results.tolist():
+ """
r = range(self.n) # iterable
return [
Detections(
@@ -892,21 +1054,29 @@ ]
def print(self):
+ """Logs the string representation of the current object's state via the LOGGER."""
LOGGER.info(self.__str__())
def __len__(self):
+ """Returns the number of results stored, overrides the default len(results)."""
return self.n
def __str__(self):
+ """Returns a string representation of the model's results, suitable for printing, overrides default
+ print(results).
+ """
return self._run(pprint=True) # print results
def __repr__(self):
+ """Returns a string representation of the YOLOv5 object, including its class and formatted results."""
return f"YOLOv5 {self.__class__} instance\n" + self.__str__()
class Proto(nn.Module):
+ """YOLOv5 mask Proto module for segmentation models, performing convolutions and upsampling on input tensors."""
def __init__(self, c1, c_=256, c2=32):
+ """Initializes YOLOv5 Proto module for segmentation with input, proto, and mask channels configuration."""
super().__init__()
self.cv1 = Conv(c1, c_, k=3)
self.upsample = nn.Upsample(scale_factor=2, mode="nearest")
@@ -914,14 +1084,19 @@ self.cv3 = Conv(c_, c2)
def forward(self, x):
+ """Performs a forward pass using convolutional layers and upsampling on input tensor `x`."""
return self.cv3(self.cv2(self.upsample(self.cv1(x))))
class Classify(nn.Module):
+ """YOLOv5 classification head with convolution, pooling, and dropout layers for channel transformation."""
def __init__(
self, c1, c2, k=1, s=1, p=None, g=1, dropout_p=0.0
): # ch_in, ch_out, kernel, stride, padding, groups, dropout probability
+ """Initializes YOLOv5 classification head with convolution, pooling, and dropout layers for input to output
+ channel transformation.
+ """
super().__init__()
c_ = 1280 # efficientnet_b0 size
self.conv = Conv(c1, c_, k, s, autopad(k, p), g)
@@ -930,6 +1105,7 @@ self.linear = nn.Linear(c_, c2) # to x(b,c2)
def forward(self, x):
+ """Processes input through conv, pool, drop, and linear layers; supports list concatenation input."""
if isinstance(x, list):
x = torch.cat(x, 1)
- return self.linear(self.drop(self.pool(self.conv(x)).flatten(1)))
+ return self.linear(self.drop(self.pool(self.conv(x)).flatten(1)))
| https://raw.githubusercontent.com/ultralytics/yolov5/HEAD/models/common.py |
Add docstrings to improve readability | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
import argparse
import os
import subprocess
import sys
import time
from copy import deepcopy
from datetime import datetime
from pathlib import Path
import torch
import torch.distributed as dist
import torch.hub as hub
import torch.optim.lr_scheduler as lr_scheduler
import torchvision
from torch.cuda import amp
from tqdm import tqdm
FILE = Path(__file__).resolve()
ROOT = FILE.parents[1] # YOLOv5 root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
from classify import val as validate
from models.experimental import attempt_load
from models.yolo import ClassificationModel, DetectionModel
from utils.dataloaders import create_classification_dataloader
from utils.general import (
DATASETS_DIR,
LOGGER,
TQDM_BAR_FORMAT,
WorkingDirectory,
check_git_info,
check_git_status,
check_requirements,
colorstr,
download,
increment_path,
init_seeds,
print_args,
yaml_save,
)
from utils.loggers import GenericLogger
from utils.plots import imshow_cls
from utils.torch_utils import (
ModelEMA,
de_parallel,
model_info,
reshape_classifier_output,
select_device,
smart_DDP,
smart_optimizer,
smartCrossEntropyLoss,
torch_distributed_zero_first,
)
LOCAL_RANK = int(os.getenv("LOCAL_RANK", -1)) # https://pytorch.org/docs/stable/elastic/run.html
RANK = int(os.getenv("RANK", -1))
WORLD_SIZE = int(os.getenv("WORLD_SIZE", 1))
GIT_INFO = check_git_info()
def train(opt, device):
init_seeds(opt.seed + 1 + RANK, deterministic=True)
save_dir, data, bs, epochs, nw, imgsz, pretrained = (
opt.save_dir,
Path(opt.data),
opt.batch_size,
opt.epochs,
min(os.cpu_count() - 1, opt.workers),
opt.imgsz,
str(opt.pretrained).lower() == "true",
)
cuda = device.type != "cpu"
# Directories
wdir = save_dir / "weights"
wdir.mkdir(parents=True, exist_ok=True) # make dir
last, best = wdir / "last.pt", wdir / "best.pt"
# Save run settings
yaml_save(save_dir / "opt.yaml", vars(opt))
# Logger
logger = GenericLogger(opt=opt, console_logger=LOGGER) if RANK in {-1, 0} else None
# Download Dataset
with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT):
data_dir = data if data.is_dir() else (DATASETS_DIR / data)
if not data_dir.is_dir():
LOGGER.info(f"\nDataset not found ⚠️, missing path {data_dir}, attempting download...")
t = time.time()
if str(data) == "imagenet":
subprocess.run(["bash", str(ROOT / "data/scripts/get_imagenet.sh")], shell=True, check=True)
else:
url = f"https://github.com/ultralytics/assets/releases/download/v0.0.0/{data}.zip"
download(url, dir=data_dir.parent)
s = f"Dataset download success ✅ ({time.time() - t:.1f}s), saved to {colorstr('bold', data_dir)}\n"
LOGGER.info(s)
# Dataloaders
nc = len([x for x in (data_dir / "train").glob("*") if x.is_dir()]) # number of classes
trainloader = create_classification_dataloader(
path=data_dir / "train",
imgsz=imgsz,
batch_size=bs // WORLD_SIZE,
augment=True,
cache=opt.cache,
rank=LOCAL_RANK,
workers=nw,
)
test_dir = data_dir / "test" if (data_dir / "test").exists() else data_dir / "val" # data/test or data/val
if RANK in {-1, 0}:
testloader = create_classification_dataloader(
path=test_dir,
imgsz=imgsz,
batch_size=bs // WORLD_SIZE * 2,
augment=False,
cache=opt.cache,
rank=-1,
workers=nw,
)
# Model
with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT):
if Path(opt.model).is_file() or opt.model.endswith(".pt"):
model = attempt_load(opt.model, device="cpu", fuse=False)
elif opt.model in torchvision.models.__dict__: # TorchVision models i.e. resnet50, efficientnet_b0
model = torchvision.models.__dict__[opt.model](weights="IMAGENET1K_V1" if pretrained else None)
else:
m = hub.list("ultralytics/yolov5") # + hub.list('pytorch/vision') # models
raise ModuleNotFoundError(f"--model {opt.model} not found. Available models are: \n" + "\n".join(m))
if isinstance(model, DetectionModel):
LOGGER.warning("WARNING ⚠️ pass YOLOv5 classifier model with '-cls' suffix, i.e. '--model yolov5s-cls.pt'")
model = ClassificationModel(model=model, nc=nc, cutoff=opt.cutoff or 10) # convert to classification model
reshape_classifier_output(model, nc) # update class count
for m in model.modules():
if not pretrained and hasattr(m, "reset_parameters"):
m.reset_parameters()
if isinstance(m, torch.nn.Dropout) and opt.dropout is not None:
m.p = opt.dropout # set dropout
for p in model.parameters():
p.requires_grad = True # for training
model = model.to(device)
# Info
if RANK in {-1, 0}:
model.names = trainloader.dataset.classes # attach class names
model.transforms = testloader.dataset.torch_transforms # attach inference transforms
model_info(model)
if opt.verbose:
LOGGER.info(model)
images, labels = next(iter(trainloader))
file = imshow_cls(images[:25], labels[:25], names=model.names, f=save_dir / "train_images.jpg")
logger.log_images(file, name="Train Examples")
logger.log_graph(model, imgsz) # log model
# Optimizer
optimizer = smart_optimizer(model, opt.optimizer, opt.lr0, momentum=0.9, decay=opt.decay)
# Scheduler
lrf = 0.01 # final lr (fraction of lr0)
# lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - lrf) + lrf # cosine
def lf(x):
return (1 - x / epochs) * (1 - lrf) + lrf # linear
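# Worked values as a quick check: with epochs=10 and lrf=0.01, lf(0) == 1.0 (training starts
# at the full lr0) and lf(10) == 0.01 (the final fraction), decreasing linearly in between.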
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
# scheduler = lr_scheduler.OneCycleLR(optimizer, max_lr=lr0, total_steps=epochs, pct_start=0.1,
# final_div_factor=1 / 25 / lrf)
# EMA
ema = ModelEMA(model) if RANK in {-1, 0} else None
# DDP mode
if cuda and RANK != -1:
model = smart_DDP(model)
# Train
t0 = time.time()
criterion = smartCrossEntropyLoss(label_smoothing=opt.label_smoothing) # loss function
best_fitness = 0.0
scaler = amp.GradScaler(enabled=cuda)
val = test_dir.stem # 'val' or 'test'
LOGGER.info(
f"Image sizes {imgsz} train, {imgsz} test\n"
f"Using {nw * WORLD_SIZE} dataloader workers\n"
f"Logging results to {colorstr('bold', save_dir)}\n"
f"Starting {opt.model} training on {data} dataset with {nc} classes for {epochs} epochs...\n\n"
f"{'Epoch':>10}{'GPU_mem':>10}{'train_loss':>12}{f'{val}_loss':>12}{'top1_acc':>12}{'top5_acc':>12}"
)
for epoch in range(epochs): # loop over the dataset multiple times
tloss, vloss, fitness = 0.0, 0.0, 0.0 # train loss, val loss, fitness
model.train()
if RANK != -1:
trainloader.sampler.set_epoch(epoch)
pbar = enumerate(trainloader)
if RANK in {-1, 0}:
pbar = tqdm(enumerate(trainloader), total=len(trainloader), bar_format=TQDM_BAR_FORMAT)
for i, (images, labels) in pbar: # progress bar
images, labels = images.to(device, non_blocking=True), labels.to(device)
# Forward
with amp.autocast(enabled=cuda): # stability issues when enabled
loss = criterion(model(images), labels)
# Backward
scaler.scale(loss).backward()
# Optimize
scaler.unscale_(optimizer) # unscale gradients
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
if ema:
ema.update(model)
if RANK in {-1, 0}:
# Print
tloss = (tloss * i + loss.item()) / (i + 1) # update mean losses
mem = "%.3gG" % (torch.cuda.memory_reserved() / 1e9 if torch.cuda.is_available() else 0) # (GB)
pbar.desc = f"{f'{epoch + 1}/{epochs}':>10}{mem:>10}{tloss:>12.3g}" + " " * 36
# Test
if i == len(pbar) - 1: # last batch
top1, top5, vloss = validate.run(
model=ema.ema, dataloader=testloader, criterion=criterion, pbar=pbar
) # test accuracy, loss
fitness = top1 # define fitness as top1 accuracy
# Scheduler
scheduler.step()
# Log metrics
if RANK in {-1, 0}:
# Best fitness
if fitness > best_fitness:
best_fitness = fitness
# Log
metrics = {
"train/loss": tloss,
f"{val}/loss": vloss,
"metrics/accuracy_top1": top1,
"metrics/accuracy_top5": top5,
"lr/0": optimizer.param_groups[0]["lr"],
} # learning rate
logger.log_metrics(metrics, epoch)
# Save model
final_epoch = epoch + 1 == epochs
if (not opt.nosave) or final_epoch:
ckpt = {
"epoch": epoch,
"best_fitness": best_fitness,
"model": deepcopy(ema.ema).half(), # deepcopy(de_parallel(model)).half(),
"ema": None, # deepcopy(ema.ema).half(),
"updates": ema.updates,
"optimizer": None, # optimizer.state_dict(),
"opt": vars(opt),
"git": GIT_INFO, # {remote, branch, commit} if a git repo
"date": datetime.now().isoformat(),
}
# Save last, best and delete
torch.save(ckpt, last)
if best_fitness == fitness:
torch.save(ckpt, best)
del ckpt
# Train complete
if RANK in {-1, 0} and final_epoch:
LOGGER.info(
f"\nTraining complete ({(time.time() - t0) / 3600:.3f} hours)"
f"\nResults saved to {colorstr('bold', save_dir)}"
f"\nPredict: python classify/predict.py --weights {best} --source im.jpg"
f"\nValidate: python classify/val.py --weights {best} --data {data_dir}"
f"\nExport: python export.py --weights {best} --include onnx"
f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{best}')"
f"\nVisualize: https://netron.app\n"
)
# Plot examples
images, labels = (x[:25] for x in next(iter(testloader))) # first 25 images and labels
pred = torch.max(ema.ema(images.to(device)), 1)[1]
file = imshow_cls(images, labels, pred, de_parallel(model).names, verbose=False, f=save_dir / "test_images.jpg")
# Log results
meta = {"epochs": epochs, "top1_acc": best_fitness, "date": datetime.now().isoformat()}
logger.log_images(file, name="Test Examples (true-predicted)", epoch=epoch)
logger.log_model(best, epochs, metadata=meta)
def parse_opt(known=False):
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, default="yolov5s-cls.pt", help="initial weights path")
parser.add_argument("--data", type=str, default="imagenette160", help="cifar10, cifar100, mnist, imagenet, ...")
parser.add_argument("--epochs", type=int, default=10, help="total training epochs")
parser.add_argument("--batch-size", type=int, default=64, help="total batch size for all GPUs")
parser.add_argument("--imgsz", "--img", "--img-size", type=int, default=224, help="train, val image size (pixels)")
parser.add_argument("--nosave", action="store_true", help="only save final checkpoint")
parser.add_argument("--cache", type=str, nargs="?", const="ram", help='--cache images in "ram" (default) or "disk"')
parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
parser.add_argument("--workers", type=int, default=8, help="max dataloader workers (per RANK in DDP mode)")
parser.add_argument("--project", default=ROOT / "runs/train-cls", help="save to project/name")
parser.add_argument("--name", default="exp", help="save to project/name")
parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment")
parser.add_argument("--pretrained", nargs="?", const=True, default=True, help="start from i.e. --pretrained False")
parser.add_argument("--optimizer", choices=["SGD", "Adam", "AdamW", "RMSProp"], default="Adam", help="optimizer")
parser.add_argument("--lr0", type=float, default=0.001, help="initial learning rate")
parser.add_argument("--decay", type=float, default=5e-5, help="weight decay")
parser.add_argument("--label-smoothing", type=float, default=0.1, help="Label smoothing epsilon")
parser.add_argument("--cutoff", type=int, default=None, help="Model layer cutoff index for Classify() head")
parser.add_argument("--dropout", type=float, default=None, help="Dropout (fraction)")
parser.add_argument("--verbose", action="store_true", help="Verbose mode")
parser.add_argument("--seed", type=int, default=0, help="Global training seed")
parser.add_argument("--local_rank", type=int, default=-1, help="Automatic DDP Multi-GPU argument, do not modify")
return parser.parse_known_args()[0] if known else parser.parse_args()
def main(opt):
if RANK in {-1, 0}:
print_args(vars(opt))
check_git_status()
check_requirements(ROOT / "requirements.txt")
# DDP mode
device = select_device(opt.device, batch_size=opt.batch_size)
if LOCAL_RANK != -1:
assert opt.batch_size != -1, "AutoBatch is coming soon for classification, please pass a valid --batch-size"
assert opt.batch_size % WORLD_SIZE == 0, f"--batch-size {opt.batch_size} must be multiple of WORLD_SIZE"
assert torch.cuda.device_count() > LOCAL_RANK, "insufficient CUDA devices for DDP command"
torch.cuda.set_device(LOCAL_RANK)
device = torch.device("cuda", LOCAL_RANK)
dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo")
# Parameters
opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run
# Train
train(opt, device)
def run(**kwargs):
opt = parse_opt(True)
for k, v in kwargs.items():
setattr(opt, k, v)
main(opt)
return opt
if __name__ == "__main__":
opt = parse_opt()
main(opt) | --- +++ @@ -1,4 +1,17 @@ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""
+Train a YOLOv5 classifier model on a classification dataset.
+
+Usage - Single-GPU training:
+ $ python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224
+
+Usage - Multi-GPU DDP training:
+ $ python -m torch.distributed.run --nproc_per_node 4 --master_port 2022 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3
+
+Datasets: --data mnist, fashion-mnist, cifar10, cifar100, imagenette, imagewoof, imagenet, or 'path/to/data'
+YOLOv5-cls models: --model yolov5n-cls.pt, yolov5s-cls.pt, yolov5m-cls.pt, yolov5l-cls.pt, yolov5x-cls.pt
+Torchvision models: --model resnet50, efficientnet_b0, etc. See https://pytorch.org/vision/stable/models.html
+"""
import argparse
import os
@@ -63,6 +76,7 @@
def train(opt, device):
+ """Trains a YOLOv5 model, managing datasets, model optimization, logging, and saving checkpoints."""
init_seeds(opt.seed + 1 + RANK, deterministic=True)
save_dir, data, bs, epochs, nw, imgsz, pretrained = (
opt.save_dir,
@@ -166,6 +180,7 @@
# lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - lrf) + lrf # cosine
def lf(x):
+ """Linear learning rate scheduler function, scaling learning rate from initial value to `lrf` over `epochs`."""
return (1 - x / epochs) * (1 - lrf) + lrf # linear
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
@@ -296,6 +311,9 @@
def parse_opt(known=False):
+ """Parses command line arguments for YOLOv5 training including model path, dataset, epochs, and more, returning
+ parsed arguments.
+ """
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, default="yolov5s-cls.pt", help="initial weights path")
parser.add_argument("--data", type=str, default="imagenette160", help="cifar10, cifar100, mnist, imagenet, ...")
@@ -323,6 +341,7 @@
def main(opt):
+ """Executes YOLOv5 training with given options, handling device setup and DDP mode; includes pre-training checks."""
if RANK in {-1, 0}:
print_args(vars(opt))
check_git_status()
@@ -346,6 +365,10 @@
def run(**kwargs):
+ """Executes YOLOv5 model training or inference with specified parameters, returning updated options.
+
+ Example: from yolov5 import classify; classify.train.run(data='mnist', imgsz=320, model='yolov5m')
+ """
opt = parse_opt(True)
for k, v in kwargs.items():
setattr(opt, k, v)
@@ -355,4 +378,4 @@
if __name__ == "__main__":
opt = parse_opt()
- main(opt)
+ main(opt)
| https://raw.githubusercontent.com/ultralytics/yolov5/HEAD/classify/train.py |
Write docstrings including parameters and return values | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
import json
import os
import warnings
from pathlib import Path
import torch
from packaging.version import parse
from utils.general import LOGGER, colorstr, cv2
from utils.loggers.clearml.clearml_utils import ClearmlLogger
from utils.loggers.wandb.wandb_utils import WandbLogger
from utils.plots import plot_images, plot_labels, plot_results
from utils.torch_utils import de_parallel
LOGGERS = ("csv", "tb", "wandb", "clearml", "comet") # *.csv, TensorBoard, Weights & Biases, ClearML
RANK = int(os.getenv("RANK", -1))
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
def SummaryWriter(*args):
return None # None = SummaryWriter(str)
try:
import wandb
assert hasattr(wandb, "__version__") # verify package import not local dir
if parse(wandb.__version__) >= parse("0.12.2") and RANK in {0, -1}:
try:
wandb_login_success = wandb.login(timeout=30)
except wandb.errors.UsageError: # known non-TTY terminal issue
wandb_login_success = False
if not wandb_login_success:
wandb = None
except (ImportError, AssertionError):
wandb = None
try:
import clearml
assert hasattr(clearml, "__version__") # verify package import not local dir
except (ImportError, AssertionError):
clearml = None
try:
if RANK in {0, -1}:
import comet_ml
assert hasattr(comet_ml, "__version__") # verify package import not local dir
from utils.loggers.comet import CometLogger
else:
comet_ml = None
except (ImportError, AssertionError):
comet_ml = None
def _json_default(value):
if isinstance(value, torch.Tensor):
try:
value = value.item()
except ValueError: # "only one element tensors can be converted to Python scalars"
pass
return value if isinstance(value, float) else str(value)
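# A short sketch of how _json_default is used with json.dumps (only json and torch assumed):
# tensors are unwrapped to Python scalars where possible, and non-float values fall back to str.
import json

import torch

print(json.dumps({"loss": torch.tensor(0.25), "lr": 0.01}, default=_json_default))
# -> {"loss": 0.25, "lr": 0.01}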
class Loggers:
def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS):
self.save_dir = save_dir
self.weights = weights
self.opt = opt
self.hyp = hyp
self.plots = not opt.noplots # plot results
self.logger = logger # for printing results to console
self.include = include
self.keys = [
"train/box_loss",
"train/obj_loss",
"train/cls_loss", # train loss
"metrics/precision",
"metrics/recall",
"metrics/mAP_0.5",
"metrics/mAP_0.5:0.95", # metrics
"val/box_loss",
"val/obj_loss",
"val/cls_loss", # val loss
"x/lr0",
"x/lr1",
"x/lr2",
] # params
self.best_keys = ["best/epoch", "best/precision", "best/recall", "best/mAP_0.5", "best/mAP_0.5:0.95"]
for k in LOGGERS:
setattr(self, k, None) # init empty logger dictionary
self.csv = True # always log to csv
self.ndjson_console = "ndjson_console" in self.include # log ndjson to console
self.ndjson_file = "ndjson_file" in self.include # log ndjson to file
# Messages
if not comet_ml:
prefix = colorstr("Comet: ")
s = f"{prefix}run 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet"
self.logger.info(s)
# TensorBoard
s = self.save_dir
if "tb" in self.include and not self.opt.evolve:
prefix = colorstr("TensorBoard: ")
self.logger.info(f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/")
self.tb = SummaryWriter(str(s))
# W&B
if wandb and "wandb" in self.include:
self.opt.hyp = self.hyp # add hyperparameters
self.wandb = WandbLogger(self.opt)
else:
self.wandb = None
# ClearML
if clearml and "clearml" in self.include:
try:
self.clearml = ClearmlLogger(self.opt, self.hyp)
except Exception:
self.clearml = None
prefix = colorstr("ClearML: ")
LOGGER.warning(
f"{prefix}WARNING ⚠️ ClearML is installed but not configured, skipping ClearML logging."
f" See https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration#readme"
)
else:
self.clearml = None
# Comet
if comet_ml and "comet" in self.include:
if isinstance(self.opt.resume, str) and self.opt.resume.startswith("comet://"):
run_id = self.opt.resume.split("/")[-1]
self.comet_logger = CometLogger(self.opt, self.hyp, run_id=run_id)
else:
self.comet_logger = CometLogger(self.opt, self.hyp)
else:
self.comet_logger = None
@property
def remote_dataset(self):
data_dict = None
if self.clearml:
data_dict = self.clearml.data_dict
if self.wandb:
data_dict = self.wandb.data_dict
if self.comet_logger:
data_dict = self.comet_logger.data_dict
return data_dict
def on_train_start(self):
if self.comet_logger:
self.comet_logger.on_train_start()
def on_pretrain_routine_start(self):
if self.comet_logger:
self.comet_logger.on_pretrain_routine_start()
def on_pretrain_routine_end(self, labels, names):
if self.plots:
plot_labels(labels, names, self.save_dir)
paths = self.save_dir.glob("*labels*.jpg") # training labels
if self.wandb:
self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]})
if self.comet_logger:
self.comet_logger.on_pretrain_routine_end(paths)
if self.clearml:
for path in paths:
self.clearml.log_plot(title=path.stem, plot_path=path)
def on_train_batch_end(self, model, ni, imgs, targets, paths, vals):
log_dict = dict(zip(self.keys[:3], vals))
# Callback runs on train batch end
# ni: number integrated batches (since train start)
if self.plots:
if ni < 3:
f = self.save_dir / f"train_batch{ni}.jpg" # filename
plot_images(imgs, targets, paths, f)
if ni == 0 and self.tb and not self.opt.sync_bn:
log_tensorboard_graph(self.tb, model, imgsz=(self.opt.imgsz, self.opt.imgsz))
if ni == 10 and (self.wandb or self.clearml):
files = sorted(self.save_dir.glob("train*.jpg"))
if self.wandb:
self.wandb.log({"Mosaics": [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]})
if self.clearml:
self.clearml.log_debug_samples(files, title="Mosaics")
if self.comet_logger:
self.comet_logger.on_train_batch_end(log_dict, step=ni)
def on_train_epoch_end(self, epoch):
if self.wandb:
self.wandb.current_epoch = epoch + 1
if self.comet_logger:
self.comet_logger.on_train_epoch_end(epoch)
def on_val_start(self):
if self.comet_logger:
self.comet_logger.on_val_start()
def on_val_image_end(self, pred, predn, path, names, im):
if self.wandb:
self.wandb.val_one_image(pred, predn, path, names, im)
if self.clearml:
self.clearml.log_image_with_boxes(path, pred, names, im)
def on_val_batch_end(self, batch_i, im, targets, paths, shapes, out):
if self.comet_logger:
self.comet_logger.on_val_batch_end(batch_i, im, targets, paths, shapes, out)
def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix):
if self.wandb or self.clearml:
files = sorted(self.save_dir.glob("val*.jpg"))
if self.wandb:
self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]})
if self.clearml:
self.clearml.log_debug_samples(files, title="Validation")
if self.comet_logger:
self.comet_logger.on_val_end(nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix)
def on_fit_epoch_end(self, vals, epoch, best_fitness, fi):
x = dict(zip(self.keys, vals))
if self.csv:
file = self.save_dir / "results.csv"
n = len(x) + 1 # number of cols
s = "" if file.exists() else (("%20s," * n % tuple(["epoch", *self.keys])).rstrip(",") + "\n") # add header
with open(file, "a") as f:
f.write(s + ("%20.5g," * n % tuple([epoch, *vals])).rstrip(",") + "\n")
if self.ndjson_console or self.ndjson_file:
json_data = json.dumps(dict(epoch=epoch, **x), default=_json_default)
if self.ndjson_console:
print(json_data)
if self.ndjson_file:
file = self.save_dir / "results.ndjson"
with open(file, "a") as f:
print(json_data, file=f)
if self.tb:
for k, v in x.items():
self.tb.add_scalar(k, v, epoch)
elif self.clearml: # log to ClearML if TensorBoard not used
self.clearml.log_scalars(x, epoch)
if self.wandb:
if best_fitness == fi:
best_results = [epoch, *vals[3:7]]
for i, name in enumerate(self.best_keys):
self.wandb.wandb_run.summary[name] = best_results[i] # log best results in the summary
self.wandb.log(x)
self.wandb.end_epoch()
if self.clearml:
self.clearml.current_epoch_logged_images = set() # reset epoch image limit
self.clearml.current_epoch += 1
if self.comet_logger:
self.comet_logger.on_fit_epoch_end(x, epoch=epoch)
def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
if (epoch + 1) % self.opt.save_period == 0 and not final_epoch and self.opt.save_period != -1:
if self.wandb:
self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)
if self.clearml:
self.clearml.task.update_output_model(
model_path=str(last), model_name="Latest Model", auto_delete_file=False
)
if self.comet_logger:
self.comet_logger.on_model_save(last, epoch, final_epoch, best_fitness, fi)
def on_train_end(self, last, best, epoch, results):
if self.plots:
plot_results(file=self.save_dir / "results.csv") # save results.png
files = ["results.png", "confusion_matrix.png", *(f"{x}_curve.png" for x in ("F1", "PR", "P", "R"))]
files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter
self.logger.info(f"Results saved to {colorstr('bold', self.save_dir)}")
if self.tb and not self.clearml: # These images are already captured by ClearML by now, we don't want doubles
for f in files:
self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats="HWC")
if self.wandb:
self.wandb.log(dict(zip(self.keys[3:10], results)))
self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]})
# Calling wandb.log. TODO: Refactor this into WandbLogger.log_model
if not self.opt.evolve:
wandb.log_artifact(
str(best if best.exists() else last),
type="model",
name=f"run_{self.wandb.wandb_run.id}_model",
aliases=["latest", "best", "stripped"],
)
self.wandb.finish_run()
if self.clearml and not self.opt.evolve:
self.clearml.log_summary(dict(zip(self.keys[3:10], results)))
[self.clearml.log_plot(title=f.stem, plot_path=f) for f in files]
self.clearml.log_model(
str(best if best.exists() else last), "Best Model" if best.exists() else "Last Model", epoch
)
if self.comet_logger:
final_results = dict(zip(self.keys[3:10], results))
self.comet_logger.on_train_end(files, self.save_dir, last, best, epoch, final_results)
def on_params_update(self, params: dict):
if self.wandb:
self.wandb.wandb_run.config.update(params, allow_val_change=True)
if self.comet_logger:
self.comet_logger.on_params_update(params)
if self.clearml:
self.clearml.task.connect(params)
class GenericLogger:
def __init__(self, opt, console_logger, include=("tb", "wandb", "clearml")):
self.save_dir = Path(opt.save_dir)
self.include = include
self.console_logger = console_logger
self.csv = self.save_dir / "results.csv" # CSV logger
if "tb" in self.include:
prefix = colorstr("TensorBoard: ")
self.console_logger.info(
f"{prefix}Start with 'tensorboard --logdir {self.save_dir.parent}', view at http://localhost:6006/"
)
self.tb = SummaryWriter(str(self.save_dir))
if wandb and "wandb" in self.include:
self.wandb = wandb.init(
project=web_project_name(str(opt.project)), name=None if opt.name == "exp" else opt.name, config=opt
)
else:
self.wandb = None
if clearml and "clearml" in self.include:
try:
# Hyp is not available in classification mode
hyp = {} if "hyp" not in opt else opt.hyp
self.clearml = ClearmlLogger(opt, hyp)
except Exception:
self.clearml = None
prefix = colorstr("ClearML: ")
LOGGER.warning(
f"{prefix}WARNING ⚠️ ClearML is installed but not configured, skipping ClearML logging."
f" See https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration"
)
else:
self.clearml = None
def log_metrics(self, metrics, epoch):
if self.csv:
keys, vals = list(metrics.keys()), list(metrics.values())
n = len(metrics) + 1 # number of cols
s = "" if self.csv.exists() else (("%23s," * n % tuple(["epoch", *keys])).rstrip(",") + "\n") # header
with open(self.csv, "a") as f:
f.write(s + ("%23.5g," * n % tuple([epoch, *vals])).rstrip(",") + "\n")
if self.tb:
for k, v in metrics.items():
self.tb.add_scalar(k, v, epoch)
if self.wandb:
self.wandb.log(metrics, step=epoch)
if self.clearml:
self.clearml.log_scalars(metrics, epoch)
def log_images(self, files, name="Images", epoch=0):
files = [Path(f) for f in (files if isinstance(files, (tuple, list)) else [files])] # to Path
files = [f for f in files if f.exists()] # filter by exists
if self.tb:
for f in files:
self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats="HWC")
if self.wandb:
self.wandb.log({name: [wandb.Image(str(f), caption=f.name) for f in files]}, step=epoch)
if self.clearml:
if name == "Results":
[self.clearml.log_plot(f.stem, f) for f in files]
else:
self.clearml.log_debug_samples(files, title=name)
def log_graph(self, model, imgsz=(640, 640)):
if self.tb:
log_tensorboard_graph(self.tb, model, imgsz)
def log_model(self, model_path, epoch=0, metadata=None):
if metadata is None:
metadata = {}
# Log model to all loggers
if self.wandb:
art = wandb.Artifact(name=f"run_{wandb.run.id}_model", type="model", metadata=metadata)
art.add_file(str(model_path))
wandb.log_artifact(art)
if self.clearml:
self.clearml.log_model(model_path=model_path, model_name=model_path.stem)
def update_params(self, params):
if self.wandb:
wandb.run.config.update(params, allow_val_change=True)
if self.clearml:
self.clearml.task.connect(params)
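# Typical GenericLogger usage, mirroring how classify/train.py drives it; a sketch where `opt`
# stands in for a parsed argparse namespace that provides a valid save_dir:
from utils.general import LOGGER

logger = GenericLogger(opt=opt, console_logger=LOGGER)
logger.log_metrics({"train/loss": 0.42, "metrics/accuracy_top1": 0.91}, epoch=0)
logger.log_images("results.png", name="Results", epoch=0)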
def log_tensorboard_graph(tb, model, imgsz=(640, 640)):
try:
p = next(model.parameters()) # for device, type
imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz # expand
im = torch.zeros((1, 3, *imgsz)).to(p.device).type_as(p) # input image (WARNING: must be zeros, not empty)
with warnings.catch_warnings():
warnings.simplefilter("ignore") # suppress jit trace warning
tb.add_graph(torch.jit.trace(de_parallel(model), im, strict=False), [])
except Exception as e:
LOGGER.warning(f"WARNING ⚠️ TensorBoard graph visualization failure {e}")
def web_project_name(project):
if not project.startswith("runs/train"):
return project
suffix = "-Classify" if project.endswith("-cls") else "-Segment" if project.endswith("-seg") else ""
return f"YOLOv5{suffix}" | --- +++ @@ -1,4 +1,5 @@ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""Logging utils."""
import json
import os
@@ -22,6 +23,7 @@ except ImportError:
def SummaryWriter(*args):
+ """Fall back to SummaryWriter returning None if TensorBoard is not installed."""
return None # None = SummaryWriter(str)
@@ -60,6 +62,10 @@
def _json_default(value):
+ """Format `value` for JSON serialization (e.g. unwrap tensors).
+
+ Fall back to strings.
+ """
if isinstance(value, torch.Tensor):
try:
value = value.item()
@@ -69,8 +75,10 @@
class Loggers:
+ """Initializes and manages various logging utilities for tracking YOLOv5 training and validation metrics."""
def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS):
+ """Initializes loggers for YOLOv5 training and validation metrics, paths, and options."""
self.save_dir = save_dir
self.weights = weights
self.opt = opt
@@ -148,6 +156,7 @@
@property
def remote_dataset(self):
+ """Fetches dataset dictionary from remote logging services like ClearML, Weights & Biases, or Comet ML."""
data_dict = None
if self.clearml:
data_dict = self.clearml.data_dict
@@ -159,14 +168,17 @@ return data_dict
def on_train_start(self):
+ """Initializes the training process for Comet ML logger if it's configured."""
if self.comet_logger:
self.comet_logger.on_train_start()
def on_pretrain_routine_start(self):
+ """Invokes pre-training routine start hook for Comet ML logger if available."""
if self.comet_logger:
self.comet_logger.on_pretrain_routine_start()
def on_pretrain_routine_end(self, labels, names):
+ """Callback that runs at the end of pre-training routine, logging label plots if enabled."""
if self.plots:
plot_labels(labels, names, self.save_dir)
paths = self.save_dir.glob("*labels*.jpg") # training labels
@@ -179,6 +191,7 @@ self.clearml.log_plot(title=path.stem, plot_path=path)
def on_train_batch_end(self, model, ni, imgs, targets, paths, vals):
+ """Logs training batch end events, plots images, and updates external loggers with batch-end data."""
log_dict = dict(zip(self.keys[:3], vals))
# Callback runs on train batch end
# ni: number integrated batches (since train start)
@@ -199,6 +212,7 @@ self.comet_logger.on_train_batch_end(log_dict, step=ni)
def on_train_epoch_end(self, epoch):
+ """Callback that updates the current epoch in Weights & Biases at the end of a training epoch."""
if self.wandb:
self.wandb.current_epoch = epoch + 1
@@ -206,20 +220,24 @@ self.comet_logger.on_train_epoch_end(epoch)
def on_val_start(self):
+ """Callback that signals the start of a validation phase to the Comet logger."""
if self.comet_logger:
self.comet_logger.on_val_start()
def on_val_image_end(self, pred, predn, path, names, im):
+ """Callback that logs a validation image and its predictions to WandB or ClearML."""
if self.wandb:
self.wandb.val_one_image(pred, predn, path, names, im)
if self.clearml:
self.clearml.log_image_with_boxes(path, pred, names, im)
def on_val_batch_end(self, batch_i, im, targets, paths, shapes, out):
+ """Logs validation batch results to Comet ML during training at the end of each validation batch."""
if self.comet_logger:
self.comet_logger.on_val_batch_end(batch_i, im, targets, paths, shapes, out)
def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix):
+ """Logs validation results to WandB or ClearML at the end of the validation process."""
if self.wandb or self.clearml:
files = sorted(self.save_dir.glob("val*.jpg"))
if self.wandb:
@@ -231,6 +249,7 @@ self.comet_logger.on_val_end(nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix)
def on_fit_epoch_end(self, vals, epoch, best_fitness, fi):
+ """Callback that logs metrics and saves them to CSV or NDJSON at the end of each fit (train+val) epoch."""
x = dict(zip(self.keys, vals))
if self.csv:
file = self.save_dir / "results.csv"
@@ -269,6 +288,7 @@ self.comet_logger.on_fit_epoch_end(x, epoch=epoch)
def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
+ """Callback that handles model saving events, logging to Weights & Biases or ClearML if enabled."""
if (epoch + 1) % self.opt.save_period == 0 and not final_epoch and self.opt.save_period != -1:
if self.wandb:
self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)
@@ -281,6 +301,7 @@ self.comet_logger.on_model_save(last, epoch, final_epoch, best_fitness, fi)
def on_train_end(self, last, best, epoch, results):
+ """Callback that runs at the end of training to save plots and log results."""
if self.plots:
plot_results(file=self.save_dir / "results.csv") # save results.png
files = ["results.png", "confusion_matrix.png", *(f"{x}_curve.png" for x in ("F1", "PR", "P", "R"))]
@@ -316,6 +337,7 @@ self.comet_logger.on_train_end(files, self.save_dir, last, best, epoch, final_results)
def on_params_update(self, params: dict):
+ """Updates experiment hyperparameters or configurations in WandB, Comet, or ClearML."""
if self.wandb:
self.wandb.wandb_run.config.update(params, allow_val_change=True)
if self.comet_logger:
@@ -325,8 +347,17 @@
class GenericLogger:
+ """YOLOv5 General purpose logger for non-task specific logging Usage: from utils.loggers import GenericLogger;
+ logger = GenericLogger(...).
+
+ Args:
+ opt: Run arguments
+ console_logger: Console logger
+ include: loggers to include
+ """
def __init__(self, opt, console_logger, include=("tb", "wandb", "clearml")):
+ """Initializes a generic logger with optional TensorBoard, W&B, and ClearML support."""
self.save_dir = Path(opt.save_dir)
self.include = include
self.console_logger = console_logger
@@ -361,6 +392,7 @@ self.clearml = None
def log_metrics(self, metrics, epoch):
+ """Logs metrics to CSV, TensorBoard, W&B, and ClearML; `metrics` is a dict, `epoch` is an int."""
if self.csv:
keys, vals = list(metrics.keys()), list(metrics.values())
n = len(metrics) + 1 # number of cols
@@ -379,6 +411,7 @@ self.clearml.log_scalars(metrics, epoch)
def log_images(self, files, name="Images", epoch=0):
+ """Logs images to all loggers with optional naming and epoch specification."""
files = [Path(f) for f in (files if isinstance(files, (tuple, list)) else [files])] # to Path
files = [f for f in files if f.exists()] # filter by exists
@@ -396,10 +429,12 @@ self.clearml.log_debug_samples(files, title=name)
def log_graph(self, model, imgsz=(640, 640)):
+ """Logs model graph to all configured loggers with specified input image size."""
if self.tb:
log_tensorboard_graph(self.tb, model, imgsz)
def log_model(self, model_path, epoch=0, metadata=None):
+ """Logs the model to all configured loggers with optional epoch and metadata."""
if metadata is None:
metadata = {}
# Log model to all loggers
@@ -411,6 +446,7 @@ self.clearml.log_model(model_path=model_path, model_name=model_path.stem)
def update_params(self, params):
+ """Updates logged parameters in WandB and/or ClearML if enabled."""
if self.wandb:
wandb.run.config.update(params, allow_val_change=True)
if self.clearml:
@@ -418,6 +454,7 @@
def log_tensorboard_graph(tb, model, imgsz=(640, 640)):
+ """Logs the model graph to TensorBoard with specified image size and model."""
try:
p = next(model.parameters()) # for device, type
imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz # expand
@@ -430,7 +467,8 @@
def web_project_name(project):
+ """Converts a local project name to a standardized web project name with optional suffixes."""
if not project.startswith("runs/train"):
return project
suffix = "-Classify" if project.endswith("-cls") else "-Segment" if project.endswith("-seg") else ""
- return f"YOLOv5{suffix}"+ return f"YOLOv5{suffix}"
| https://raw.githubusercontent.com/ultralytics/yolov5/HEAD/utils/loggers/__init__.py |
Add docstrings that explain inputs and outputs | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
import argparse
import json
import os
import subprocess
import sys
from pathlib import Path
import numpy as np
import torch
from tqdm import tqdm
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0] # YOLOv5 root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
from models.common import DetectMultiBackend
from utils.callbacks import Callbacks
from utils.dataloaders import create_dataloader
from utils.general import (
LOGGER,
TQDM_BAR_FORMAT,
Profile,
check_dataset,
check_img_size,
check_requirements,
check_yaml,
coco80_to_coco91_class,
colorstr,
increment_path,
non_max_suppression,
print_args,
scale_boxes,
xywh2xyxy,
xyxy2xywh,
)
from utils.metrics import ConfusionMatrix, ap_per_class, box_iou
from utils.plots import output_to_target, plot_images, plot_val_study
from utils.torch_utils import select_device, smart_inference_mode
def save_one_txt(predn, save_conf, shape, file):
gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh
for *xyxy, conf, cls in predn.tolist():
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
with open(file, "a") as f:
f.write(("%g " * len(line)).rstrip() % line + "\n")
def save_one_json(predn, jdict, path, class_map):
image_id = int(path.stem) if path.stem.isnumeric() else path.stem
box = xyxy2xywh(predn[:, :4]) # xywh
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
for p, b in zip(predn.tolist(), box.tolist()):
jdict.append(
{
"image_id": image_id,
"category_id": class_map[int(p[5])],
"bbox": [round(x, 3) for x in b],
"score": round(p[4], 5),
}
)
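# Each appended entry follows the COCO results schema, e.g. with illustrative values:
#   {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
# where bbox is [x_min, y_min, width, height] in pixels, hence the center-to-corner shift above.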
def process_batch(detections, labels, iouv):
correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool)
iou = box_iou(labels[:, 1:], detections[:, :4])
correct_class = labels[:, 0:1] == detections[:, 5]
for i in range(len(iouv)):
x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match
if x[0].shape[0]:
matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou]
if x[0].shape[0] > 1:
matches = matches[matches[:, 2].argsort()[::-1]]
matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
# matches = matches[matches[:, 2].argsort()[::-1]]
matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
correct[matches[:, 1].astype(int), i] = True
return torch.tensor(correct, dtype=torch.bool, device=iouv.device)
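# A tiny worked example of process_batch as a sketch (torch only): one ground-truth box and
# one perfectly matching prediction evaluated against the standard 10-threshold IoU vector.
import torch

labels = torch.tensor([[0.0, 10.0, 10.0, 50.0, 50.0]])  # (M, 5): class, x1, y1, x2, y2
detections = torch.tensor([[10.0, 10.0, 50.0, 50.0, 0.9, 0.0]])  # (N, 6): x1, y1, x2, y2, conf, class
iouv = torch.linspace(0.5, 0.95, 10)
correct = process_batch(detections, labels, iouv)
assert correct.shape == (1, 10) and correct.all()  # IoU == 1.0 passes every threshold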
@smart_inference_mode()
def run(
data,
weights=None, # model.pt path(s)
batch_size=32, # batch size
imgsz=640, # inference size (pixels)
conf_thres=0.001, # confidence threshold
iou_thres=0.6, # NMS IoU threshold
max_det=300, # maximum detections per image
task="val", # train, val, test, speed or study
device="", # cuda device, i.e. 0 or 0,1,2,3 or cpu
workers=8, # max dataloader workers (per RANK in DDP mode)
single_cls=False, # treat as single-class dataset
augment=False, # augmented inference
verbose=False, # verbose output
save_txt=False, # save results to *.txt
save_hybrid=False, # save label+prediction hybrid results to *.txt
save_conf=False, # save confidences in --save-txt labels
save_json=False, # save a COCO-JSON results file
project=ROOT / "runs/val", # save to project/name
name="exp", # save to project/name
exist_ok=False, # existing project/name ok, do not increment
half=True, # use FP16 half-precision inference
dnn=False, # use OpenCV DNN for ONNX inference
model=None,
dataloader=None,
save_dir=Path(""),
plots=True,
callbacks=Callbacks(),
compute_loss=None,
):
# Initialize/load model and set device
training = model is not None
if training: # called by train.py
device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model
half &= device.type != "cpu" # half precision only supported on CUDA
model.half() if half else model.float()
else: # called directly
device = select_device(device, batch_size=batch_size)
# Directories
save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
(save_dir / "labels" if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Load model
model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
imgsz = check_img_size(imgsz, s=stride) # check image size
half = model.fp16 # FP16 supported on limited backends with CUDA
if engine:
batch_size = model.batch_size
else:
device = model.device
if not (pt or jit):
batch_size = 1 # export.py models default to batch-size 1
LOGGER.info(f"Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models")
# Data
data = check_dataset(data) # check
# Configure
model.eval()
cuda = device.type != "cpu"
is_coco = isinstance(data.get("val"), str) and data["val"].endswith(f"coco{os.sep}val2017.txt") # COCO dataset
nc = 1 if single_cls else int(data["nc"]) # number of classes
iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95
niou = iouv.numel()
# Dataloader
if not training:
if pt and not single_cls: # check --weights are trained on --data
ncm = model.model.nc
assert ncm == nc, (
f"{weights} ({ncm} classes) trained on different --data than what you passed ({nc} "
f"classes). Pass correct combination of --weights and --data that are trained together."
)
model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup
pad, rect = (0.0, False) if task == "speed" else (0.5, pt) # square inference for benchmarks
task = task if task in ("train", "val", "test") else "val" # path to train/val/test images
dataloader = create_dataloader(
data[task],
imgsz,
batch_size,
stride,
single_cls,
pad=pad,
rect=rect,
workers=workers,
prefix=colorstr(f"{task}: "),
)[0]
seen = 0
confusion_matrix = ConfusionMatrix(nc=nc)
names = model.names if hasattr(model, "names") else model.module.names # get class names
if isinstance(names, (list, tuple)): # old format
names = dict(enumerate(names))
class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
s = ("%22s" + "%11s" * 6) % ("Class", "Images", "Instances", "P", "R", "mAP50", "mAP50-95")
tp, fp, p, r, f1, mp, mr, map50, ap50, map = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
dt = Profile(device=device), Profile(device=device), Profile(device=device) # profiling times
loss = torch.zeros(3, device=device)
jdict, stats, ap, ap_class = [], [], [], []
callbacks.run("on_val_start")
pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT) # progress bar
for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
callbacks.run("on_val_batch_start")
with dt[0]:
if cuda:
im = im.to(device, non_blocking=True)
targets = targets.to(device)
im = im.half() if half else im.float() # uint8 to fp16/32
im /= 255 # 0 - 255 to 0.0 - 1.0
nb, _, height, width = im.shape # batch size, channels, height, width
# Inference
with dt[1]:
preds, train_out = model(im) if compute_loss else (model(im, augment=augment), None)
# Loss
if compute_loss:
loss += compute_loss(train_out, targets)[1] # box, obj, cls
# NMS
targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels
lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling
with dt[2]:
preds = non_max_suppression(
preds, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls, max_det=max_det
)
# Metrics
for si, pred in enumerate(preds):
labels = targets[targets[:, 0] == si, 1:]
nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions
path, shape = Path(paths[si]), shapes[si][0]
correct = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init
seen += 1
if npr == 0:
if nl:
stats.append((correct, *torch.zeros((2, 0), device=device), labels[:, 0]))
if plots:
confusion_matrix.process_batch(detections=None, labels=labels[:, 0])
continue
# Predictions
if single_cls:
pred[:, 5] = 0
predn = pred.clone()
scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred
# Evaluate
if nl:
tbox = xywh2xyxy(labels[:, 1:5]) # target boxes
scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels
labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels
correct = process_batch(predn, labelsn, iouv)
if plots:
confusion_matrix.process_batch(predn, labelsn)
stats.append((correct, pred[:, 4], pred[:, 5], labels[:, 0])) # (correct, conf, pcls, tcls)
# Save/log
if save_txt:
(save_dir / "labels").mkdir(parents=True, exist_ok=True)
save_one_txt(predn, save_conf, shape, file=save_dir / "labels" / f"{path.stem}.txt")
if save_json:
save_one_json(predn, jdict, path, class_map) # append to COCO-JSON dictionary
callbacks.run("on_val_image_end", pred, predn, path, names, im[si])
# Plot images
if plots and batch_i < 3:
plot_images(im, targets, paths, save_dir / f"val_batch{batch_i}_labels.jpg", names) # labels
plot_images(im, output_to_target(preds), paths, save_dir / f"val_batch{batch_i}_pred.jpg", names) # pred
callbacks.run("on_val_batch_end", batch_i, im, targets, paths, shapes, preds)
# Compute metrics
stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy
if len(stats) and stats[0].any():
tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95
mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
nt = np.bincount(stats[3].astype(int), minlength=nc) # number of targets per class
# Print results
pf = "%22s" + "%11i" * 2 + "%11.3g" * 4 # print format
LOGGER.info(pf % ("all", seen, nt.sum(), mp, mr, map50, map))
if nt.sum() == 0:
LOGGER.warning(f"WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels")
# Print results per class
if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
for i, c in enumerate(ap_class):
LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
# Print speeds
t = tuple(x.t / seen * 1e3 for x in dt) # speeds per image
if not training:
shape = (batch_size, 3, imgsz, imgsz)
LOGGER.info(f"Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}" % t)
# Plots
if plots:
confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
callbacks.run("on_val_end", nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix)
# Save JSON
if save_json and len(jdict):
w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else "" # weights
anno_json = str(Path("../datasets/coco/annotations/instances_val2017.json")) # annotations
if not os.path.exists(anno_json):
anno_json = os.path.join(data["path"], "annotations", "instances_val2017.json")
pred_json = str(save_dir / f"{w}_predictions.json") # predictions
LOGGER.info(f"\nEvaluating pycocotools mAP... saving {pred_json}...")
with open(pred_json, "w") as f:
json.dump(jdict, f)
try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
check_requirements("pycocotools>=2.0.6")
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
anno = COCO(anno_json) # init annotations api
pred = anno.loadRes(pred_json) # init predictions api
eval = COCOeval(anno, pred, "bbox")
if is_coco:
eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files] # image IDs to evaluate
eval.evaluate()
eval.accumulate()
eval.summarize()
map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
except Exception as e:
LOGGER.info(f"pycocotools unable to run: {e}")
# Return results
model.float() # for training
if not training:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ""
LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
maps = np.zeros(nc) + map
for i, c in enumerate(ap_class):
maps[c] = ap[i]
return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
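# --- Editor's illustrative sketch (not part of the original file) ---
# run() can also be called programmatically; the dataset YAML and weights below
# are hypothetical local files (any pair trained together works).
def _example_run():
    results, maps, times = run(data="data/coco128.yaml", weights="yolov5s.pt", imgsz=640, half=False)
    mp, mr, map50, map50_95 = results[:4]  # mean precision/recall, mAP@0.5, mAP@0.5:0.95
    return map50_95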
def parse_opt():
parser = argparse.ArgumentParser()
parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="dataset.yaml path")
parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s.pt", help="model path(s)")
parser.add_argument("--batch-size", type=int, default=32, help="batch size")
parser.add_argument("--imgsz", "--img", "--img-size", type=int, default=640, help="inference size (pixels)")
parser.add_argument("--conf-thres", type=float, default=0.001, help="confidence threshold")
parser.add_argument("--iou-thres", type=float, default=0.6, help="NMS IoU threshold")
parser.add_argument("--max-det", type=int, default=300, help="maximum detections per image")
parser.add_argument("--task", default="val", help="train, val, test, speed or study")
parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
parser.add_argument("--workers", type=int, default=8, help="max dataloader workers (per RANK in DDP mode)")
parser.add_argument("--single-cls", action="store_true", help="treat as single-class dataset")
parser.add_argument("--augment", action="store_true", help="augmented inference")
parser.add_argument("--verbose", action="store_true", help="report mAP by class")
parser.add_argument("--save-txt", action="store_true", help="save results to *.txt")
parser.add_argument("--save-hybrid", action="store_true", help="save label+prediction hybrid results to *.txt")
parser.add_argument("--save-conf", action="store_true", help="save confidences in --save-txt labels")
parser.add_argument("--save-json", action="store_true", help="save a COCO-JSON results file")
parser.add_argument("--project", default=ROOT / "runs/val", help="save to project/name")
parser.add_argument("--name", default="exp", help="save to project/name")
parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment")
parser.add_argument("--half", action="store_true", help="use FP16 half-precision inference")
parser.add_argument("--dnn", action="store_true", help="use OpenCV DNN for ONNX inference")
opt = parser.parse_args()
opt.data = check_yaml(opt.data) # check YAML
opt.save_json |= opt.data.endswith("coco.yaml")
opt.save_txt |= opt.save_hybrid
print_args(vars(opt))
return opt
def main(opt):
check_requirements(ROOT / "requirements.txt", exclude=("tensorboard", "thop"))
if opt.task in ("train", "val", "test"): # run normally
if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466
LOGGER.info(f"WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results")
if opt.save_hybrid:
LOGGER.info("WARNING ⚠️ --save-hybrid will return high mAP from hybrid labels, not from predictions alone")
run(**vars(opt))
else:
weights = opt.weights if isinstance(opt.weights, list) else [opt.weights]
opt.half = torch.cuda.is_available() and opt.device != "cpu" # FP16 for fastest results
if opt.task == "speed": # speed benchmarks
# python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt...
opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False
for opt.weights in weights:
run(**vars(opt), plots=False)
elif opt.task == "study": # speed vs mAP benchmarks
# python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt...
for opt.weights in weights:
f = f"study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt" # filename to save to
x, y = list(range(256, 1536 + 128, 128)), [] # x axis (image sizes), y axis
for opt.imgsz in x: # img-size
LOGGER.info(f"\nRunning {f} --imgsz {opt.imgsz}...")
r, _, t = run(**vars(opt), plots=False)
y.append(r + t) # results and times
np.savetxt(f, y, fmt="%10.4g") # save
subprocess.run(["zip", "-r", "study.zip", "study_*.txt"])
plot_val_study(x=x) # plot
else:
raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")')
if __name__ == "__main__":
opt = parse_opt()
main(opt) | --- +++ @@ -1,4 +1,23 @@ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""
+Validate a trained YOLOv5 detection model on a detection dataset.
+
+Usage:
+ $ python val.py --weights yolov5s.pt --data coco128.yaml --img 640
+
+Usage - formats:
+ $ python val.py --weights yolov5s.pt # PyTorch
+ yolov5s.torchscript # TorchScript
+ yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn
+ yolov5s_openvino_model # OpenVINO
+ yolov5s.engine # TensorRT
+ yolov5s.mlpackage # CoreML (macOS-only)
+ yolov5s_saved_model # TensorFlow SavedModel
+ yolov5s.pb # TensorFlow GraphDef
+ yolov5s.tflite # TensorFlow Lite
+ yolov5s_edgetpu.tflite # TensorFlow Edge TPU
+ yolov5s_paddle_model # PaddlePaddle
+"""
import argparse
import json
@@ -43,6 +62,29 @@
def save_one_txt(predn, save_conf, shape, file):
+ """Saves one detection result to a txt file in normalized xywh format, optionally including confidence.
+
+ Args:
+ predn (torch.Tensor): Predicted bounding boxes and associated confidence scores and classes in xyxy format,
+ tensor of shape (N, 6) where N is the number of detections.
+ save_conf (bool): If True, saves the confidence scores along with the bounding box coordinates.
+ shape (tuple): Shape of the original image as (height, width).
+ file (str | Path): File path where the result will be saved.
+
+ Returns:
+ None
+
+ Examples:
+ ```python
+ predn = torch.tensor([[10, 20, 30, 40, 0.9, 1]]) # example prediction
+ save_one_txt(predn, save_conf=True, shape=(640, 480), file="output.txt")
+ ```
+
+ Notes:
+ The xyxy bounding box format represents the coordinates (xmin, ymin, xmax, ymax).
+ The xywh format represents the coordinates (center_x, center_y, width, height) and is normalized by the width and
+ height of the image.
+ """
gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh
for *xyxy, conf, cls in predn.tolist():
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
@@ -52,6 +94,37 @@
def save_one_json(predn, jdict, path, class_map):
+ """Saves a single JSON detection result, including image ID, category ID, bounding box, and confidence score.
+
+ Args:
+ predn (torch.Tensor): Predicted detections in xyxy format with shape (n, 6) where n is the number of detections.
+ The tensor should contain [x_min, y_min, x_max, y_max, confidence, class_id] for each detection.
+ jdict (list[dict]): List to collect JSON formatted detection results.
+ path (pathlib.Path): Path object of the image file, used to extract image_id.
+ class_map (dict[int, int]): Mapping from model class indices to dataset-specific category IDs.
+
+ Returns:
+ None: Appends detection results as dictionaries to `jdict` list in-place.
+
+ Examples:
+ ```python
+ predn = torch.tensor([[100, 50, 200, 150, 0.9, 0], [50, 30, 100, 80, 0.8, 1]])
+ jdict = []
+ path = Path("42.jpg")
+ class_map = {0: 18, 1: 19}
+ save_one_json(predn, jdict, path, class_map)
+ ```
+ This will append to `jdict`:
+ ```
+ [
+ {'image_id': 42, 'category_id': 18, 'bbox': [100.0, 50.0, 100.0, 100.0], 'score': 0.9},
+ {'image_id': 42, 'category_id': 19, 'bbox': [50.0, 30.0, 50.0, 50.0], 'score': 0.8}
+ ]
+ ```
+
+ Notes:
+ The `bbox` values are formatted as [x, y, width, height], where x and y represent the top-left corner of the box.
+ """
image_id = int(path.stem) if path.stem.isnumeric() else path.stem
box = xyxy2xywh(predn[:, :4]) # xywh
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
@@ -67,6 +140,31 @@
def process_batch(detections, labels, iouv):
+ """Return a correct prediction matrix given detections and labels at various IoU thresholds.
+
+ Args:
+ detections (torch.Tensor): Tensor of shape (N, 6) where each row corresponds to a detection with format [x1, y1,
+ x2, y2, conf, class].
+ labels (torch.Tensor): Tensor of shape (M, 5) where each row corresponds to a ground truth label with format
+ [class, x1, y1, x2, y2].
+ iouv (torch.Tensor): Tensor of IoU thresholds to evaluate at.
+
+ Returns:
+ correct (torch.Tensor): A boolean tensor of shape (N, len(iouv)) indicating whether each detection is a true
+ positive at each IoU threshold. There are 10 IoU levels used in the evaluation.
+
+ Examples:
+ ```python
+ detections = torch.tensor([[50, 50, 200, 200, 0.9, 1], [30, 30, 150, 150, 0.7, 0]])
+ labels = torch.tensor([[1, 50, 50, 200, 200]])
+ iouv = torch.linspace(0.5, 0.95, 10)
+ correct = process_batch(detections, labels, iouv)
+ ```
+
+ Notes:
+ - This function is used as part of the evaluation pipeline for object detection models.
+ - IoU (Intersection over Union) is a common evaluation metric for object detection performance.
+ """
correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool)
iou = box_iou(labels[:, 1:], detections[:, :4])
correct_class = labels[:, 0:1] == detections[:, 5]
@@ -114,6 +212,44 @@ callbacks=Callbacks(),
compute_loss=None,
):
+ """Evaluates a YOLOv5 model on a dataset and logs performance metrics.
+
+ Args:
+ data (str | dict): Path to a dataset YAML file or a dataset dictionary.
+ weights (str | list[str], optional): Path to the model weights file(s). Supports various formats including
+ PyTorch, TorchScript, ONNX, OpenVINO, TensorRT, CoreML, TensorFlow SavedModel, TensorFlow GraphDef,
+ TensorFlow Lite, TensorFlow Edge TPU, and PaddlePaddle.
+ batch_size (int, optional): Batch size for inference. Default is 32.
+ imgsz (int, optional): Input image size (pixels). Default is 640.
+ conf_thres (float, optional): Confidence threshold for object detection. Default is 0.001.
+ iou_thres (float, optional): IoU threshold for Non-Maximum Suppression (NMS). Default is 0.6.
+ max_det (int, optional): Maximum number of detections per image. Default is 300.
+ task (str, optional): Task type - 'train', 'val', 'test', 'speed', or 'study'. Default is 'val'.
+ device (str, optional): Device to use for computation, e.g., '0' or '0,1,2,3' for CUDA or 'cpu' for CPU. Default
+ is ''.
+ workers (int, optional): Number of dataloader workers. Default is 8.
+ single_cls (bool, optional): Treat dataset as a single class. Default is False.
+ augment (bool, optional): Enable augmented inference. Default is False.
+ verbose (bool, optional): Enable verbose output. Default is False.
+ save_txt (bool, optional): Save results to *.txt files. Default is False.
+ save_hybrid (bool, optional): Save label and prediction hybrid results to *.txt files. Default is False.
+ save_conf (bool, optional): Save confidences in --save-txt labels. Default is False.
+ save_json (bool, optional): Save a COCO-JSON results file. Default is False.
+ project (str | Path, optional): Directory to save results. Default is ROOT/'runs/val'.
+ name (str, optional): Name of the run. Default is 'exp'.
+ exist_ok (bool, optional): Overwrite existing project/name without incrementing. Default is False.
+ half (bool, optional): Use FP16 half-precision inference. Default is True.
+ dnn (bool, optional): Use OpenCV DNN for ONNX inference. Default is False.
+ model (torch.nn.Module, optional): Model object for training. Default is None.
+ dataloader (torch.utils.data.DataLoader, optional): Dataloader object. Default is None.
+ save_dir (Path, optional): Directory to save results. Default is Path('').
+ plots (bool, optional): Plot validation images and metrics. Default is True.
+ callbacks (utils.callbacks.Callbacks, optional): Callbacks for logging and monitoring. Default is Callbacks().
+ compute_loss (function, optional): Loss function for training. Default is None.
+
+ Returns:
+ tuple: A 3-tuple (metrics, maps, times), where metrics is (mp, mr, map50, map50-95, *val_losses), maps is a
+ per-class mAP@0.5:0.95 array, and times are per-image speeds (ms) for pre-process, inference, and NMS.
+ """
# Initialize/load model and set device
training = model is not None
if training: # called by train.py
@@ -329,6 +465,52 @@
def parse_opt():
+ """Parse command-line options for configuring YOLOv5 model inference.
+
+ Command-line options (parsed into the returned namespace):
+ data (str, optional): Path to the dataset YAML file. Default is 'data/coco128.yaml'.
+ weights (list[str], optional): List of paths to model weight files. Default is 'yolov5s.pt'.
+ batch_size (int, optional): Batch size for inference. Default is 32.
+ imgsz (int, optional): Inference image size in pixels. Default is 640.
+ conf_thres (float, optional): Confidence threshold for predictions. Default is 0.001.
+ iou_thres (float, optional): IoU threshold for Non-Max Suppression (NMS). Default is 0.6.
+ max_det (int, optional): Maximum number of detections per image. Default is 300.
+ task (str, optional): Task type - options are 'train', 'val', 'test', 'speed', or 'study'. Default is 'val'.
+ device (str, optional): Device to run the model on. e.g., '0' or '0,1,2,3' or 'cpu'. Default is empty to let the
+ system choose automatically.
+ workers (int, optional): Maximum number of dataloader workers per rank in DDP mode. Default is 8.
+ single_cls (bool, optional): If set, treats the dataset as a single-class dataset. Default is False.
+ augment (bool, optional): If set, performs augmented inference. Default is False.
+ verbose (bool, optional): If set, reports mAP by class. Default is False.
+ save_txt (bool, optional): If set, saves results to *.txt files. Default is False.
+ save_hybrid (bool, optional): If set, saves label+prediction hybrid results to *.txt files. Default is False.
+ save_conf (bool, optional): If set, saves confidences in --save-txt labels. Default is False.
+ save_json (bool, optional): If set, saves results to a COCO-JSON file. Default is False.
+ project (str, optional): Project directory to save results to. Default is 'runs/val'.
+ name (str, optional): Name of the directory to save results to. Default is 'exp'.
+ exist_ok (bool, optional): If set, existing directory will not be incremented. Default is False.
+ half (bool, optional): If set, uses FP16 half-precision inference. Default is False.
+ dnn (bool, optional): If set, uses OpenCV DNN for ONNX inference. Default is False.
+
+ Returns:
+ argparse.Namespace: Parsed command-line options.
+
+ Examples:
+ To validate a trained YOLOv5 model on a COCO dataset:
+ ```bash
+ $ python val.py --weights yolov5s.pt --data coco128.yaml --img 640
+ ```
+ Different model formats could be used instead of `yolov5s.pt`:
+ ```bash
+ $ python val.py --weights yolov5s.pt yolov5s.torchscript yolov5s.onnx yolov5s_openvino_model yolov5s.engine
+ ```
+ Additional options include saving results in different formats, selecting devices, and more.
+
+ Notes:
+ - '--save-json' is automatically enabled when '--data' ends with 'coco.yaml'.
+ - The '--save-txt' option is set to True if '--save-hybrid' is enabled.
+ - Args are printed using `print_args` to facilitate debugging.
+ """
parser = argparse.ArgumentParser()
parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="dataset.yaml path")
parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s.pt", help="model path(s)")
@@ -361,6 +543,23 @@
def main(opt):
+ """Executes YOLOv5 tasks like training, validation, testing, speed, and study benchmarks based on provided options.
+
+ Args:
+ opt (argparse.Namespace): Parsed command-line options. This includes values for parameters like 'data',
+ 'weights', 'batch_size', 'imgsz', 'conf_thres', 'iou_thres', 'max_det', 'task', 'device', 'workers',
+ 'single_cls', 'augment', 'verbose', 'save_txt', 'save_hybrid', 'save_conf', 'save_json', 'project', 'name',
+ 'exist_ok', 'half', and 'dnn', essential for configuring the YOLOv5 tasks.
+
+ Returns:
+ None
+
+ Examples:
+ To validate a trained YOLOv5 model on the COCO dataset with a specific weights file, use:
+ ```bash
+ $ python val.py --weights yolov5s.pt --data coco128.yaml --img 640
+ ```
+ """
check_requirements(ROOT / "requirements.txt", exclude=("tensorboard", "thop"))
if opt.task in ("train", "val", "test"): # run normally
@@ -397,4 +596,4 @@
if __name__ == "__main__":
opt = parse_opt()
- main(opt)
+ main(opt)
| https://raw.githubusercontent.com/ultralytics/yolov5/HEAD/val.py |
Create docstrings for reusable components | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
import cv2
import numpy as np
import torch
import torch.nn.functional as F
def crop_mask(masks, boxes):
_n, h, w = masks.shape
x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) # x1 shape(1,1,n)
r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :] # x coords, shape(1,1,w)
c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None] # y coords, shape(1,h,1)
return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2))
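# --- Editor's illustrative sketch (not part of the original file) ---
# crop_mask zeroes every pixel outside its detection box: a 4x4 mask of ones
# cropped by a hypothetical box keeps only the 2x2 interior.
def _example_crop_mask():
    masks = torch.ones(1, 4, 4)
    boxes = torch.tensor([[1.0, 1.0, 3.0, 3.0]])  # x1, y1, x2, y2 in mask pixels
    cropped = crop_mask(masks, boxes)
    assert cropped[0].sum() == 4  # only the 2x2 block inside the box survives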
def process_mask_upsample(protos, masks_in, bboxes, shape):
c, mh, mw = protos.shape # CHW
masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)
masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0] # CHW
masks = crop_mask(masks, bboxes) # CHW
return masks.gt_(0.5)
def process_mask(protos, masks_in, bboxes, shape, upsample=False):
c, mh, mw = protos.shape # CHW
ih, iw = shape
masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW
downsampled_bboxes = bboxes.clone()
downsampled_bboxes[:, 0] *= mw / iw
downsampled_bboxes[:, 2] *= mw / iw
downsampled_bboxes[:, 3] *= mh / ih
downsampled_bboxes[:, 1] *= mh / ih
masks = crop_mask(masks, downsampled_bboxes) # CHW
if upsample:
masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0] # CHW
return masks.gt_(0.5)
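# --- Editor's illustrative sketch (not part of the original file) ---
# The shapes are the contract: 32 prototype masks at 160x160 are combined per
# detection, cropped in proto space, then upsampled to a hypothetical 640x640
# network input size.
def _example_process_mask():
    protos = torch.randn(32, 160, 160)  # [mask_dim, mask_h, mask_w]
    masks_in = torch.randn(5, 32)  # 5 detections surviving NMS
    bboxes = torch.tensor([[0.0, 0.0, 320.0, 320.0]]).repeat(5, 1)  # xyxy in input-image pixels
    masks = process_mask(protos, masks_in, bboxes, shape=(640, 640), upsample=True)
    assert masks.shape == (5, 640, 640)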
def process_mask_native(protos, masks_in, bboxes, shape):
c, mh, mw = protos.shape # CHW
masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)
gain = min(mh / shape[0], mw / shape[1]) # gain = old / new
pad = (mw - shape[1] * gain) / 2, (mh - shape[0] * gain) / 2 # wh padding
top, left = int(pad[1]), int(pad[0]) # y, x
bottom, right = int(mh - pad[1]), int(mw - pad[0])
masks = masks[:, top:bottom, left:right]
masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0] # CHW
masks = crop_mask(masks, bboxes) # CHW
return masks.gt_(0.5)
def scale_image(im1_shape, masks, im0_shape, ratio_pad=None):
# Rescale coordinates (xyxy) from im1_shape to im0_shape
if ratio_pad is None: # calculate from im0_shape
gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1]) # gain = old / new
pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2 # wh padding
else:
pad = ratio_pad[1]
top, left = int(pad[1]), int(pad[0]) # y, x
bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0])
if len(masks.shape) < 2:
raise ValueError(f"masks should have 2 or 3 dimensions, but got {len(masks.shape)}")
masks = masks[top:bottom, left:right]
# masks = masks.permute(2, 0, 1).contiguous()
# masks = F.interpolate(masks[None], im0_shape[:2], mode='bilinear', align_corners=False)[0]
# masks = masks.permute(1, 2, 0).contiguous()
masks = cv2.resize(masks, (im0_shape[1], im0_shape[0]))
if len(masks.shape) == 2:
masks = masks[:, :, None]
return masks
def mask_iou(mask1, mask2, eps=1e-7):
intersection = torch.matmul(mask1, mask2.t()).clamp(0)
union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection # (area1 + area2) - intersection
return intersection / (union + eps)
def masks_iou(mask1, mask2, eps=1e-7):
intersection = (mask1 * mask2).sum(1).clamp(0) # (N, )
union = (mask1.sum(1) + mask2.sum(1))[None] - intersection # (area1 + area2) - intersection
return intersection / (union + eps)
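# --- Editor's illustrative sketch (not part of the original file) ---
# Masks are flattened to [num_masks, H*W] before calling mask_iou; two tiny
# masks sharing one of two "on" pixels give IoU 1/2.
def _example_mask_iou():
    mask1 = torch.tensor([[1.0, 1.0, 0.0, 0.0]])  # prediction, 2 pixels on
    mask2 = torch.tensor([[1.0, 0.0, 0.0, 0.0]])  # ground truth, 1 pixel on
    assert abs(mask_iou(mask1, mask2).item() - 0.5) < 1e-6  # 1 / (2 + 1 - 1)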
def masks2segments(masks, strategy="largest"):
segments = []
for x in masks.int().cpu().numpy().astype("uint8"):
c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
if c:
if strategy == "concat": # concatenate all segments
c = np.concatenate([x.reshape(-1, 2) for x in c])
elif strategy == "largest": # select largest segment
c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2)
else:
c = np.zeros((0, 2)) # no segments found
segments.append(c.astype("float32"))
return segments | --- +++ @@ -7,6 +7,12 @@
def crop_mask(masks, boxes):
+ """Crop predicted masks by zeroing out everything not in the predicted bbox.
+
+ Args:
+ - masks should be a size [n, h, w] tensor of masks
+ - boxes should be a size [n, 4] tensor of bbox coords in relative point form.
+ """
_n, h, w = masks.shape
x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) # x1 shape(1,1,n)
r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :] # rows shape(1,w,1)
@@ -16,6 +22,17 @@
def process_mask_upsample(protos, masks_in, bboxes, shape):
+ """Crop after upsample.
+
+ Args:
+ protos: [mask_dim, mask_h, mask_w]
+ masks_in: [n, mask_dim], n is number of masks after nms
+ bboxes: [n, 4], n is number of masks after nms
+ shape: input_image_size, (h, w).
+
+ Returns:
+ masks (torch.Tensor): binary upsampled masks, shape (n, h, w)
+ """
c, mh, mw = protos.shape # CHW
masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)
masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0] # CHW
@@ -24,6 +41,17 @@
def process_mask(protos, masks_in, bboxes, shape, upsample=False):
+ """Crop before upsample.
+
+ Args:
+ proto_out: [mask_dim, mask_h, mask_w]
+ out_masks: [n, mask_dim], n is number of masks after nms
+ bboxes: [n, 4], n is number of masks after nms
+ shape: input_image_size, (h, w).
+
+ Returns:
+ masks (torch.Tensor): binary masks, shape (n, mask_h, mask_w), or (n, h, w) when upsample=True
+ """
c, mh, mw = protos.shape # CHW
ih, iw = shape
masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW
@@ -41,6 +69,17 @@
def process_mask_native(protos, masks_in, bboxes, shape):
+ """Crop after upsample.
+
+ Args:
+ protos: [mask_dim, mask_h, mask_w]
+ masks_in: [n, mask_dim], n is number of masks after nms
+ bboxes: [n, 4], n is number of masks after nms
+ shape: input_image_size, (h, w).
+
+ Returns:
+ masks (torch.Tensor): binary masks, shape (n, h, w)
+ """
c, mh, mw = protos.shape # CHW
masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)
gain = min(mh / shape[0], mw / shape[1]) # gain = old / new
@@ -55,6 +94,7 @@
def scale_image(im1_shape, masks, im0_shape, ratio_pad=None):
+ """Rescale masks from model-input shape to original image shape.
+
+ im1_shape: model input shape, [h, w]; im0_shape: origin pic shape, [h, w, 3]; masks: [h, w, num].
+ """
# Rescale coordinates (xyxy) from im1_shape to im0_shape
if ratio_pad is None: # calculate from im0_shape
gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1]) # gain = old / new
@@ -78,18 +118,43 @@
def mask_iou(mask1, mask2, eps=1e-7):
+ """
+ Args:
+ mask1: [N, n] N is the number of predicted objects
+ mask2: [M, n] M is the number of gt objects.
+
+ Returns:
+ masks iou, [N, M]
+
+ Notes:
+ - n = image_w x image_h (flattened mask pixels).
+ """
intersection = torch.matmul(mask1, mask2.t()).clamp(0)
union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection # (area1 + area2) - intersection
return intersection / (union + eps)
def masks_iou(mask1, mask2, eps=1e-7):
+ """
+ Args:
+ mask1: [N, n] N is the number of predicted objects
+ mask2: [N, n] N is the number of gt objects.
+
+ Returns:
+ masks iou, (N, )
+
+ Notes:
+ - n = image_w x image_h (flattened mask pixels).
+ """
intersection = (mask1 * mask2).sum(1).clamp(0) # (N, )
union = (mask1.sum(1) + mask2.sum(1))[None] - intersection # (area1 + area2) - intersection
return intersection / (union + eps)
def masks2segments(masks, strategy="largest"):
+ """Converts binary (n,160,160) masks to polygon segments with options for concatenation or selecting the largest
+ segment.
+ """
segments = []
for x in masks.int().cpu().numpy().astype("uint8"):
c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
@@ -101,4 +166,4 @@ else:
c = np.zeros((0, 2)) # no segments found
segments.append(c.astype("float32"))
- return segments
+ return segments
| https://raw.githubusercontent.com/ultralytics/yolov5/HEAD/utils/segment/general.py |
Create documentation strings for testing functions | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
import torch
import torch.nn as nn
import torch.nn.functional as F
class SiLU(nn.Module):
@staticmethod
def forward(x):
return x * torch.sigmoid(x)
class Hardswish(nn.Module):
@staticmethod
def forward(x):
return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0 # for TorchScript, CoreML and ONNX
class Mish(nn.Module):
@staticmethod
def forward(x):
return x * F.softplus(x).tanh()
class MemoryEfficientMish(nn.Module):
class F(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x)))
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_tensors[0]
sx = torch.sigmoid(x)
fx = F.softplus(x).tanh()
return grad_output * (fx + x * sx * (1 - fx * fx))
def forward(self, x):
return self.F.apply(x)
class FReLU(nn.Module):
def __init__(self, c1, k=3): # ch_in, kernel
super().__init__()
self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False)
self.bn = nn.BatchNorm2d(c1)
def forward(self, x):
return torch.max(x, self.bn(self.conv(x)))
class AconC(nn.Module):
def __init__(self, c1):
super().__init__()
self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
self.beta = nn.Parameter(torch.ones(1, c1, 1, 1))
def forward(self, x):
dpx = (self.p1 - self.p2) * x
return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x
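# --- Editor's illustrative sketch (not part of the original file) ---
# With p1=1, p2=0 and beta=1, AconC collapses to x * sigmoid(x) (SiLU/Swish),
# showing how the learnable parameters interpolate around Swish.
def _example_aconc():
    m = AconC(c1=8)
    torch.nn.init.ones_(m.p1)
    torch.nn.init.zeros_(m.p2)  # beta is already initialized to ones
    x = torch.randn(1, 8, 4, 4)
    assert torch.allclose(m(x), x * torch.sigmoid(x))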
class MetaAconC(nn.Module):
def __init__(self, c1, k=1, s=1, r=16):
super().__init__()
c2 = max(r, c1 // r)
self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True)
self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True)
# self.bn1 = nn.BatchNorm2d(c2)
# self.bn2 = nn.BatchNorm2d(c1)
def forward(self, x):
y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True)
# batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891
# beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) # bug/unstable
beta = torch.sigmoid(self.fc2(self.fc1(y))) # bug patch BN layers removed
dpx = (self.p1 - self.p2) * x
return dpx * torch.sigmoid(beta * dpx) + self.p2 * x | --- +++ @@ -1,4 +1,5 @@ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""Activation functions."""
import torch
import torch.nn as nn
@@ -6,73 +7,109 @@
class SiLU(nn.Module):
+ """Applies the Sigmoid-weighted Linear Unit (SiLU) activation function, also known as Swish."""
@staticmethod
def forward(x):
+ """Applies the Sigmoid-weighted Linear Unit (SiLU) activation function.
+
+ https://arxiv.org/pdf/1606.08415.pdf.
+ """
return x * torch.sigmoid(x)
class Hardswish(nn.Module):
+ """Applies the Hardswish activation function, which is efficient for mobile and embedded devices."""
@staticmethod
def forward(x):
+ """Applies the Hardswish activation function, compatible with TorchScript, CoreML, and ONNX.
+
+ Equivalent to x * F.hardsigmoid(x)
+ """
return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0 # for TorchScript, CoreML and ONNX
class Mish(nn.Module):
+ """Mish activation https://github.com/digantamisra98/Mish."""
@staticmethod
def forward(x):
+ """Applies the Mish activation function, a smooth alternative to ReLU."""
return x * F.softplus(x).tanh()
class MemoryEfficientMish(nn.Module):
+ """Efficiently applies the Mish activation function using custom autograd for reduced memory usage."""
class F(torch.autograd.Function):
+ """Implements a custom autograd function for memory-efficient Mish activation."""
@staticmethod
def forward(ctx, x):
+ """Applies the Mish activation function, a smooth ReLU alternative, to the input tensor `x`."""
ctx.save_for_backward(x)
return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x)))
@staticmethod
def backward(ctx, grad_output):
+ """Computes the gradient of the Mish activation function with respect to input `x`."""
x = ctx.saved_tensors[0]
sx = torch.sigmoid(x)
fx = F.softplus(x).tanh()
return grad_output * (fx + x * sx * (1 - fx * fx))
def forward(self, x):
+ """Applies the Mish activation function to the input tensor `x`."""
return self.F.apply(x)
class FReLU(nn.Module):
+ """FReLU activation https://arxiv.org/abs/2007.11824."""
def __init__(self, c1, k=3): # ch_in, kernel
+ """Initializes FReLU activation with channel `c1` and kernel size `k`."""
super().__init__()
self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False)
self.bn = nn.BatchNorm2d(c1)
def forward(self, x):
+ """Applies FReLU activation with max operation between input and BN-convolved input.
+
+ https://arxiv.org/abs/2007.11824
+ """
return torch.max(x, self.bn(self.conv(x)))
class AconC(nn.Module):
+ """ACON activation (activate or not) function.
+
+ AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter See "Activate or Not: Learning
+ Customized Activation" https://arxiv.org/pdf/2009.04759.pdf.
+ """
def __init__(self, c1):
+ """Initializes AconC with learnable parameters p1, p2, and beta for channel-wise activation control."""
super().__init__()
self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
self.beta = nn.Parameter(torch.ones(1, c1, 1, 1))
def forward(self, x):
+ """Applies AconC activation function with learnable parameters for channel-wise control on input tensor x."""
dpx = (self.p1 - self.p2) * x
return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x
class MetaAconC(nn.Module):
+ """ACON activation (activate or not) function.
+
+ AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter See "Activate or Not: Learning
+ Customized Activation" https://arxiv.org/pdf/2009.04759.pdf.
+ """
def __init__(self, c1, k=1, s=1, r=16):
+ """Initializes MetaAconC with params: channel_in (c1), kernel size (k=1), stride (s=1), reduction (r=16)."""
super().__init__()
c2 = max(r, c1 // r)
self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
@@ -83,9 +120,10 @@ # self.bn2 = nn.BatchNorm2d(c1)
def forward(self, x):
+ """Applies a forward pass transforming input `x` using learnable parameters and sigmoid activation."""
y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True)
# batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891
# beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) # bug/unstable
beta = torch.sigmoid(self.fc2(self.fc1(y))) # bug patch BN layers removed
dpx = (self.p1 - self.p2) * x
- return dpx * torch.sigmoid(beta * dpx) + self.p2 * x
+ return dpx * torch.sigmoid(beta * dpx) + self.p2 * x
| https://raw.githubusercontent.com/ultralytics/yolov5/HEAD/utils/activations.py |
Create documentation strings for testing functions | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from urllib.parse import urlparse
import torch
class TritonRemoteModel:
def __init__(self, url: str):
parsed_url = urlparse(url)
if parsed_url.scheme == "grpc":
from tritonclient.grpc import InferenceServerClient, InferInput
self.client = InferenceServerClient(parsed_url.netloc) # Triton GRPC client
model_repository = self.client.get_model_repository_index()
self.model_name = model_repository.models[0].name
self.metadata = self.client.get_model_metadata(self.model_name, as_json=True)
def create_input_placeholders() -> list[InferInput]:
return [
InferInput(i["name"], [int(s) for s in i["shape"]], i["datatype"]) for i in self.metadata["inputs"]
]
else:
from tritonclient.http import InferenceServerClient, InferInput
self.client = InferenceServerClient(parsed_url.netloc) # Triton HTTP client
model_repository = self.client.get_model_repository_index()
self.model_name = model_repository[0]["name"]
self.metadata = self.client.get_model_metadata(self.model_name)
def create_input_placeholders() -> list[InferInput]:
return [
InferInput(i["name"], [int(s) for s in i["shape"]], i["datatype"]) for i in self.metadata["inputs"]
]
self._create_input_placeholders_fn = create_input_placeholders
@property
def runtime(self):
return self.metadata.get("backend", self.metadata.get("platform"))
def __call__(self, *args, **kwargs) -> torch.Tensor | tuple[torch.Tensor, ...]:
inputs = self._create_inputs(*args, **kwargs)
response = self.client.infer(model_name=self.model_name, inputs=inputs)
result = []
for output in self.metadata["outputs"]:
tensor = torch.as_tensor(response.as_numpy(output["name"]))
result.append(tensor)
return result[0] if len(result) == 1 else result
def _create_inputs(self, *args, **kwargs):
args_len, kwargs_len = len(args), len(kwargs)
if not args_len and not kwargs_len:
raise RuntimeError("No inputs provided.")
if args_len and kwargs_len:
raise RuntimeError("Cannot specify args and kwargs at the same time")
placeholders = self._create_input_placeholders_fn()
if args_len:
if args_len != len(placeholders):
raise RuntimeError(f"Expected {len(placeholders)} inputs, got {args_len}.")
for input, value in zip(placeholders, args):
input.set_data_from_numpy(value.cpu().numpy())
else:
for input in placeholders:
value = kwargs[input.name]
input.set_data_from_numpy(value.cpu().numpy())
return placeholders | --- +++ @@ -1,4 +1,5 @@ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""Utils to interact with the Triton Inference Server."""
from __future__ import annotations
@@ -8,8 +9,14 @@
class TritonRemoteModel:
+ """A wrapper over a model served by the Triton Inference Server.
+
+ It can be configured to communicate over GRPC or HTTP. It accepts Torch Tensors as input and returns them as
+ outputs.
+ """
def __init__(self, url: str):
+ """Keyword Arguments: url: Fully qualified address of the Triton server, e.g. grpc://localhost:8000."""
parsed_url = urlparse(url)
if parsed_url.scheme == "grpc":
from tritonclient.grpc import InferenceServerClient, InferInput
@@ -41,9 +48,15 @@
@property
def runtime(self):
+ """Returns the model runtime."""
return self.metadata.get("backend", self.metadata.get("platform"))
def __call__(self, *args, **kwargs) -> torch.Tensor | tuple[torch.Tensor, ...]:
+ """Invokes the model.
+
+ Parameters can be provided via args or kwargs. args, if provided, are assumed to match the order of inputs of
+ the model. kwargs are matched with the model input names.
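+
+ Example (editor's illustrative sketch; the URL is hypothetical and assumes a reachable Triton
+ server exposing a single model):
+ ```python
+ model = TritonRemoteModel("grpc://localhost:8001")
+ output = model(torch.zeros(1, 3, 640, 640))  # positional args follow the model input order
+ ```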
+ """
inputs = self._create_inputs(*args, **kwargs)
response = self.client.infer(model_name=self.model_name, inputs=inputs)
result = []
@@ -53,6 +66,7 @@ return result[0] if len(result) == 1 else result
def _create_inputs(self, *args, **kwargs):
+ """Creates input tensors from args or kwargs, not both; raises error if none or both are provided."""
args_len, kwargs_len = len(args), len(kwargs)
if not args_len and not kwargs_len:
raise RuntimeError("No inputs provided.")
@@ -69,4 +83,4 @@ for input in placeholders:
value = kwargs[input.name]
input.set_data_from_numpy(value.cpu().numpy())
- return placeholders
+ return placeholders
| https://raw.githubusercontent.com/ultralytics/yolov5/HEAD/utils/triton.py |
Write documentation strings for class attributes | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
import math
import warnings
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import torch
# NumPy 2.0 compatibility: trapezoid was renamed from trapz
trapezoid = np.trapezoid if hasattr(np, "trapezoid") else np.trapz
from utils import TryExcept, threaded
def fitness(x):
w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
return (x[:, :4] * w).sum(1)
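# --- Editor's illustrative sketch (not part of the original file) ---
# Only the two mAP terms carry weight: fitness = 0.1 * mAP@0.5 + 0.9 * mAP@0.5:0.95.
def _example_fitness():
    x = np.array([[0.7, 0.6, 0.65, 0.45]])  # [P, R, mAP@0.5, mAP@0.5:0.95]
    assert abs(fitness(x)[0] - 0.47) < 1e-9  # 0.1 * 0.65 + 0.9 * 0.45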
def smooth(y, f=0.05):
nf = round(len(y) * f * 2) // 2 + 1 # number of filter elements (must be odd)
p = np.ones(nf // 2) # ones padding
yp = np.concatenate((p * y[0], y, p * y[-1]), 0) # y padded
return np.convolve(yp, np.ones(nf) / nf, mode="valid") # y-smoothed
def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir=".", names=(), eps=1e-16, prefix=""):
# Sort by objectness
i = np.argsort(-conf)
tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
# Find unique classes
unique_classes, nt = np.unique(target_cls, return_counts=True)
nc = unique_classes.shape[0] # number of classes, number of detections
# Create Precision-Recall curve and compute AP for each class
px, py = np.linspace(0, 1, 1000), [] # for plotting
ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000))
for ci, c in enumerate(unique_classes):
i = pred_cls == c
n_l = nt[ci] # number of labels
n_p = i.sum() # number of predictions
if n_p == 0 or n_l == 0:
continue
# Accumulate FPs and TPs
fpc = (1 - tp[i]).cumsum(0)
tpc = tp[i].cumsum(0)
# Recall
recall = tpc / (n_l + eps) # recall curve
r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases
# Precision
precision = tpc / (tpc + fpc) # precision curve
p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score
# AP from recall-precision curve
for j in range(tp.shape[1]):
ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
if plot and j == 0:
py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5
# Compute F1 (harmonic mean of precision and recall)
f1 = 2 * p * r / (p + r + eps)
names = [v for k, v in names.items() if k in unique_classes] # list: only classes that have data
names = dict(enumerate(names)) # to dict
if plot:
plot_pr_curve(px, py, ap, Path(save_dir) / f"{prefix}PR_curve.png", names)
plot_mc_curve(px, f1, Path(save_dir) / f"{prefix}F1_curve.png", names, ylabel="F1")
plot_mc_curve(px, p, Path(save_dir) / f"{prefix}P_curve.png", names, ylabel="Precision")
plot_mc_curve(px, r, Path(save_dir) / f"{prefix}R_curve.png", names, ylabel="Recall")
i = smooth(f1.mean(0), 0.1).argmax() # max F1 index
p, r, f1 = p[:, i], r[:, i], f1[:, i]
tp = (r * nt).round() # true positives
fp = (tp / (p + eps) - tp).round() # false positives
return tp, fp, p, r, f1, ap, unique_classes.astype(int)
def compute_ap(recall, precision):
# Append sentinel values to beginning and end
mrec = np.concatenate(([0.0], recall, [1.0]))
mpre = np.concatenate(([1.0], precision, [0.0]))
# Compute the precision envelope
mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))
# Integrate area under curve
method = "interp" # methods: 'continuous', 'interp'
if method == "interp":
x = np.linspace(0, 1, 101) # 101-point interp (COCO)
ap = trapezoid(np.interp(x, mrec, mpre), x) # integrate
else: # 'continuous'
i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve
return ap, mpre, mrec
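# --- Editor's illustrative sketch (not part of the original file) ---
# A detector with perfect precision up to 50% recall: AP is 0.5 from the flat
# segment plus 0.25 from the linear taper to the appended (1, 0) sentinel.
def _example_compute_ap():
    recall = np.array([0.25, 0.5])
    precision = np.array([1.0, 1.0])
    ap, mpre, mrec = compute_ap(recall, precision)
    assert abs(ap - 0.75) < 1e-3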
class ConfusionMatrix:
def __init__(self, nc, conf=0.25, iou_thres=0.45):
self.matrix = np.zeros((nc + 1, nc + 1))
self.nc = nc # number of classes
self.conf = conf
self.iou_thres = iou_thres
def process_batch(self, detections, labels):
if detections is None:
gt_classes = labels.int()
for gc in gt_classes:
self.matrix[self.nc, gc] += 1 # background FN
return
detections = detections[detections[:, 4] > self.conf]
gt_classes = labels[:, 0].int()
detection_classes = detections[:, 5].int()
iou = box_iou(labels[:, 1:], detections[:, :4])
x = torch.where(iou > self.iou_thres)
if x[0].shape[0]:
matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()
if x[0].shape[0] > 1:
matches = matches[matches[:, 2].argsort()[::-1]]
matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
matches = matches[matches[:, 2].argsort()[::-1]]
matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
else:
matches = np.zeros((0, 3))
n = matches.shape[0] > 0
m0, m1, _ = matches.transpose().astype(int)
for i, gc in enumerate(gt_classes):
j = m0 == i
if n and sum(j) == 1:
self.matrix[detection_classes[m1[j]], gc] += 1 # correct
else:
self.matrix[self.nc, gc] += 1 # true background
if n:
for i, dc in enumerate(detection_classes):
if not any(m1 == i):
self.matrix[dc, self.nc] += 1 # predicted background
def tp_fp(self):
tp = self.matrix.diagonal() # true positives
fp = self.matrix.sum(1) - tp # false positives
# fn = self.matrix.sum(0) - tp # false negatives (missed detections)
return tp[:-1], fp[:-1] # remove background class
@TryExcept("WARNING ⚠️ ConfusionMatrix plot failure")
def plot(self, normalize=True, save_dir="", names=()):
import seaborn as sn
array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1e-9) if normalize else 1) # normalize columns
array[array < 0.005] = np.nan # don't annotate (would appear as 0.00)
fig, ax = plt.subplots(1, 1, figsize=(12, 9), tight_layout=True)
nc, nn = self.nc, len(names) # number of classes, names
sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size
labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels
ticklabels = ([*names, "background"]) if labels else "auto"
with warnings.catch_warnings():
warnings.simplefilter("ignore") # suppress empty matrix RuntimeWarning: All-NaN slice encountered
sn.heatmap(
array,
ax=ax,
annot=nc < 30,
annot_kws={"size": 8},
cmap="Blues",
fmt=".2f",
square=True,
vmin=0.0,
xticklabels=ticklabels,
yticklabels=ticklabels,
).set_facecolor((1, 1, 1))
ax.set_xlabel("True")
ax.set_ylabel("Predicted")
ax.set_title("Confusion Matrix")
fig.savefig(Path(save_dir) / "confusion_matrix.png", dpi=250)
plt.close(fig)
def print(self):
for i in range(self.nc + 1):
print(" ".join(map(str, self.matrix[i])))
def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7):
# Get the coordinates of bounding boxes
if xywh: # transform from xywh to xyxy
(x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, -1), box2.chunk(4, -1)
w1_, h1_, w2_, h2_ = w1 / 2, h1 / 2, w2 / 2, h2 / 2
b1_x1, b1_x2, b1_y1, b1_y2 = x1 - w1_, x1 + w1_, y1 - h1_, y1 + h1_
b2_x1, b2_x2, b2_y1, b2_y2 = x2 - w2_, x2 + w2_, y2 - h2_, y2 + h2_
else: # x1, y1, x2, y2 = box1
b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, -1)
b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, -1)
w1, h1 = b1_x2 - b1_x1, (b1_y2 - b1_y1).clamp(eps)
w2, h2 = b2_x2 - b2_x1, (b2_y2 - b2_y1).clamp(eps)
# Intersection area
inter = (b1_x2.minimum(b2_x2) - b1_x1.maximum(b2_x1)).clamp(0) * (
b1_y2.minimum(b2_y2) - b1_y1.maximum(b2_y1)
).clamp(0)
# Union Area
union = w1 * h1 + w2 * h2 - inter + eps
# IoU
iou = inter / union
if CIoU or DIoU or GIoU:
cw = b1_x2.maximum(b2_x2) - b1_x1.minimum(b2_x1) # convex (smallest enclosing box) width
ch = b1_y2.maximum(b2_y2) - b1_y1.minimum(b2_y1) # convex height
if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
c2 = cw**2 + ch**2 + eps # convex diagonal squared
rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center dist ** 2
if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
v = (4 / math.pi**2) * (torch.atan(w2 / h2) - torch.atan(w1 / h1)).pow(2)
with torch.no_grad():
alpha = v / (v - iou + (1 + eps))
return iou - (rho2 / c2 + v * alpha) # CIoU
return iou - rho2 / c2 # DIoU
c_area = cw * ch + eps # convex area
return iou - (c_area - union) / c_area # GIoU https://arxiv.org/pdf/1902.09630.pdf
return iou # IoU
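# --- Editor's illustrative sketch (not part of the original file) ---
# Two 2x2 boxes overlapping by half: plain IoU is 2/6, while CIoU subtracts a
# center-distance penalty (rho2/c2 = 1/13 here; the aspect-ratio term is zero
# since both boxes have the same shape).
def _example_bbox_iou():
    box1 = torch.tensor([[0.0, 0.0, 2.0, 2.0]])
    box2 = torch.tensor([[1.0, 0.0, 3.0, 2.0]])
    iou = bbox_iou(box1, box2, xywh=False)
    ciou = bbox_iou(box1, box2, xywh=False, CIoU=True)
    assert abs(iou.item() - 1 / 3) < 1e-4 and ciou.item() < iou.item()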
def box_iou(box1, box2, eps=1e-7):
# https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
# inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
(a1, a2), (b1, b2) = box1.unsqueeze(1).chunk(2, 2), box2.unsqueeze(0).chunk(2, 2)
inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2)
# IoU = inter / (area1 + area2 - inter)
return inter / ((a2 - a1).prod(2) + (b2 - b1).prod(2) - inter + eps)
def bbox_ioa(box1, box2, eps=1e-7):
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1
b2_x1, b2_y1, b2_x2, b2_y2 = box2.T
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * (
np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)
).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps
# Intersection over box2 area
return inter_area / box2_area
def wh_iou(wh1, wh2, eps=1e-7):
wh1 = wh1[:, None] # [N,1,2]
wh2 = wh2[None] # [1,M,2]
inter = torch.min(wh1, wh2).prod(2) # [N,M]
return inter / (wh1.prod(2) + wh2.prod(2) - inter + eps) # iou = inter / (area1 + area2 - inter)
# Plots ----------------------------------------------------------------------------------------------------------------
@threaded
def plot_pr_curve(px, py, ap, save_dir=Path("pr_curve.png"), names=()):
fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
py = np.stack(py, axis=1)
if 0 < len(names) < 21: # display per-class legend if < 21 classes
for i, y in enumerate(py.T):
ax.plot(px, y, linewidth=1, label=f"{names[i]} {ap[i, 0]:.3f}") # plot(recall, precision)
else:
ax.plot(px, py, linewidth=1, color="grey") # plot(recall, precision)
ax.plot(px, py.mean(1), linewidth=3, color="blue", label=f"all classes {ap[:, 0].mean():.3f} mAP@0.5")
ax.set_xlabel("Recall")
ax.set_ylabel("Precision")
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
ax.set_title("Precision-Recall Curve")
fig.savefig(save_dir, dpi=250)
plt.close(fig)
@threaded
def plot_mc_curve(px, py, save_dir=Path("mc_curve.png"), names=(), xlabel="Confidence", ylabel="Metric"):
fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
if 0 < len(names) < 21: # display per-class legend if < 21 classes
for i, y in enumerate(py):
ax.plot(px, y, linewidth=1, label=f"{names[i]}") # plot(confidence, metric)
else:
ax.plot(px, py.T, linewidth=1, color="grey") # plot(confidence, metric)
y = smooth(py.mean(0), 0.05)
ax.plot(px, y, linewidth=3, color="blue", label=f"all classes {y.max():.2f} at {px[y.argmax()]:.3f}")
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
ax.set_title(f"{ylabel}-Confidence Curve")
fig.savefig(save_dir, dpi=250)
plt.close(fig) | --- +++ @@ -1,4 +1,5 @@ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""Model validation metrics."""
import math
import warnings
@@ -15,11 +16,13 @@
def fitness(x):
+ """Calculates fitness of a model using weighted sum of metrics P, R, mAP@0.5, mAP@0.5:0.95."""
w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
return (x[:, :4] * w).sum(1)
def smooth(y, f=0.05):
+ """Applies box filter smoothing to array `y` with fraction `f`, yielding a smoothed array."""
nf = round(len(y) * f * 2) // 2 + 1 # number of filter elements (must be odd)
p = np.ones(nf // 2) # ones padding
yp = np.concatenate((p * y[0], y, p * y[-1]), 0) # y padded
@@ -27,6 +30,21 @@
def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir=".", names=(), eps=1e-16, prefix=""):
+ """Compute the average precision, given the recall and precision curves.
+
+ Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
+
+ Args:
+ tp: True positives (nparray, nx1 or nx10).
+ conf: Objectness value from 0-1 (nparray).
+ pred_cls: Predicted object classes (nparray).
+ target_cls: True object classes (nparray).
+ plot: Plot precision-recall curve at mAP@0.5
+ save_dir: Plot save directory
+
+ Returns:
+ The average precision as computed in py-faster-rcnn.
+ """
# Sort by objectness
i = np.argsort(-conf)
tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
@@ -81,6 +99,17 @@
def compute_ap(recall, precision):
+ """Compute the average precision, given the recall and precision curves.
+
+ Args:
+ recall: The recall curve (list)
+ precision: The precision curve (list)
+
+ Returns:
+ Average precision
+ precision curve
+ recall curve
+ """
# Append sentinel values to beginning and end
mrec = np.concatenate(([0.0], recall, [1.0]))
mpre = np.concatenate(([1.0], precision, [0.0]))
@@ -101,14 +130,27 @@
class ConfusionMatrix:
+ """Generates and visualizes a confusion matrix for evaluating object detection classification performance."""
def __init__(self, nc, conf=0.25, iou_thres=0.45):
+ """Initializes ConfusionMatrix with given number of classes, confidence, and IoU threshold."""
self.matrix = np.zeros((nc + 1, nc + 1))
self.nc = nc # number of classes
self.conf = conf
self.iou_thres = iou_thres
def process_batch(self, detections, labels):
+ """Update the confusion matrix by matching detections to ground-truth labels.
+
+ Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
+
+ Args:
+ detections (Array[N, 6]): x1, y1, x2, y2, conf, class
+ labels (Array[M, 5]): class, x1, y1, x2, y2
+
+ Returns:
+ None, updates confusion matrix accordingly
+ """
if detections is None:
gt_classes = labels.int()
for gc in gt_classes:
@@ -146,6 +188,9 @@ self.matrix[dc, self.nc] += 1 # predicted background
def tp_fp(self):
+ """Calculates true positives (tp) and false positives (fp) excluding the background class from the confusion
+ matrix.
+ """
tp = self.matrix.diagonal() # true positives
fp = self.matrix.sum(1) - tp # false positives
# fn = self.matrix.sum(0) - tp # false negatives (missed detections)
@@ -153,6 +198,7 @@
@TryExcept("WARNING ⚠️ ConfusionMatrix plot failure")
def plot(self, normalize=True, save_dir="", names=()):
+ """Plots confusion matrix using seaborn, optional normalization; can save plot to specified directory."""
import seaborn as sn
array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1e-9) if normalize else 1) # normalize columns
@@ -184,11 +230,16 @@ plt.close(fig)
def print(self):
+ """Prints the confusion matrix row-wise, with each class and its predictions separated by spaces."""
for i in range(self.nc + 1):
print(" ".join(map(str, self.matrix[i])))
def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7):
+ """Calculates IoU, GIoU, DIoU, or CIoU between two boxes, supporting xywh/xyxy formats.
+
+ Input shapes are box1(1,4) to box2(n,4).
+ """
# Get the coordinates of bounding boxes
if xywh: # transform from xywh to xyxy
(x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, -1), box2.chunk(4, -1)
@@ -230,6 +281,17 @@
def box_iou(box1, box2, eps=1e-7):
# https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
+ """Return intersection-over-union (Jaccard index) of boxes.
+
+ Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
+
+ Args:
+ box1: (Tensor[N, 4])
+ box2: (Tensor[M, 4])
+
+ Returns:
+ iou (Tensor[N, M]): the NxM matrix containing the pairwise IoU values for every element in boxes1 and boxes2
+ """
# inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
(a1, a2), (b1, b2) = box1.unsqueeze(1).chunk(2, 2), box2.unsqueeze(0).chunk(2, 2)
inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2)
@@ -239,6 +301,18 @@
def bbox_ioa(box1, box2, eps=1e-7):
+ """Returns the intersection over box2 area given box1, box2.
+
+ Args:
+ box1: np.array of shape(4)
+ box2: np.array of shape(nx4)
+
+ Returns:
+ np.array of shape(n)
+
+ Notes:
+ - Boxes are x1y1x2y2
+ """
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1
b2_x1, b2_y1, b2_x2, b2_y2 = box2.T
@@ -256,6 +330,9 @@
def wh_iou(wh1, wh2, eps=1e-7):
+ """Calculates the Intersection over Union (IoU) for two sets of widths and heights; `wh1` and `wh2` should be nx2
+ and mx2 tensors.
+ """
wh1 = wh1[:, None] # [N,1,2]
wh2 = wh2[None] # [1,M,2]
inter = torch.min(wh1, wh2).prod(2) # [N,M]
@@ -267,6 +344,9 @@
@threaded
def plot_pr_curve(px, py, ap, save_dir=Path("pr_curve.png"), names=()):
+ """Plots precision-recall curve, optionally per class, saving to `save_dir`; `px`, `py` are lists, `ap` is Nx2
+ array, `names` optional.
+ """
fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
py = np.stack(py, axis=1)
@@ -289,6 +369,7 @@
@threaded
def plot_mc_curve(px, py, save_dir=Path("mc_curve.png"), names=(), xlabel="Confidence", ylabel="Metric"):
+ """Plots a metric-confidence curve for model predictions, supporting per-class visualization and smoothing."""
fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
if 0 < len(names) < 21: # display per-class legend if < 21 classes
@@ -306,4 +387,4 @@ ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
ax.set_title(f"{ylabel}-Confidence Curve")
fig.savefig(save_dir, dpi=250)
- plt.close(fig)
+ plt.close(fig)
| https://raw.githubusercontent.com/ultralytics/yolov5/HEAD/utils/metrics.py |
Create docstrings for all classes and functions | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from ultralytics.utils.patches import torch_load
def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
from pathlib import Path
from models.common import AutoShape, DetectMultiBackend
from models.experimental import attempt_load
from models.yolo import ClassificationModel, DetectionModel, SegmentationModel
from utils.downloads import attempt_download
from utils.general import LOGGER, ROOT, check_requirements, intersect_dicts, logging
from utils.torch_utils import select_device
if not verbose:
LOGGER.setLevel(logging.WARNING)
check_requirements(ROOT / "requirements.txt", exclude=("opencv-python", "tensorboard", "thop"))
name = Path(name)
path = name.with_suffix(".pt") if name.suffix == "" and not name.is_dir() else name # checkpoint path
try:
device = select_device(device)
if pretrained and channels == 3 and classes == 80:
try:
model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model
if autoshape:
if model.pt and isinstance(model.model, ClassificationModel):
LOGGER.warning(
"WARNING ⚠️ YOLOv5 ClassificationModel is not yet AutoShape compatible. "
"You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224)."
)
elif model.pt and isinstance(model.model, SegmentationModel):
LOGGER.warning(
"WARNING ⚠️ YOLOv5 SegmentationModel is not yet AutoShape compatible. "
"You will not be able to run inference with this model."
)
else:
model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS
except Exception:
model = attempt_load(path, device=device, fuse=False) # arbitrary model
else:
cfg = next(iter((Path(__file__).parent / "models").rglob(f"{path.stem}.yaml"))) # model.yaml path
model = DetectionModel(cfg, channels, classes) # create model
if pretrained:
ckpt = torch_load(attempt_download(path), map_location=device) # load
csd = ckpt["model"].float().state_dict() # checkpoint state_dict as FP32
csd = intersect_dicts(csd, model.state_dict(), exclude=["anchors"]) # intersect
model.load_state_dict(csd, strict=False) # load
if len(ckpt["model"].names) == classes:
model.names = ckpt["model"].names # set class names attribute
if not verbose:
LOGGER.setLevel(logging.INFO) # reset to default
return model.to(device)
except Exception as e:
help_url = "https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading"
s = f"{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help."
raise Exception(s) from e
def custom(path="path/to/model.pt", autoshape=True, _verbose=True, device=None):
return _create(path, autoshape=autoshape, verbose=_verbose, device=device)
def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
return _create("yolov5n", pretrained, channels, classes, autoshape, _verbose, device)
def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
return _create("yolov5s", pretrained, channels, classes, autoshape, _verbose, device)
def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
return _create("yolov5m", pretrained, channels, classes, autoshape, _verbose, device)
def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
return _create("yolov5l", pretrained, channels, classes, autoshape, _verbose, device)
def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
return _create("yolov5x", pretrained, channels, classes, autoshape, _verbose, device)
def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
return _create("yolov5n6", pretrained, channels, classes, autoshape, _verbose, device)
def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
return _create("yolov5s6", pretrained, channels, classes, autoshape, _verbose, device)
def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
return _create("yolov5m6", pretrained, channels, classes, autoshape, _verbose, device)
def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
return _create("yolov5l6", pretrained, channels, classes, autoshape, _verbose, device)
def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
return _create("yolov5x6", pretrained, channels, classes, autoshape, _verbose, device)
if __name__ == "__main__":
import argparse
from pathlib import Path
import numpy as np
from PIL import Image
from utils.general import cv2, print_args
# Argparser
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, default="yolov5s", help="model name")
opt = parser.parse_args()
print_args(vars(opt))
# Model
model = _create(name=opt.model, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True)
# model = custom(path='path/to/model.pt') # custom
# Images
imgs = [
"data/images/zidane.jpg", # filename
Path("data/images/zidane.jpg"), # Path
"https://ultralytics.com/images/zidane.jpg", # URI
cv2.imread("data/images/bus.jpg")[:, :, ::-1], # OpenCV
Image.open("data/images/bus.jpg"), # PIL
np.zeros((320, 640, 3)),
] # numpy
# Inference
results = model(imgs, size=320) # batched inference
# Results
results.print()
results.save() | --- +++ @@ -1,9 +1,55 @@ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""
+PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5.
+
+Usage:
+ import torch
+ model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # official model
+ model = torch.hub.load('ultralytics/yolov5:master', 'yolov5s') # from branch
+ model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.pt') # custom/local model
+ model = torch.hub.load('.', 'custom', 'yolov5s.pt', source='local') # local repo
+"""
from ultralytics.utils.patches import torch_load
def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
+ """Creates or loads a YOLOv5 model, with options for pretrained weights and model customization.
+
+ Args:
+ name (str): Model name (e.g., 'yolov5s') or path to the model checkpoint (e.g., 'path/to/best.pt').
+ pretrained (bool, optional): If True, loads pretrained weights into the model. Defaults to True.
+ channels (int, optional): Number of input channels the model expects. Defaults to 3.
+ classes (int, optional): Number of classes the model is expected to detect. Defaults to 80.
+ autoshape (bool, optional): If True, applies the YOLOv5 .autoshape() wrapper for various input formats. Defaults
+ to True.
+ verbose (bool, optional): If True, prints detailed information during the model creation/loading process.
+ Defaults to True.
+ device (str | torch.device | None, optional): Device to use for model parameters (e.g., 'cpu', 'cuda'). If None,
+ selects the best available device. Defaults to None.
+
+ Returns:
+ (DetectMultiBackend | AutoShape): The loaded YOLOv5 model, potentially wrapped with AutoShape if specified.
+
+ Examples:
+ ```python
+ import torch
+ from hubconf import _create  # hubconf.py defines _create; run from the YOLOv5 repo root
+
+ # Load an official YOLOv5s model with pretrained weights
+ model = _create('yolov5s')
+
+ # Load a custom model from a local checkpoint
+ model = _create('path/to/custom_model.pt', pretrained=False)
+
+ # Load a model with specific input channels and classes
+ model = _create('yolov5s', channels=1, classes=10)
+ ```
+
+ Notes:
+ For more information on model loading and customization, visit the
+ [YOLOv5 PyTorch Hub Documentation](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading/).
+ """
from pathlib import Path
from models.common import AutoShape, DetectMultiBackend
@@ -59,46 +105,363 @@
def custom(path="path/to/model.pt", autoshape=True, _verbose=True, device=None):
+ """Loads a custom or local YOLOv5 model from a given path with optional autoshaping and device specification.
+
+ Args:
+ path (str): Path to the custom model file (e.g., 'path/to/model.pt').
+ autoshape (bool): Apply YOLOv5 .autoshape() wrapper to model if True, enabling compatibility with various input
+ types (default is True).
+ _verbose (bool): If True, prints all informational messages to the screen; otherwise, operates silently (default
+ is True).
+ device (str | torch.device | None): Device to load the model on, e.g., 'cpu', 'cuda', torch.device('cuda:0'),
+ etc. (default is None, which automatically selects the best available device).
+
+ Returns:
+ torch.nn.Module: A YOLOv5 model loaded with the specified parameters.
+
+ Examples:
+ ```python
+ # Load model from a given path with autoshape enabled on the best available device
+ model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.pt')
+
+ # Load model from a local path without autoshape on the CPU device
+ model = torch.hub.load('.', 'custom', 'yolov5s.pt', source='local', autoshape=False, device='cpu')
+ ```
+
+ Notes:
+ For more details on loading models from PyTorch Hub:
+ https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading
+ """
return _create(path, autoshape=autoshape, verbose=_verbose, device=device)
def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
+ """Instantiates the YOLOv5-nano model with options for pretraining, input channels, class count, autoshaping,
+ verbosity, and device.
+
+ Args:
+ pretrained (bool): If True, loads pretrained weights into the model. Defaults to True.
+ channels (int): Number of input channels for the model. Defaults to 3.
+ classes (int): Number of classes for object detection. Defaults to 80.
+ autoshape (bool): If True, applies the YOLOv5 .autoshape() wrapper to the model for various formats
+ (file/URI/PIL/cv2/np) and non-maximum suppression (NMS) during inference. Defaults to True.
+ _verbose (bool): If True, prints detailed information to the screen. Defaults to True.
+ device (str | torch.device | None): Specifies the device to use for model computation. If None, uses the best
+ device available (i.e., GPU if available, otherwise CPU). Defaults to None.
+
+ Returns:
+ DetectionModel | ClassificationModel | SegmentationModel: The instantiated YOLOv5-nano model, potentially with
+ pretrained weights and autoshaping applied.
+
+ Examples:
+ ```python
+ import torch
+ from hubconf import yolov5n  # hubconf.py defines yolov5n; run from the YOLOv5 repo root
+
+ # Load the YOLOv5-nano model with defaults
+ model = yolov5n()
+
+ # Load the YOLOv5-nano model with a specific device
+ model = yolov5n(device='cuda')
+ ```
+
+ Notes:
+ For further details on loading models from PyTorch Hub, refer to
+ [PyTorch Hub models](https://pytorch.org/hub/ultralytics_yolov5).
+ """
return _create("yolov5n", pretrained, channels, classes, autoshape, _verbose, device)
def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
+ """Create a YOLOv5-small (yolov5s) model with options for pretraining, input channels, class count, autoshaping,
+ verbosity, and device configuration.
+
+ Args:
+ pretrained (bool, optional): Flag to load pretrained weights into the model. Defaults to True.
+ channels (int, optional): Number of input channels. Defaults to 3.
+ classes (int, optional): Number of model classes. Defaults to 80.
+ autoshape (bool, optional): Whether to wrap the model with YOLOv5's .autoshape() for handling various input
+ formats. Defaults to True.
+ _verbose (bool, optional): Flag to print detailed information regarding model loading. Defaults to True.
+ device (str | torch.device | None, optional): Device to use for model computation, can be 'cpu', 'cuda', or
+ torch.device instances. If None, automatically selects the best available device. Defaults to None.
+
+ Returns:
+ torch.nn.Module: The YOLOv5-small model configured and loaded according to the specified parameters.
+
+ Examples:
+ ```python
+ import torch
+
+ # Load the official YOLOv5-small model with pretrained weights
+ model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
+
+ # Load the YOLOv5-small model from a specific branch
+ model = torch.hub.load('ultralytics/yolov5:master', 'yolov5s')
+
+ # Load a custom YOLOv5-small model from a local checkpoint
+ model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.pt')
+
+ # Load a local YOLOv5-small model specifying source as local repository
+ model = torch.hub.load('.', 'custom', 'yolov5s.pt', source='local')
+ ```
+
+ Notes:
+ For more details on model loading and customization, visit
+ the [YOLOv5 PyTorch Hub Documentation](https://pytorch.org/hub/ultralytics_yolov5/).
+ """
return _create("yolov5s", pretrained, channels, classes, autoshape, _verbose, device)
def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
+ """Instantiates the YOLOv5-medium model with customizable pretraining, channel count, class count, autoshaping,
+ verbosity, and device.
+
+ Args:
+ pretrained (bool, optional): Whether to load pretrained weights into the model. Default is True.
+ channels (int, optional): Number of input channels. Default is 3.
+ classes (int, optional): Number of model classes. Default is 80.
+ autoshape (bool, optional): Apply YOLOv5 .autoshape() wrapper to the model for handling various input formats.
+ Default is True.
+ _verbose (bool, optional): Whether to print detailed information to the screen. Default is True.
+ device (str | torch.device | None, optional): Device specification to use for model parameters (e.g., 'cpu',
+ 'cuda'). Default is None.
+
+ Returns:
+ torch.nn.Module: The instantiated YOLOv5-medium model.
+
+ Examples:
+ ```python
+ import torch
+
+ model = torch.hub.load('ultralytics/yolov5', 'yolov5m') # Load YOLOv5-medium from Ultralytics repository
+ model = torch.hub.load('ultralytics/yolov5:master', 'yolov5m') # Load from the master branch
+ model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5m.pt') # Load a custom/local YOLOv5-medium model
+ model = torch.hub.load('.', 'custom', 'yolov5m.pt', source='local') # Load from a local repository
+ ```
+
+ Notes:
+ For more information, visit https://pytorch.org/hub/ultralytics_yolov5.
+ """
return _create("yolov5m", pretrained, channels, classes, autoshape, _verbose, device)
def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
+ """Creates YOLOv5-large model with options for pretraining, channels, classes, autoshaping, verbosity, and device
+ selection.
+
+ Args:
+ pretrained (bool): Load pretrained weights into the model. Default is True.
+ channels (int): Number of input channels. Default is 3.
+ classes (int): Number of model classes. Default is 80.
+ autoshape (bool): Apply YOLOv5 .autoshape() wrapper to model. Default is True.
+ _verbose (bool): Print all information to screen. Default is True.
+ device (str | torch.device | None): Device to use for model parameters, e.g., 'cpu', 'cuda', or a torch.device
+ instance. Default is None.
+
+ Returns:
+ YOLOv5 model (torch.nn.Module): The YOLOv5-large model instantiated with specified configurations and possibly
+ pretrained weights.
+
+ Examples:
+ ```python
+ import torch
+ model = torch.hub.load('ultralytics/yolov5', 'yolov5l')
+ ```
+
+ Notes:
+ For additional details, refer to the PyTorch Hub models documentation:
+ https://pytorch.org/hub/ultralytics_yolov5
+ """
return _create("yolov5l", pretrained, channels, classes, autoshape, _verbose, device)
def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
+ """Perform object detection using the YOLOv5-xlarge model with options for pretraining, input channels, class count,
+ autoshaping, verbosity, and device specification.
+
+ Args:
+ pretrained (bool): If True, loads pretrained weights into the model. Defaults to True.
+ channels (int): Number of input channels for the model. Defaults to 3.
+ classes (int): Number of model classes for object detection. Defaults to 80.
+ autoshape (bool): If True, applies the YOLOv5 .autoshape() wrapper for handling different input formats.
+ Defaults to True.
+ _verbose (bool): If True, prints detailed information during model loading. Defaults to True.
+ device (str | torch.device | None): Device specification for computing the model, e.g., 'cpu', 'cuda:0',
+ torch.device('cuda'). Defaults to None.
+
+ Returns:
+ torch.nn.Module: The YOLOv5-xlarge model loaded with the specified parameters, optionally with pretrained
+ weights and autoshaping applied.
+
+ Examples:
+ ```python
+ import torch
+ model = torch.hub.load('ultralytics/yolov5', 'yolov5x')
+ ```
+
+ Notes:
+ For additional details, refer to the official YOLOv5 PyTorch Hub models documentation:
+ https://pytorch.org/hub/ultralytics_yolov5
+ """
return _create("yolov5x", pretrained, channels, classes, autoshape, _verbose, device)
def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
+ """Creates YOLOv5-nano-P6 model with options for pretraining, channels, classes, autoshaping, verbosity, and device.
+
+ Args:
+ pretrained (bool, optional): If True, loads pretrained weights into the model. Default is True.
+ channels (int, optional): Number of input channels. Default is 3.
+ classes (int, optional): Number of model classes. Default is 80.
+ autoshape (bool, optional): If True, applies the YOLOv5 .autoshape() wrapper to the model. Default is True.
+ _verbose (bool, optional): If True, prints all information to screen. Default is True.
+ device (str | torch.device | None, optional): Device to use for model parameters. Can be 'cpu', 'cuda', or None.
+ Default is None.
+
+ Returns:
+ torch.nn.Module: YOLOv5-nano-P6 model loaded with the specified configurations.
+
+ Examples:
+ ```python
+ import torch
+ model = torch.hub.load('ultralytics/yolov5', 'yolov5n6', device='cuda')  # kwargs are forwarded to yolov5n6
+ ```
+
+ Notes:
+ For more information on PyTorch Hub models, visit: https://pytorch.org/hub/ultralytics_yolov5
+ """
return _create("yolov5n6", pretrained, channels, classes, autoshape, _verbose, device)
def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
+ """Instantiate the YOLOv5-small-P6 model with options for pretraining, input channels, number of classes,
+ autoshaping, verbosity, and device selection.
+
+ Args:
+ pretrained (bool): If True, loads pretrained weights. Default is True.
+ channels (int): Number of input channels. Default is 3.
+ classes (int): Number of object detection classes. Default is 80.
+ autoshape (bool): If True, applies YOLOv5 .autoshape() wrapper to the model, allowing for varied input formats.
+ Default is True.
+ _verbose (bool): If True, prints detailed information during model loading. Default is True.
+ device (str | torch.device | None): Device specification for model parameters (e.g., 'cpu', 'cuda', or
+ torch.device). Default is None, which selects an available device automatically.
+
+ Returns:
+ torch.nn.Module: The YOLOv5-small-P6 model instance.
+
+ Raises:
+ Exception: If there is an error during model creation or loading, with a suggestion to visit the YOLOv5
+ tutorials for help.
+
+ Examples:
+ ```python
+ import torch
+
+ model = torch.hub.load('ultralytics/yolov5', 'yolov5s6')
+ model = torch.hub.load('ultralytics/yolov5:master', 'yolov5s6') # load from a specific branch
+ model = torch.hub.load('ultralytics/yolov5', 'custom', 'path/to/yolov5s6.pt') # custom/local model
+ model = torch.hub.load('.', 'custom', 'path/to/yolov5s6.pt', source='local') # local repo model
+ ```
+
+ Notes:
+ - For more information, refer to the PyTorch Hub models documentation at https://pytorch.org/hub/ultralytics_yolov5
+ """
return _create("yolov5s6", pretrained, channels, classes, autoshape, _verbose, device)
def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
+ """Create YOLOv5-medium-P6 model with options for pretraining, channel count, class count, autoshaping, verbosity,
+ and device.
+
+ Args:
+ pretrained (bool): If True, loads pretrained weights. Default is True.
+ channels (int): Number of input channels. Default is 3.
+ classes (int): Number of model classes. Default is 80.
+ autoshape (bool): Apply YOLOv5 .autoshape() wrapper to the model for file/URI/PIL/cv2/np inputs and NMS. Default
+ is True.
+ _verbose (bool): If True, prints detailed information to the screen. Default is True.
+ device (str | torch.device | None): Device to use for model parameters. Default is None, which uses the best
+ available device.
+
+ Returns:
+ torch.nn.Module: The YOLOv5-medium-P6 model.
+
+ Examples:
+ ```python
+ import torch
+
+ # Load YOLOv5-medium-P6 model
+ model = torch.hub.load('ultralytics/yolov5', 'yolov5m6')
+ ```
+
+ Notes:
+ - The model can be loaded with pre-trained weights for better performance on specific tasks.
+ - The autoshape feature simplifies input handling by allowing various popular data formats.
+ - Refer to the PyTorch Hub models documentation for additional details: https://pytorch.org/hub/ultralytics_yolov5
+ """
return _create("yolov5m6", pretrained, channels, classes, autoshape, _verbose, device)
def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
+ """Instantiate the YOLOv5-large-P6 model with options for pretraining, channel and class counts, autoshaping,
+ verbosity, and device selection.
+
+ Args:
+ pretrained (bool, optional): If True, load pretrained weights into the model. Default is True.
+ channels (int, optional): Number of input channels. Default is 3.
+ classes (int, optional): Number of model classes. Default is 80.
+ autoshape (bool, optional): If True, apply YOLOv5 .autoshape() wrapper to the model for input flexibility.
+ Default is True.
+ _verbose (bool, optional): If True, print all information to the screen. Default is True.
+ device (str | torch.device | None, optional): Device to use for model parameters, e.g., 'cpu', 'cuda', or
+ torch.device. If None, automatically selects the best available device. Default is None.
+
+ Returns:
+ torch.nn.Module: The instantiated YOLOv5-large-P6 model.
+
+ Examples:
+ ```python
+ import torch
+ model = torch.hub.load('ultralytics/yolov5', 'yolov5l6') # official model
+ model = torch.hub.load('ultralytics/yolov5:master', 'yolov5l6') # from specific branch
+ model = torch.hub.load('ultralytics/yolov5', 'custom', 'path/to/yolov5l6.pt') # custom/local model
+ model = torch.hub.load('.', 'custom', 'path/to/yolov5l6.pt', source='local') # local repository
+ ```
+
+ Notes:
+ Refer to [PyTorch Hub Documentation](https://pytorch.org/hub/ultralytics_yolov5/) for additional usage instructions.
+ """
return _create("yolov5l6", pretrained, channels, classes, autoshape, _verbose, device)
def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
+ """Creates the YOLOv5-xlarge-P6 model with options for pretraining, number of input channels, class count,
+ autoshaping, verbosity, and device selection.
+
+ Args:
+ pretrained (bool): If True, loads pretrained weights into the model. Default is True.
+ channels (int): Number of input channels. Default is 3.
+ classes (int): Number of model classes. Default is 80.
+ autoshape (bool): If True, applies YOLOv5 .autoshape() wrapper to the model. Default is True.
+ _verbose (bool): If True, prints all information to the screen. Default is True.
+ device (str | torch.device | None): Device to use for model parameters, can be a string, torch.device object, or
+ None for default device selection. Default is None.
+
+ Returns:
+ torch.nn.Module: The instantiated YOLOv5-xlarge-P6 model.
+
+ Examples:
+ ```python
+ import torch
+ model = torch.hub.load('ultralytics/yolov5', 'yolov5x6') # load the YOLOv5-xlarge-P6 model
+ ```
+
+ Notes:
+ For more information on YOLOv5 models, visit the official documentation:
+ https://docs.ultralytics.com/yolov5
+ """
return _create("yolov5x6", pretrained, channels, classes, autoshape, _verbose, device)
@@ -136,4 +499,4 @@
# Results
results.print()
- results.save()
+ results.save()
| https://raw.githubusercontent.com/ultralytics/yolov5/HEAD/hubconf.py |
Help me document legacy Python code | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
import threading
class Callbacks:
def __init__(self):
self._callbacks = {
"on_pretrain_routine_start": [],
"on_pretrain_routine_end": [],
"on_train_start": [],
"on_train_epoch_start": [],
"on_train_batch_start": [],
"optimizer_step": [],
"on_before_zero_grad": [],
"on_train_batch_end": [],
"on_train_epoch_end": [],
"on_val_start": [],
"on_val_batch_start": [],
"on_val_image_end": [],
"on_val_batch_end": [],
"on_val_end": [],
"on_fit_epoch_end": [], # fit = train + val
"on_model_save": [],
"on_train_end": [],
"on_params_update": [],
"teardown": [],
}
self.stop_training = False # set True to interrupt training
def register_action(self, hook, name="", callback=None):
assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
assert callable(callback), f"callback '{callback}' is not callable"
self._callbacks[hook].append({"name": name, "callback": callback})
def get_registered_actions(self, hook=None):
return self._callbacks[hook] if hook else self._callbacks
def run(self, hook, *args, thread=False, **kwargs):
assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
for logger in self._callbacks[hook]:
if thread:
threading.Thread(target=logger["callback"], args=args, kwargs=kwargs, daemon=True).start()
else:
logger["callback"](*args, **kwargs) | --- +++ @@ -1,11 +1,14 @@ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""Callback utils."""
import threading
class Callbacks:
+ """Handles all registered callbacks for YOLOv5 Hooks."""
def __init__(self):
+ """Initializes a Callbacks object to manage registered YOLOv5 training event hooks."""
self._callbacks = {
"on_pretrain_routine_start": [],
"on_pretrain_routine_end": [],
@@ -30,17 +33,37 @@ self.stop_training = False # set True to interrupt training
def register_action(self, hook, name="", callback=None):
+ """Register a new action to a callback hook.
+
+ Args:
+ hook: The callback hook name to register the action to
+ name: The name of the action for later reference
+ callback: The callback to fire
+ """
assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
assert callable(callback), f"callback '{callback}' is not callable"
self._callbacks[hook].append({"name": name, "callback": callback})
def get_registered_actions(self, hook=None):
+ """Returns all the registered actions by callback hook.
+
+ Args:
+ hook: The name of the hook to check, defaults to all
+ """
return self._callbacks[hook] if hook else self._callbacks
def run(self, hook, *args, thread=False, **kwargs):
+ """Loop through the registered actions and fire all callbacks on main thread.
+
+ Args:
+ hook: The name of the hook to check, defaults to all
+ args: Arguments to receive from YOLOv5
+ thread: (boolean) Run callbacks in daemon thread
+ kwargs: Keyword Arguments to receive from YOLOv5
+ """
assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
for logger in self._callbacks[hook]:
if thread:
threading.Thread(target=logger["callback"], args=args, kwargs=kwargs, daemon=True).start()
else:
- logger["callback"](*args, **kwargs)+ logger["callback"](*args, **kwargs)
| https://raw.githubusercontent.com/ultralytics/yolov5/HEAD/utils/callbacks.py |
Help me write clear docstrings | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
import contextlib
import platform
import threading
def emojis(str=""):
return str.encode().decode("ascii", "ignore") if platform.system() == "Windows" else str
class TryExcept(contextlib.ContextDecorator):
def __init__(self, msg=""):
self.msg = msg
def __enter__(self):
pass
def __exit__(self, exc_type, value, traceback):
if value:
print(emojis(f"{self.msg}{': ' if self.msg else ''}{value}"))
return True
def threaded(func):
def wrapper(*args, **kwargs):
thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True)
thread.start()
return thread
return wrapper
def join_threads(verbose=False):
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is not main_thread:
if verbose:
print(f"Joining thread {t.name}")
t.join()
def notebook_init(verbose=True):
print("Checking setup...")
import os
import shutil
from ultralytics.utils.checks import check_requirements
from utils.general import check_font, is_colab
from utils.torch_utils import select_device # imports
check_font()
import psutil
if check_requirements("wandb", install=False):
os.system("pip uninstall -y wandb") # eliminate unexpected account creation prompt with infinite hang
if is_colab():
shutil.rmtree("/content/sample_data", ignore_errors=True) # remove colab /sample_data directory
# System info
display = None
if verbose:
gb = 1 << 30 # bytes to GiB (1024 ** 3)
ram = psutil.virtual_memory().total
total, _used, free = shutil.disk_usage("/")
with contextlib.suppress(Exception): # clear display if ipython is installed
from IPython import display
display.clear_output()
s = f"({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)"
else:
s = ""
select_device(newline=False)
print(emojis(f"Setup complete ✅ {s}"))
return display | --- +++ @@ -1,4 +1,5 @@ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""utils/initialization."""
import contextlib
import platform
@@ -6,26 +7,35 @@
def emojis(str=""):
+ """Returns an emoji-safe version of a string, stripped of emojis on Windows platforms."""
return str.encode().decode("ascii", "ignore") if platform.system() == "Windows" else str
class TryExcept(contextlib.ContextDecorator):
+ """A context manager and decorator for error handling that prints an optional message with emojis on exception."""
def __init__(self, msg=""):
+ """Initializes TryExcept with an optional message, used as a decorator or context manager for error handling."""
self.msg = msg
def __enter__(self):
+ """Enter the runtime context related to this object for error handling with an optional message."""
pass
def __exit__(self, exc_type, value, traceback):
+ """Context manager exit method that prints an error message with emojis if an exception occurred, always returns
+ True.
+ """
if value:
print(emojis(f"{self.msg}{': ' if self.msg else ''}{value}"))
return True
def threaded(func):
+ """Decorator @threaded to run a function in a separate thread, returning the thread instance."""
def wrapper(*args, **kwargs):
+ """Runs the decorated function in a separate daemon thread and returns the thread instance."""
thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True)
thread.start()
return thread
@@ -34,6 +44,10 @@
def join_threads(verbose=False):
+ """Joins all daemon threads, optionally printing their names if verbose is True.
+
+ Example: atexit.register(lambda: join_threads())
+ """
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is not main_thread:
@@ -43,6 +57,7 @@
def notebook_init(verbose=True):
+ """Initializes notebook environment by checking requirements, cleaning up, and displaying system info."""
print("Checking setup...")
import os
@@ -78,4 +93,4 @@
select_device(newline=False)
print(emojis(f"Setup complete ✅ {s}"))
- return display
+ return display
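# Editor's sketch (illustrative, assumes this module's namespace): using the documented helpers.
#
#   @TryExcept("demo")
#   def may_fail():
#       raise ValueError("boom")  # printed as "demo: boom" instead of propagating
#
#   @threaded
#   def job(n):
#       print(f"running with n={n}")
#
#   may_fail()
#   job(3).join()  # `threaded` returns the started daemon thread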
| https://raw.githubusercontent.com/ultralytics/yolov5/HEAD/utils/__init__.py |
Replace inline comments with docstrings | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
import argparse
import contextlib
import json
import os
import platform
import re
import subprocess
import sys
import time
import warnings
from pathlib import Path
import pandas as pd
import torch
from torch.utils.mobile_optimizer import optimize_for_mobile
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0] # YOLOv5 root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
if platform.system() != "Windows":
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
from models.experimental import attempt_load
from models.yolo import ClassificationModel, Detect, DetectionModel, SegmentationModel
from utils.dataloaders import LoadImages
from utils.general import (
LOGGER,
Profile,
check_dataset,
check_img_size,
check_requirements,
check_version,
check_yaml,
colorstr,
file_size,
get_default_args,
print_args,
url2file,
yaml_save,
)
from utils.torch_utils import select_device, smart_inference_mode
MACOS = platform.system() == "Darwin" # macOS environment
class iOSModel(torch.nn.Module):
def __init__(self, model, im):
super().__init__()
_b, _c, h, w = im.shape # batch, channel, height, width
self.model = model
self.nc = model.nc # number of classes
if w == h:
self.normalize = 1.0 / w
else:
self.normalize = torch.tensor([1.0 / w, 1.0 / h, 1.0 / w, 1.0 / h]) # broadcast (slower, smaller)
# np = model(im)[0].shape[1] # number of points
# self.normalize = torch.tensor([1. / w, 1. / h, 1. / w, 1. / h]).expand(np, 4) # explicit (faster, larger)
def forward(self, x):
xywh, conf, cls = self.model(x)[0].squeeze().split((4, 1, self.nc), 1)
return cls * conf, xywh * self.normalize # confidence (3780, 80), coordinates (3780, 4)
def export_formats():
x = [
["PyTorch", "-", ".pt", True, True],
["TorchScript", "torchscript", ".torchscript", True, True],
["ONNX", "onnx", ".onnx", True, True],
["OpenVINO", "openvino", "_openvino_model", True, False],
["TensorRT", "engine", ".engine", False, True],
["CoreML", "coreml", ".mlpackage", True, False],
["TensorFlow SavedModel", "saved_model", "_saved_model", True, True],
["TensorFlow GraphDef", "pb", ".pb", True, True],
["TensorFlow Lite", "tflite", ".tflite", True, False],
["TensorFlow Edge TPU", "edgetpu", "_edgetpu.tflite", False, False],
["TensorFlow.js", "tfjs", "_web_model", False, False],
["PaddlePaddle", "paddle", "_paddle_model", True, True],
]
return pd.DataFrame(x, columns=["Format", "Argument", "Suffix", "CPU", "GPU"])
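# Editor's sketch (hypothetical helper, not in the original file): the DataFrame above makes
# capability queries easy, e.g. _formats_supporting("GPU") or _formats_supporting("CPU").
def _formats_supporting(col="GPU"):
    """Returns the names of export formats whose boolean capability column `col` is True."""
    df = export_formats()
    return df.loc[df[col], "Format"].tolist()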
def try_export(inner_func):
inner_args = get_default_args(inner_func)
def outer_func(*args, **kwargs):
prefix = inner_args["prefix"]
try:
with Profile() as dt:
f, model = inner_func(*args, **kwargs)
LOGGER.info(f"{prefix} export success ✅ {dt.t:.1f}s, saved as {f} ({file_size(f):.1f} MB)")
return f, model
except Exception as e:
LOGGER.info(f"{prefix} export failure ❌ {dt.t:.1f}s: {e}")
return None, None
return outer_func
@try_export
def export_torchscript(model, im, file, optimize, prefix=colorstr("TorchScript:")):
LOGGER.info(f"\n{prefix} starting export with torch {torch.__version__}...")
f = file.with_suffix(".torchscript")
ts = torch.jit.trace(model, im, strict=False)
d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names}
extra_files = {"config.txt": json.dumps(d)} # torch._C.ExtraFilesMap()
if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html
optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files)
else:
ts.save(str(f), _extra_files=extra_files)
return f, None
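# Editor's note: exporters in this file are normally reached through run() defined further
# below (or the CLI); an illustrative call:
#   run(weights=ROOT / "yolov5s.pt", imgsz=(640, 640), include=("torchscript",), optimize=False)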
@try_export
def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr("ONNX:")):
check_requirements(("onnx>=1.12.0", "onnxscript"))
import onnx
LOGGER.info(f"\n{prefix} starting export with onnx {onnx.__version__}...")
f = str(file.with_suffix(".onnx"))
output_names = ["output0", "output1"] if isinstance(model, SegmentationModel) else ["output0"]
if dynamic:
dynamic = {"images": {0: "batch", 2: "height", 3: "width"}} # shape(1,3,640,640)
if isinstance(model, SegmentationModel):
dynamic["output0"] = {0: "batch", 1: "anchors"} # shape(1,25200,85)
dynamic["output1"] = {0: "batch", 2: "mask_height", 3: "mask_width"} # shape(1,32,160,160)
elif isinstance(model, DetectionModel):
dynamic["output0"] = {0: "batch", 1: "anchors"} # shape(1,25200,85)
torch.onnx.export(
model.cpu() if dynamic else model, # --dynamic only compatible with cpu
im.cpu() if dynamic else im,
f,
verbose=False,
opset_version=opset,
do_constant_folding=True, # WARNING: DNN inference with torch>=1.12 may require do_constant_folding=False
input_names=["images"],
output_names=output_names,
dynamic_axes=dynamic or None,
)
# Checks
model_onnx = onnx.load(f) # load onnx model
onnx.checker.check_model(model_onnx) # check onnx model
# Metadata
d = {"stride": int(max(model.stride)), "names": model.names}
for k, v in d.items():
meta = model_onnx.metadata_props.add()
meta.key, meta.value = k, str(v)
onnx.save(model_onnx, f)
# Simplify
if simplify:
try:
cuda = torch.cuda.is_available()
check_requirements(("onnxruntime-gpu" if cuda else "onnxruntime", "onnxslim"))
import onnxslim
LOGGER.info(f"{prefix} slimming with onnxslim {onnxslim.__version__}...")
model_onnx = onnxslim.slim(model_onnx)
onnx.save(model_onnx, f)
except Exception as e:
LOGGER.info(f"{prefix} simplifier failure: {e}")
return f, model_onnx
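# Editor's sketch (hypothetical helper, not in the original file): sanity-check the exported
# ONNX model with onnxruntime; assumes `onnxruntime` and `numpy` are installed.
def _verify_onnx(f, imgsz=640):
    """Runs a zero image through the exported ONNX file and returns the output shapes."""
    import numpy as np
    import onnxruntime as ort

    session = ort.InferenceSession(str(f), providers=["CPUExecutionProvider"])
    im = np.zeros((1, 3, imgsz, imgsz), dtype=np.float32)  # input name 'images' set above
    return [o.shape for o in session.run(None, {"images": im})]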
@try_export
def export_openvino(file, metadata, half, int8, data, prefix=colorstr("OpenVINO:")):
check_requirements("openvino-dev>=2023.0") # requires openvino-dev: https://pypi.org/project/openvino-dev/
import openvino.runtime as ov
from openvino.tools import mo
LOGGER.info(f"\n{prefix} starting export with openvino {ov.__version__}...")
f = str(file).replace(file.suffix, f"_{'int8_' if int8 else ''}openvino_model{os.sep}")
f_onnx = file.with_suffix(".onnx")
f_ov = str(Path(f) / file.with_suffix(".xml").name)
ov_model = mo.convert_model(f_onnx, model_name=file.stem, framework="onnx", compress_to_fp16=half) # export
if int8:
check_requirements("nncf>=2.5.0") # requires at least version 2.5.0 to use the post-training quantization
import nncf
import numpy as np
from utils.dataloaders import create_dataloader
def gen_dataloader(yaml_path, task="train", imgsz=640, workers=4):
data_yaml = check_yaml(yaml_path)
data = check_dataset(data_yaml)
dataloader = create_dataloader(
data[task], imgsz=imgsz, batch_size=1, stride=32, pad=0.5, single_cls=False, rect=False, workers=workers
)[0]
return dataloader
def transform_fn(data_item):
assert data_item[0].dtype == torch.uint8, "input image must be uint8 for the quantization preprocessing"
img = data_item[0].numpy().astype(np.float32) # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
return np.expand_dims(img, 0) if img.ndim == 3 else img
ds = gen_dataloader(data)
quantization_dataset = nncf.Dataset(ds, transform_fn)
ov_model = nncf.quantize(ov_model, quantization_dataset, preset=nncf.QuantizationPreset.MIXED)
ov.serialize(ov_model, f_ov) # save
yaml_save(Path(f) / file.with_suffix(".yaml").name, metadata) # add metadata.yaml
return f, None
@try_export
def export_paddle(model, im, file, metadata, prefix=colorstr("PaddlePaddle:")):
check_requirements(("paddlepaddle>=3.0.0", "x2paddle"))
import x2paddle
from x2paddle.convert import pytorch2paddle
LOGGER.info(f"\n{prefix} starting export with X2Paddle {x2paddle.__version__}...")
f = str(file).replace(".pt", f"_paddle_model{os.sep}")
pytorch2paddle(module=model, save_dir=f, jit_type="trace", input_examples=[im]) # export
yaml_save(Path(f) / file.with_suffix(".yaml").name, metadata) # add metadata.yaml
return f, None
@try_export
def export_coreml(model, im, file, int8, half, nms, mlmodel, prefix=colorstr("CoreML:")):
check_requirements("coremltools")
import coremltools as ct
LOGGER.info(f"\n{prefix} starting export with coremltools {ct.__version__}...")
if mlmodel:
f = file.with_suffix(".mlmodel")
convert_to = "neuralnetwork"
precision = None
else:
f = file.with_suffix(".mlpackage")
convert_to = "mlprogram"
precision = ct.precision.FLOAT16 if half else ct.precision.FLOAT32
if nms:
model = iOSModel(model, im)
ts = torch.jit.trace(model, im, strict=False) # TorchScript model
ct_model = ct.convert(
ts,
inputs=[ct.ImageType("image", shape=im.shape, scale=1 / 255, bias=[0, 0, 0])],
convert_to=convert_to,
compute_precision=precision,
)
bits, mode = (8, "kmeans") if int8 else (16, "linear") if half else (32, None)
if bits < 32:
if mlmodel:
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", category=DeprecationWarning
) # suppress numpy==1.20 float warning, fixed in coremltools==7.0
ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode)
elif bits == 8:
op_config = ct.optimize.coreml.OpPalettizerConfig(mode=mode, nbits=bits, weight_threshold=512)
config = ct.optimize.coreml.OptimizationConfig(global_config=op_config)
ct_model = ct.optimize.coreml.palettize_weights(ct_model, config)
ct_model.save(f)
return f, ct_model
@try_export
def export_engine(
model, im, file, half, dynamic, simplify, workspace=4, verbose=False, cache="", prefix=colorstr("TensorRT:")
):
assert im.device.type != "cpu", "export running on CPU but must be on GPU, i.e. `python export.py --device 0`"
try:
import tensorrt as trt
except Exception:
if platform.system() == "Linux":
check_requirements("nvidia-tensorrt", cmds="-U --index-url https://pypi.ngc.nvidia.com")
import tensorrt as trt
if trt.__version__[0] == "7": # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012
grid = model.model[-1].anchor_grid
model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid]
export_onnx(model, im, file, 12, dynamic, simplify) # opset 12
model.model[-1].anchor_grid = grid
else: # TensorRT >= 8
check_version(trt.__version__, "8.0.0", hard=True) # require tensorrt>=8.0.0
export_onnx(model, im, file, 12, dynamic, simplify) # opset 12
onnx = file.with_suffix(".onnx")
LOGGER.info(f"\n{prefix} starting export with TensorRT {trt.__version__}...")
is_trt10 = int(trt.__version__.split(".")[0]) >= 10 # is TensorRT >= 10
assert onnx.exists(), f"failed to export ONNX file: {onnx}"
f = file.with_suffix(".engine") # TensorRT engine file
logger = trt.Logger(trt.Logger.INFO)
if verbose:
logger.min_severity = trt.Logger.Severity.VERBOSE
builder = trt.Builder(logger)
config = builder.create_builder_config()
if is_trt10:
config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30)
else: # TensorRT versions 7, 8
config.max_workspace_size = workspace * 1 << 30
if cache: # enable timing cache
Path(cache).parent.mkdir(parents=True, exist_ok=True)
buf = Path(cache).read_bytes() if Path(cache).exists() else b""
timing_cache = config.create_timing_cache(buf)
config.set_timing_cache(timing_cache, ignore_mismatch=True)
flag = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
network = builder.create_network(flag)
parser = trt.OnnxParser(network, logger)
if not parser.parse_from_file(str(onnx)):
raise RuntimeError(f"failed to load ONNX file: {onnx}")
inputs = [network.get_input(i) for i in range(network.num_inputs)]
outputs = [network.get_output(i) for i in range(network.num_outputs)]
for inp in inputs:
LOGGER.info(f'{prefix} input "{inp.name}" with shape{inp.shape} {inp.dtype}')
for out in outputs:
LOGGER.info(f'{prefix} output "{out.name}" with shape{out.shape} {out.dtype}')
if dynamic:
if im.shape[0] <= 1:
LOGGER.warning(f"{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument")
profile = builder.create_optimization_profile()
for inp in inputs:
profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape)
config.add_optimization_profile(profile)
LOGGER.info(f"{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine as {f}")
if builder.platform_has_fast_fp16 and half:
config.set_flag(trt.BuilderFlag.FP16)
build = builder.build_serialized_network if is_trt10 else builder.build_engine
with build(network, config) as engine, open(f, "wb") as t:
t.write(engine if is_trt10 else engine.serialize())
if cache: # save timing cache
with open(cache, "wb") as c:
c.write(config.get_timing_cache().serialize())
return f, None
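# Editor's note: TensorRT export must run on a GPU (see the assert above); an illustrative
# invocation through run() defined further below:
#   run(weights=ROOT / "yolov5s.pt", include=("engine",), device="0", half=True, workspace=4)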
@try_export
def export_saved_model(
model,
im,
file,
dynamic,
tf_nms=False,
agnostic_nms=False,
topk_per_class=100,
topk_all=100,
iou_thres=0.45,
conf_thres=0.25,
keras=False,
prefix=colorstr("TensorFlow SavedModel:"),
):
# YOLOv5 TensorFlow SavedModel export
try:
import tensorflow as tf
except Exception:
check_requirements(f"tensorflow{'' if torch.cuda.is_available() else '-macos' if MACOS else '-cpu'}<=2.15.1")
import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
from models.tf import TFModel
LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...")
if tf.__version__ > "2.13.1":
helper_url = "https://github.com/ultralytics/yolov5/issues/12489"
LOGGER.info(
f"WARNING ⚠️ using Tensorflow {tf.__version__} > 2.13.1 might cause issue when exporting the model to tflite {helper_url}"
) # handling issue https://github.com/ultralytics/yolov5/issues/12489
f = str(file).replace(".pt", "_saved_model")
batch_size, ch, *imgsz = list(im.shape) # BCHW
tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
im = tf.zeros((batch_size, *imgsz, ch)) # BHWC order for TensorFlow
_ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if dynamic else batch_size)
outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
keras_model = tf.keras.Model(inputs=inputs, outputs=outputs)
keras_model.trainable = False
keras_model.summary()
if keras:
keras_model.save(f, save_format="tf")
else:
spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)
m = tf.function(lambda x: keras_model(x)) # full model
m = m.get_concrete_function(spec)
frozen_func = convert_variables_to_constants_v2(m)
tfm = tf.Module()
tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x), [spec])
tfm.__call__(im)
tf.saved_model.save(
tfm,
f,
options=tf.saved_model.SaveOptions(experimental_custom_gradients=False)
if check_version(tf.__version__, "2.6")
else tf.saved_model.SaveOptions(),
)
return f, keras_model
@try_export
def export_pb(keras_model, file, prefix=colorstr("TensorFlow GraphDef:")):
import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...")
f = file.with_suffix(".pb")
m = tf.function(lambda x: keras_model(x)) # full model
m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype))
frozen_func = convert_variables_to_constants_v2(m)
frozen_func.graph.as_graph_def()
tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False)
return f, None
@try_export
def export_tflite(
keras_model, im, file, int8, per_tensor, data, nms, agnostic_nms, prefix=colorstr("TensorFlow Lite:")
):
# YOLOv5 TensorFlow Lite export
import tensorflow as tf
LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...")
_batch_size, _ch, *imgsz = list(im.shape) # BCHW
f = str(file).replace(".pt", "-fp16.tflite")
converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
converter.target_spec.supported_types = [tf.float16]
converter.optimizations = [tf.lite.Optimize.DEFAULT]
if int8:
from models.tf import representative_dataset_gen
dataset = LoadImages(check_dataset(check_yaml(data))["train"], img_size=imgsz, auto=False)
converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100)
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.target_spec.supported_types = []
converter.inference_input_type = tf.uint8 # or tf.int8
converter.inference_output_type = tf.uint8 # or tf.int8
converter.experimental_new_quantizer = True
if per_tensor:
converter._experimental_disable_per_channel = True
f = str(file).replace(".pt", "-int8.tflite")
if nms or agnostic_nms:
converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS)
tflite_model = converter.convert()
open(f, "wb").write(tflite_model)
return f, None
@try_export
def export_edgetpu(file, prefix=colorstr("Edge TPU:")):
cmd = "edgetpu_compiler --version"
help_url = "https://coral.ai/docs/edgetpu/compiler/"
assert platform.system() == "Linux", f"export only supported on Linux. See {help_url}"
if subprocess.run(f"{cmd} > /dev/null 2>&1", shell=True).returncode != 0:
LOGGER.info(f"\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}")
sudo = subprocess.run("sudo --version >/dev/null", shell=True).returncode == 0 # sudo installed on system
for c in (
"curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -",
'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list',
"sudo apt-get update",
"sudo apt-get install edgetpu-compiler",
):
subprocess.run(c if sudo else c.replace("sudo ", ""), shell=True, check=True)
ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]
LOGGER.info(f"\n{prefix} starting export with Edge TPU compiler {ver}...")
f = str(file).replace(".pt", "-int8_edgetpu.tflite") # Edge TPU model
f_tfl = str(file).replace(".pt", "-int8.tflite") # TFLite model
subprocess.run(
[
"edgetpu_compiler",
"-s",
"-d",
"-k",
"10",
"--out_dir",
str(file.parent),
f_tfl,
],
check=True,
)
return f, None
@try_export
def export_tfjs(file, int8, prefix=colorstr("TensorFlow.js:")):
check_requirements("tensorflowjs")
import tensorflowjs as tfjs
LOGGER.info(f"\n{prefix} starting export with tensorflowjs {tfjs.__version__}...")
f = str(file).replace(".pt", "_web_model") # js dir
f_pb = file.with_suffix(".pb") # *.pb path
f_json = f"{f}/model.json" # *.json path
args = [
"tensorflowjs_converter",
"--input_format=tf_frozen_model",
"--quantize_uint8" if int8 else "",
"--output_node_names=Identity,Identity_1,Identity_2,Identity_3",
str(f_pb),
f,
]
subprocess.run([arg for arg in args if arg], check=True)
json = Path(f_json).read_text()
with open(f_json, "w") as j: # sort JSON Identity_* in ascending order
subst = re.sub(
r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, '
r'"Identity.?.?": {"name": "Identity.?.?"}, '
r'"Identity.?.?": {"name": "Identity.?.?"}, '
r'"Identity.?.?": {"name": "Identity.?.?"}}}',
r'{"outputs": {"Identity": {"name": "Identity"}, '
r'"Identity_1": {"name": "Identity_1"}, '
r'"Identity_2": {"name": "Identity_2"}, '
r'"Identity_3": {"name": "Identity_3"}}}',
json,
)
j.write(subst)
return f, None
def add_tflite_metadata(file, metadata, num_outputs):
with contextlib.suppress(ImportError):
# check_requirements('tflite_support')
from tflite_support import flatbuffers
from tflite_support import metadata as _metadata
from tflite_support import metadata_schema_py_generated as _metadata_fb
tmp_file = Path("/tmp/meta.txt")
with open(tmp_file, "w") as meta_f:
meta_f.write(str(metadata))
model_meta = _metadata_fb.ModelMetadataT()
label_file = _metadata_fb.AssociatedFileT()
label_file.name = tmp_file.name
model_meta.associatedFiles = [label_file]
subgraph = _metadata_fb.SubGraphMetadataT()
subgraph.inputTensorMetadata = [_metadata_fb.TensorMetadataT()]
subgraph.outputTensorMetadata = [_metadata_fb.TensorMetadataT()] * num_outputs
model_meta.subgraphMetadata = [subgraph]
b = flatbuffers.Builder(0)
b.Finish(model_meta.Pack(b), _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
metadata_buf = b.Output()
populator = _metadata.MetadataPopulator.with_model_file(file)
populator.load_metadata_buffer(metadata_buf)
populator.load_associated_files([str(tmp_file)])
populator.populate()
tmp_file.unlink()
def pipeline_coreml(model, im, file, names, y, mlmodel, prefix=colorstr("CoreML Pipeline:")):
import coremltools as ct
from PIL import Image
f = file.with_suffix(".mlmodel") if mlmodel else file.with_suffix(".mlpackage")
print(f"{prefix} starting pipeline with coremltools {ct.__version__}...")
_batch_size, _ch, h, w = list(im.shape) # BCHW
t = time.time()
# YOLOv5 Output shapes
spec = model.get_spec()
out0, out1 = iter(spec.description.output)
if platform.system() == "Darwin":
img = Image.new("RGB", (w, h)) # img(192 width, 320 height)
# img = torch.zeros((*opt.img_size, 3)).numpy() # img size(320,192,3) iDetection
out = model.predict({"image": img})
out0_shape, out1_shape = out[out0.name].shape, out[out1.name].shape
else: # linux and windows can not run model.predict(), get sizes from pytorch output y
s = tuple(y[0].shape)
out0_shape, out1_shape = (s[1], s[2] - 5), (s[1], 4) # (3780, 80), (3780, 4)
# Checks
nx, ny = spec.description.input[0].type.imageType.width, spec.description.input[0].type.imageType.height
_na, nc = out0_shape
# na, nc = out0.type.multiArrayType.shape # number anchors, classes
assert len(names) == nc, f"{len(names)} names found for nc={nc}" # check
# Define output shapes (missing)
out0.type.multiArrayType.shape[:] = out0_shape # (3780, 80)
out1.type.multiArrayType.shape[:] = out1_shape # (3780, 4)
# spec.neuralNetwork.preprocessing[0].featureName = '0'
# Flexible input shapes
# from coremltools.models.neural_network import flexible_shape_utils
# s = [] # shapes
# s.append(flexible_shape_utils.NeuralNetworkImageSize(320, 192))
# s.append(flexible_shape_utils.NeuralNetworkImageSize(640, 384)) # (height, width)
# flexible_shape_utils.add_enumerated_image_sizes(spec, feature_name='image', sizes=s)
# r = flexible_shape_utils.NeuralNetworkImageSizeRange() # shape ranges
# r.add_height_range((192, 640))
# r.add_width_range((192, 640))
# flexible_shape_utils.update_image_size_range(spec, feature_name='image', size_range=r)
# Print
print(spec.description)
# Model from spec
weights_dir = None if mlmodel else str(f / "Data/com.apple.CoreML/weights")
model = ct.models.MLModel(spec, weights_dir=weights_dir)
# 3. Create NMS protobuf
nms_spec = ct.proto.Model_pb2.Model()
nms_spec.specificationVersion = 5
for i in range(2):
decoder_output = model._spec.description.output[i].SerializeToString()
nms_spec.description.input.add()
nms_spec.description.input[i].ParseFromString(decoder_output)
nms_spec.description.output.add()
nms_spec.description.output[i].ParseFromString(decoder_output)
nms_spec.description.output[0].name = "confidence"
nms_spec.description.output[1].name = "coordinates"
output_sizes = [nc, 4]
for i in range(2):
ma_type = nms_spec.description.output[i].type.multiArrayType
ma_type.shapeRange.sizeRanges.add()
ma_type.shapeRange.sizeRanges[0].lowerBound = 0
ma_type.shapeRange.sizeRanges[0].upperBound = -1
ma_type.shapeRange.sizeRanges.add()
ma_type.shapeRange.sizeRanges[1].lowerBound = output_sizes[i]
ma_type.shapeRange.sizeRanges[1].upperBound = output_sizes[i]
del ma_type.shape[:]
nms = nms_spec.nonMaximumSuppression
nms.confidenceInputFeatureName = out0.name # 1x507x80
nms.coordinatesInputFeatureName = out1.name # 1x507x4
nms.confidenceOutputFeatureName = "confidence"
nms.coordinatesOutputFeatureName = "coordinates"
nms.iouThresholdInputFeatureName = "iouThreshold"
nms.confidenceThresholdInputFeatureName = "confidenceThreshold"
nms.iouThreshold = 0.45
nms.confidenceThreshold = 0.25
nms.pickTop.perClass = True
nms.stringClassLabels.vector.extend(names.values())
nms_model = ct.models.MLModel(nms_spec)
# 4. Pipeline models together
pipeline = ct.models.pipeline.Pipeline(
input_features=[
("image", ct.models.datatypes.Array(3, ny, nx)),
("iouThreshold", ct.models.datatypes.Double()),
("confidenceThreshold", ct.models.datatypes.Double()),
],
output_features=["confidence", "coordinates"],
)
pipeline.add_model(model)
pipeline.add_model(nms_model)
# Correct datatypes
pipeline.spec.description.input[0].ParseFromString(model._spec.description.input[0].SerializeToString())
pipeline.spec.description.output[0].ParseFromString(nms_model._spec.description.output[0].SerializeToString())
pipeline.spec.description.output[1].ParseFromString(nms_model._spec.description.output[1].SerializeToString())
# Update metadata
pipeline.spec.specificationVersion = 5
pipeline.spec.description.metadata.versionString = "https://github.com/ultralytics/yolov5"
pipeline.spec.description.metadata.shortDescription = "https://github.com/ultralytics/yolov5"
pipeline.spec.description.metadata.author = "glenn.jocher@ultralytics.com"
pipeline.spec.description.metadata.license = "https://github.com/ultralytics/yolov5/blob/master/LICENSE"
pipeline.spec.description.metadata.userDefined.update(
{
"classes": ",".join(names.values()),
"iou_threshold": str(nms.iouThreshold),
"confidence_threshold": str(nms.confidenceThreshold),
}
)
# Save the model
model = ct.models.MLModel(pipeline.spec, weights_dir=weights_dir)
model.input_description["image"] = "Input image"
model.input_description["iouThreshold"] = f"(optional) IOU Threshold override (default: {nms.iouThreshold})"
model.input_description["confidenceThreshold"] = (
f"(optional) Confidence Threshold override (default: {nms.confidenceThreshold})"
)
model.output_description["confidence"] = 'Boxes × Class confidence (see user-defined metadata "classes")'
model.output_description["coordinates"] = "Boxes × [x, y, width, height] (relative to image size)"
model.save(f) # pipelined
print(f"{prefix} pipeline success ({time.time() - t:.2f}s), saved as {f} ({file_size(f):.1f} MB)")
@smart_inference_mode()
def run(
data=ROOT / "data/coco128.yaml", # 'dataset.yaml path'
weights=ROOT / "yolov5s.pt", # weights path
imgsz=(640, 640), # image (height, width)
batch_size=1, # batch size
device="cpu", # cuda device, i.e. 0 or 0,1,2,3 or cpu
include=("torchscript", "onnx"), # include formats
half=False, # FP16 half-precision export
inplace=False, # set YOLOv5 Detect() inplace=True
keras=False, # use Keras
optimize=False, # TorchScript: optimize for mobile
int8=False, # CoreML/TF INT8 quantization
per_tensor=False, # TF per tensor quantization
dynamic=False, # ONNX/TF/TensorRT: dynamic axes
cache="", # TensorRT: timing cache path
simplify=False, # ONNX: simplify model
mlmodel=False, # CoreML: Export in *.mlmodel format
opset=12, # ONNX: opset version
verbose=False, # TensorRT: verbose log
workspace=4, # TensorRT: workspace size (GB)
nms=False, # TF: add NMS to model
agnostic_nms=False, # TF: add agnostic NMS to model
topk_per_class=100, # TF.js NMS: topk per class to keep
topk_all=100, # TF.js NMS: topk for all classes to keep
iou_thres=0.45, # TF.js NMS: IoU threshold
conf_thres=0.25, # TF.js NMS: confidence threshold
):
t = time.time()
include = [x.lower() for x in include] # to lowercase
fmts = tuple(export_formats()["Argument"][1:]) # --include arguments
flags = [x in include for x in fmts]
assert sum(flags) == len(include), f"ERROR: Invalid --include {include}, valid --include arguments are {fmts}"
jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle = flags # export booleans
file = Path(url2file(weights) if str(weights).startswith(("http:/", "https:/")) else weights) # PyTorch weights
# Load PyTorch model
device = select_device(device)
if half:
assert device.type != "cpu" or coreml, "--half only compatible with GPU export, i.e. use --device 0"
assert not dynamic, "--half not compatible with --dynamic, i.e. use either --half or --dynamic but not both"
model = attempt_load(weights, device=device, inplace=True, fuse=True) # load FP32 model
# Checks
imgsz *= 2 if len(imgsz) == 1 else 1 # expand
if optimize:
assert device.type == "cpu", "--optimize not compatible with cuda devices, i.e. use --device cpu"
# Input
gs = int(max(model.stride)) # grid size (max stride)
imgsz = [check_img_size(x, gs) for x in imgsz] # verify img_size are gs-multiples
ch = next(model.parameters()).size(1) # require input image channels
im = torch.zeros(batch_size, ch, *imgsz).to(device) # image size(1,3,320,192) BCHW iDetection
# Update model
model.eval()
for k, m in model.named_modules():
if isinstance(m, Detect):
m.inplace = inplace
m.dynamic = dynamic
m.export = True
for _ in range(2):
y = model(im) # dry runs
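# the warmup passes let Detect() rebuild its grids for this image size, so y reflects the final export shapes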
if half and not coreml:
im, model = im.half(), model.half() # to FP16
shape = tuple((y[0] if isinstance(y, tuple) else y).shape) # model output shape
metadata = {"stride": int(max(model.stride)), "names": model.names} # model metadata
LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)")
# Exports
f = [""] * len(fmts) # exported filenames
warnings.filterwarnings(action="ignore", category=torch.jit.TracerWarning) # suppress TracerWarning
if jit: # TorchScript
f[0], _ = export_torchscript(model, im, file, optimize)
if engine: # TensorRT required before ONNX
f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose, cache)
if onnx or xml: # OpenVINO requires ONNX
f[2], _ = export_onnx(model, im, file, opset, dynamic, simplify)
if xml: # OpenVINO
f[3], _ = export_openvino(file, metadata, half, int8, data)
if coreml: # CoreML
f[4], ct_model = export_coreml(model, im, file, int8, half, nms, mlmodel)
if nms:
pipeline_coreml(ct_model, im, file, model.names, y, mlmodel)
if any((saved_model, pb, tflite, edgetpu, tfjs)): # TensorFlow formats
assert not tflite or not tfjs, "TFLite and TF.js models must be exported separately, please pass only one type."
assert not isinstance(model, ClassificationModel), "ClassificationModel export to TF formats not yet supported."
f[5], s_model = export_saved_model(
model.cpu(),
im,
file,
dynamic,
tf_nms=nms or agnostic_nms or tfjs,
agnostic_nms=agnostic_nms or tfjs,
topk_per_class=topk_per_class,
topk_all=topk_all,
iou_thres=iou_thres,
conf_thres=conf_thres,
keras=keras,
)
if pb or tfjs: # pb prerequisite to tfjs
f[6], _ = export_pb(s_model, file)
if tflite or edgetpu:
f[7], _ = export_tflite(
s_model, im, file, int8 or edgetpu, per_tensor, data=data, nms=nms, agnostic_nms=agnostic_nms
)
if edgetpu:
f[8], _ = export_edgetpu(file)
add_tflite_metadata(f[8] or f[7], metadata, num_outputs=len(s_model.outputs))
if tfjs:
f[9], _ = export_tfjs(file, int8)
if paddle: # PaddlePaddle
f[10], _ = export_paddle(model, im, file, metadata)
# Finish
f = [str(x) for x in f if x] # filter out '' and None
if any(f):
cls, det, seg = (isinstance(model, x) for x in (ClassificationModel, DetectionModel, SegmentationModel)) # type
det &= not seg # segmentation models inherit from SegmentationModel(DetectionModel)
dir = Path("segment" if seg else "classify" if cls else "")
h = "--half" if half else "" # --half FP16 inference arg
s = (
"# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference"
if cls
else "# WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference"
if seg
else ""
)
LOGGER.info(
f"\nExport complete ({time.time() - t:.1f}s)"
f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
f"\nDetect: python {dir / ('detect.py' if det else 'predict.py')} --weights {f[-1]} {h}"
f"\nValidate: python {dir / 'val.py'} --weights {f[-1]} {h}"
f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}') {s}"
f"\nVisualize: https://netron.app"
)
return f # return list of exported files/dirs
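# Illustrative programmatic use (assumes this file is importable as `export`):
#   from export import run
#   run(weights="yolov5s.pt", include=("onnx",), imgsz=(640, 640), opset=12)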
def parse_opt(known=False):
parser = argparse.ArgumentParser()
parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="dataset.yaml path")
parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s.pt", help="model.pt path(s)")
parser.add_argument("--imgsz", "--img", "--img-size", nargs="+", type=int, default=[640, 640], help="image (h, w)")
parser.add_argument("--batch-size", type=int, default=1, help="batch size")
parser.add_argument("--device", default="cpu", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
parser.add_argument("--half", action="store_true", help="FP16 half-precision export")
parser.add_argument("--inplace", action="store_true", help="set YOLOv5 Detect() inplace=True")
parser.add_argument("--keras", action="store_true", help="TF: use Keras")
parser.add_argument("--optimize", action="store_true", help="TorchScript: optimize for mobile")
parser.add_argument("--int8", action="store_true", help="CoreML/TF/OpenVINO INT8 quantization")
parser.add_argument("--per-tensor", action="store_true", help="TF per-tensor quantization")
parser.add_argument("--dynamic", action="store_true", help="ONNX/TF/TensorRT: dynamic axes")
parser.add_argument("--cache", type=str, default="", help="TensorRT: timing cache file path")
parser.add_argument("--simplify", action="store_true", help="ONNX: simplify model")
parser.add_argument("--mlmodel", action="store_true", help="CoreML: Export in *.mlmodel format")
parser.add_argument("--opset", type=int, default=17, help="ONNX: opset version")
parser.add_argument("--verbose", action="store_true", help="TensorRT: verbose log")
parser.add_argument("--workspace", type=int, default=4, help="TensorRT: workspace size (GB)")
parser.add_argument("--nms", action="store_true", help="TF: add NMS to model")
parser.add_argument("--agnostic-nms", action="store_true", help="TF: add agnostic NMS to model")
parser.add_argument("--topk-per-class", type=int, default=100, help="TF.js NMS: topk per class to keep")
parser.add_argument("--topk-all", type=int, default=100, help="TF.js NMS: topk for all classes to keep")
parser.add_argument("--iou-thres", type=float, default=0.45, help="TF.js NMS: IoU threshold")
parser.add_argument("--conf-thres", type=float, default=0.25, help="TF.js NMS: confidence threshold")
parser.add_argument(
"--include",
nargs="+",
default=["torchscript"],
help="torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle",
)
opt = parser.parse_known_args()[0] if known else parser.parse_args()
print_args(vars(opt))
return opt
def main(opt):
for opt.weights in opt.weights if isinstance(opt.weights, list) else [opt.weights]:
run(**vars(opt))
if __name__ == "__main__":
opt = parse_opt()
main(opt) | --- +++ @@ -1,4 +1,48 @@ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""
+Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit.
+
+Format | `export.py --include` | Model
+--- | --- | ---
+PyTorch | - | yolov5s.pt
+TorchScript | `torchscript` | yolov5s.torchscript
+ONNX | `onnx` | yolov5s.onnx
+OpenVINO | `openvino` | yolov5s_openvino_model/
+TensorRT | `engine` | yolov5s.engine
+CoreML | `coreml` | yolov5s.mlmodel
+TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/
+TensorFlow GraphDef | `pb` | yolov5s.pb
+TensorFlow Lite | `tflite` | yolov5s.tflite
+TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite
+TensorFlow.js | `tfjs` | yolov5s_web_model/
+PaddlePaddle | `paddle` | yolov5s_paddle_model/
+
+Requirements:
+ $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU
+ $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU
+
+Usage:
+ $ python export.py --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ...
+
+Inference:
+ $ python detect.py --weights yolov5s.pt # PyTorch
+ yolov5s.torchscript # TorchScript
+ yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn
+ yolov5s_openvino_model # OpenVINO
+ yolov5s.engine # TensorRT
+ yolov5s.mlmodel # CoreML (macOS-only)
+ yolov5s_saved_model # TensorFlow SavedModel
+ yolov5s.pb # TensorFlow GraphDef
+ yolov5s.tflite # TensorFlow Lite
+ yolov5s_edgetpu.tflite # TensorFlow Edge TPU
+ yolov5s_paddle_model # PaddlePaddle
+
+TensorFlow.js:
+ $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example
+ $ npm install
+ $ ln -s ../../yolov5/yolov5s_web_model public/yolov5s_web_model
+ $ npm start
+"""
import argparse
import contextlib
@@ -47,8 +91,23 @@
class iOSModel(torch.nn.Module):
+ """An iOS-compatible wrapper for YOLOv5 models that normalizes input images based on their dimensions."""
def __init__(self, model, im):
+ """Initializes an iOS compatible model with normalization based on image dimensions.
+
+ Args:
+ model (torch.nn.Module): The PyTorch model to be adapted for iOS compatibility.
+ im (torch.Tensor): An input tensor representing a batch of images with shape (B, C, H, W).
+
+ Returns:
+ None: This method does not return any value.
+
+ Notes:
+ This initializer configures normalization based on the input image dimensions, which is critical for
+ ensuring the model's compatibility and proper functionality on iOS devices. The normalization step
+ involves dividing by the image width if the image is square; otherwise, additional conditions might apply.
+ """
super().__init__()
_b, _c, h, w = im.shape # batch, channel, height, width
self.model = model
@@ -61,11 +120,48 @@ # self.normalize = torch.tensor([1. / w, 1. / h, 1. / w, 1. / h]).expand(np, 4) # explicit (faster, larger)
def forward(self, x):
+ """Run a forward pass on the input tensor, returning class confidences and normalized coordinates.
+
+ Args:
+ x (torch.Tensor): Input tensor containing the image data with shape (batch, channels, height, width).
+
+ Returns:
+ torch.Tensor: Concatenated tensor with normalized coordinates (xywh), confidence scores (conf), and class
+ probabilities (cls), having shape (N, 4 + 1 + C), where N is the number of predictions, and C is the
+ number of classes.
+
+ Examples:
+ ```python
+ model = iOSModel(pretrained_model, input_image)
+ output = model.forward(torch_input_tensor)
+ ```
+ """
xywh, conf, cls = self.model(x)[0].squeeze().split((4, 1, self.nc), 1)
return cls * conf, xywh * self.normalize # confidence (3780, 80), coordinates (3780, 4)
def export_formats():
+ r"""Returns a DataFrame of supported YOLOv5 model export formats and their properties.
+
+ Returns:
+ pandas.DataFrame: A DataFrame containing supported export formats and their properties. The DataFrame includes
+ columns for format name, CLI argument suffix, file extension or directory name, and boolean flags indicating
+ if the export format supports training and detection.
+
+ Examples:
+ ```python
+ formats = export_formats()
+ print(f"Supported export formats:\n{formats}")
+ ```
+
+ Notes:
+ The DataFrame contains the following columns:
+ - Format: The name of the model format (e.g., PyTorch, TorchScript, ONNX, etc.).
+ - Include Argument: The argument to use with the export script to include this format.
+ - File Suffix: File extension or directory name associated with the format.
+ - Supports Training: Whether the format supports training.
+ - Supports Detection: Whether the format supports detection.
+ """
x = [
["PyTorch", "-", ".pt", True, True],
["TorchScript", "torchscript", ".torchscript", True, True],
@@ -84,9 +180,34 @@
def try_export(inner_func):
+ """Log success or failure, execution time, and file size for YOLOv5 model export functions wrapped with @try_export.
+
+ Args:
+ inner_func (Callable): The model export function to be wrapped by the decorator.
+
+ Returns:
+ Callable: The wrapped function that logs execution details. When executed, this wrapper function returns either:
+ - Tuple (str | torch.nn.Module): On success — the file path of the exported model and the model instance.
+ - Tuple (None, None): On failure — None values indicating export failure.
+
+ Examples:
+ ```python
+ @try_export
+ def export_onnx(model, filepath):
+ # implementation here
+ pass
+
+ exported_file, exported_model = export_onnx(yolo_model, 'path/to/save/model.onnx')
+ ```
+
+ Notes:
+ For additional requirements and model export formats, refer to the
+ [Ultralytics YOLOv5 GitHub repository](https://github.com/ultralytics/ultralytics).
+ """
inner_args = get_default_args(inner_func)
def outer_func(*args, **kwargs):
+ """Logs success/failure and execution details of model export functions wrapped with @try_export decorator."""
prefix = inner_args["prefix"]
try:
with Profile() as dt:
@@ -102,6 +223,46 @@
@try_export
def export_torchscript(model, im, file, optimize, prefix=colorstr("TorchScript:")):
+ """Export a YOLOv5 model to the TorchScript format.
+
+ Args:
+ model (torch.nn.Module): The YOLOv5 model to be exported.
+ im (torch.Tensor): Example input tensor to be used for tracing the TorchScript model.
+ file (Path): File path where the exported TorchScript model will be saved.
+ optimize (bool): If True, applies optimizations for mobile deployment.
+ prefix (str): Optional prefix for log messages. Default is 'TorchScript:'.
+
+ Returns:
+ (str | None, torch.jit.ScriptModule | None): A tuple containing the file path of the exported model (as a
+ string) and the TorchScript model (as a torch.jit.ScriptModule). If the export fails, both elements of the
+ tuple will be None.
+
+ Examples:
+ ```python
+ from pathlib import Path
+ import torch
+ from models.experimental import attempt_load
+ from utils.torch_utils import select_device
+
+ # Load model
+ weights = 'yolov5s.pt'
+ device = select_device('')
+ model = attempt_load(weights, device=device)
+
+ # Example input tensor
+ im = torch.zeros(1, 3, 640, 640).to(device)
+
+ # Export model
+ file = Path('yolov5s.torchscript')
+ export_torchscript(model, im, file, optimize=False)
+ ```
+
+ Notes:
+ - This function uses tracing to create the TorchScript model.
+ - Metadata, including the input image shape, model stride, and class names, is saved in an extra file (`config.txt`)
+ within the TorchScript model package.
+ - For mobile optimization, refer to the PyTorch tutorial: https://pytorch.org/tutorials/recipes/mobile_interpreter.html
+ """
LOGGER.info(f"\n{prefix} starting export with torch {torch.__version__}...")
f = file.with_suffix(".torchscript")
@@ -117,6 +278,50 @@
@try_export
def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr("ONNX:")):
+ """Export a YOLOv5 model to ONNX format with dynamic axes support and optional model simplification.
+
+ Args:
+ model (torch.nn.Module): The YOLOv5 model to be exported.
+ im (torch.Tensor): A sample input tensor for model tracing, usually the shape is (1, 3, height, width).
+ file (pathlib.Path | str): The output file path where the ONNX model will be saved.
+ opset (int): The ONNX opset version to use for export.
+ dynamic (bool): If True, enables dynamic axes for batch, height, and width dimensions.
+ simplify (bool): If True, applies ONNX model simplification for optimization.
+ prefix (str): A prefix string for logging messages, defaults to 'ONNX:'.
+
+ Returns:
+ tuple[str, onnx.ModelProto]: The path to the saved ONNX model file and the loaded ONNX model instance.
+
+ Raises:
+ ImportError: If required libraries for export (e.g., 'onnx', 'onnx-simplifier') are not installed.
+ AssertionError: If the simplification check fails.
+
+ Examples:
+ ```python
+ from pathlib import Path
+ import torch
+ from models.experimental import attempt_load
+ from utils.torch_utils import select_device
+
+ # Load model
+ weights = 'yolov5s.pt'
+ device = select_device('')
+ model = attempt_load(weights, device=device)
+
+ # Example input tensor
+ im = torch.zeros(1, 3, 640, 640).to(device)
+
+ # Export model
+ file_path = Path('yolov5s.onnx')
+ export_onnx(model, im, file_path, opset=12, dynamic=True, simplify=True)
+ ```
+
+ Notes:
+ The required packages for this function can be installed via:
+ ```
+ pip install onnx onnx-simplifier onnxruntime onnxruntime-gpu
+ ```
+ """
check_requirements(("onnx>=1.12.0", "onnxscript"))
import onnx
@@ -172,6 +377,39 @@
@try_export
def export_openvino(file, metadata, half, int8, data, prefix=colorstr("OpenVINO:")):
+ """Export a YOLOv5 model to OpenVINO format with optional FP16 and INT8 quantization.
+
+ Args:
+ file (Path): Path to the output file where the OpenVINO model will be saved.
+ metadata (dict): Dictionary including model metadata such as names and strides.
+ half (bool): If True, export the model with FP16 precision.
+ int8 (bool): If True, export the model with INT8 quantization.
+ data (str): Path to the dataset YAML file required for INT8 quantization.
+ prefix (str): Prefix string for logging purposes (default is "OpenVINO:").
+
+ Returns:
+ (str, openvino.runtime.Model | None): The OpenVINO model file path and openvino.runtime.Model object if export
+ is successful; otherwise, None.
+
+ Examples:
+ ```python
+ from pathlib import Path
+ from ultralytics import YOLOv5
+
+ model = YOLOv5('yolov5s.pt')
+ export_openvino(Path('yolov5s.onnx'), metadata={'names': model.names, 'stride': model.stride}, half=True,
+ int8=False, data='data.yaml')
+ ```
+
+ This will export the YOLOv5 model to OpenVINO with FP16 precision but without INT8 quantization, saving it to
+ the specified file path.
+
+ Notes:
+ - Requires `openvino-dev` package version 2023.0 or higher. Install with:
+ `$ pip install openvino-dev>=2023.0`
+ - For INT8 quantization, also requires `nncf` library version 2.5.0 or higher. Install with:
+ `$ pip install nncf>=2.5.0`
+ """
check_requirements("openvino-dev>=2023.0") # requires openvino-dev: https://pypi.org/project/openvino-dev/
import openvino.runtime as ov
from openvino.tools import mo
@@ -191,6 +429,7 @@ from utils.dataloaders import create_dataloader
def gen_dataloader(yaml_path, task="train", imgsz=640, workers=4):
+ """Generates a DataLoader for model training or validation based on the given YAML dataset configuration."""
data_yaml = check_yaml(yaml_path)
data = check_dataset(data_yaml)
dataloader = create_dataloader(
@@ -199,6 +438,16 @@ return dataloader
def transform_fn(data_item):
+ """Quantization transform function.
+
+ Extracts and preprocesses input data from the dataloader item for quantization.
+
+ Args:
+ data_item: Tuple with data item produced by DataLoader during iteration
+
+ Returns:
+ input_tensor: Input data for quantization
+ """
assert data_item[0].dtype == torch.uint8, "input image must be uint8 for the quantization preprocessing"
img = data_item[0].numpy().astype(np.float32) # uint8 to fp16/32
@@ -216,6 +465,40 @@
@try_export
def export_paddle(model, im, file, metadata, prefix=colorstr("PaddlePaddle:")):
+ """Export a YOLOv5 PyTorch model to PaddlePaddle format using X2Paddle, saving the converted model and metadata.
+
+ Args:
+ model (torch.nn.Module): The YOLOv5 model to be exported.
+ im (torch.Tensor): Input tensor used for model tracing during export.
+ file (pathlib.Path): Path to the source file to be converted.
+ metadata (dict): Additional metadata to be saved alongside the model.
+ prefix (str): Prefix for logging information.
+
+ Returns:
+ tuple (str, None): A tuple where the first element is the path to the saved PaddlePaddle model, and the second
+ element is None.
+
+ Examples:
+ ```python
+ from pathlib import Path
+ import torch
+
+ # Assume 'model' is a pre-trained YOLOv5 model and 'im' is an example input tensor
+ model = ... # Load your model here
+ im = torch.randn((1, 3, 640, 640)) # Dummy input tensor for tracing
+ file = Path("yolov5s.pt")
+ metadata = {"stride": 32, "names": ["person", "bicycle", "car", "motorbike"]}
+
+ export_paddle(model=model, im=im, file=file, metadata=metadata)
+ ```
+
+ Notes:
+ Ensure that `paddlepaddle` and `x2paddle` are installed, as these are required for the export function. You can
+ install them via pip:
+ ```
+ $ pip install paddlepaddle x2paddle
+ ```
+ """
check_requirements(("paddlepaddle>=3.0.0", "x2paddle"))
import x2paddle
from x2paddle.convert import pytorch2paddle
@@ -230,6 +513,36 @@
@try_export
def export_coreml(model, im, file, int8, half, nms, mlmodel, prefix=colorstr("CoreML:")):
+ """Export a YOLOv5 model to CoreML format with optional NMS, INT8, and FP16 support.
+
+ Args:
+ model (torch.nn.Module): The YOLOv5 model to be exported.
+ im (torch.Tensor): Example input tensor to trace the model.
+ file (pathlib.Path): Path object where the CoreML model will be saved.
+ int8 (bool): Flag indicating whether to use INT8 quantization (default is False).
+ half (bool): Flag indicating whether to use FP16 quantization (default is False).
+ nms (bool): Flag indicating whether to include Non-Maximum Suppression (default is False).
+ mlmodel (bool): Flag indicating whether to export as older *.mlmodel format (default is False).
+ prefix (str): Prefix string for logging purposes (default is 'CoreML:').
+
+ Returns:
+ tuple[pathlib.Path | None, None]: The path to the saved CoreML model file, or (None, None) if there is an error.
+
+ Examples:
+ ```python
+ from pathlib import Path
+ import torch
+ from models.yolo import Model
+ model = Model(cfg, ch=3, nc=80)
+ im = torch.randn(1, 3, 640, 640)
+ file = Path("yolov5s_coreml")
+ export_coreml(model, im, file, int8=False, half=False, nms=True, mlmodel=False)
+ ```
+
+ Notes:
+ The exported CoreML model will be saved with a .mlmodel extension.
+ Quantization is supported only on macOS.
+ """
check_requirements("coremltools")
import coremltools as ct
@@ -271,6 +584,40 @@ def export_engine(
model, im, file, half, dynamic, simplify, workspace=4, verbose=False, cache="", prefix=colorstr("TensorRT:")
):
+ """Export a YOLOv5 model to TensorRT engine format, requiring GPU and TensorRT>=7.0.0.
+
+ Args:
+ model (torch.nn.Module): YOLOv5 model to be exported.
+ im (torch.Tensor): Input tensor of shape (B, C, H, W).
+ file (pathlib.Path): Path to save the exported model.
+ half (bool): Set to True to export with FP16 precision.
+ dynamic (bool): Set to True to enable dynamic input shapes.
+ simplify (bool): Set to True to simplify the model during export.
+ workspace (int): Workspace size in GB (default is 4).
+ verbose (bool): Set to True for verbose logging output.
+ cache (str): Path to save the TensorRT timing cache.
+ prefix (str): Log message prefix.
+
+ Returns:
+ (pathlib.Path, None): Tuple containing the path to the exported model and None.
+
+ Raises:
+ AssertionError: If executed on CPU instead of GPU.
+ RuntimeError: If there is a failure in parsing the ONNX file.
+
+ Examples:
+ ```python
+ from ultralytics import YOLOv5
+ import torch
+ from pathlib import Path
+
+ model = YOLOv5('yolov5s.pt') # Load a pre-trained YOLOv5 model
+ input_tensor = torch.randn(1, 3, 640, 640).cuda() # example input tensor on GPU
+ export_path = Path('yolov5s.engine') # export destination
+
+ export_engine(model.model, input_tensor, export_path, half=True, dynamic=True, simplify=True, workspace=8, verbose=True)
+ ```
+ """
assert im.device.type != "cpu", "export running on CPU but must be on GPU, i.e. `python export.py --device 0`"
try:
import tensorrt as trt
@@ -357,6 +704,39 @@ keras=False,
prefix=colorstr("TensorFlow SavedModel:"),
):
+ """Export a YOLOv5 model to the TensorFlow SavedModel format, supporting dynamic axes and non-maximum suppression
+ (NMS).
+
+ Args:
+ model (torch.nn.Module): The PyTorch model to convert.
+ im (torch.Tensor): Sample input tensor with shape (B, C, H, W) for tracing.
+ file (pathlib.Path): File path to save the exported model.
+ dynamic (bool): Flag to indicate whether dynamic axes should be used.
+ tf_nms (bool, optional): Enable TensorFlow non-maximum suppression (NMS). Default is False.
+ agnostic_nms (bool, optional): Enable class-agnostic NMS. Default is False.
+ topk_per_class (int, optional): Top K detections per class to keep before applying NMS. Default is 100.
+ topk_all (int, optional): Top K detections across all classes to keep before applying NMS. Default is 100.
+ iou_thres (float, optional): IoU threshold for NMS. Default is 0.45.
+ conf_thres (float, optional): Confidence threshold for detections. Default is 0.25.
+ keras (bool, optional): Save the model in Keras format if True. Default is False.
+ prefix (str, optional): Prefix for logging messages. Default is "TensorFlow SavedModel:".
+
+ Returns:
+ tuple[str, tf.keras.Model | None]: A tuple containing the path to the saved model folder and the Keras model instance,
+ or None if TensorFlow export fails.
+
+ Examples:
+ ```python
+ model, im = ... # Initialize your PyTorch model and input tensor
+ export_saved_model(model, im, Path("yolov5_saved_model"), dynamic=True)
+ ```
+
+ Notes:
+ - The method supports TensorFlow versions up to 2.15.1.
+ - TensorFlow NMS may not be supported in older TensorFlow versions.
+ - If the TensorFlow version exceeds 2.13.1, it might cause issues when exporting to TFLite.
+ Refer to: https://github.com/ultralytics/yolov5/issues/12489
+ """
# YOLOv5 TensorFlow SavedModel export
try:
import tensorflow as tf
@@ -407,6 +787,27 @@
@try_export
def export_pb(keras_model, file, prefix=colorstr("TensorFlow GraphDef:")):
+ """Export YOLOv5 model to TensorFlow GraphDef (*.pb) format.
+
+ Args:
+ keras_model (tf.keras.Model): The Keras model to be converted.
+ file (Path): The output file path where the GraphDef will be saved.
+ prefix (str): Optional prefix string; defaults to a colored string indicating TensorFlow GraphDef export status.
+
+ Returns:
+ Tuple[Path, None]: The file path where the GraphDef model was saved and a None placeholder.
+
+ Examples:
+ ```python
+ from pathlib import Path
+ keras_model = ... # assume an existing Keras model
+ file = Path("model.pb")
+ export_pb(keras_model, file)
+ ```
+
+ Notes:
+ For more details, refer to the guide on frozen graphs: https://github.com/leimao/Frozen_Graph_TensorFlow
+ """
import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
@@ -426,6 +827,45 @@ keras_model, im, file, int8, per_tensor, data, nms, agnostic_nms, prefix=colorstr("TensorFlow Lite:")
):
# YOLOv5 TensorFlow Lite export
+ """Export a YOLOv5 model to TensorFlow Lite format with optional INT8 quantization and NMS support.
+
+ Args:
+ keras_model (tf.keras.Model): The Keras model to be exported.
+ im (torch.Tensor): An input image tensor for normalization and model tracing.
+ file (Path): The file path to save the TensorFlow Lite model.
+ int8 (bool): Enables INT8 quantization if True.
+ per_tensor (bool): If True, disables per-channel quantization.
+ data (str): Path to the dataset for representative dataset generation in INT8 quantization.
+ nms (bool): Enables Non-Maximum Suppression (NMS) if True.
+ agnostic_nms (bool): Enables class-agnostic NMS if True.
+ prefix (str): Prefix for log messages.
+
+ Returns:
+ (str | None, tflite.Model | None): The file path of the exported TFLite model and the TFLite model instance, or
+ None if the export failed.
+
+ Examples:
+ ```python
+ from pathlib import Path
+ import torch
+ import tensorflow as tf
+
+ # Load a Keras model wrapping a YOLOv5 model
+ keras_model = tf.keras.models.load_model('path/to/keras_model.h5')
+
+ # Example input tensor
+ im = torch.zeros(1, 3, 640, 640)
+
+ # Export the model
+ export_tflite(keras_model, im, Path('model.tflite'), int8=True, per_tensor=False, data='data/coco.yaml',
+ nms=True, agnostic_nms=False)
+ ```
+
+ Notes:
+ - Ensure TensorFlow and TensorFlow Lite dependencies are installed.
+ - INT8 quantization requires a representative dataset to achieve optimal accuracy.
+ - TensorFlow Lite models are suitable for efficient inference on mobile and edge devices.
+ """
import tensorflow as tf
LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...")
@@ -459,6 +899,30 @@
@try_export
def export_edgetpu(file, prefix=colorstr("Edge TPU:")):
+ """Exports a YOLOv5 model to Edge TPU compatible TFLite format; requires Linux and Edge TPU compiler.
+
+ Args:
+ file (Path): Path to the YOLOv5 model file to be exported (.pt format).
+ prefix (str, optional): Prefix for logging messages. Defaults to colorstr("Edge TPU:").
+
+ Returns:
+ tuple[Path, None]: Path to the exported Edge TPU compatible TFLite model, None.
+
+ Raises:
+ AssertionError: If the system is not Linux.
+ subprocess.CalledProcessError: If any subprocess call to install or run the Edge TPU compiler fails.
+
+ Examples:
+ ```python
+ from pathlib import Path
+ file = Path('yolov5s.pt')
+ export_edgetpu(file)
+ ```
+
+ Notes:
+ To use this function, ensure you have the Edge TPU compiler installed on your Linux system. You can find
+ installation instructions here: https://coral.ai/docs/edgetpu/compiler/.
+ """
cmd = "edgetpu_compiler --version"
help_url = "https://coral.ai/docs/edgetpu/compiler/"
assert platform.system() == "Linux", f"export only supported on Linux. See {help_url}"
@@ -496,6 +960,31 @@
@try_export
def export_tfjs(file, int8, prefix=colorstr("TensorFlow.js:")):
+ """Convert a YOLOv5 model to TensorFlow.js format with optional uint8 quantization.
+
+ Args:
+ file (Path): Path to the YOLOv5 model file to be converted, typically having a ".pt" or ".onnx" extension.
+ int8 (bool): If True, applies uint8 quantization during the conversion process.
+ prefix (str): Optional prefix for logging messages, default is 'TensorFlow.js:' with color formatting.
+
+ Returns:
+ (str, None): Tuple containing the output directory path as a string and None.
+
+ Examples:
+ ```python
+ from pathlib import Path
+ file = Path('yolov5.onnx')
+ export_tfjs(file, int8=False)
+ ```
+
+ Notes:
+ - This function requires the `tensorflowjs` package. Install it using:
+ ```shell
+ pip install tensorflowjs
+ ```
+ - The converted TensorFlow.js model will be saved in a directory with the "_web_model" suffix appended to the original file name.
+ - The conversion involves running shell commands that invoke the TensorFlow.js converter tool.
+ """
check_requirements("tensorflowjs")
import tensorflowjs as tfjs
@@ -532,6 +1021,35 @@
def add_tflite_metadata(file, metadata, num_outputs):
+ """Adds metadata to a TensorFlow Lite (TFLite) model file, supporting multiple outputs according to TensorFlow
+ guidelines.
+
+ Args:
+ file (str): Path to the TFLite model file to which metadata will be added.
+ metadata (dict): Metadata information to be added to the model, structured as required by the TFLite metadata
+ schema. Common keys include "name", "description", "version", "author", and "license".
+ num_outputs (int): Number of output tensors the model has, used to configure the metadata properly.
+
+ Returns:
+ None
+
+ Examples:
+ ```python
+ metadata = {
+ "name": "yolov5",
+ "description": "YOLOv5 object detection model",
+ "version": "1.0",
+ "author": "Ultralytics",
+ "license": "Apache License 2.0"
+ }
+ add_tflite_metadata("model.tflite", metadata, num_outputs=4)
+ ```
+
+ Notes:
+ TFLite metadata can include information such as model name, version, author, and other relevant details.
+ For more details on the structure of the metadata, refer to TensorFlow Lite
+ [metadata guidelines](https://ai.google.dev/edge/litert/models/metadata).
+ """
with contextlib.suppress(ImportError):
# check_requirements('tflite_support')
from tflite_support import flatbuffers
@@ -564,6 +1082,47 @@
def pipeline_coreml(model, im, file, names, y, mlmodel, prefix=colorstr("CoreML Pipeline:")):
+ """Convert a PyTorch YOLOv5 model to CoreML format with Non-Maximum Suppression (NMS), handling different
+ input/output shapes, and saving the model.
+
+ Args:
+ model (torch.nn.Module): The YOLOv5 PyTorch model to be converted.
+ im (torch.Tensor): Example input tensor with shape (N, C, H, W), where N is the batch size, C is the number of
+ channels, H is the height, and W is the width.
+ file (Path): Path to save the converted CoreML model.
+ names (dict[int, str]): Dictionary mapping class indices to class names.
+ y (torch.Tensor): Output tensor from the PyTorch model's forward pass.
+ mlmodel (bool): Flag indicating whether to export as older *.mlmodel format (default is False).
+ prefix (str): Custom prefix for logging messages.
+
+ Returns:
+ None: The pipelined CoreML model is saved in place to the given file path.
+
+ Raises:
+ AssertionError: If the number of class names does not match the number of classes in the model.
+
+ Examples:
+ ```python
+ from ultralytics.utils.patches import torch_load
+ from pathlib import Path
+ import torch
+
+ model = torch_load('yolov5s.pt') # Load YOLOv5 model
+ im = torch.zeros((1, 3, 640, 640)) # Example input tensor
+
+ names = {0: "person", 1: "bicycle", 2: "car", ...} # Define class names
+
+ y = model(im) # Perform forward pass to get model output
+
+ output_file = Path('yolov5s.mlmodel') # Convert to CoreML
+ pipeline_coreml(model, im, output_file, names, y, mlmodel=False)
+ ```
+
+ Notes:
+ - This function requires `coremltools` to be installed.
+ - Running this function on a non-macOS environment might not support some features.
+ - Flexible input shapes and additional NMS options can be customized within the function.
+ """
import coremltools as ct
from PIL import Image
@@ -723,6 +1282,73 @@ iou_thres=0.45, # TF.js NMS: IoU threshold
conf_thres=0.25, # TF.js NMS: confidence threshold
):
+ """Exports a YOLOv5 model to specified formats including ONNX, TensorRT, CoreML, and TensorFlow.
+
+ Args:
+ data (str | Path): Path to the dataset YAML configuration file. Default is 'data/coco128.yaml'.
+ weights (str | Path): Path to the pretrained model weights file. Default is 'yolov5s.pt'.
+ imgsz (tuple): Image size as (height, width). Default is (640, 640).
+ batch_size (int): Batch size for exporting the model. Default is 1.
+ device (str): Device to run the export on, e.g., '0' for GPU, 'cpu' for CPU. Default is 'cpu'.
+ include (tuple): Formats to include in the export. Default is ('torchscript', 'onnx').
+ half (bool): Flag to export model with FP16 half-precision. Default is False.
+ inplace (bool): Set the YOLOv5 Detect() module inplace=True. Default is False.
+ keras (bool): Flag to use Keras for TensorFlow SavedModel export. Default is False.
+ optimize (bool): Optimize TorchScript model for mobile deployment. Default is False.
+ int8 (bool): Apply INT8 quantization for CoreML or TensorFlow models. Default is False.
+ per_tensor (bool): Apply per tensor quantization for TensorFlow models. Default is False.
+ dynamic (bool): Enable dynamic axes for ONNX, TensorFlow, or TensorRT exports. Default is False.
+ cache (str): TensorRT timing cache path. Default is an empty string.
+ simplify (bool): Simplify the ONNX model during export. Default is False.
+ opset (int): ONNX opset version. Default is 12.
+ verbose (bool): Enable verbose logging for TensorRT export. Default is False.
+ workspace (int): TensorRT workspace size in GB. Default is 4.
+ nms (bool): Add non-maximum suppression (NMS) to the TensorFlow model. Default is False.
+ agnostic_nms (bool): Add class-agnostic NMS to the TensorFlow model. Default is False.
+ topk_per_class (int): Top-K boxes per class to keep for TensorFlow.js NMS. Default is 100.
+ topk_all (int): Top-K boxes for all classes to keep for TensorFlow.js NMS. Default is 100.
+ iou_thres (float): IoU threshold for NMS. Default is 0.45.
+ conf_thres (float): Confidence threshold for NMS. Default is 0.25.
+ mlmodel (bool): Flag to use *.mlmodel for CoreML export. Default is False.
+
+ Returns:
+ list[str]: Paths of the exported files/dirs (empty entries filtered out).
+
+ Examples:
+ ```python
+ run(
+ data="data/coco128.yaml",
+ weights="yolov5s.pt",
+ imgsz=(640, 640),
+ batch_size=1,
+ device="cpu",
+ include=("torchscript", "onnx"),
+ half=False,
+ inplace=False,
+ keras=False,
+ optimize=False,
+ int8=False,
+ per_tensor=False,
+ dynamic=False,
+ cache="",
+ simplify=False,
+ opset=12,
+ verbose=False,
+ mlmodel=False,
+ workspace=4,
+ nms=False,
+ agnostic_nms=False,
+ topk_per_class=100,
+ topk_all=100,
+ iou_thres=0.45,
+ conf_thres=0.25,
+ )
+ ```
+
+ Notes:
+ - Model export is based on the specified formats in the 'include' argument.
+ - Be cautious of combinations where certain flags are mutually exclusive, such as `--half` and `--dynamic`.
+ """
t = time.time()
include = [x.lower() for x in include] # to lowercase
fmts = tuple(export_formats()["Argument"][1:]) # --include arguments
@@ -836,6 +1462,22 @@
def parse_opt(known=False):
+ """Parse command-line options for YOLOv5 model export configurations.
+
+ Args:
+ known (bool): If True, uses `argparse.ArgumentParser.parse_known_args`; otherwise, uses
+ `argparse.ArgumentParser.parse_args`. Default is False.
+
+ Returns:
+ argparse.Namespace: Object containing parsed command-line arguments.
+
+ Examples:
+ ```python
+ opts = parse_opt()
+ print(opts.data)
+ print(opts.weights)
+ ```
+ """
parser = argparse.ArgumentParser()
parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="dataset.yaml path")
parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s.pt", help="model.pt path(s)")
@@ -873,10 +1515,11 @@
def main(opt):
+ """Run(**vars(opt)), execute the run function with parsed options."""
for opt.weights in opt.weights if isinstance(opt.weights, list) else [opt.weights]:
run(**vars(opt))
if __name__ == "__main__":
opt = parse_opt()
- main(opt)
+ main(opt)
| https://raw.githubusercontent.com/ultralytics/yolov5/HEAD/export.py |
Add missing documentation to my Python functions | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
import glob
import json
import logging
import os
import sys
from pathlib import Path
logger = logging.getLogger(__name__)
FILE = Path(__file__).resolve()
ROOT = FILE.parents[3] # YOLOv5 root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
try:
import comet_ml
# Project Configuration
config = comet_ml.config.get_config()
COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5")
except ImportError:
comet_ml = None
COMET_PROJECT_NAME = None
import PIL
import torch
import torchvision.transforms as T
import yaml
from utils.dataloaders import img2label_paths
from utils.general import check_dataset, scale_boxes, xywh2xyxy
from utils.metrics import box_iou
COMET_PREFIX = "comet://"
COMET_MODE = os.getenv("COMET_MODE", "online")
# Model Saving Settings
COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5")
# Dataset Artifact Settings
COMET_UPLOAD_DATASET = os.getenv("COMET_UPLOAD_DATASET", "false").lower() == "true"
# Evaluation Settings
COMET_LOG_CONFUSION_MATRIX = os.getenv("COMET_LOG_CONFUSION_MATRIX", "true").lower() == "true"
COMET_LOG_PREDICTIONS = os.getenv("COMET_LOG_PREDICTIONS", "true").lower() == "true"
COMET_MAX_IMAGE_UPLOADS = int(os.getenv("COMET_MAX_IMAGE_UPLOADS", 100))
# Confusion Matrix Settings
CONF_THRES = float(os.getenv("CONF_THRES", 0.001))
IOU_THRES = float(os.getenv("IOU_THRES", 0.6))
# Batch Logging Settings
COMET_LOG_BATCH_METRICS = os.getenv("COMET_LOG_BATCH_METRICS", "false").lower() == "true"
COMET_BATCH_LOGGING_INTERVAL = os.getenv("COMET_BATCH_LOGGING_INTERVAL", 1)
COMET_PREDICTION_LOGGING_INTERVAL = os.getenv("COMET_PREDICTION_LOGGING_INTERVAL", 1)
COMET_LOG_PER_CLASS_METRICS = os.getenv("COMET_LOG_PER_CLASS_METRICS", "false").lower() == "true"
RANK = int(os.getenv("RANK", -1))
to_pil = T.ToPILImage()
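# Illustrative: all COMET_* settings above are read from the environment at import time, e.g.
#   os.environ["COMET_MODE"] = "offline"          # log offline and upload later
#   os.environ["COMET_MAX_IMAGE_UPLOADS"] = "50"  # cap logged prediction images
# so they must be set before this module is imported.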
class CometLogger:
def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwargs) -> None:
self.job_type = job_type
self.opt = opt
self.hyp = hyp
# Comet Flags
self.comet_mode = COMET_MODE
self.save_model = opt.save_period > -1
self.model_name = COMET_MODEL_NAME
# Batch Logging Settings
self.log_batch_metrics = COMET_LOG_BATCH_METRICS
self.comet_log_batch_interval = COMET_BATCH_LOGGING_INTERVAL
# Dataset Artifact Settings
self.upload_dataset = self.opt.upload_dataset or COMET_UPLOAD_DATASET
self.resume = self.opt.resume
self.default_experiment_kwargs = {
"log_code": False,
"log_env_gpu": True,
"log_env_cpu": True,
"project_name": COMET_PROJECT_NAME,
} | experiment_kwargs
self.experiment = self._get_experiment(self.comet_mode, run_id)
self.experiment.set_name(self.opt.name)
self.data_dict = self.check_dataset(self.opt.data)
self.class_names = self.data_dict["names"]
self.num_classes = self.data_dict["nc"]
self.logged_images_count = 0
self.max_images = COMET_MAX_IMAGE_UPLOADS
if run_id is None:
self.experiment.log_other("Created from", "YOLOv5")
if not isinstance(self.experiment, comet_ml.OfflineExperiment):
workspace, project_name, experiment_id = self.experiment.url.split("/")[-3:]
self.experiment.log_other(
"Run Path",
f"{workspace}/{project_name}/{experiment_id}",
)
self.log_parameters(vars(opt))
self.log_parameters(self.opt.hyp)
self.log_asset_data(
self.opt.hyp,
name="hyperparameters.json",
metadata={"type": "hyp-config-file"},
)
self.log_asset(
f"{self.opt.save_dir}/opt.yaml",
metadata={"type": "opt-config-file"},
)
self.comet_log_confusion_matrix = COMET_LOG_CONFUSION_MATRIX
if hasattr(self.opt, "conf_thres"):
self.conf_thres = self.opt.conf_thres
else:
self.conf_thres = CONF_THRES
if hasattr(self.opt, "iou_thres"):
self.iou_thres = self.opt.iou_thres
else:
self.iou_thres = IOU_THRES
self.log_parameters({"val_iou_threshold": self.iou_thres, "val_conf_threshold": self.conf_thres})
self.comet_log_predictions = COMET_LOG_PREDICTIONS
if self.opt.bbox_interval == -1:
self.comet_log_prediction_interval = 1 if self.opt.epochs < 10 else self.opt.epochs // 10
else:
self.comet_log_prediction_interval = self.opt.bbox_interval
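# i.e. with bbox_interval == -1, predictions are logged roughly 10 times per run; otherwise every bbox_interval epochs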
if self.comet_log_predictions:
self.metadata_dict = {}
self.logged_image_names = []
self.comet_log_per_class_metrics = COMET_LOG_PER_CLASS_METRICS
self.experiment.log_others(
{
"comet_mode": COMET_MODE,
"comet_max_image_uploads": COMET_MAX_IMAGE_UPLOADS,
"comet_log_per_class_metrics": COMET_LOG_PER_CLASS_METRICS,
"comet_log_batch_metrics": COMET_LOG_BATCH_METRICS,
"comet_log_confusion_matrix": COMET_LOG_CONFUSION_MATRIX,
"comet_model_name": COMET_MODEL_NAME,
}
)
# Check if running the Experiment with the Comet Optimizer
if hasattr(self.opt, "comet_optimizer_id"):
self.experiment.log_other("optimizer_id", self.opt.comet_optimizer_id)
self.experiment.log_other("optimizer_objective", self.opt.comet_optimizer_objective)
self.experiment.log_other("optimizer_metric", self.opt.comet_optimizer_metric)
self.experiment.log_other("optimizer_parameters", json.dumps(self.hyp))
def _get_experiment(self, mode, experiment_id=None):
if mode == "offline":
return (
comet_ml.ExistingOfflineExperiment(
previous_experiment=experiment_id,
**self.default_experiment_kwargs,
)
if experiment_id is not None
else comet_ml.OfflineExperiment(
**self.default_experiment_kwargs,
)
)
try:
if experiment_id is not None:
return comet_ml.ExistingExperiment(
previous_experiment=experiment_id,
**self.default_experiment_kwargs,
)
return comet_ml.Experiment(**self.default_experiment_kwargs)
except ValueError:
logger.warning(
"COMET WARNING: "
"Comet credentials have not been set. "
"Comet will default to offline logging. "
"Please set your credentials to enable online logging."
)
return self._get_experiment("offline", experiment_id)
return
def log_metrics(self, log_dict, **kwargs):
self.experiment.log_metrics(log_dict, **kwargs)
def log_parameters(self, log_dict, **kwargs):
self.experiment.log_parameters(log_dict, **kwargs)
def log_asset(self, asset_path, **kwargs):
self.experiment.log_asset(asset_path, **kwargs)
def log_asset_data(self, asset, **kwargs):
self.experiment.log_asset_data(asset, **kwargs)
def log_image(self, img, **kwargs):
self.experiment.log_image(img, **kwargs)
def log_model(self, path, opt, epoch, fitness_score, best_model=False):
if not self.save_model:
return
model_metadata = {
"fitness_score": fitness_score[-1],
"epochs_trained": epoch + 1,
"save_period": opt.save_period,
"total_epochs": opt.epochs,
}
model_files = glob.glob(f"{path}/*.pt")
for model_path in model_files:
name = Path(model_path).name
self.experiment.log_model(
self.model_name,
file_or_folder=model_path,
file_name=name,
metadata=model_metadata,
overwrite=True,
)
def check_dataset(self, data_file):
with open(data_file) as f:
data_config = yaml.safe_load(f)
path = data_config.get("path")
if path and path.startswith(COMET_PREFIX):
path = data_config["path"].replace(COMET_PREFIX, "")
return self.download_dataset_artifact(path)
self.log_asset(self.opt.data, metadata={"type": "data-config-file"})
return check_dataset(data_file)
def log_predictions(self, image, labelsn, path, shape, predn):
if self.logged_images_count >= self.max_images:
return
detections = predn[predn[:, 4] > self.conf_thres]
iou = box_iou(labelsn[:, 1:], detections[:, :4])
mask, _ = torch.where(iou > self.iou_thres)
if len(mask) == 0:
return
filtered_detections = detections[mask]
filtered_labels = labelsn[mask]
image_id = path.split("/")[-1].split(".")[0]
image_name = f"{image_id}_curr_epoch_{self.experiment.curr_epoch}"
if image_name not in self.logged_image_names:
native_scale_image = PIL.Image.open(path)
self.log_image(native_scale_image, name=image_name)
self.logged_image_names.append(image_name)
metadata = [
{
"label": f"{self.class_names[int(cls)]}-gt",
"score": 100,
"box": {"x": xyxy[0], "y": xyxy[1], "x2": xyxy[2], "y2": xyxy[3]},
}
for cls, *xyxy in filtered_labels.tolist()
]
metadata.extend(
{
"label": f"{self.class_names[int(cls)]}",
"score": conf * 100,
"box": {"x": xyxy[0], "y": xyxy[1], "x2": xyxy[2], "y2": xyxy[3]},
}
for *xyxy, conf, cls in filtered_detections.tolist()
)
self.metadata_dict[image_name] = metadata
self.logged_images_count += 1
return
def preprocess_prediction(self, image, labels, shape, pred):
nl, _ = labels.shape[0], pred.shape[0]
# Predictions
if self.opt.single_cls:
pred[:, 5] = 0
predn = pred.clone()
scale_boxes(image.shape[1:], predn[:, :4], shape[0], shape[1])
labelsn = None
if nl:
tbox = xywh2xyxy(labels[:, 1:5]) # target boxes
scale_boxes(image.shape[1:], tbox, shape[0], shape[1]) # native-space labels
labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels
scale_boxes(image.shape[1:], predn[:, :4], shape[0], shape[1]) # native-space pred
return predn, labelsn
def add_assets_to_artifact(self, artifact, path, asset_path, split):
img_paths = sorted(glob.glob(f"{asset_path}/*"))
label_paths = img2label_paths(img_paths)
for image_file, label_file in zip(img_paths, label_paths):
image_logical_path, label_logical_path = map(lambda x: os.path.relpath(x, path), [image_file, label_file])
try:
artifact.add(
image_file,
logical_path=image_logical_path,
metadata={"split": split},
)
artifact.add(
label_file,
logical_path=label_logical_path,
metadata={"split": split},
)
except ValueError as e:
logger.error("COMET ERROR: Error adding file to Artifact. Skipping file.")
logger.error(f"COMET ERROR: {e}")
continue
return artifact
def upload_dataset_artifact(self):
dataset_name = self.data_dict.get("dataset_name", "yolov5-dataset")
path = str((ROOT / Path(self.data_dict["path"])).resolve())
metadata = self.data_dict.copy()
for key in ["train", "val", "test"]:
split_path = metadata.get(key)
if split_path is not None:
metadata[key] = split_path.replace(path, "")
artifact = comet_ml.Artifact(name=dataset_name, artifact_type="dataset", metadata=metadata)
for key in metadata.keys():
if key in ["train", "val", "test"]:
if isinstance(self.upload_dataset, str) and (key != self.upload_dataset):
continue
asset_path = self.data_dict.get(key)
if asset_path is not None:
artifact = self.add_assets_to_artifact(artifact, path, asset_path, key)
self.experiment.log_artifact(artifact)
return
def download_dataset_artifact(self, artifact_path):
logged_artifact = self.experiment.get_artifact(artifact_path)
artifact_save_dir = str(Path(self.opt.save_dir) / logged_artifact.name)
logged_artifact.download(artifact_save_dir)
metadata = logged_artifact.metadata
data_dict = metadata.copy()
data_dict["path"] = artifact_save_dir
metadata_names = metadata.get("names")
if isinstance(metadata_names, dict):
data_dict["names"] = {int(k): v for k, v in metadata.get("names").items()}
elif isinstance(metadata_names, list):
data_dict["names"] = {int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)}
else:
raise "Invalid 'names' field in dataset yaml file. Please use a list or dictionary"
return self.update_data_paths(data_dict)
def update_data_paths(self, data_dict):
path = data_dict.get("path", "")
for split in ["train", "val", "test"]:
if data_dict.get(split):
split_path = data_dict.get(split)
data_dict[split] = (
f"{path}/{split_path}" if isinstance(split, str) else [f"{path}/{x}" for x in split_path]
)
return data_dict
def on_pretrain_routine_end(self, paths):
if self.opt.resume:
return
for path in paths:
self.log_asset(str(path))
if self.upload_dataset and not self.resume:
self.upload_dataset_artifact()
return
def on_train_start(self):
self.log_parameters(self.hyp)
def on_train_epoch_start(self):
return
def on_train_epoch_end(self, epoch):
self.experiment.curr_epoch = epoch
return
def on_train_batch_start(self):
return
def on_train_batch_end(self, log_dict, step):
self.experiment.curr_step = step
if self.log_batch_metrics and (step % self.comet_log_batch_interval == 0):
self.log_metrics(log_dict, step=step)
return
def on_train_end(self, files, save_dir, last, best, epoch, results):
if self.comet_log_predictions:
curr_epoch = self.experiment.curr_epoch
self.experiment.log_asset_data(self.metadata_dict, "image-metadata.json", epoch=curr_epoch)
for f in files:
self.log_asset(f, metadata={"epoch": epoch})
self.log_asset(f"{save_dir}/results.csv", metadata={"epoch": epoch})
if not self.opt.evolve:
model_path = str(best if best.exists() else last)
name = Path(model_path).name
if self.save_model:
self.experiment.log_model(
self.model_name,
file_or_folder=model_path,
file_name=name,
overwrite=True,
)
# Check if running Experiment with Comet Optimizer
if hasattr(self.opt, "comet_optimizer_id"):
metric = results.get(self.opt.comet_optimizer_metric)
self.experiment.log_other("optimizer_metric_value", metric)
self.finish_run()
def on_val_start(self):
return
def on_val_batch_start(self):
return
def on_val_batch_end(self, batch_i, images, targets, paths, shapes, outputs):
if not (self.comet_log_predictions and ((batch_i + 1) % self.comet_log_prediction_interval == 0)):
return
for si, pred in enumerate(outputs):
if len(pred) == 0:
continue
image = images[si]
labels = targets[targets[:, 0] == si, 1:]
shape = shapes[si]
path = paths[si]
predn, labelsn = self.preprocess_prediction(image, labels, shape, pred)
if labelsn is not None:
self.log_predictions(image, labelsn, path, shape, predn)
return
def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix):
if self.comet_log_per_class_metrics and self.num_classes > 1:
for i, c in enumerate(ap_class):
class_name = self.class_names[c]
self.experiment.log_metrics(
{
"mAP@.5": ap50[i],
"mAP@.5:.95": ap[i],
"precision": p[i],
"recall": r[i],
"f1": f1[i],
"true_positives": tp[i],
"false_positives": fp[i],
"support": nt[c],
},
prefix=class_name,
)
if self.comet_log_confusion_matrix:
epoch = self.experiment.curr_epoch
class_names = list(self.class_names.values())
class_names.append("background")
num_classes = len(class_names)
self.experiment.log_confusion_matrix(
matrix=confusion_matrix.matrix,
max_categories=num_classes,
labels=class_names,
epoch=epoch,
column_label="Actual Category",
row_label="Predicted Category",
file_name=f"confusion-matrix-epoch-{epoch}.json",
)
def on_fit_epoch_end(self, result, epoch):
self.log_metrics(result, epoch=epoch)
def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1:
self.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)
def on_params_update(self, params):
self.log_parameters(params)
def finish_run(self):
self.experiment.end() | --- +++ @@ -64,8 +64,12 @@
class CometLogger:
+ """Log metrics, parameters, source code, models and much more with Comet."""
def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwargs) -> None:
+ """Initializes CometLogger with given options, hyperparameters, run ID, job type, and additional experiment
+ arguments.
+ """
self.job_type = job_type
self.opt = opt
self.hyp = hyp
@@ -164,6 +168,7 @@ self.experiment.log_other("optimizer_parameters", json.dumps(self.hyp))
def _get_experiment(self, mode, experiment_id=None):
+ """Returns a new or existing Comet.ml experiment based on mode and optional experiment_id."""
if mode == "offline":
return (
comet_ml.ExistingOfflineExperiment(
@@ -196,21 +201,27 @@ return
def log_metrics(self, log_dict, **kwargs):
+ """Logs metrics to the current experiment, accepting a dictionary of metric names and values."""
self.experiment.log_metrics(log_dict, **kwargs)
def log_parameters(self, log_dict, **kwargs):
+ """Logs parameters to the current experiment, accepting a dictionary of parameter names and values."""
self.experiment.log_parameters(log_dict, **kwargs)
def log_asset(self, asset_path, **kwargs):
+ """Logs a file or directory as an asset to the current experiment."""
self.experiment.log_asset(asset_path, **kwargs)
def log_asset_data(self, asset, **kwargs):
+ """Logs in-memory data as an asset to the current experiment, with optional kwargs."""
self.experiment.log_asset_data(asset, **kwargs)
def log_image(self, img, **kwargs):
+ """Logs an image to the current experiment with optional kwargs."""
self.experiment.log_image(img, **kwargs)
def log_model(self, path, opt, epoch, fitness_score, best_model=False):
+ """Logs model checkpoint to experiment with path, options, epoch, fitness, and best model flag."""
if not self.save_model:
return
@@ -234,6 +245,7 @@ )
def check_dataset(self, data_file):
+ """Validates the dataset configuration by loading the YAML file specified in `data_file`."""
with open(data_file) as f:
data_config = yaml.safe_load(f)
@@ -246,6 +258,7 @@ return check_dataset(data_file)
def log_predictions(self, image, labelsn, path, shape, predn):
+ """Logs predictions with IOU filtering, given image, labels, path, shape, and predictions."""
if self.logged_images_count >= self.max_images:
return
detections = predn[predn[:, 4] > self.conf_thres]
@@ -286,6 +299,7 @@ return
def preprocess_prediction(self, image, labels, shape, pred):
+ """Processes prediction data, resizing labels and adding dataset metadata."""
nl, _ = labels.shape[0], pred.shape[0]
# Predictions
@@ -305,6 +319,7 @@ return predn, labelsn
def add_assets_to_artifact(self, artifact, path, asset_path, split):
+ """Adds image and label assets to a Comet artifact given dataset split and paths."""
img_paths = sorted(glob.glob(f"{asset_path}/*"))
label_paths = img2label_paths(img_paths)
@@ -330,6 +345,7 @@ return artifact
def upload_dataset_artifact(self):
+ """Uploads a YOLOv5 dataset as an artifact to the Comet.ml platform."""
dataset_name = self.data_dict.get("dataset_name", "yolov5-dataset")
path = str((ROOT / Path(self.data_dict["path"])).resolve())
@@ -354,6 +370,7 @@ return
def download_dataset_artifact(self, artifact_path):
+ """Downloads a dataset artifact to a specified directory using the experiment's logged artifact."""
logged_artifact = self.experiment.get_artifact(artifact_path)
artifact_save_dir = str(Path(self.opt.save_dir) / logged_artifact.name)
logged_artifact.download(artifact_save_dir)
@@ -373,6 +390,7 @@ return self.update_data_paths(data_dict)
def update_data_paths(self, data_dict):
+ """Updates data paths in the dataset dictionary, defaulting 'path' to an empty string if not present."""
path = data_dict.get("path", "")
for split in ["train", "val", "test"]:
@@ -385,6 +403,7 @@ return data_dict
def on_pretrain_routine_end(self, paths):
+ """Called at the end of pretraining routine to handle paths if training is not being resumed."""
if self.opt.resume:
return
@@ -397,20 +416,25 @@ return
def on_train_start(self):
+ """Logs hyperparameters at the start of training."""
self.log_parameters(self.hyp)
def on_train_epoch_start(self):
+ """Called at the start of each training epoch."""
return
def on_train_epoch_end(self, epoch):
+ """Updates the current epoch in the experiment tracking at the end of each epoch."""
self.experiment.curr_epoch = epoch
return
def on_train_batch_start(self):
+ """Called at the start of each training batch."""
return
def on_train_batch_end(self, log_dict, step):
+ """Callback function that updates and logs metrics at the end of each training batch if conditions are met."""
self.experiment.curr_step = step
if self.log_batch_metrics and (step % self.comet_log_batch_interval == 0):
self.log_metrics(log_dict, step=step)
@@ -418,6 +442,7 @@ return
def on_train_end(self, files, save_dir, last, best, epoch, results):
+ """Logs metadata and optionally saves model files at the end of training."""
if self.comet_log_predictions:
curr_epoch = self.experiment.curr_epoch
self.experiment.log_asset_data(self.metadata_dict, "image-metadata.json", epoch=curr_epoch)
@@ -445,12 +470,15 @@ self.finish_run()
def on_val_start(self):
+ """Called at the start of validation, currently a placeholder with no functionality."""
return
def on_val_batch_start(self):
+ """Placeholder called at the start of a validation batch with no current functionality."""
return
def on_val_batch_end(self, batch_i, images, targets, paths, shapes, outputs):
+ """Callback executed at the end of a validation batch, conditionally logs predictions to Comet ML."""
if not (self.comet_log_predictions and ((batch_i + 1) % self.comet_log_prediction_interval == 0)):
return
@@ -469,6 +497,7 @@ return
def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix):
+ """Logs per-class metrics to Comet.ml after validation if enabled and more than one class exists."""
if self.comet_log_per_class_metrics and self.num_classes > 1:
for i, c in enumerate(ap_class):
class_name = self.class_names[c]
@@ -503,14 +532,18 @@ )
def on_fit_epoch_end(self, result, epoch):
+ """Logs metrics at the end of each training epoch."""
self.log_metrics(result, epoch=epoch)
def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
+ """Callback to save model checkpoints periodically if conditions are met."""
if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1:
self.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)
def on_params_update(self, params):
+ """Logs updated parameters during training."""
self.log_parameters(params)
def finish_run(self):
- self.experiment.end()
+ """Ends the current experiment and logs its completion."""
+ self.experiment.end()
| https://raw.githubusercontent.com/ultralytics/yolov5/HEAD/utils/loggers/comet/__init__.py |
Document all public functions with docstrings | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
import argparse
import csv
import os
import platform
import sys
from pathlib import Path
import torch
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0] # YOLOv5 root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
from ultralytics.utils.plotting import Annotator, colors, save_one_box
from models.common import DetectMultiBackend
from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
from utils.general import (
LOGGER,
Profile,
check_file,
check_img_size,
check_imshow,
check_requirements,
colorstr,
cv2,
increment_path,
non_max_suppression,
print_args,
scale_boxes,
strip_optimizer,
xyxy2xywh,
)
from utils.torch_utils import select_device, smart_inference_mode
@smart_inference_mode()
def run(
weights=ROOT / "yolov5s.pt", # model path or triton URL
source=ROOT / "data/images", # file/dir/URL/glob/screen/0(webcam)
data=ROOT / "data/coco128.yaml", # dataset.yaml path
imgsz=(640, 640), # inference size (height, width)
conf_thres=0.25, # confidence threshold
iou_thres=0.45, # NMS IOU threshold
max_det=1000, # maximum detections per image
device="", # cuda device, i.e. 0 or 0,1,2,3 or cpu
view_img=False, # show results
save_txt=False, # save results to *.txt
save_format=0, # save boxes coordinates in YOLO format or Pascal-VOC format (0 for YOLO and 1 for Pascal-VOC)
save_csv=False, # save results in CSV format
save_conf=False, # save confidences in --save-txt labels
save_crop=False, # save cropped prediction boxes
nosave=False, # do not save images/videos
classes=None, # filter by class: --class 0, or --class 0 2 3
agnostic_nms=False, # class-agnostic NMS
augment=False, # augmented inference
visualize=False, # visualize features
update=False, # update all models
project=ROOT / "runs/detect", # save results to project/name
name="exp", # save results to project/name
exist_ok=False, # existing project/name ok, do not increment
line_thickness=3, # bounding box thickness (pixels)
hide_labels=False, # hide labels
hide_conf=False, # hide confidences
half=False, # use FP16 half-precision inference
dnn=False, # use OpenCV DNN for ONNX inference
vid_stride=1, # video frame-rate stride
):
source = str(source)
save_img = not nosave and not source.endswith(".txt") # save inference images
is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
is_url = source.lower().startswith(("rtsp://", "rtmp://", "http://", "https://"))
webcam = source.isnumeric() or source.endswith(".streams") or (is_url and not is_file)
screenshot = source.lower().startswith("screen")
if is_url and is_file:
source = check_file(source) # download
# Directories
save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
(save_dir / "labels" if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Load model
device = select_device(device)
model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
stride, names, pt = model.stride, model.names, model.pt
imgsz = check_img_size(imgsz, s=stride) # check image size
# Dataloader
bs = 1 # batch_size
if webcam:
view_img = check_imshow(warn=True)
dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
bs = len(dataset)
elif screenshot:
dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)
else:
dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
vid_path, vid_writer = [None] * bs, [None] * bs
# Run inference
model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz)) # warmup
seen, windows, dt = 0, [], (Profile(device=device), Profile(device=device), Profile(device=device))
for path, im, im0s, vid_cap, s in dataset:
with dt[0]:
im = torch.from_numpy(im).to(model.device)
im = im.half() if model.fp16 else im.float() # uint8 to fp16/32
im /= 255 # 0 - 255 to 0.0 - 1.0
if len(im.shape) == 3:
im = im[None] # expand for batch dim
if model.xml and im.shape[0] > 1:
ims = torch.chunk(im, im.shape[0], 0)
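# OpenVINO (model.xml) backends handle batches one image at a time: the chunks above are run
# individually and their predictions are re-stacked after inference below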
# Inference
with dt[1]:
visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
if model.xml and im.shape[0] > 1:
pred = None
for image in ims:
if pred is None:
pred = model(image, augment=augment, visualize=visualize).unsqueeze(0)
else:
pred = torch.cat((pred, model(image, augment=augment, visualize=visualize).unsqueeze(0)), dim=0)
pred = [pred, None]
else:
pred = model(im, augment=augment, visualize=visualize)
# NMS
with dt[2]:
pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
# Second-stage classifier (optional)
# pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
# Define the path for the CSV file
csv_path = save_dir / "predictions.csv"
# Create or append to the CSV file
def write_to_csv(image_name, prediction, confidence):
data = {"Image Name": image_name, "Prediction": prediction, "Confidence": confidence}
file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a", newline="") as f:
writer = csv.DictWriter(f, fieldnames=data.keys())
if not file_exists:
writer.writeheader()
writer.writerow(data)
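# Illustrative write_to_csv output (hypothetical values): a header row "Image Name,Prediction,Confidence"
# followed by rows such as "bus.jpg,person,0.87"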
# Process predictions
for i, det in enumerate(pred): # per image
seen += 1
if webcam: # batch_size >= 1
p, im0, frame = path[i], im0s[i].copy(), dataset.count
s += f"{i}: "
else:
p, im0, frame = path, im0s.copy(), getattr(dataset, "frame", 0)
p = Path(p) # to Path
save_path = str(save_dir / p.name) # im.jpg
txt_path = str(save_dir / "labels" / p.stem) + ("" if dataset.mode == "image" else f"_{frame}") # im.txt
s += "{:g}x{:g} ".format(*im.shape[2:]) # print string
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
imc = im0.copy() if save_crop else im0 # for save_crop
annotator = Annotator(im0, line_width=line_thickness, example=str(names))
if len(det):
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()
# Print results
for c in det[:, 5].unique():
n = (det[:, 5] == c).sum() # detections per class
s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
# Write results
for *xyxy, conf, cls in reversed(det):
c = int(cls) # integer class
label = names[c] if hide_conf else f"{names[c]}"
confidence = float(conf)
confidence_str = f"{confidence:.2f}"
if save_csv:
write_to_csv(p.name, label, confidence_str)
if save_txt: # Write to file
if save_format == 0:
coords = (
(xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()
) # normalized xywh
else:
coords = (torch.tensor(xyxy).view(1, 4) / gn).view(-1).tolist() # xyxy
line = (cls, *coords, conf) if save_conf else (cls, *coords) # label format
with open(f"{txt_path}.txt", "a") as f:
f.write(("%g " * len(line)).rstrip() % line + "\n")
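# Each line written above is "class coords [conf]": coords are xywh when save_format == 0 (YOLO)
# or xyxy when save_format == 1, both normalized by image size via gn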
if save_img or save_crop or view_img: # Add bbox to image
c = int(cls) # integer class
label = None if hide_labels else (names[c] if hide_conf else f"{names[c]} {conf:.2f}")
annotator.box_label(xyxy, label, color=colors(c, True))
if save_crop:
save_one_box(xyxy, imc, file=save_dir / "crops" / names[c] / f"{p.stem}.jpg", BGR=True)
# Stream results
im0 = annotator.result()
if view_img:
if platform.system() == "Linux" and p not in windows:
windows.append(p)
cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux)
cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
cv2.imshow(str(p), im0)
cv2.waitKey(1) # 1 millisecond
# Save results (image with detections)
if save_img:
if dataset.mode == "image":
cv2.imwrite(save_path, im0)
else: # 'video' or 'stream'
if vid_path[i] != save_path: # new video
vid_path[i] = save_path
if isinstance(vid_writer[i], cv2.VideoWriter):
vid_writer[i].release() # release previous video writer
if vid_cap: # video
fps = vid_cap.get(cv2.CAP_PROP_FPS)
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
else: # stream
fps, w, h = 30, im0.shape[1], im0.shape[0]
save_path = str(Path(save_path).with_suffix(".mp4")) # force *.mp4 suffix on results videos
vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
vid_writer[i].write(im0)
# Print time (inference-only)
LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1e3:.1f}ms")
# Print results
t = tuple(x.t / seen * 1e3 for x in dt) # speeds per image
LOGGER.info(f"Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}" % t)
if save_txt or save_img:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ""
LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
if update:
strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning)
def parse_opt():
parser = argparse.ArgumentParser()
parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s.pt", help="model path or triton URL")
parser.add_argument("--source", type=str, default=ROOT / "data/images", help="file/dir/URL/glob/screen/0(webcam)")
parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="(optional) dataset.yaml path")
parser.add_argument("--imgsz", "--img", "--img-size", nargs="+", type=int, default=[640], help="inference size h,w")
parser.add_argument("--conf-thres", type=float, default=0.25, help="confidence threshold")
parser.add_argument("--iou-thres", type=float, default=0.45, help="NMS IoU threshold")
parser.add_argument("--max-det", type=int, default=1000, help="maximum detections per image")
parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
parser.add_argument("--view-img", action="store_true", help="show results")
parser.add_argument("--save-txt", action="store_true", help="save results to *.txt")
parser.add_argument(
"--save-format",
type=int,
default=0,
help="whether to save boxes coordinates in YOLO format or Pascal-VOC format when save-txt is True, 0 for YOLO and 1 for Pascal-VOC",
)
parser.add_argument("--save-csv", action="store_true", help="save results in CSV format")
parser.add_argument("--save-conf", action="store_true", help="save confidences in --save-txt labels")
parser.add_argument("--save-crop", action="store_true", help="save cropped prediction boxes")
parser.add_argument("--nosave", action="store_true", help="do not save images/videos")
parser.add_argument("--classes", nargs="+", type=int, help="filter by class: --classes 0, or --classes 0 2 3")
parser.add_argument("--agnostic-nms", action="store_true", help="class-agnostic NMS")
parser.add_argument("--augment", action="store_true", help="augmented inference")
parser.add_argument("--visualize", action="store_true", help="visualize features")
parser.add_argument("--update", action="store_true", help="update all models")
parser.add_argument("--project", default=ROOT / "runs/detect", help="save results to project/name")
parser.add_argument("--name", default="exp", help="save results to project/name")
parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment")
parser.add_argument("--line-thickness", default=3, type=int, help="bounding box thickness (pixels)")
parser.add_argument("--hide-labels", default=False, action="store_true", help="hide labels")
parser.add_argument("--hide-conf", default=False, action="store_true", help="hide confidences")
parser.add_argument("--half", action="store_true", help="use FP16 half-precision inference")
parser.add_argument("--dnn", action="store_true", help="use OpenCV DNN for ONNX inference")
parser.add_argument("--vid-stride", type=int, default=1, help="video frame-rate stride")
opt = parser.parse_args()
opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
print_args(vars(opt))
return opt
def main(opt):
check_requirements(ROOT / "requirements.txt", exclude=("tensorboard", "thop"))
run(**vars(opt))
if __name__ == "__main__":
opt = parse_opt()
main(opt) | --- +++ @@ -1,4 +1,32 @@ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""
+Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
+
+Usage - sources:
+ $ python detect.py --weights yolov5s.pt --source 0 # webcam
+ img.jpg # image
+ vid.mp4 # video
+ screen # screenshot
+ path/ # directory
+ list.txt # list of images
+ list.streams # list of streams
+ 'path/*.jpg' # glob
+ 'https://youtu.be/LNwODJXcvt4' # YouTube
+ 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
+
+Usage - formats:
+ $ python detect.py --weights yolov5s.pt # PyTorch
+ yolov5s.torchscript # TorchScript
+ yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn
+ yolov5s_openvino_model # OpenVINO
+ yolov5s.engine # TensorRT
+ yolov5s.mlpackage # CoreML (macOS-only)
+ yolov5s_saved_model # TensorFlow SavedModel
+ yolov5s.pb # TensorFlow GraphDef
+ yolov5s.tflite # TensorFlow Lite
+ yolov5s_edgetpu.tflite # TensorFlow Edge TPU
+ yolov5s_paddle_model # PaddlePaddle
+"""
import argparse
import csv
@@ -70,6 +98,55 @@ dnn=False, # use OpenCV DNN for ONNX inference
vid_stride=1, # video frame-rate stride
):
+ """Runs YOLOv5 detection inference on various sources like images, videos, directories, streams, etc.
+
+ Args:
+ weights (str | Path): Path to the model weights file or a Triton URL. Default is 'yolov5s.pt'.
+ source (str | Path): Input source, which can be a file, directory, URL, glob pattern, screen capture, or webcam
+ index. Default is 'data/images'.
+ data (str | Path): Path to the dataset YAML file. Default is 'data/coco128.yaml'.
+ imgsz (tuple[int, int]): Inference image size as a tuple (height, width). Default is (640, 640).
+ conf_thres (float): Confidence threshold for detections. Default is 0.25.
+ iou_thres (float): Intersection Over Union (IOU) threshold for non-max suppression. Default is 0.45.
+ max_det (int): Maximum number of detections per image. Default is 1000.
+ device (str): CUDA device identifier (e.g., '0' or '0,1,2,3') or 'cpu'. Default is an empty string, which uses
+ the best available device.
+ view_img (bool): If True, display inference results using OpenCV. Default is False.
+ save_txt (bool): If True, save results in a text file. Default is False.
+ save_format (int): Box coordinate format for --save-txt labels: 0 for YOLO (normalized xywh), 1 for Pascal-VOC (normalized xyxy). Default is 0.
+ save_csv (bool): If True, save results in a CSV file. Default is False.
+ save_conf (bool): If True, include confidence scores in the saved results. Default is False.
+ save_crop (bool): If True, save cropped prediction boxes. Default is False.
+ nosave (bool): If True, do not save inference images or videos. Default is False.
+ classes (list[int]): List of class indices to filter detections by. Default is None.
+ agnostic_nms (bool): If True, perform class-agnostic non-max suppression. Default is False.
+ augment (bool): If True, use augmented inference. Default is False.
+ visualize (bool): If True, visualize feature maps. Default is False.
+ update (bool): If True, update all models' weights. Default is False.
+ project (str | Path): Directory to save results. Default is 'runs/detect'.
+ name (str): Name of the current experiment; used to create a subdirectory within 'project'. Default is 'exp'.
+ exist_ok (bool): If True, existing directories with the same name are reused instead of being incremented.
+ Default is False.
+ line_thickness (int): Thickness of bounding box lines in pixels. Default is 3.
+ hide_labels (bool): If True, do not display labels on bounding boxes. Default is False.
+ hide_conf (bool): If True, do not display confidence scores on bounding boxes. Default is False.
+ half (bool): If True, use FP16 half-precision inference. Default is False.
+ dnn (bool): If True, use OpenCV DNN backend for ONNX inference. Default is False.
+ vid_stride (int): Stride for processing video frames, to skip frames between processing. Default is 1.
+
+ Returns:
+ None
+
+ Examples:
+ ```python
+ from detect import run  # assuming the YOLOv5 repo root is on sys.path
+
+ # Run inference on an image
+ run(source='data/images/example.jpg', weights='yolov5s.pt', device='0')
+
+ # Run inference on a video with specific confidence threshold
+ run(source='data/videos/example.mp4', weights='yolov5s.pt', conf_thres=0.4, device='0')
+ ```
+ """
source = str(source)
save_img = not nosave and not source.endswith(".txt") # save inference images
is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
@@ -139,6 +216,7 @@
# Create or append to the CSV file
def write_to_csv(image_name, prediction, confidence):
+ """Writes prediction data for an image to a CSV file, appending if the file exists."""
data = {"Image Name": image_name, "Prediction": prediction, "Confidence": confidence}
file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a", newline="") as f:
@@ -243,6 +321,49 @@
def parse_opt():
+ """Parse command-line arguments for YOLOv5 detection, allowing custom inference options and model configurations.
+
+ Args:
+ --weights (str | list[str], optional): Model path or Triton URL. Defaults to ROOT / 'yolov5s.pt'.
+ --source (str, optional): File/dir/URL/glob/screen/0(webcam). Defaults to ROOT / 'data/images'.
+ --data (str, optional): Dataset YAML path. Provides dataset configuration information.
+ --imgsz (list[int], optional): Inference size (height, width). Defaults to [640].
+ --conf-thres (float, optional): Confidence threshold. Defaults to 0.25.
+ --iou-thres (float, optional): NMS IoU threshold. Defaults to 0.45.
+ --max-det (int, optional): Maximum number of detections per image. Defaults to 1000.
+ --device (str, optional): CUDA device, i.e., '0' or '0,1,2,3' or 'cpu'. Defaults to "".
+ --view-img (bool, optional): Flag to display results. Defaults to False.
+ --save-txt (bool, optional): Flag to save results to *.txt files. Defaults to False.
+ --save-txt (bool, optional): Flag to save results to *.txt files. Defaults to False.
+ --save-format (int, optional): Coordinate format for saved labels, 0 for YOLO and 1 for Pascal-VOC. Defaults to 0.
+ --save-conf (bool, optional): Flag to save confidences in labels saved via --save-txt. Defaults to False.
+ --save-crop (bool, optional): Flag to save cropped prediction boxes. Defaults to False.
+ --nosave (bool, optional): Flag to prevent saving images/videos. Defaults to False.
+ --classes (list[int], optional): List of classes to filter results by, e.g., '--classes 0 2 3'. Defaults to
+ None.
+ --agnostic-nms (bool, optional): Flag for class-agnostic NMS. Defaults to False.
+ --augment (bool, optional): Flag for augmented inference. Defaults to False.
+ --visualize (bool, optional): Flag for visualizing features. Defaults to False.
+ --update (bool, optional): Flag to update all models in the model directory. Defaults to False.
+ --project (str, optional): Directory to save results. Defaults to ROOT / 'runs/detect'.
+ --name (str, optional): Sub-directory name for saving results within --project. Defaults to 'exp'.
+ --exist-ok (bool, optional): Flag to allow overwriting if the project/name already exists. Defaults to False.
+ --line-thickness (int, optional): Thickness (in pixels) of bounding boxes. Defaults to 3.
+ --hide-labels (bool, optional): Flag to hide labels in the output. Defaults to False.
+ --hide-conf (bool, optional): Flag to hide confidences in the output. Defaults to False.
+ --half (bool, optional): Flag to use FP16 half-precision inference. Defaults to False.
+ --dnn (bool, optional): Flag to use OpenCV DNN for ONNX inference. Defaults to False.
+ --vid-stride (int, optional): Video frame-rate stride, determining the number of frames to skip in between
+ consecutive frames. Defaults to 1.
+
+ Returns:
+ argparse.Namespace: Parsed command-line arguments as an argparse.Namespace object.
+
+ Examples:
+ ```python
+ from detect import parse_opt  # assuming the YOLOv5 repo root is on sys.path
+ opt = parse_opt()
+ ```
+ """
parser = argparse.ArgumentParser()
parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s.pt", help="model path or triton URL")
parser.add_argument("--source", type=str, default=ROOT / "data/images", help="file/dir/URL/glob/screen/0(webcam)")
@@ -285,10 +406,31 @@
def main(opt):
+ """Executes YOLOv5 model inference based on provided command-line arguments, validating dependencies before running.
+
+ Args:
+ opt (argparse.Namespace): Command-line arguments for YOLOv5 detection. See function `parse_opt` for details.
+
+ Returns:
+ None
+
+ Notes:
+ This function performs essential pre-execution checks and initiates the YOLOv5 detection process based on user-specified
+ options. Refer to the usage guide and examples for more information about different sources and formats at:
+ https://github.com/ultralytics/ultralytics
+
+ Example usage:
+
+ ```python
+ if __name__ == "__main__":
+ opt = parse_opt()
+ main(opt)
+ ```
+ """
check_requirements(ROOT / "requirements.txt", exclude=("tensorboard", "thop"))
run(**vars(opt))
if __name__ == "__main__":
opt = parse_opt()
- main(opt)
+ main(opt)
| https://raw.githubusercontent.com/ultralytics/yolov5/HEAD/detect.py |
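Since the diff above documents detect.py's `run()` signature in full, a minimal programmatic sketch follows. It assumes the YOLOv5 repository root is on `sys.path` and that a local `yolov5s.pt` checkpoint and sample images exist; all three are assumptions, not shown in the row.

```python
# Sketch: call detect.py's run() directly instead of via the CLI
# (assumptions: repo root on sys.path, local yolov5s.pt, images under data/images)
from detect import run

run(
    weights="yolov5s.pt",  # model path or Triton URL
    source="data/images",  # file/dir/URL/glob/screen/0(webcam)
    imgsz=(640, 640),      # inference size (height, width)
    conf_thres=0.25,       # confidence threshold
    save_txt=True,         # write one *.txt label file per image
    save_csv=True,         # also append rows to predictions.csv
)
```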
Generate docstrings for script automation | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
import argparse
import platform
import sys
import time
from pathlib import Path
import pandas as pd
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0] # YOLOv5 root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
# ROOT = ROOT.relative_to(Path.cwd()) # relative
import export
from models.experimental import attempt_load
from models.yolo import SegmentationModel
from segment.val import run as val_seg
from utils import notebook_init
from utils.general import LOGGER, check_yaml, file_size, print_args
from utils.torch_utils import select_device
from val import run as val_det
def run(
weights=ROOT / "yolov5s.pt", # weights path
imgsz=640, # inference size (pixels)
batch_size=1, # batch size
data=ROOT / "data/coco128.yaml", # dataset.yaml path
device="", # cuda device, i.e. 0 or 0,1,2,3 or cpu
half=False, # use FP16 half-precision inference
test=False, # test exports only
pt_only=False, # test PyTorch only
hard_fail=False, # throw error on benchmark failure
):
y, t = [], time.time()
device = select_device(device)
model_type = type(attempt_load(weights, fuse=False)) # DetectionModel, SegmentationModel, etc.
for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, CPU, GPU)
try:
assert i not in (9, 10), "inference not supported" # Edge TPU and TF.js are unsupported
assert i != 5 or platform.system() == "Darwin", "inference only supported on macOS>=10.13" # CoreML
if "cpu" in device.type:
assert cpu, "inference not supported on CPU"
if "cuda" in device.type:
assert gpu, "inference not supported on GPU"
# Export
if f == "-":
w = weights # PyTorch format
else:
w = export.run(
weights=weights, imgsz=[imgsz], include=[f], batch_size=batch_size, device=device, half=half
)[-1] # all others
assert suffix in str(w), "export failed"
# Validate
if model_type == SegmentationModel:
result = val_seg(data, w, batch_size, imgsz, plots=False, device=device, task="speed", half=half)
metric = result[0][7] # (box(p, r, map50, map), mask(p, r, map50, map), *loss(box, obj, cls))
else: # DetectionModel:
result = val_det(data, w, batch_size, imgsz, plots=False, device=device, task="speed", half=half)
metric = result[0][3] # (p, r, map50, map, *loss(box, obj, cls))
speed = result[2][1] # times (preprocess, inference, postprocess)
y.append([name, round(file_size(w), 1), round(metric, 4), round(speed, 2)]) # MB, mAP, t_inference
except Exception as e:
if hard_fail:
assert type(e) is AssertionError, f"Benchmark --hard-fail for {name}: {e}"
LOGGER.warning(f"WARNING ⚠️ Benchmark failure for {name}: {e}")
y.append([name, None, None, None]) # mAP, t_inference
if pt_only and i == 0:
break # break after PyTorch
# Print results
LOGGER.info("\n")
parse_opt()
notebook_init() # print system info
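# NOTE: `map` below is the Python builtin (always truthy), so the metrics-style columns and the full table are always selected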
c = ["Format", "Size (MB)", "mAP50-95", "Inference time (ms)"] if map else ["Format", "Export", "", ""]
py = pd.DataFrame(y, columns=c)
LOGGER.info(f"\nBenchmarks complete ({time.time() - t:.2f}s)")
LOGGER.info(str(py if map else py.iloc[:, :2]))
if hard_fail and isinstance(hard_fail, str):
metrics = py["mAP50-95"].array # values to compare to floor
floor = eval(hard_fail) # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n
assert all(x > floor for x in metrics if pd.notna(x)), f"HARD FAIL: mAP50-95 < floor {floor}"
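# e.g. --hard-fail 0.29 asserts that every successfully benchmarked format scores mAP50-95 above 0.29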
return py
def test(
weights=ROOT / "yolov5s.pt", # weights path
imgsz=640, # inference size (pixels)
batch_size=1, # batch size
data=ROOT / "data/coco128.yaml", # dataset.yaml path
device="", # cuda device, i.e. 0 or 0,1,2,3 or cpu
half=False, # use FP16 half-precision inference
test=False, # test exports only
pt_only=False, # test PyTorch only
hard_fail=False, # throw error on benchmark failure
):
y, t = [], time.time()
device = select_device(device)
for i, (name, f, suffix, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, gpu-capable)
try:
w = (
weights
if f == "-"
else export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1]
) # weights
assert suffix in str(w), "export failed"
y.append([name, True])
except Exception:
y.append([name, False]) # mAP, t_inference
# Print results
LOGGER.info("\n")
parse_opt()
notebook_init() # print system info
py = pd.DataFrame(y, columns=["Format", "Export"])
LOGGER.info(f"\nExports complete ({time.time() - t:.2f}s)")
LOGGER.info(str(py))
return py
def parse_opt():
parser = argparse.ArgumentParser()
parser.add_argument("--weights", type=str, default=ROOT / "yolov5s.pt", help="weights path")
parser.add_argument("--imgsz", "--img", "--img-size", type=int, default=640, help="inference size (pixels)")
parser.add_argument("--batch-size", type=int, default=1, help="batch size")
parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="dataset.yaml path")
parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
parser.add_argument("--half", action="store_true", help="use FP16 half-precision inference")
parser.add_argument("--test", action="store_true", help="test exports only")
parser.add_argument("--pt-only", action="store_true", help="test PyTorch only")
parser.add_argument("--hard-fail", nargs="?", const=True, default=False, help="Exception on error or < min metric")
opt = parser.parse_args()
opt.data = check_yaml(opt.data) # check YAML
print_args(vars(opt))
return opt
def main(opt):
test(**vars(opt)) if opt.test else run(**vars(opt))
if __name__ == "__main__":
opt = parse_opt()
main(opt) | --- +++ @@ -1,4 +1,29 @@ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""
+Run YOLOv5 benchmarks on all supported export formats.
+
+Format | `export.py --include` | Model
+--- | --- | ---
+PyTorch | - | yolov5s.pt
+TorchScript | `torchscript` | yolov5s.torchscript
+ONNX | `onnx` | yolov5s.onnx
+OpenVINO | `openvino` | yolov5s_openvino_model/
+TensorRT | `engine` | yolov5s.engine
+CoreML | `coreml` | yolov5s.mlpackage
+TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/
+TensorFlow GraphDef | `pb` | yolov5s.pb
+TensorFlow Lite | `tflite` | yolov5s.tflite
+TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite
+TensorFlow.js | `tfjs` | yolov5s_web_model/
+
+Requirements:
+ $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU
+ $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU
+ $ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # TensorRT
+
+Usage:
+ $ python benchmarks.py --weights yolov5s.pt --img 640
+"""
import argparse
import platform
@@ -35,6 +60,40 @@ pt_only=False, # test PyTorch only
hard_fail=False, # throw error on benchmark failure
):
+ """Run YOLOv5 benchmarks on multiple export formats and log results for model performance evaluation.
+
+ Args:
+ weights (Path | str): Path to the model weights file (default: ROOT / "yolov5s.pt").
+ imgsz (int): Inference size in pixels (default: 640).
+ batch_size (int): Batch size for inference (default: 1).
+ data (Path | str): Path to the dataset.yaml file (default: ROOT / "data/coco128.yaml").
+ device (str): CUDA device, e.g., '0' or '0,1,2,3' or 'cpu' (default: "").
+ half (bool): Use FP16 half-precision inference (default: False).
+ test (bool): Test export formats only (default: False).
+ pt_only (bool): Test PyTorch format only (default: False).
+ hard_fail (bool): Throw an error on benchmark failure if True (default: False).
+
+ Returns:
+ pd.DataFrame: Benchmark results, one row per export format with file size (MB), mAP50-95, and inference time (ms); formats that fail carry None metrics.
+
+ Examples:
+ ```bash
+ $ python benchmarks.py --weights yolov5s.pt --img 640
+ ```
+
+ Install required packages:
+ $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU support
+ $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU support
+ $ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # TensorRT
+
+ Notes:
+ Supported export formats and models include PyTorch, TorchScript, ONNX, OpenVINO, TensorRT, CoreML,
+ TensorFlow SavedModel, TensorFlow GraphDef, TensorFlow Lite, and TensorFlow Edge TPU. Edge TPU and TF.js
+ are unsupported.
+ """
y, t = [], time.time()
device = select_device(device)
model_type = type(attempt_load(weights, fuse=False)) # DetectionModel, SegmentationModel, etc.
@@ -99,6 +158,40 @@ pt_only=False, # test PyTorch only
hard_fail=False, # throw error on benchmark failure
):
+ """Run YOLOv5 export tests for all supported formats and log the results, including export statuses.
+
+ Args:
+ weights (Path | str): Path to the model weights file (.pt format). Default is 'ROOT / "yolov5s.pt"'.
+ imgsz (int): Inference image size (in pixels). Default is 640.
+ batch_size (int): Batch size for testing. Default is 1.
+ data (Path | str): Path to the dataset configuration file (.yaml format). Default is 'ROOT /
+ "data/coco128.yaml"'.
+ device (str): Device for running the tests, can be 'cpu' or a specific CUDA device ('0', '0,1,2,3', etc.).
+ Default is an empty string.
+ half (bool): Use FP16 half-precision for inference if True. Default is False.
+ test (bool): Test export formats only without running inference. Default is False.
+ pt_only (bool): Test only the PyTorch model if True. Default is False.
+ hard_fail (bool): Raise error on export or test failure if True. Default is False.
+
+ Returns:
+ pd.DataFrame: DataFrame containing the results of the export tests, including format names and export statuses.
+
+ Examples:
+ ```bash
+ $ python benchmarks.py --weights yolov5s.pt --img 640
+ ```
+
+ Install required packages:
+ $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU support
+ $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU support
+ $ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # TensorRT
+
+ Notes:
+ Supported export formats and models include PyTorch, TorchScript, ONNX, OpenVINO, TensorRT, CoreML, TensorFlow
+ SavedModel, TensorFlow GraphDef, TensorFlow Lite, and TensorFlow Edge TPU. Edge TPU and TF.js are unsupported.
+ """
y, t = [], time.time()
device = select_device(device)
for i, (name, f, suffix, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, gpu-capable)
@@ -124,6 +217,27 @@
def parse_opt():
+ """Parses command-line arguments for YOLOv5 model inference configuration.
+
+ Args:
+ weights (str): The path to the weights file. Defaults to 'ROOT / "yolov5s.pt"'.
+ imgsz (int): Inference size in pixels. Defaults to 640.
+ batch_size (int): Batch size. Defaults to 1.
+ data (str): Path to the dataset YAML file. Defaults to 'ROOT / "data/coco128.yaml"'.
+ device (str): CUDA device, e.g., '0' or '0,1,2,3' or 'cpu'. Defaults to an empty string (auto-select).
+ half (bool): Use FP16 half-precision inference. This is a flag and defaults to False.
+ test (bool): Test exports only. This is a flag and defaults to False.
+ pt_only (bool): Test PyTorch only. This is a flag and defaults to False.
+ hard_fail (bool | str): Throw an error on benchmark failure. Can be a boolean or a string representing a minimum
+ metric floor, e.g., '0.29'. Defaults to False.
+
+ Returns:
+ argparse.Namespace: Parsed command-line arguments encapsulated in an argparse Namespace object.
+
+ Notes:
+ The function modifies the 'opt.data' by checking and validating the YAML path using 'check_yaml()'.
+ The parsed arguments are printed for reference using 'print_args()'.
+ """
parser = argparse.ArgumentParser()
parser.add_argument("--weights", type=str, default=ROOT / "yolov5s.pt", help="weights path")
parser.add_argument("--imgsz", "--img", "--img-size", type=int, default=640, help="inference size (pixels)")
@@ -141,9 +255,36 @@
def main(opt):
+ """Executes YOLOv5 export-only tests (--test) or full benchmarks based on the provided command-line arguments.
+
+ Args:
+ opt (argparse.Namespace): Parsed command-line arguments including options for weights, image size, batch size,
+ data configuration, device, and other flags for inference settings.
+
+ Returns:
+ None: This function does not return any value. It leverages side-effects such as logging and running benchmarks.
+
+ Examples:
+ ```python
+ if __name__ == "__main__":
+ opt = parse_opt()
+ main(opt)
+ ```
+
+ Notes:
+ - For a complete list of supported export formats and their respective requirements, refer to the
+ [Ultralytics YOLOv5 Export Formats](https://github.com/ultralytics/yolov5#export-formats).
+ - Ensure that you have installed all necessary dependencies by following the installation instructions detailed in
+ the [main repository](https://github.com/ultralytics/yolov5#installation).
+
+ ```shell
+ # Running benchmarks on default weights and image size
+ $ python benchmarks.py --weights yolov5s.pt --img 640
+ ```
+ """
test(**vars(opt)) if opt.test else run(**vars(opt))
if __name__ == "__main__":
opt = parse_opt()
- main(opt)
+ main(opt)
| https://raw.githubusercontent.com/ultralytics/yolov5/HEAD/benchmarks.py |
Write docstrings for data processing functions | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
import argparse
import os
import platform
import sys
from pathlib import Path
import torch
FILE = Path(__file__).resolve()
ROOT = FILE.parents[1] # YOLOv5 root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
from ultralytics.utils.plotting import Annotator, colors, save_one_box
from models.common import DetectMultiBackend
from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
from utils.general import (
LOGGER,
Profile,
check_file,
check_img_size,
check_imshow,
check_requirements,
colorstr,
cv2,
increment_path,
non_max_suppression,
print_args,
scale_boxes,
scale_segments,
strip_optimizer,
)
from utils.segment.general import masks2segments, process_mask, process_mask_native
from utils.torch_utils import select_device, smart_inference_mode
@smart_inference_mode()
def run(
weights=ROOT / "yolov5s-seg.pt", # model.pt path(s)
source=ROOT / "data/images", # file/dir/URL/glob/screen/0(webcam)
data=ROOT / "data/coco128.yaml", # dataset.yaml path
imgsz=(640, 640), # inference size (height, width)
conf_thres=0.25, # confidence threshold
iou_thres=0.45, # NMS IOU threshold
max_det=1000, # maximum detections per image
device="", # cuda device, i.e. 0 or 0,1,2,3 or cpu
view_img=False, # show results
save_txt=False, # save results to *.txt
save_conf=False, # save confidences in --save-txt labels
save_crop=False, # save cropped prediction boxes
nosave=False, # do not save images/videos
classes=None, # filter by class: --class 0, or --class 0 2 3
agnostic_nms=False, # class-agnostic NMS
augment=False, # augmented inference
visualize=False, # visualize features
update=False, # update all models
project=ROOT / "runs/predict-seg", # save results to project/name
name="exp", # save results to project/name
exist_ok=False, # existing project/name ok, do not increment
line_thickness=3, # bounding box thickness (pixels)
hide_labels=False, # hide labels
hide_conf=False, # hide confidences
half=False, # use FP16 half-precision inference
dnn=False, # use OpenCV DNN for ONNX inference
vid_stride=1, # video frame-rate stride
retina_masks=False,
):
source = str(source)
save_img = not nosave and not source.endswith(".txt") # save inference images
is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
is_url = source.lower().startswith(("rtsp://", "rtmp://", "http://", "https://"))
webcam = source.isnumeric() or source.endswith(".streams") or (is_url and not is_file)
screenshot = source.lower().startswith("screen")
if is_url and is_file:
source = check_file(source) # download
# Directories
save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
(save_dir / "labels" if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Load model
device = select_device(device)
model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
stride, names, pt = model.stride, model.names, model.pt
imgsz = check_img_size(imgsz, s=stride) # check image size
# Dataloader
bs = 1 # batch_size
if webcam:
view_img = check_imshow(warn=True)
dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
bs = len(dataset)
elif screenshot:
dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)
else:
dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
vid_path, vid_writer = [None] * bs, [None] * bs
# Run inference
model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup
seen, windows, dt = 0, [], (Profile(device=device), Profile(device=device), Profile(device=device))
for path, im, im0s, vid_cap, s in dataset:
with dt[0]:
im = torch.from_numpy(im).to(model.device)
im = im.half() if model.fp16 else im.float() # uint8 to fp16/32
im /= 255 # 0 - 255 to 0.0 - 1.0
if len(im.shape) == 3:
im = im[None] # expand for batch dim
# Inference
with dt[1]:
visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
pred, proto = model(im, augment=augment, visualize=visualize)[:2]
# NMS
with dt[2]:
pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det, nm=32)
# Second-stage classifier (optional)
# pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
# Process predictions
for i, det in enumerate(pred): # per image
seen += 1
if webcam: # batch_size >= 1
p, im0, frame = path[i], im0s[i].copy(), dataset.count
s += f"{i}: "
else:
p, im0, frame = path, im0s.copy(), getattr(dataset, "frame", 0)
p = Path(p) # to Path
save_path = str(save_dir / p.name) # im.jpg
txt_path = str(save_dir / "labels" / p.stem) + ("" if dataset.mode == "image" else f"_{frame}") # im.txt
s += "{:g}x{:g} ".format(*im.shape[2:]) # print string
imc = im0.copy() if save_crop else im0 # for save_crop
annotator = Annotator(im0, line_width=line_thickness, example=str(names))
if len(det):
if retina_masks:
# scale bbox first, then crop masks
det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size
masks = process_mask_native(proto[i], det[:, 6:], det[:, :4], im0.shape[:2]) # HWC
else:
masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC
det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size
# Segments
if save_txt:
segments = [
scale_segments(im0.shape if retina_masks else im.shape[2:], x, im0.shape, normalize=True)
for x in reversed(masks2segments(masks))
]
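# `segments` holds one normalized polygon (x1 y1 x2 y2 ...) per detection, in the same reversed(det)
# order used when writing label files below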
# Print results
for c in det[:, 5].unique():
n = (det[:, 5] == c).sum() # detections per class
s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
# Mask plotting
annotator.masks(
masks,
colors=[colors(x, True) for x in det[:, 5]],
im_gpu=torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous()
/ 255
if retina_masks
else im[i],
)
# Write results
for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])):
if save_txt: # Write to file
seg = segments[j].reshape(-1) # (n,2) to (n*2)
line = (cls, *seg, conf) if save_conf else (cls, *seg) # label format
with open(f"{txt_path}.txt", "a") as f:
f.write(("%g " * len(line)).rstrip() % line + "\n")
if save_img or save_crop or view_img: # Add bbox to image
c = int(cls) # integer class
label = None if hide_labels else (names[c] if hide_conf else f"{names[c]} {conf:.2f}")
annotator.box_label(xyxy, label, color=colors(c, True))
# annotator.draw.polygon(segments[j], outline=colors(c, True), width=3)
if save_crop:
save_one_box(xyxy, imc, file=save_dir / "crops" / names[c] / f"{p.stem}.jpg", BGR=True)
# Stream results
im0 = annotator.result()
if view_img:
if platform.system() == "Linux" and p not in windows:
windows.append(p)
cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux)
cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
cv2.imshow(str(p), im0)
if cv2.waitKey(1) == ord("q"): # 1 millisecond
exit()
# Save results (image with detections)
if save_img:
if dataset.mode == "image":
cv2.imwrite(save_path, im0)
else: # 'video' or 'stream'
if vid_path[i] != save_path: # new video
vid_path[i] = save_path
if isinstance(vid_writer[i], cv2.VideoWriter):
vid_writer[i].release() # release previous video writer
if vid_cap: # video
fps = vid_cap.get(cv2.CAP_PROP_FPS)
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
else: # stream
fps, w, h = 30, im0.shape[1], im0.shape[0]
save_path = str(Path(save_path).with_suffix(".mp4")) # force *.mp4 suffix on results videos
vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
vid_writer[i].write(im0)
# Print time (inference-only)
LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1e3:.1f}ms")
# Print results
t = tuple(x.t / seen * 1e3 for x in dt) # speeds per image
LOGGER.info(f"Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}" % t)
if save_txt or save_img:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ""
LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
if update:
strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning)
def parse_opt():
parser = argparse.ArgumentParser()
parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s-seg.pt", help="model path(s)")
parser.add_argument("--source", type=str, default=ROOT / "data/images", help="file/dir/URL/glob/screen/0(webcam)")
parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="(optional) dataset.yaml path")
parser.add_argument("--imgsz", "--img", "--img-size", nargs="+", type=int, default=[640], help="inference size h,w")
parser.add_argument("--conf-thres", type=float, default=0.25, help="confidence threshold")
parser.add_argument("--iou-thres", type=float, default=0.45, help="NMS IoU threshold")
parser.add_argument("--max-det", type=int, default=1000, help="maximum detections per image")
parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
parser.add_argument("--view-img", action="store_true", help="show results")
parser.add_argument("--save-txt", action="store_true", help="save results to *.txt")
parser.add_argument("--save-conf", action="store_true", help="save confidences in --save-txt labels")
parser.add_argument("--save-crop", action="store_true", help="save cropped prediction boxes")
parser.add_argument("--nosave", action="store_true", help="do not save images/videos")
parser.add_argument("--classes", nargs="+", type=int, help="filter by class: --classes 0, or --classes 0 2 3")
parser.add_argument("--agnostic-nms", action="store_true", help="class-agnostic NMS")
parser.add_argument("--augment", action="store_true", help="augmented inference")
parser.add_argument("--visualize", action="store_true", help="visualize features")
parser.add_argument("--update", action="store_true", help="update all models")
parser.add_argument("--project", default=ROOT / "runs/predict-seg", help="save results to project/name")
parser.add_argument("--name", default="exp", help="save results to project/name")
parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment")
parser.add_argument("--line-thickness", default=3, type=int, help="bounding box thickness (pixels)")
parser.add_argument("--hide-labels", default=False, action="store_true", help="hide labels")
parser.add_argument("--hide-conf", default=False, action="store_true", help="hide confidences")
parser.add_argument("--half", action="store_true", help="use FP16 half-precision inference")
parser.add_argument("--dnn", action="store_true", help="use OpenCV DNN for ONNX inference")
parser.add_argument("--vid-stride", type=int, default=1, help="video frame-rate stride")
parser.add_argument("--retina-masks", action="store_true", help="whether to plot masks in native resolution")
opt = parser.parse_args()
opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
print_args(vars(opt))
return opt
def main(opt):
check_requirements(ROOT / "requirements.txt", exclude=("tensorboard", "thop"))
run(**vars(opt))
if __name__ == "__main__":
opt = parse_opt()
main(opt) | --- +++ @@ -1,4 +1,32 @@ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""
+Run YOLOv5 segmentation inference on images, videos, directories, streams, etc.
+
+Usage - sources:
+ $ python segment/predict.py --weights yolov5s-seg.pt --source 0 # webcam
+ img.jpg # image
+ vid.mp4 # video
+ screen # screenshot
+ path/ # directory
+ list.txt # list of images
+ list.streams # list of streams
+ 'path/*.jpg' # glob
+ 'https://youtu.be/LNwODJXcvt4' # YouTube
+ 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
+
+Usage - formats:
+ $ python segment/predict.py --weights yolov5s-seg.pt # PyTorch
+ yolov5s-seg.torchscript # TorchScript
+ yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn
+ yolov5s-seg_openvino_model # OpenVINO
+ yolov5s-seg.engine # TensorRT
+ yolov5s-seg.mlmodel # CoreML (macOS-only)
+ yolov5s-seg_saved_model # TensorFlow SavedModel
+ yolov5s-seg.pb # TensorFlow GraphDef
+ yolov5s-seg.tflite # TensorFlow Lite
+ yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU
+ yolov5s-seg_paddle_model # PaddlePaddle
+"""
import argparse
import os
@@ -69,6 +97,7 @@ vid_stride=1, # video frame-rate stride
retina_masks=False,
):
+ """Run YOLOv5 segmentation inference on diverse sources including images, videos, directories, and streams."""
source = str(source)
save_img = not nosave and not source.endswith(".txt") # save inference images
is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
@@ -229,6 +258,9 @@
def parse_opt():
+ """Parses command-line options for YOLOv5 inference including model paths, data sources, inference settings, and
+ output preferences.
+ """
parser = argparse.ArgumentParser()
parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s-seg.pt", help="model path(s)")
parser.add_argument("--source", type=str, default=ROOT / "data/images", help="file/dir/URL/glob/screen/0(webcam)")
@@ -265,10 +297,11 @@
def main(opt):
+ """Executes YOLOv5 model inference with given options, checking for requirements before launching."""
check_requirements(ROOT / "requirements.txt", exclude=("tensorboard", "thop"))
run(**vars(opt))
if __name__ == "__main__":
opt = parse_opt()
- main(opt)
+ main(opt)
| https://raw.githubusercontent.com/ultralytics/yolov5/HEAD/segment/predict.py |
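As with detect.py, the segmentation `run()` documented above is importable. A minimal sketch under the same assumptions (repository root on `sys.path`, local `yolov5s-seg.pt` checkpoint):

```python
# Sketch: invoke segment/predict.py's run() directly
# (assumptions: repo root on sys.path, local yolov5s-seg.pt checkpoint)
from segment.predict import run

run(
    weights="yolov5s-seg.pt",  # segmentation checkpoint
    source="data/images",      # inputs to segment
    retina_masks=True,         # plot masks at native image resolution
    save_txt=True,             # write normalized polygon labels per image
)
```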
Add docstrings to improve readability | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
import argparse
import math
import os
import random
import subprocess
import sys
import time
from copy import deepcopy
from datetime import datetime, timedelta
from pathlib import Path
try:
import comet_ml # must be imported before torch (if installed)
except ImportError:
comet_ml = None
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import yaml
from torch.optim import lr_scheduler
from tqdm import tqdm
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0] # YOLOv5 root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
from ultralytics.utils.patches import torch_load
import val as validate # for end-of-epoch mAP
from models.experimental import attempt_load
from models.yolo import Model
from utils.autoanchor import check_anchors
from utils.autobatch import check_train_batch_size
from utils.callbacks import Callbacks
from utils.dataloaders import create_dataloader
from utils.downloads import attempt_download, is_url
from utils.general import (
LOGGER,
TQDM_BAR_FORMAT,
check_amp,
check_dataset,
check_file,
check_git_info,
check_git_status,
check_img_size,
check_requirements,
check_suffix,
check_yaml,
colorstr,
get_latest_run,
increment_path,
init_seeds,
intersect_dicts,
labels_to_class_weights,
labels_to_image_weights,
methods,
one_cycle,
print_args,
print_mutation,
strip_optimizer,
yaml_save,
)
from utils.loggers import LOGGERS, Loggers
from utils.loggers.comet.comet_utils import check_comet_resume
from utils.loss import ComputeLoss
from utils.metrics import fitness
from utils.plots import plot_evolve
from utils.torch_utils import (
EarlyStopping,
ModelEMA,
de_parallel,
select_device,
smart_DDP,
smart_optimizer,
smart_resume,
torch_distributed_zero_first,
)
LOCAL_RANK = int(os.getenv("LOCAL_RANK", -1)) # https://pytorch.org/docs/stable/elastic/run.html
RANK = int(os.getenv("RANK", -1))
WORLD_SIZE = int(os.getenv("WORLD_SIZE", 1))
GIT_INFO = check_git_info()
def train(hyp, opt, device, callbacks):
save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = (
Path(opt.save_dir),
opt.epochs,
opt.batch_size,
opt.weights,
opt.single_cls,
opt.evolve,
opt.data,
opt.cfg,
opt.resume,
opt.noval,
opt.nosave,
opt.workers,
opt.freeze,
)
callbacks.run("on_pretrain_routine_start")
# Directories
w = save_dir / "weights" # weights dir
(w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir
last, best = w / "last.pt", w / "best.pt"
# Hyperparameters
if isinstance(hyp, str):
with open(hyp, errors="ignore") as f:
hyp = yaml.safe_load(f) # load hyps dict
LOGGER.info(colorstr("hyperparameters: ") + ", ".join(f"{k}={v}" for k, v in hyp.items()))
opt.hyp = hyp.copy() # for saving hyps to checkpoints
# Save run settings
if not evolve:
yaml_save(save_dir / "hyp.yaml", hyp)
yaml_save(save_dir / "opt.yaml", vars(opt))
# Loggers
data_dict = None
if RANK in {-1, 0}:
include_loggers = list(LOGGERS)
if getattr(opt, "ndjson_console", False):
include_loggers.append("ndjson_console")
if getattr(opt, "ndjson_file", False):
include_loggers.append("ndjson_file")
loggers = Loggers(
save_dir=save_dir,
weights=weights,
opt=opt,
hyp=hyp,
logger=LOGGER,
include=tuple(include_loggers),
)
# Register actions
for k in methods(loggers):
callbacks.register_action(k, callback=getattr(loggers, k))
# Process custom dataset artifact link
data_dict = loggers.remote_dataset
if resume: # If resuming runs from remote artifact
weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size
# Config
plots = not evolve and not opt.noplots # create plots
cuda = device.type != "cpu"
init_seeds(opt.seed + 1 + RANK, deterministic=True)
with torch_distributed_zero_first(LOCAL_RANK):
data_dict = data_dict or check_dataset(data) # check if None
train_path, val_path = data_dict["train"], data_dict["val"]
nc = 1 if single_cls else int(data_dict["nc"]) # number of classes
names = {0: "item"} if single_cls and len(data_dict["names"]) != 1 else data_dict["names"] # class names
is_coco = isinstance(val_path, str) and val_path.endswith("coco/val2017.txt") # COCO dataset
# Model
check_suffix(weights, ".pt") # check weights
pretrained = weights.endswith(".pt")
if pretrained:
with torch_distributed_zero_first(LOCAL_RANK):
weights = attempt_download(weights) # download if not found locally
ckpt = torch_load(weights, map_location="cpu") # load checkpoint to CPU to avoid CUDA memory leak
model = Model(cfg or ckpt["model"].yaml, ch=3, nc=nc, anchors=hyp.get("anchors")).to(device) # create
exclude = ["anchor"] if (cfg or hyp.get("anchors")) and not resume else [] # exclude keys
csd = ckpt["model"].float().state_dict() # checkpoint state_dict as FP32
csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect
model.load_state_dict(csd, strict=False) # load
LOGGER.info(f"Transferred {len(csd)}/{len(model.state_dict())} items from {weights}") # report
else:
model = Model(cfg, ch=3, nc=nc, anchors=hyp.get("anchors")).to(device) # create
amp = check_amp(model) # check AMP
# Freeze
freeze = [f"model.{x}." for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze
for k, v in model.named_parameters():
v.requires_grad = True # train all layers
# v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results)
if any(x in k for x in freeze):
LOGGER.info(f"freezing {k}")
v.requires_grad = False
# Image size
gs = max(int(model.stride.max()), 32) # grid size (max stride)
imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple
# Batch size
if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size
batch_size = check_train_batch_size(model, imgsz, amp)
loggers.on_params_update({"batch_size": batch_size})
# Optimizer
nbs = 64 # nominal batch size
accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing
hyp["weight_decay"] *= batch_size * accumulate / nbs # scale weight_decay
optimizer = smart_optimizer(model, opt.optimizer, hyp["lr0"], hyp["momentum"], hyp["weight_decay"])
# Scheduler
if opt.cos_lr:
lf = one_cycle(1, hyp["lrf"], epochs) # cosine 1->hyp['lrf']
else:
def lf(x):
return (1 - x / epochs) * (1.0 - hyp["lrf"]) + hyp["lrf"] # linear
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs)
# EMA
ema = ModelEMA(model) if RANK in {-1, 0} else None
# Resume
best_fitness, start_epoch = 0.0, 0
if pretrained:
if resume:
best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume)
del ckpt, csd
# DP mode
if cuda and RANK == -1 and torch.cuda.device_count() > 1:
LOGGER.warning(
"WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n"
"See Multi-GPU Tutorial at https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training to get started."
)
model = torch.nn.DataParallel(model)
# SyncBatchNorm
if opt.sync_bn and cuda and RANK != -1:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
LOGGER.info("Using SyncBatchNorm()")
# Trainloader
train_loader, dataset = create_dataloader(
train_path,
imgsz,
batch_size // WORLD_SIZE,
gs,
single_cls,
hyp=hyp,
augment=True,
cache=None if opt.cache == "val" else opt.cache,
rect=opt.rect,
rank=LOCAL_RANK,
workers=workers,
image_weights=opt.image_weights,
quad=opt.quad,
prefix=colorstr("train: "),
shuffle=True,
seed=opt.seed,
)
labels = np.concatenate(dataset.labels, 0)
mlc = int(labels[:, 0].max()) # max label class
assert mlc < nc, f"Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}"
# Process 0
if RANK in {-1, 0}:
val_loader = create_dataloader(
val_path,
imgsz,
batch_size // WORLD_SIZE * 2,
gs,
single_cls,
hyp=hyp,
cache=None if noval else opt.cache,
rect=True,
rank=-1,
workers=workers * 2,
pad=0.5,
prefix=colorstr("val: "),
)[0]
if not resume:
if not opt.noautoanchor:
check_anchors(dataset, model=model, thr=hyp["anchor_t"], imgsz=imgsz) # run AutoAnchor
model.half().float() # pre-reduce anchor precision
callbacks.run("on_pretrain_routine_end", labels, names)
# DDP mode
if cuda and RANK != -1:
model = smart_DDP(model)
# Model attributes
nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps)
hyp["box"] *= 3 / nl # scale to layers
hyp["cls"] *= nc / 80 * 3 / nl # scale to classes and layers
hyp["obj"] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers
hyp["label_smoothing"] = opt.label_smoothing
model.nc = nc # attach number of classes to model
model.hyp = hyp # attach hyperparameters to model
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights
model.names = names
# Start training
t0 = time.time()
nb = len(train_loader) # number of batches
nw = max(round(hyp["warmup_epochs"] * nb), 100) # number of warmup iterations, max(3 epochs, 100 iterations)
# nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
last_opt_step = -1
maps = np.zeros(nc) # mAP per class
results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
scheduler.last_epoch = start_epoch - 1 # do not move
scaler = torch.cuda.amp.GradScaler(enabled=amp)
stopper, stop = EarlyStopping(patience=opt.patience), False
compute_loss = ComputeLoss(model) # init loss class
callbacks.run("on_train_start")
LOGGER.info(
f"Image sizes {imgsz} train, {imgsz} val\n"
f"Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n"
f"Logging results to {colorstr('bold', save_dir)}\n"
f"Starting training for {epochs} epochs..."
)
for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
callbacks.run("on_train_epoch_start")
model.train()
# Update image weights (optional, single-GPU only)
if opt.image_weights:
cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights
iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights
dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx
# Update mosaic border (optional)
# b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
# dataset.mosaic_border = [b - imgsz, -b] # height, width borders
mloss = torch.zeros(3, device=device) # mean losses
if RANK != -1:
train_loader.sampler.set_epoch(epoch)
pbar = enumerate(train_loader)
LOGGER.info(("\n" + "%11s" * 7) % ("Epoch", "GPU_mem", "box_loss", "obj_loss", "cls_loss", "Instances", "Size"))
if RANK in {-1, 0}:
pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT) # progress bar
optimizer.zero_grad()
for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------
callbacks.run("on_train_batch_start")
ni = i + nb * epoch # number integrated batches (since train start)
imgs = imgs.to(device, non_blocking=True).float() / 255 # uint8 to float32, 0-255 to 0.0-1.0
# Warmup
if ni <= nw:
xi = [0, nw] # x interp
# compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())
for j, x in enumerate(optimizer.param_groups):
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
x["lr"] = np.interp(ni, xi, [hyp["warmup_bias_lr"] if j == 0 else 0.0, x["initial_lr"] * lf(epoch)])
if "momentum" in x:
x["momentum"] = np.interp(ni, xi, [hyp["warmup_momentum"], hyp["momentum"]])
# Multi-scale
if opt.multi_scale:
sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5) + gs) // gs * gs # size
sf = sz / max(imgs.shape[2:]) # scale factor
if sf != 1:
ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple)
imgs = nn.functional.interpolate(imgs, size=ns, mode="bilinear", align_corners=False)
# Forward
with torch.cuda.amp.autocast(amp):
pred = model(imgs) # forward
loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size
if RANK != -1:
loss *= WORLD_SIZE # gradient averaged between devices in DDP mode
if opt.quad:
loss *= 4.0
# Backward
scaler.scale(loss).backward()
# Optimize - https://pytorch.org/docs/master/notes/amp_examples.html
if ni - last_opt_step >= accumulate:
scaler.unscale_(optimizer) # unscale gradients
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients
scaler.step(optimizer) # optimizer.step
scaler.update()
optimizer.zero_grad()
if ema:
ema.update(model)
last_opt_step = ni
# Log
if RANK in {-1, 0}:
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = f"{torch.cuda.memory_reserved() / 1e9 if torch.cuda.is_available() else 0:.3g}G" # (GB)
pbar.set_description(
("%11s" * 2 + "%11.4g" * 5)
% (f"{epoch}/{epochs - 1}", mem, *mloss, targets.shape[0], imgs.shape[-1])
)
callbacks.run("on_train_batch_end", model, ni, imgs, targets, paths, list(mloss))
if callbacks.stop_training:
return
# end batch ------------------------------------------------------------------------------------------------
# Scheduler
lr = [x["lr"] for x in optimizer.param_groups] # for loggers
scheduler.step()
if RANK in {-1, 0}:
# mAP
callbacks.run("on_train_epoch_end", epoch=epoch)
ema.update_attr(model, include=["yaml", "nc", "hyp", "names", "stride", "class_weights"])
final_epoch = (epoch + 1 == epochs) or stopper.possible_stop
if not noval or final_epoch: # Calculate mAP
results, maps, _ = validate.run(
data_dict,
batch_size=batch_size // WORLD_SIZE * 2,
imgsz=imgsz,
half=amp,
model=ema.ema,
single_cls=single_cls,
dataloader=val_loader,
save_dir=save_dir,
plots=False,
callbacks=callbacks,
compute_loss=compute_loss,
)
# Update best mAP
fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
stop = stopper(epoch=epoch, fitness=fi) # early stop check
if fi > best_fitness:
best_fitness = fi
log_vals = list(mloss) + list(results) + lr
callbacks.run("on_fit_epoch_end", log_vals, epoch, best_fitness, fi)
# Save model
if (not nosave) or (final_epoch and not evolve): # if save
ckpt = {
"epoch": epoch,
"best_fitness": best_fitness,
"model": deepcopy(de_parallel(model)).half(),
"ema": deepcopy(ema.ema).half(),
"updates": ema.updates,
"optimizer": optimizer.state_dict(),
"opt": vars(opt),
"git": GIT_INFO, # {remote, branch, commit} if a git repo
"date": datetime.now().isoformat(),
}
# Save last, best and delete
torch.save(ckpt, last)
if best_fitness == fi:
torch.save(ckpt, best)
if opt.save_period > 0 and epoch % opt.save_period == 0:
torch.save(ckpt, w / f"epoch{epoch}.pt")
del ckpt
callbacks.run("on_model_save", last, epoch, final_epoch, best_fitness, fi)
# EarlyStopping
if RANK != -1: # if DDP training
broadcast_list = [stop if RANK == 0 else None]
dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks
if RANK != 0:
stop = broadcast_list[0]
if stop:
break # must break all DDP ranks
# end epoch ----------------------------------------------------------------------------------------------------
# end training -----------------------------------------------------------------------------------------------------
if RANK in {-1, 0}:
LOGGER.info(f"\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.")
for f in last, best:
if f.exists():
strip_optimizer(f) # strip optimizers
if f is best:
LOGGER.info(f"\nValidating {f}...")
results, _, _ = validate.run(
data_dict,
batch_size=batch_size // WORLD_SIZE * 2,
imgsz=imgsz,
model=attempt_load(f, device).half(),
iou_thres=0.65 if is_coco else 0.60, # best pycocotools at iou 0.65
single_cls=single_cls,
dataloader=val_loader,
save_dir=save_dir,
save_json=is_coco,
verbose=True,
plots=plots,
callbacks=callbacks,
compute_loss=compute_loss,
) # val best model with plots
if is_coco:
callbacks.run("on_fit_epoch_end", list(mloss) + list(results) + lr, epoch, best_fitness, fi)
callbacks.run("on_train_end", last, best, epoch, results)
torch.cuda.empty_cache()
return results
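# Minimal sketch of the two LR schedules used above (illustrative only; the
# epoch count and lrf value are assumed example numbers, not project defaults).
def _example_lr_schedules(epochs=100, lrf=0.01):
    def cos_lf(x):
        return ((1 - math.cos(x * math.pi / epochs)) / 2) * (lrf - 1) + 1  # one_cycle(1, lrf, epochs) shape

    def lin_lf(x):
        return (1 - x / epochs) * (1.0 - lrf) + lrf  # same shape as lf() above

    # both start at 1.0 and end at lrf; the cosine decays more slowly at first
    return [(x, cos_lf(x), lin_lf(x)) for x in (0, epochs // 2, epochs - 1)]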
def parse_opt(known=False):
parser = argparse.ArgumentParser()
parser.add_argument("--weights", type=str, default=ROOT / "yolov5s.pt", help="initial weights path")
parser.add_argument("--cfg", type=str, default="", help="model.yaml path")
parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="dataset.yaml path")
parser.add_argument("--hyp", type=str, default=ROOT / "data/hyps/hyp.scratch-low.yaml", help="hyperparameters path")
parser.add_argument("--epochs", type=int, default=100, help="total training epochs")
parser.add_argument("--batch-size", type=int, default=16, help="total batch size for all GPUs, -1 for autobatch")
parser.add_argument("--imgsz", "--img", "--img-size", type=int, default=640, help="train, val image size (pixels)")
parser.add_argument("--rect", action="store_true", help="rectangular training")
parser.add_argument("--resume", nargs="?", const=True, default=False, help="resume most recent training")
parser.add_argument("--nosave", action="store_true", help="only save final checkpoint")
parser.add_argument("--noval", action="store_true", help="only validate final epoch")
parser.add_argument("--noautoanchor", action="store_true", help="disable AutoAnchor")
parser.add_argument("--noplots", action="store_true", help="save no plot files")
parser.add_argument("--evolve", type=int, nargs="?", const=300, help="evolve hyperparameters for x generations")
parser.add_argument(
"--evolve_population", type=str, default=ROOT / "data/hyps", help="location for loading population"
)
parser.add_argument("--resume_evolve", type=str, default=None, help="resume evolve from last generation")
parser.add_argument("--bucket", type=str, default="", help="gsutil bucket")
parser.add_argument("--cache", type=str, nargs="?", const="ram", help="image --cache ram/disk")
parser.add_argument("--image-weights", action="store_true", help="use weighted image selection for training")
parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
parser.add_argument("--multi-scale", action="store_true", help="vary img-size +/- 50%%")
parser.add_argument("--single-cls", action="store_true", help="train multi-class data as single-class")
parser.add_argument("--optimizer", type=str, choices=["SGD", "Adam", "AdamW"], default="SGD", help="optimizer")
parser.add_argument("--sync-bn", action="store_true", help="use SyncBatchNorm, only available in DDP mode")
parser.add_argument("--workers", type=int, default=8, help="max dataloader workers (per RANK in DDP mode)")
parser.add_argument("--project", default=ROOT / "runs/train", help="save to project/name")
parser.add_argument("--name", default="exp", help="save to project/name")
parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment")
parser.add_argument("--quad", action="store_true", help="quad dataloader")
parser.add_argument("--cos-lr", action="store_true", help="cosine LR scheduler")
parser.add_argument("--label-smoothing", type=float, default=0.0, help="Label smoothing epsilon")
parser.add_argument("--patience", type=int, default=100, help="EarlyStopping patience (epochs without improvement)")
parser.add_argument("--freeze", nargs="+", type=int, default=[0], help="Freeze layers: backbone=10, first3=0 1 2")
parser.add_argument("--save-period", type=int, default=-1, help="Save checkpoint every x epochs (disabled if < 1)")
parser.add_argument("--seed", type=int, default=0, help="Global training seed")
parser.add_argument("--local_rank", type=int, default=-1, help="Automatic DDP Multi-GPU argument, do not modify")
# Logger arguments
parser.add_argument("--entity", default=None, help="Entity")
parser.add_argument("--upload_dataset", nargs="?", const=True, default=False, help='Upload data, "val" option')
parser.add_argument("--bbox_interval", type=int, default=-1, help="Set bounding-box image logging interval")
parser.add_argument("--artifact_alias", type=str, default="latest", help="Version of dataset artifact to use")
# NDJSON logging
parser.add_argument("--ndjson-console", action="store_true", help="Log ndjson to console")
parser.add_argument("--ndjson-file", action="store_true", help="Log ndjson to file")
return parser.parse_known_args()[0] if known else parser.parse_args()
def main(opt, callbacks=Callbacks()):
if RANK in {-1, 0}:
print_args(vars(opt))
check_git_status()
check_requirements(ROOT / "requirements.txt")
# Resume (from specified or most recent last.pt)
if opt.resume and not check_comet_resume(opt) and not opt.evolve:
last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run())
opt_yaml = last.parent.parent / "opt.yaml" # train options yaml
opt_data = opt.data # original dataset
if opt_yaml.is_file():
with open(opt_yaml, errors="ignore") as f:
d = yaml.safe_load(f)
else:
d = torch_load(last, map_location="cpu")["opt"]
opt = argparse.Namespace(**d) # replace
opt.cfg, opt.weights, opt.resume = "", str(last), True # reinstate
if is_url(opt_data):
opt.data = check_file(opt_data) # avoid HUB resume auth timeout
else:
opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = (
check_file(opt.data),
check_yaml(opt.cfg),
check_yaml(opt.hyp),
str(opt.weights),
str(opt.project),
) # checks
assert len(opt.cfg) or len(opt.weights), "either --cfg or --weights must be specified"
if opt.evolve:
if opt.project == str(ROOT / "runs/train"): # if default project name, rename to runs/evolve
opt.project = str(ROOT / "runs/evolve")
opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume
if opt.name == "cfg":
opt.name = Path(opt.cfg).stem # use model.yaml as name
opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))
# DDP mode
device = select_device(opt.device, batch_size=opt.batch_size)
if LOCAL_RANK != -1:
msg = "is not compatible with YOLOv5 Multi-GPU DDP training"
assert not opt.image_weights, f"--image-weights {msg}"
assert not opt.evolve, f"--evolve {msg}"
assert opt.batch_size != -1, f"AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size"
assert opt.batch_size % WORLD_SIZE == 0, f"--batch-size {opt.batch_size} must be multiple of WORLD_SIZE"
assert torch.cuda.device_count() > LOCAL_RANK, "insufficient CUDA devices for DDP command"
torch.cuda.set_device(LOCAL_RANK)
device = torch.device("cuda", LOCAL_RANK)
dist.init_process_group(
backend="nccl" if dist.is_nccl_available() else "gloo", timeout=timedelta(seconds=10800)
)
# Train
if not opt.evolve:
train(opt.hyp, opt, device, callbacks)
# Evolve hyperparameters (optional)
else:
# Hyperparameter evolution metadata (including this hyperparameter True-False, lower_limit, upper_limit)
meta = {
"lr0": (False, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3)
"lrf": (False, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf)
"momentum": (False, 0.6, 0.98), # SGD momentum/Adam beta1
"weight_decay": (False, 0.0, 0.001), # optimizer weight decay
"warmup_epochs": (False, 0.0, 5.0), # warmup epochs (fractions ok)
"warmup_momentum": (False, 0.0, 0.95), # warmup initial momentum
"warmup_bias_lr": (False, 0.0, 0.2), # warmup initial bias lr
"box": (False, 0.02, 0.2), # box loss gain
"cls": (False, 0.2, 4.0), # cls loss gain
"cls_pw": (False, 0.5, 2.0), # cls BCELoss positive_weight
"obj": (False, 0.2, 4.0), # obj loss gain (scale with pixels)
"obj_pw": (False, 0.5, 2.0), # obj BCELoss positive_weight
"iou_t": (False, 0.1, 0.7), # IoU training threshold
"anchor_t": (False, 2.0, 8.0), # anchor-multiple threshold
"anchors": (False, 2.0, 10.0), # anchors per output grid (0 to ignore)
"fl_gamma": (False, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5)
"hsv_h": (True, 0.0, 0.1), # image HSV-Hue augmentation (fraction)
"hsv_s": (True, 0.0, 0.9), # image HSV-Saturation augmentation (fraction)
"hsv_v": (True, 0.0, 0.9), # image HSV-Value augmentation (fraction)
"degrees": (True, 0.0, 45.0), # image rotation (+/- deg)
"translate": (True, 0.0, 0.9), # image translation (+/- fraction)
"scale": (True, 0.0, 0.9), # image scale (+/- gain)
"shear": (True, 0.0, 10.0), # image shear (+/- deg)
"perspective": (True, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001
"flipud": (True, 0.0, 1.0), # image flip up-down (probability)
"fliplr": (True, 0.0, 1.0), # image flip left-right (probability)
"mosaic": (True, 0.0, 1.0), # image mosaic (probability)
"mixup": (True, 0.0, 1.0), # image mixup (probability)
"copy_paste": (True, 0.0, 1.0), # segment copy-paste (probability)
}
# GA configs
pop_size = 50
mutation_rate_min = 0.01
mutation_rate_max = 0.5
crossover_rate_min = 0.5
crossover_rate_max = 1
min_elite_size = 2
max_elite_size = 5
tournament_size_min = 2
tournament_size_max = 10
with open(opt.hyp, errors="ignore") as f:
hyp = yaml.safe_load(f) # load hyps dict
if "anchors" not in hyp: # anchors commented in hyp.yaml
hyp["anchors"] = 3
if opt.noautoanchor:
del hyp["anchors"], meta["anchors"]
opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir) # only val/save final epoch
# ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices
evolve_yaml, evolve_csv = save_dir / "hyp_evolve.yaml", save_dir / "evolve.csv"
if opt.bucket:
# download evolve.csv if exists
subprocess.run(
[
"gsutil",
"cp",
f"gs://{opt.bucket}/evolve.csv",
str(evolve_csv),
]
)
# Delete the items in meta dictionary whose first value is False
del_ = [item for item, value_ in meta.items() if value_[0] is False]
hyp_GA = hyp.copy() # Make a copy of hyp dictionary
for item in del_:
del meta[item] # Remove the item from meta dictionary
del hyp_GA[item] # Remove the item from hyp_GA dictionary
# Set lower_limit and upper_limit arrays to hold the search space boundaries
lower_limit = np.array([meta[k][1] for k in hyp_GA.keys()])
upper_limit = np.array([meta[k][2] for k in hyp_GA.keys()])
# Create gene_ranges list to hold the range of values for each gene in the population
gene_ranges = [(lower_limit[i], upper_limit[i]) for i in range(len(upper_limit))]
# Initialize the population with initial_values or random values
initial_values = []
# If resuming evolution from a previous checkpoint
if opt.resume_evolve is not None:
assert os.path.isfile(ROOT / opt.resume_evolve), "evolve population path is wrong!"
with open(ROOT / opt.resume_evolve, errors="ignore") as f:
evolve_population = yaml.safe_load(f)
for value in evolve_population.values():
value = np.array([value[k] for k in hyp_GA.keys()])
initial_values.append(list(value))
# If not resuming from a previous checkpoint, generate initial values from .yaml files in opt.evolve_population
else:
yaml_files = [f for f in os.listdir(opt.evolve_population) if f.endswith(".yaml")]
for file_name in yaml_files:
with open(os.path.join(opt.evolve_population, file_name)) as yaml_file:
value = yaml.safe_load(yaml_file)
value = np.array([value[k] for k in hyp_GA.keys()])
initial_values.append(list(value))
# Generate random values within the search space for the rest of the population
if initial_values is None:
population = [generate_individual(gene_ranges, len(hyp_GA)) for _ in range(pop_size)]
elif pop_size > 1:
population = [generate_individual(gene_ranges, len(hyp_GA)) for _ in range(pop_size - len(initial_values))]
for initial_value in initial_values:
population = [initial_value, *population]
# Run the genetic algorithm for a fixed number of generations
list_keys = list(hyp_GA.keys())
for generation in range(opt.evolve):
if generation >= 1:
save_dict = {}
for i in range(len(population)):
little_dict = {list_keys[j]: float(population[i][j]) for j in range(len(population[i]))}
save_dict[f"gen{generation!s}number{i!s}"] = little_dict
with open(save_dir / "evolve_population.yaml", "w") as outfile:
yaml.dump(save_dict, outfile, default_flow_style=False)
# Adaptive elite size
elite_size = min_elite_size + int((max_elite_size - min_elite_size) * (generation / opt.evolve))
# Evaluate the fitness of each individual in the population
fitness_scores = []
for individual in population:
for key, value in zip(hyp_GA.keys(), individual):
hyp_GA[key] = value
hyp.update(hyp_GA)
results = train(hyp.copy(), opt, device, callbacks)
callbacks = Callbacks()
# Write mutation results
keys = (
"metrics/precision",
"metrics/recall",
"metrics/mAP_0.5",
"metrics/mAP_0.5:0.95",
"val/box_loss",
"val/obj_loss",
"val/cls_loss",
)
print_mutation(keys, results, hyp.copy(), save_dir, opt.bucket)
fitness_scores.append(results[2])
# Select the fittest individuals for reproduction using adaptive tournament selection
selected_indices = []
for _ in range(pop_size - elite_size):
# Adaptive tournament size
tournament_size = max(
max(2, tournament_size_min),
int(min(tournament_size_max, pop_size) - (generation / (opt.evolve / 10))),
)
# Perform tournament selection to choose the best individual
tournament_indices = random.sample(range(pop_size), tournament_size)
tournament_fitness = [fitness_scores[j] for j in tournament_indices]
winner_index = tournament_indices[tournament_fitness.index(max(tournament_fitness))]
selected_indices.append(winner_index)
# Add the elite individuals to the selected indices
elite_indices = [i for i in range(pop_size) if fitness_scores[i] in sorted(fitness_scores)[-elite_size:]]
selected_indices.extend(elite_indices)
# Create the next generation through crossover and mutation
next_generation = []
for _ in range(pop_size):
parent1_index = selected_indices[random.randint(0, pop_size - 1)]
parent2_index = selected_indices[random.randint(0, pop_size - 1)]
# Adaptive crossover rate
crossover_rate = max(
crossover_rate_min, min(crossover_rate_max, crossover_rate_max - (generation / opt.evolve))
)
if random.uniform(0, 1) < crossover_rate:
crossover_point = random.randint(1, len(hyp_GA) - 1)
child = population[parent1_index][:crossover_point] + population[parent2_index][crossover_point:]
else:
child = population[parent1_index]
# Adaptive mutation rate
mutation_rate = max(
mutation_rate_min, min(mutation_rate_max, mutation_rate_max - (generation / opt.evolve))
)
for j in range(len(hyp_GA)):
if random.uniform(0, 1) < mutation_rate:
child[j] += random.uniform(-0.1, 0.1)
child[j] = min(max(child[j], gene_ranges[j][0]), gene_ranges[j][1])
next_generation.append(child)
# Replace the old population with the new generation
population = next_generation
# Print the best solution found
best_index = fitness_scores.index(max(fitness_scores))
best_individual = population[best_index]
print("Best solution found:", best_individual)
# Plot results
plot_evolve(evolve_csv)
LOGGER.info(
f"Hyperparameter evolution finished {opt.evolve} generations\n"
f"Results saved to {colorstr('bold', save_dir)}\n"
f"Usage example: $ python train.py --hyp {evolve_yaml}"
)
def generate_individual(input_ranges, individual_length):
individual = []
for i in range(individual_length):
lower_bound, upper_bound = input_ranges[i]
individual.append(random.uniform(lower_bound, upper_bound))
return individual
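# Minimal usage sketch for generate_individual() (illustrative only; the gene
# ranges below are assumed example bounds, not real hyperparameter limits).
def _generate_individual_example():
    ranges = [(0.0, 1.0), (0.1, 0.9), (2.0, 8.0)]
    individual = generate_individual(ranges, len(ranges))
    assert all(low <= gene <= high for gene, (low, high) in zip(individual, ranges))
    return individual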
def run(**kwargs):
opt = parse_opt(True)
for k, v in kwargs.items():
setattr(opt, k, v)
main(opt)
return opt
if __name__ == "__main__":
opt = parse_opt()
main(opt) | ---
+++
@@ -1,4 +1,18 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""
+Train a YOLOv5 model on a custom dataset. Models and datasets download automatically from the latest YOLOv5 release.
+
+Usage - Single-GPU training:
+ $ python train.py --data coco128.yaml --weights yolov5s.pt --img 640 # from pretrained (recommended)
+ $ python train.py --data coco128.yaml --weights '' --cfg yolov5s.yaml --img 640 # from scratch
+
+Usage - Multi-GPU DDP training:
+ $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 train.py --data coco128.yaml --weights yolov5s.pt --img 640 --device 0,1,2,3
+
+Models: https://github.com/ultralytics/yolov5/tree/master/models
+Datasets: https://github.com/ultralytics/yolov5/tree/master/data
+Tutorial: https://docs.ultralytics.com/yolov5/tutorials/train_custom_data
+"""
import argparse
import math
@@ -89,6 +103,39 @@
def train(hyp, opt, device, callbacks):
+ """Train a YOLOv5 model on a custom dataset using specified hyperparameters, options, and device, managing datasets,
+ model architecture, loss computation, and optimizer steps.
+
+ Args:
+ hyp (str | dict): Path to the hyperparameters YAML file or a dictionary of hyperparameters.
+ opt (argparse.Namespace): Parsed command-line arguments containing training options.
+ device (torch.device): Device on which training occurs, e.g., 'cuda' or 'cpu'.
+ callbacks (Callbacks): Callback functions for various training events.
+
+ Returns:
+ (tuple): Final validation results (P, R, mAP@0.5, mAP@0.5:0.95, val_box_loss, val_obj_loss, val_cls_loss).
+
+ Examples:
+ Single-GPU training:
+ ```bash
+ $ python train.py --data coco128.yaml --weights yolov5s.pt --img 640 # from pretrained (recommended)
+ $ python train.py --data coco128.yaml --weights '' --cfg yolov5s.yaml --img 640 # from scratch
+ ```
+
+ Multi-GPU DDP training:
+ ```bash
+ $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 train.py --data coco128.yaml --weights
+ yolov5s.pt --img 640 --device 0,1,2,3
+ ```
+
+ For more usage details, refer to:
+ - Models: https://github.com/ultralytics/yolov5/tree/master/models
+ - Datasets: https://github.com/ultralytics/yolov5/tree/master/data
+ - Tutorial: https://docs.ultralytics.com/yolov5/tutorials/train_custom_data
+
+ Notes:
+ Models and datasets download automatically from the latest YOLOv5 release.
+ """
save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = (
Path(opt.save_dir),
opt.epochs,
@@ -208,6 +255,7 @@
else:
def lf(x):
+ """Linear learning rate scheduler function with decay calculated by epoch proportion."""
return (1 - x / epochs) * (1.0 - hyp["lrf"]) + hyp["lrf"] # linear
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs)
@@ -496,6 +544,26 @@
def parse_opt(known=False):
+ """Parse command-line arguments for YOLOv5 training, validation, and testing.
+
+ Args:
+ known (bool, optional): If True, parses known arguments, ignoring the unknown. Defaults to False.
+
+ Returns:
+ (argparse.Namespace): Parsed command-line arguments containing options for YOLOv5 execution.
+
+ Examples:
+ ```python
+ from train import parse_opt
+ opt = parse_opt()
+ print(opt)
+ ```
+
+ Links:
+ - Models: https://github.com/ultralytics/yolov5/tree/master/models
+ - Datasets: https://github.com/ultralytics/yolov5/tree/master/data
+ - Tutorial: https://docs.ultralytics.com/yolov5/tutorials/train_custom_data
+ """
parser = argparse.ArgumentParser()
parser.add_argument("--weights", type=str, default=ROOT / "yolov5s.pt", help="initial weights path")
parser.add_argument("--cfg", type=str, default="", help="model.yaml path")
@@ -550,6 +618,20 @@
def main(opt, callbacks=Callbacks()):
+ """Runs the main entry point for training or hyperparameter evolution with specified options and optional callbacks.
+
+ Args:
+ opt (argparse.Namespace): The command-line arguments parsed for YOLOv5 training and evolution.
+ callbacks (utils.callbacks.Callbacks, optional): Callback functions for various training stages.
+ Defaults to Callbacks().
+
+ Returns:
+ None
+
+ Notes:
+ For detailed usage, refer to:
+ https://github.com/ultralytics/yolov5/tree/master/models
+ """
if RANK in {-1, 0}:
print_args(vars(opt))
check_git_status()
@@ -805,6 +887,28 @@
def generate_individual(input_ranges, individual_length):
+ """Generate an individual with random hyperparameters within specified ranges.
+
+ Args:
+ input_ranges (list[tuple[float, float]]): List of tuples where each tuple contains the lower and upper bounds
+ for the corresponding gene (hyperparameter).
+ individual_length (int): The number of genes (hyperparameters) in the individual.
+
+ Returns:
+ list[float]: A list representing a generated individual with random gene values within the specified ranges.
+
+ Examples:
+ ```python
+ input_ranges = [(0.01, 0.1), (0.1, 1.0), (0.9, 2.0)]
+ individual_length = 3
+ individual = generate_individual(input_ranges, individual_length)
+ print(individual) # Output: [0.035, 0.678, 1.456] (example output)
+ ```
+
+ Notes:
+ The individual returned will have a length equal to `individual_length`, with each gene value being a floating-point
+ number within its specified range in `input_ranges`.
+ """
individual = []
for i in range(individual_length):
lower_bound, upper_bound = input_ranges[i]
@@ -813,6 +917,64 @@
def run(**kwargs):
+ """Execute YOLOv5 training with specified options, allowing optional overrides through keyword arguments.
+
+ Args:
+ weights (str, optional): Path to initial weights. Defaults to ROOT / 'yolov5s.pt'.
+ cfg (str, optional): Path to model YAML configuration. Defaults to an empty string.
+ data (str, optional): Path to dataset YAML configuration. Defaults to ROOT / 'data/coco128.yaml'.
+ hyp (str, optional): Path to hyperparameters YAML configuration. Defaults to ROOT /
+ 'data/hyps/hyp.scratch-low.yaml'.
+ epochs (int, optional): Total number of training epochs. Defaults to 100.
+ batch_size (int, optional): Total batch size for all GPUs. Use -1 for automatic batch size determination.
+ Defaults to 16.
+ imgsz (int, optional): Image size (pixels) for training and validation. Defaults to 640.
+ rect (bool, optional): Use rectangular training. Defaults to False.
+ resume (bool | str, optional): Resume most recent training with an optional path. Defaults to False.
+ nosave (bool, optional): Only save the final checkpoint. Defaults to False.
+ noval (bool, optional): Only validate at the final epoch. Defaults to False.
+ noautoanchor (bool, optional): Disable AutoAnchor. Defaults to False.
+ noplots (bool, optional): Do not save plot files. Defaults to False.
+ evolve (int, optional): Evolve hyperparameters for a specified number of generations. Use 300 if provided
+ without a value.
+ evolve_population (str, optional): Directory for loading population during evolution. Defaults to
+ ROOT / 'data/hyps'.
+ resume_evolve (str, optional): Resume hyperparameter evolution from the last generation. Defaults to None.
+ bucket (str, optional): gsutil bucket for saving checkpoints. Defaults to an empty string.
+ cache (str, optional): Cache image data in 'ram' or 'disk'. Defaults to None.
+ image_weights (bool, optional): Use weighted image selection for training. Defaults to False.
+ device (str, optional): CUDA device identifier, e.g., '0', '0,1,2,3', or 'cpu'. Defaults to an empty string.
+ multi_scale (bool, optional): Use multi-scale training, varying image size by ±50%. Defaults to False.
+ single_cls (bool, optional): Train with multi-class data as single-class. Defaults to False.
+ optimizer (str, optional): Optimizer type, choices are ['SGD', 'Adam', 'AdamW']. Defaults to 'SGD'.
+ sync_bn (bool, optional): Use synchronized BatchNorm, only available in DDP mode. Defaults to False.
+ workers (int, optional): Maximum dataloader workers per rank in DDP mode. Defaults to 8.
+ project (str, optional): Directory for saving training runs. Defaults to ROOT / 'runs/train'.
+ name (str, optional): Name for saving the training run. Defaults to 'exp'.
+ exist_ok (bool, optional): Allow existing project/name without incrementing. Defaults to False.
+ quad (bool, optional): Use quad dataloader. Defaults to False.
+ cos_lr (bool, optional): Use cosine learning rate scheduler. Defaults to False.
+ label_smoothing (float, optional): Label smoothing epsilon value. Defaults to 0.0.
+ patience (int, optional): Patience for early stopping, measured in epochs without improvement. Defaults to 100.
+ freeze (list, optional): Layers to freeze, e.g., backbone=10, first 3 layers = [0, 1, 2]. Defaults to [0].
+ save_period (int, optional): Frequency in epochs to save checkpoints. Disabled if < 1. Defaults to -1.
+ seed (int, optional): Global training random seed. Defaults to 0.
+ local_rank (int, optional): Automatic DDP Multi-GPU argument. Do not modify. Defaults to -1.
+
+ Returns:
+ (argparse.Namespace): The options namespace used for the run, returned after YOLOv5 training or hyperparameter evolution is initiated.
+
+ Examples:
+ ```python
+ import train
+ train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt')
+ ```
+
+ Notes:
+ - Models: https://github.com/ultralytics/yolov5/tree/master/models
+ - Datasets: https://github.com/ultralytics/yolov5/tree/master/data
+ - Tutorial: https://docs.ultralytics.com/yolov5/tutorials/train_custom_data
+ """
opt = parse_opt(True)
for k, v in kwargs.items():
setattr(opt, k, v)
@@ -822,4 +984,4 @@
if __name__ == "__main__":
opt = parse_opt()
- main(opt)
+ main(opt)
| https://raw.githubusercontent.com/ultralytics/yolov5/HEAD/train.py |
Can you add docstrings to this Python file? | #!/usr/bin/env python
__all__ = ['sina_download', 'sina_download_by_vid', 'sina_download_by_vkey']
from ..common import *
from ..util.log import *
from hashlib import md5
from random import randint
from time import time
from xml.dom.minidom import parseString
import urllib.parse
def api_req(vid):
rand = "0.{0}{1}".format(randint(10000, 10000000), randint(10000, 10000000))
t = str(int('{0:b}'.format(int(time()))[:-6], 2))
k = md5((vid + 'Z6prk18aWxP278cVAH' + t + rand).encode('utf-8')).hexdigest()[:16] + t
url = 'http://ask.ivideo.sina.com.cn/v_play.php?vid={0}&ran={1}&p=i&k={2}'.format(vid, rand, k)
xml = get_content(url, headers=fake_headers)
return xml
def video_info(xml):
video = parseString(xml).getElementsByTagName('video')[0]
result = video.getElementsByTagName('result')[0]
if result.firstChild.nodeValue == 'error':
message = video.getElementsByTagName('message')[0]
return None, message.firstChild.nodeValue, None
vname = video.getElementsByTagName('vname')[0].firstChild.nodeValue
durls = video.getElementsByTagName('durl')
urls = []
size = 0
for durl in durls:
url = durl.getElementsByTagName('url')[0].firstChild.nodeValue
seg_size = durl.getElementsByTagName('filesize')[0].firstChild.nodeValue
urls.append(url)
size += int(seg_size)
return urls, vname, size
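# Minimal flow sketch (illustrative only; the vid is a made-up placeholder and
# api_req() performs a real network request):
#   xml = api_req('12345678')            # signed with k = md5(vid + salt + t + rand)[:16] + t
#   urls, vname, size = video_info(xml)  # (segment URLs, video name, total bytes)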
def sina_download_by_vid(vid, title=None, output_dir='.', merge=True, info_only=False):
xml = api_req(vid)
urls, name, size = video_info(xml)
if urls is None:
log.wtf(name)
title = name
print_info(site_info, title, 'flv', size)
if not info_only:
download_urls(urls, title, 'flv', size, output_dir = output_dir, merge = merge)
def sina_download_by_vkey(vkey, title=None, output_dir='.', merge=True, info_only=False):
url = 'http://video.sina.com/v/flvideo/%s_0.flv' % vkey
type, ext, size = url_info(url)
print_info(site_info, title, 'flv', size)
if not info_only:
download_urls([url], title, 'flv', size, output_dir = output_dir, merge = merge)
def sina_zxt(url, output_dir='.', merge=True, info_only=False, **kwargs):
ep = 'http://s.video.sina.com.cn/video/play?video_id='
frag = urllib.parse.urlparse(url).fragment
if not frag:
log.wtf('No video specified with fragment')
meta = json.loads(get_content(ep + frag))
if meta['code'] != 1:
# Yes they use 1 for success.
log.wtf(meta['message'])
title = meta['data']['title']
videos = sorted(meta['data']['videos'], key = lambda i: int(i['size']))
if len(videos) == 0:
log.wtf('No video file returned by API server')
vid = videos[-1]['file_id']
container = videos[-1]['type']
size = int(videos[-1]['size'])
if container == 'hlv':
container = 'flv'
urls, _, _ = video_info(api_req(vid))
print_info(site_info, title, container, size)
if not info_only:
download_urls(urls, title, container, size, output_dir=output_dir, merge=merge, **kwargs)
return
def sina_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
if 'news.sina.com.cn/zxt' in url:
sina_zxt(url, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)
return
vid = match1(url, r'vid=(\d+)')
if vid is None:
video_page = get_content(url)
vid = hd_vid = match1(video_page, r'hd_vid\s*:\s*\'([^\']+)\'')
if hd_vid == '0':
vids = match1(video_page, r'[^\w]vid\s*:\s*\'([^\']+)\'').split('|')
vid = vids[-1]
if vid is None:
vid = match1(video_page, r'vid:"?(\d+)"?')
if vid:
#title = match1(video_page, r'title\s*:\s*\'([^\']+)\'')
sina_download_by_vid(vid, output_dir=output_dir, merge=merge, info_only=info_only)
else:
vkey = match1(video_page, r'vkey\s*:\s*"([^"]+)"')
if vkey is None:
vid = match1(url, r'#(\d+)')
sina_download_by_vid(vid, output_dir=output_dir, merge=merge, info_only=info_only)
return
title = match1(video_page, r'title\s*:\s*"([^"]+)"')
sina_download_by_vkey(vkey, title=title, output_dir=output_dir, merge=merge, info_only=info_only)
site_info = "Sina.com"
download = sina_download
download_playlist = playlist_not_supported('sina') | ---
+++
@@ -39,6 +39,9 @@
return urls, vname, size
def sina_download_by_vid(vid, title=None, output_dir='.', merge=True, info_only=False):
+ """Downloads a Sina video by its unique vid.
+ http://video.sina.com.cn/
+ """
xml = api_req(vid)
urls, name, size = video_info(xml)
if urls is None:
@@ -49,6 +52,9 @@
download_urls(urls, title, 'flv', size, output_dir = output_dir, merge = merge)
def sina_download_by_vkey(vkey, title=None, output_dir='.', merge=True, info_only=False):
+ """Downloads a Sina video by its unique vkey.
+ http://video.sina.com/
+ """
url = 'http://video.sina.com/v/flvideo/%s_0.flv' % vkey
type, ext, size = url_info(url)
@@ -86,6 +92,8 @@
return
def sina_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
+ """Downloads Sina videos by URL.
+ """
if 'news.sina.com.cn/zxt' in url:
sina_zxt(url, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)
return
@@ -114,4 +122,4 @@
site_info = "Sina.com"
download = sina_download
-download_playlist = playlist_not_supported('sina')
+download_playlist = playlist_not_supported('sina')
| https://raw.githubusercontent.com/soimort/you-get/HEAD/src/you_get/extractors/sina.py |
Create documentation strings for testing functions | #!/usr/bin/env python
import io
import os
import re
import sys
import time
import json
import socket
import locale
import logging
import argparse
import ssl
from http import cookiejar
from importlib import import_module
from urllib import request, parse, error
from .version import __version__
from .util import log, term
from .util.git import get_version
from .util.strings import get_filename, unescape_html
from . import json_output as json_output_
sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf8')
SITES = {
'163' : 'netease',
'56' : 'w56',
'365yg' : 'toutiao',
'acfun' : 'acfun',
'archive' : 'archive',
'baidu' : 'baidu',
'bandcamp' : 'bandcamp',
'baomihua' : 'baomihua',
'bigthink' : 'bigthink',
'bilibili' : 'bilibili',
'cctv' : 'cntv',
'cntv' : 'cntv',
'cbs' : 'cbs',
'coub' : 'coub',
'dailymotion' : 'dailymotion',
'douban' : 'douban',
'douyin' : 'douyin',
'douyu' : 'douyutv',
'ehow' : 'ehow',
'facebook' : 'facebook',
'fc2' : 'fc2video',
'flickr' : 'flickr',
'freesound' : 'freesound',
'fun' : 'funshion',
'google' : 'google',
'giphy' : 'giphy',
'heavy-music' : 'heavymusic',
'huomao' : 'huomaotv',
'iask' : 'sina',
'icourses' : 'icourses',
'ifeng' : 'ifeng',
'imgur' : 'imgur',
'in' : 'alive',
'infoq' : 'infoq',
'instagram' : 'instagram',
'interest' : 'interest',
'iqilu' : 'iqilu',
'iqiyi' : 'iqiyi',
'ixigua' : 'ixigua',
'isuntv' : 'suntv',
'iwara' : 'iwara',
'joy' : 'joy',
'kankanews' : 'bilibili',
'kakao' : 'kakao',
'khanacademy' : 'khan',
'ku6' : 'ku6',
'kuaishou' : 'kuaishou',
'kugou' : 'kugou',
'kuwo' : 'kuwo',
'le' : 'le',
'letv' : 'le',
'lizhi' : 'lizhi',
'longzhu' : 'longzhu',
'lrts' : 'lrts',
'magisto' : 'magisto',
'metacafe' : 'metacafe',
'mgtv' : 'mgtv',
'miomio' : 'miomio',
'missevan' : 'missevan',
'mixcloud' : 'mixcloud',
'mtv81' : 'mtv81',
'miaopai' : 'yixia',
'naver' : 'naver',
'7gogo' : 'nanagogo',
'nicovideo' : 'nicovideo',
'pinterest' : 'pinterest',
'pixnet' : 'pixnet',
'pptv' : 'pptv',
'qingting' : 'qingting',
'qq' : 'qq',
'showroom-live' : 'showroom',
'sina' : 'sina',
'smgbb' : 'bilibili',
'sohu' : 'sohu',
'soundcloud' : 'soundcloud',
'ted' : 'ted',
'theplatform' : 'theplatform',
'tiktok' : 'tiktok',
'tucao' : 'tucao',
'tudou' : 'tudou',
'tumblr' : 'tumblr',
'twimg' : 'twitter',
'twitter' : 'twitter',
'ucas' : 'ucas',
'vimeo' : 'vimeo',
'wanmen' : 'wanmen',
'weibo' : 'miaopai',
'veoh' : 'veoh',
'vk' : 'vk',
'x' : 'twitter',
'xiaokaxiu' : 'yixia',
'xiaojiadianvideo' : 'fc2video',
'ximalaya' : 'ximalaya',
'xinpianchang' : 'xinpianchang',
'yizhibo' : 'yizhibo',
'youku' : 'youku',
'youtu' : 'youtube',
'youtube' : 'youtube',
'zhanqi' : 'zhanqi',
'zhibo' : 'zhibo',
'zhihu' : 'zhihu',
}
dry_run = False
json_output = False
force = False
skip_existing_file_size_check = False
player = None
extractor_proxy = None
cookies = None
output_filename = None
auto_rename = False
insecure = False
m3u8 = False
postfix = False
prefix = None
fake_headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'UTF-8,*;q=0.5',
'Accept-Encoding': 'gzip,deflate,sdch',
'Accept-Language': 'en-US,en;q=0.8',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/126.0.2592.113' # Latest Edge
}
if sys.stdout.isatty():
default_encoding = sys.stdout.encoding.lower()
else:
default_encoding = locale.getpreferredencoding().lower()
def rc4(key, data):
# all encryption algo should work on bytes
assert type(key) == type(data) and type(key) == type(b'')
state = list(range(256))
j = 0
for i in range(256):
j += state[i] + key[i % len(key)]
j &= 0xff
state[i], state[j] = state[j], state[i]
i = 0
j = 0
out_list = []
for char in data:
i += 1
i &= 0xff
j += state[i]
j &= 0xff
state[i], state[j] = state[j], state[i]
prn = state[(state[i] + state[j]) & 0xff]
out_list.append(char ^ prn)
return bytes(out_list)
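# Minimal round-trip sketch: RC4 is a symmetric stream cipher, so applying it
# twice with the same key recovers the input (key/payload are made-up values).
def _rc4_roundtrip_example():
    key, data = b'example-key', b'example payload'
    assert rc4(key, rc4(key, data)) == data
    return rc4(key, data)  # ciphertext bytes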
def general_m3u8_extractor(url, headers={}):
m3u8_list = get_content(url, headers=headers).split('\n')
urls = []
for line in m3u8_list:
line = line.strip()
if line and not line.startswith('#'):
if line.startswith('http'):
urls.append(line)
else:
seg_url = parse.urljoin(url, line)
urls.append(seg_url)
return urls
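# Minimal sketch of the relative-URL branch above (illustrative only; URLs are
# made-up examples): parse.urljoin resolves a segment against the playlist URL.
def _m3u8_urljoin_example():
    seg = parse.urljoin('http://example.com/v/index.m3u8', 'seg0.ts')
    assert seg == 'http://example.com/v/seg0.ts'
    return seg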
def maybe_print(*s):
try:
print(*s)
except:
pass
def tr(s):
if default_encoding == 'utf-8':
return s
else:
return s
# return str(s.encode('utf-8'))[2:-1]
# DEPRECATED in favor of match1()
def r1(pattern, text):
m = re.search(pattern, text)
if m:
return m.group(1)
# DEPRECATED in favor of match1()
def r1_of(patterns, text):
for p in patterns:
x = r1(p, text)
if x:
return x
def match1(text, *patterns):
if len(patterns) == 1:
pattern = patterns[0]
match = re.search(pattern, text)
if match:
return match.group(1)
else:
return None
else:
ret = []
for pattern in patterns:
match = re.search(pattern, text)
if match:
ret.append(match.group(1))
return ret
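# Minimal usage sketch for match1() (illustrative only; inputs are toy strings):
# one pattern returns group(1) of the first match, several patterns return a list.
def _match1_example():
    assert match1('vid=123', r'vid=(\d+)') == '123'
    assert match1('a=1 b=2', r'a=(\d)', r'b=(\d)') == ['1', '2']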
def matchall(text, patterns):
ret = []
for pattern in patterns:
match = re.findall(pattern, text)
ret += match
return ret
def launch_player(player, urls):
import subprocess
import shlex
urls = list(urls)
for url in urls.copy():
if type(url) is list:
urls.extend(url)
urls = [url for url in urls if type(url) is str]
assert urls
if (sys.version_info >= (3, 3)):
import shutil
exefile=shlex.split(player)[0]
if shutil.which(exefile) is not None:
subprocess.call(shlex.split(player) + urls)
else:
log.wtf('[Failed] Cannot find player "%s"' % exefile)
else:
subprocess.call(shlex.split(player) + urls)
def parse_query_param(url, param):
try:
return parse.parse_qs(parse.urlparse(url).query)[param][0]
except:
return None
def unicodize(text):
return re.sub(
r'\\u([0-9A-Fa-f][0-9A-Fa-f][0-9A-Fa-f][0-9A-Fa-f])',
lambda x: chr(int(x.group(0)[2:], 16)),
text
)
# DEPRECATED in favor of util.legitimize()
def escape_file_path(path):
path = path.replace('/', '-')
path = path.replace('\\', '-')
path = path.replace('*', '-')
path = path.replace('?', '-')
return path
def ungzip(data):
from io import BytesIO
import gzip
buffer = BytesIO(data)
f = gzip.GzipFile(fileobj=buffer)
return f.read()
def undeflate(data):
import zlib
decompressobj = zlib.decompressobj(-zlib.MAX_WBITS)
return decompressobj.decompress(data)+decompressobj.flush()
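# Minimal round-trip sketch: ungzip()/undeflate() invert gzip compression and a
# raw deflate stream (wbits=-15, matching -zlib.MAX_WBITS above); the payload
# is a made-up example.
def _decompress_roundtrip_example():
    import gzip
    import zlib

    payload = b'example payload'
    assert ungzip(gzip.compress(payload)) == payload
    deflater = zlib.compressobj(wbits=-zlib.MAX_WBITS)
    assert undeflate(deflater.compress(payload) + deflater.flush()) == payload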
# an http.client implementation of get_content()
# because urllib does not support "Connection: keep-alive"
def getHttps(host, url, headers, debuglevel=0):
import http.client
conn = http.client.HTTPSConnection(host)
conn.set_debuglevel(debuglevel)
conn.request("GET", url, headers=headers)
resp = conn.getresponse()
logging.debug('getHttps: %s' % resp.getheaders())
set_cookie = resp.getheader('set-cookie')
data = resp.read()
try:
data = ungzip(data) # gzip
data = undeflate(data) # deflate
except:
pass
conn.close()
return str(data, encoding='utf-8'), set_cookie # TODO: support raw data
# DEPRECATED in favor of get_content()
def get_response(url, faker=False):
logging.debug('get_response: %s' % url)
ctx = None
if insecure:
# ignore ssl errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
# install cookies
if cookies:
opener = request.build_opener(request.HTTPCookieProcessor(cookies))
request.install_opener(opener)
if faker:
response = request.urlopen(
request.Request(url, headers=fake_headers), None, context=ctx,
)
else:
response = request.urlopen(url, context=ctx)
data = response.read()
if response.info().get('Content-Encoding') == 'gzip':
data = ungzip(data)
elif response.info().get('Content-Encoding') == 'deflate':
data = undeflate(data)
response.data = data
return response
# DEPRECATED in favor of get_content()
def get_html(url, encoding=None, faker=False):
content = get_response(url, faker).data
return str(content, 'utf-8', 'ignore')
# DEPRECATED in favor of get_content()
def get_decoded_html(url, faker=False):
response = get_response(url, faker)
data = response.data
charset = r1(r'charset=([\w-]+)', response.headers['content-type'])
if charset:
return data.decode(charset, 'ignore')
else:
return data
def get_location(url, headers=None, get_method='HEAD'):
logging.debug('get_location: %s' % url)
if headers:
req = request.Request(url, headers=headers)
else:
req = request.Request(url)
req.get_method = lambda: get_method
res = urlopen_with_retry(req)
return res.geturl()
def urlopen_with_retry(*args, **kwargs):
retry_time = 3
for i in range(retry_time):
try:
if insecure:
# ignore ssl errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
return request.urlopen(*args, context=ctx, **kwargs)
else:
return request.urlopen(*args, **kwargs)
except socket.timeout as e:
logging.debug('request attempt %s timeout' % str(i + 1))
if i + 1 == retry_time:
raise e
# try to tackle youku CDN fails
except error.HTTPError as http_error:
logging.debug('HTTP Error with code{}'.format(http_error.code))
if i + 1 == retry_time:
raise http_error
def get_content(url, headers={}, decoded=True):
logging.debug('get_content: %s' % url)
req = request.Request(url, headers=headers)
if cookies:
# NOTE: Do not use cookies.add_cookie_header(req)
# #HttpOnly_ cookies were not supported by CookieJar and MozillaCookieJar properly until python 3.10
# See also:
# - https://github.com/python/cpython/pull/17471
# - https://bugs.python.org/issue2190
# Here we add cookies to the request headers manually
cookie_strings = []
for cookie in list(cookies):
cookie_strings.append(cookie.name + '=' + cookie.value)
cookie_headers = {'Cookie': '; '.join(cookie_strings)}
req.headers.update(cookie_headers)
response = urlopen_with_retry(req)
data = response.read()
# Handle HTTP compression for gzip and deflate (zlib)
content_encoding = response.getheader('Content-Encoding')
if content_encoding == 'gzip':
data = ungzip(data)
elif content_encoding == 'deflate':
data = undeflate(data)
# Decode the response body
if decoded:
charset = match1(
response.getheader('Content-Type', ''), r'charset=([\w-]+)'
)
if charset is not None:
data = data.decode(charset, 'ignore')
else:
data = data.decode('utf-8', 'ignore')
return data
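# Minimal sketch of the manual Cookie header built above (illustrative only;
# cookie names/values are made-up): pairs are joined as "name=value; name=value".
def _cookie_header_example():
    pairs = [('sessionid', 'abc'), ('token', 'xyz')]
    return {'Cookie': '; '.join(name + '=' + value for name, value in pairs)}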
def post_content(url, headers={}, post_data={}, decoded=True, **kwargs):
if kwargs.get('post_data_raw'):
logging.debug('post_content: %s\npost_data_raw: %s' % (url, kwargs['post_data_raw']))
else:
logging.debug('post_content: %s\npost_data: %s' % (url, post_data))
req = request.Request(url, headers=headers)
if cookies:
# NOTE: Do not use cookies.add_cookie_header(req)
# #HttpOnly_ cookies were not supported by CookieJar and MozillaCookieJar properly until python 3.10
# See also:
# - https://github.com/python/cpython/pull/17471
# - https://bugs.python.org/issue2190
# Here we add cookies to the request headers manually
cookie_strings = []
for cookie in list(cookies):
cookie_strings.append(cookie.name + '=' + cookie.value)
cookie_headers = {'Cookie': '; '.join(cookie_strings)}
req.headers.update(cookie_headers)
if kwargs.get('post_data_raw'):
post_data_enc = bytes(kwargs['post_data_raw'], 'utf-8')
else:
post_data_enc = bytes(parse.urlencode(post_data), 'utf-8')
response = urlopen_with_retry(req, data=post_data_enc)
data = response.read()
# Handle HTTP compression for gzip and deflate (zlib)
content_encoding = response.getheader('Content-Encoding')
if content_encoding == 'gzip':
data = ungzip(data)
elif content_encoding == 'deflate':
data = undeflate(data)
# Decode the response body
if decoded:
charset = match1(
response.getheader('Content-Type'), r'charset=([\w-]+)'
)
if charset is not None:
data = data.decode(charset)
else:
data = data.decode('utf-8')
return data
def url_size(url, faker=False, headers={}):
if faker:
response = urlopen_with_retry(
request.Request(url, headers=fake_headers)
)
elif headers:
response = urlopen_with_retry(request.Request(url, headers=headers))
else:
response = urlopen_with_retry(url)
size = response.headers['content-length']
return int(size) if size is not None else float('inf')
def urls_size(urls, faker=False, headers={}):
return sum([url_size(url, faker=faker, headers=headers) for url in urls])
def get_head(url, headers=None, get_method='HEAD'):
logging.debug('get_head: %s' % url)
if headers:
req = request.Request(url, headers=headers)
else:
req = request.Request(url)
req.get_method = lambda: get_method
res = urlopen_with_retry(req)
return res.headers
def url_info(url, faker=False, headers={}):
logging.debug('url_info: %s' % url)
if faker:
response = urlopen_with_retry(
request.Request(url, headers=fake_headers)
)
elif headers:
response = urlopen_with_retry(request.Request(url, headers=headers))
else:
response = urlopen_with_retry(request.Request(url))
headers = response.headers
type = headers['content-type']
if type == 'image/jpg; charset=UTF-8' or type == 'image/jpg':
type = 'audio/mpeg' # fix for netease
mapping = {
'video/3gpp': '3gp',
'video/f4v': 'flv',
'video/mp4': 'mp4',
'video/MP2T': 'ts',
'video/quicktime': 'mov',
'video/webm': 'webm',
'video/x-flv': 'flv',
'video/x-ms-asf': 'asf',
'audio/mp4': 'mp4',
'audio/mpeg': 'mp3',
'audio/wav': 'wav',
'audio/x-wav': 'wav',
'audio/wave': 'wav',
'image/jpeg': 'jpg',
'image/png': 'png',
'image/gif': 'gif',
'application/pdf': 'pdf',
}
if type in mapping:
ext = mapping[type]
else:
type = None
if headers['content-disposition']:
try:
filename = parse.unquote(
r1(r'filename="?([^"]+)"?', headers['content-disposition'])
)
if len(filename.split('.')) > 1:
ext = filename.split('.')[-1]
else:
ext = None
except:
ext = None
else:
ext = None
if headers['transfer-encoding'] != 'chunked':
size = headers['content-length'] and int(headers['content-length'])
else:
size = None
return type, ext, size
def url_locations(urls, faker=False, headers={}):
locations = []
for url in urls:
logging.debug('url_locations: %s' % url)
if faker:
response = urlopen_with_retry(
request.Request(url, headers=fake_headers)
)
elif headers:
response = urlopen_with_retry(
request.Request(url, headers=headers)
)
else:
response = urlopen_with_retry(request.Request(url))
locations.append(response.url)
return locations
def url_save(
url, filepath, bar, refer=None, is_part=False, faker=False,
headers=None, timeout=None, **kwargs
):
tmp_headers = headers.copy() if headers is not None else {}
# When a referer specified with param refer,
# the key must be 'Referer' for the hack here
if refer is not None:
tmp_headers['Referer'] = refer
if type(url) is list:
chunk_sizes = [url_size(url, faker=faker, headers=tmp_headers) for url in url]
file_size = sum(chunk_sizes)
is_chunked, urls = True, url
else:
file_size = url_size(url, faker=faker, headers=tmp_headers)
chunk_sizes = [file_size]
is_chunked, urls = False, [url]
continue_renameing = True
while continue_renameing:
continue_renameing = False
if os.path.exists(filepath):
if not force and (file_size == os.path.getsize(filepath) or skip_existing_file_size_check):
if not is_part:
if bar:
bar.done()
if skip_existing_file_size_check:
log.w(
'Skipping {} without checking size: file already exists'.format(
tr(os.path.basename(filepath))
)
)
else:
log.w(
'Skipping {}: file already exists'.format(
tr(os.path.basename(filepath))
)
)
else:
if bar:
bar.update_received(file_size)
return
else:
if not is_part:
if bar:
bar.done()
if not force and auto_rename:
path, ext = os.path.basename(filepath).rsplit('.', 1)
finder = re.compile(r' \([1-9]\d*?\)$')
if (finder.search(path) is None):
thisfile = path + ' (1).' + ext
else:
def numreturn(a):
return ' (' + str(int(a.group()[2:-1]) + 1) + ').'
thisfile = finder.sub(numreturn, path) + ext
filepath = os.path.join(os.path.dirname(filepath), thisfile)
print('Changing name to %s' % tr(os.path.basename(filepath)), '...')
continue_renameing = True
continue
if log.yes_or_no('File with this name already exists. Overwrite?'):
log.w('Overwriting %s ...' % tr(os.path.basename(filepath)))
else:
return
elif not os.path.exists(os.path.dirname(filepath)):
os.mkdir(os.path.dirname(filepath))
temp_filepath = filepath + '.download' if file_size != float('inf') \
else filepath
received = 0
if not force:
open_mode = 'ab'
if os.path.exists(temp_filepath):
received += os.path.getsize(temp_filepath)
if bar:
bar.update_received(os.path.getsize(temp_filepath))
else:
open_mode = 'wb'
chunk_start = 0
chunk_end = 0
for i, url in enumerate(urls):
received_chunk = 0
chunk_start += 0 if i == 0 else chunk_sizes[i - 1]
chunk_end += chunk_sizes[i]
if received < file_size and received < chunk_end:
if faker:
tmp_headers = fake_headers
                # If the `headers` parameter was passed in, it has already been
                # copied into tmp_headers at the top of this function;
                # otherwise tmp_headers started out as an empty dict.
if received:
# chunk_start will always be 0 if not chunked
tmp_headers['Range'] = 'bytes=' + str(received - chunk_start) + '-'
if refer:
tmp_headers['Referer'] = refer
if timeout:
response = urlopen_with_retry(
request.Request(url, headers=tmp_headers), timeout=timeout
)
else:
response = urlopen_with_retry(
request.Request(url, headers=tmp_headers)
)
try:
range_start = int(
response.headers[
'content-range'
][6:].split('/')[0].split('-')[0]
)
end_length = int(
response.headers['content-range'][6:].split('/')[1]
)
range_length = end_length - range_start
except:
content_length = response.headers['content-length']
range_length = int(content_length) if content_length is not None \
else float('inf')
if is_chunked: # always append if chunked
open_mode = 'ab'
elif file_size != received + range_length: # is it ever necessary?
received = 0
if bar:
bar.received = 0
open_mode = 'wb'
with open(temp_filepath, open_mode) as output:
while True:
buffer = None
try:
buffer = response.read(1024 * 256)
except socket.timeout:
pass
if not buffer:
if file_size == float('+inf'): # Prevent infinite downloading
break
if is_chunked and received_chunk == range_length:
break
elif not is_chunked and received == file_size: # Download finished
break
# Unexpected termination. Retry request
tmp_headers['Range'] = 'bytes=' + str(received - chunk_start) + '-'
response = urlopen_with_retry(
request.Request(url, headers=tmp_headers)
)
continue
output.write(buffer)
received += len(buffer)
received_chunk += len(buffer)
if bar:
bar.update_received(len(buffer))
assert received == os.path.getsize(temp_filepath), '%s == %s == %s' % (
received, os.path.getsize(temp_filepath), temp_filepath
)
if os.access(filepath, os.W_OK) and file_size != float('inf'):
        # on Windows, rename could fail if the destination filepath exists;
        # simply choose a new name instead of brutally removing it with os.remove(filepath)
filepath = filepath + " (2)"
os.rename(temp_filepath, filepath)
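# A minimal sketch of the resume logic in url_save() (hypothetical numbers):
# when 1000 bytes of a non-chunked file already sit in the .download temp
# file, the next request asks the server to continue from that offset.
#   received, chunk_start = 1000, 0
#   tmp_headers['Range'] = 'bytes=' + str(received - chunk_start) + '-'
#   # -> {'Range': 'bytes=1000-'}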
class SimpleProgressBar:
term_size = term.get_terminal_size()[1]
def __init__(self, total_size, total_pieces=1):
self.displayed = False
self.total_size = total_size
self.total_pieces = total_pieces
self.current_piece = 1
self.received = 0
self.speed = ''
self.last_updated = time.time()
total_pieces_len = len(str(total_pieces))
# 38 is the size of all statically known size in self.bar
total_str = '%5s' % round(self.total_size / 1048576, 1)
total_str_width = max(len(total_str), 5)
self.bar_size = self.term_size - 28 - 2 * total_pieces_len \
- 2 * total_str_width
self.bar = '{:>4}%% ({:>%s}/%sMB) ├{:─<%s}┤[{:>%s}/{:>%s}] {}' % (
total_str_width, total_str, self.bar_size, total_pieces_len,
total_pieces_len
)
def update(self):
self.displayed = True
bar_size = self.bar_size
percent = round(self.received * 100 / self.total_size, 1)
if percent >= 100:
percent = 100
dots = bar_size * int(percent) // 100
plus = int(percent) - dots // bar_size * 100
if plus > 0.8:
plus = '█'
elif plus > 0.4:
plus = '>'
else:
plus = ''
bar = '█' * dots + plus
bar = self.bar.format(
percent, round(self.received / 1048576, 1), bar,
self.current_piece, self.total_pieces, self.speed
)
sys.stdout.write('\r' + bar)
sys.stdout.flush()
def update_received(self, n):
self.received += n
time_diff = time.time() - self.last_updated
bytes_ps = n / time_diff if time_diff else 0
if bytes_ps >= 1024 ** 3:
self.speed = '{:4.0f} GB/s'.format(bytes_ps / 1024 ** 3)
elif bytes_ps >= 1024 ** 2:
self.speed = '{:4.0f} MB/s'.format(bytes_ps / 1024 ** 2)
elif bytes_ps >= 1024:
self.speed = '{:4.0f} kB/s'.format(bytes_ps / 1024)
else:
self.speed = '{:4.0f} B/s'.format(bytes_ps)
self.last_updated = time.time()
self.update()
def update_piece(self, n):
self.current_piece = n
def done(self):
if self.displayed:
print()
self.displayed = False
class PiecesProgressBar:
def __init__(self, total_size, total_pieces=1):
self.displayed = False
self.total_size = total_size
self.total_pieces = total_pieces
self.current_piece = 1
self.received = 0
def update(self):
self.displayed = True
bar = '{0:>5}%[{1:<40}] {2}/{3}'.format(
'', '=' * 40, self.current_piece, self.total_pieces
)
sys.stdout.write('\r' + bar)
sys.stdout.flush()
def update_received(self, n):
self.received += n
self.update()
def update_piece(self, n):
self.current_piece = n
def done(self):
if self.displayed:
print()
self.displayed = False
class DummyProgressBar:
def __init__(self, *args):
pass
def update_received(self, n):
pass
def update_piece(self, n):
pass
def done(self):
pass
def get_output_filename(urls, title, ext, output_dir, merge, **kwargs):
# lame hack for the --output-filename option
global output_filename
if output_filename:
result = output_filename
if kwargs.get('part', -1) >= 0:
result = '%s[%02d]' % (result, kwargs.get('part'))
if ext:
result = '%s.%s' % (result, ext)
return result
merged_ext = ext
if (len(urls) > 1) and merge:
from .processor.ffmpeg import has_ffmpeg_installed
if ext in ['flv', 'f4v']:
if has_ffmpeg_installed():
merged_ext = 'mp4'
else:
merged_ext = 'flv'
elif ext == 'mp4':
merged_ext = 'mp4'
elif ext == 'ts':
if has_ffmpeg_installed():
merged_ext = 'mkv'
else:
merged_ext = 'ts'
result = title
if kwargs.get('part', -1) >= 0:
result = '%s[%02d]' % (result, kwargs.get('part'))
result = '%s.%s' % (result, merged_ext)
return result.replace("'", "_")
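# Illustrative example (assuming ffmpeg is installed; title and URLs are
# hypothetical): merging two FLV parts promotes the container to MP4, and
# single quotes in the title are replaced to stay shell-friendly.
#   >>> get_output_filename(['u1', 'u2'], "Bob's Video", 'flv', '.', True)
#   'Bob_s Video.mp4'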
def print_user_agent(faker=False):
urllib_default_user_agent = 'Python-urllib/%d.%d' % sys.version_info[:2]
user_agent = fake_headers['User-Agent'] if faker else urllib_default_user_agent
print('User Agent: %s' % user_agent)
def download_urls(
urls, title, ext, total_size, output_dir='.', refer=None, merge=True,
faker=False, headers={}, **kwargs
):
assert urls
if json_output:
json_output_.download_urls(
urls=urls, title=title, ext=ext, total_size=total_size,
refer=refer
)
return
if dry_run:
print_user_agent(faker=faker)
try:
print('Real URLs:\n%s' % '\n'.join(urls))
except:
print('Real URLs:\n%s' % '\n'.join([j for i in urls for j in i]))
return
if player:
launch_player(player, urls)
return
if not total_size:
try:
total_size = urls_size(urls, faker=faker, headers=headers)
except:
import traceback
traceback.print_exc(file=sys.stdout)
pass
title = tr(get_filename(title))
if postfix and 'vid' in kwargs:
title = "%s [%s]" % (title, kwargs['vid'])
if prefix is not None:
title = "[%s] %s" % (prefix, title)
output_filename = get_output_filename(urls, title, ext, output_dir, merge)
output_filepath = os.path.join(output_dir, output_filename)
if total_size:
if not force and os.path.exists(output_filepath) and not auto_rename\
and (os.path.getsize(output_filepath) >= total_size * 0.9\
or skip_existing_file_size_check):
if skip_existing_file_size_check:
log.w('Skipping %s without checking size: file already exists' % output_filepath)
else:
log.w('Skipping %s: file already exists' % output_filepath)
print()
return
bar = SimpleProgressBar(total_size, len(urls))
else:
bar = PiecesProgressBar(total_size, len(urls))
if len(urls) == 1:
url = urls[0]
print('Downloading %s ...' % tr(output_filename))
bar.update()
url_save(
url, output_filepath, bar, refer=refer, faker=faker,
headers=headers, **kwargs
)
bar.done()
else:
parts = []
print('Downloading %s ...' % tr(output_filename))
bar.update()
for i, url in enumerate(urls):
output_filename_i = get_output_filename(urls, title, ext, output_dir, merge, part=i)
output_filepath_i = os.path.join(output_dir, output_filename_i)
parts.append(output_filepath_i)
# print 'Downloading %s [%s/%s]...' % (tr(filename), i + 1, len(urls))
bar.update_piece(i + 1)
url_save(
url, output_filepath_i, bar, refer=refer, is_part=True, faker=faker,
headers=headers, **kwargs
)
bar.done()
if not merge:
print()
return
if 'av' in kwargs and kwargs['av']:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_av
ret = ffmpeg_concat_av(parts, output_filepath, ext)
print('Merged into %s' % output_filename)
if ret == 0:
for part in parts:
os.remove(part)
elif ext in ['flv', 'f4v']:
try:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_flv_to_mp4
ffmpeg_concat_flv_to_mp4(parts, output_filepath)
else:
from .processor.join_flv import concat_flv
concat_flv(parts, output_filepath)
print('Merged into %s' % output_filename)
except:
raise
else:
for part in parts:
os.remove(part)
elif ext == 'mp4':
try:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_mp4_to_mp4
ffmpeg_concat_mp4_to_mp4(parts, output_filepath)
else:
from .processor.join_mp4 import concat_mp4
concat_mp4(parts, output_filepath)
print('Merged into %s' % output_filename)
except:
raise
else:
for part in parts:
os.remove(part)
elif ext == 'ts':
try:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_ts_to_mkv
ffmpeg_concat_ts_to_mkv(parts, output_filepath)
else:
from .processor.join_ts import concat_ts
concat_ts(parts, output_filepath)
print('Merged into %s' % output_filename)
except:
raise
else:
for part in parts:
os.remove(part)
elif ext == 'mp3':
try:
from .processor.ffmpeg import has_ffmpeg_installed
assert has_ffmpeg_installed()
from .processor.ffmpeg import ffmpeg_concat_mp3_to_mp3
ffmpeg_concat_mp3_to_mp3(parts, output_filepath)
print('Merged into %s' % output_filename)
except:
raise
else:
for part in parts:
os.remove(part)
else:
print("Can't merge %s files" % ext)
print()
def download_rtmp_url(
url, title, ext, params={}, total_size=0, output_dir='.', refer=None,
merge=True, faker=False
):
assert url
if dry_run:
print_user_agent(faker=faker)
print('Real URL:\n%s\n' % [url])
if params.get('-y', False): # None or unset -> False
print('Real Playpath:\n%s\n' % [params.get('-y')])
return
if player:
from .processor.rtmpdump import play_rtmpdump_stream
play_rtmpdump_stream(player, url, params)
return
from .processor.rtmpdump import (
has_rtmpdump_installed, download_rtmpdump_stream
)
assert has_rtmpdump_installed(), 'RTMPDump not installed.'
download_rtmpdump_stream(url, title, ext, params, output_dir)
def download_url_ffmpeg(
url, title, ext, params={}, total_size=0, output_dir='.', refer=None,
merge=True, faker=False, stream=True
):
assert url
if dry_run:
print_user_agent(faker=faker)
print('Real URL:\n%s\n' % [url])
    if params.get('-y', False):  # None or unset -> False
print('Real Playpath:\n%s\n' % [params.get('-y')])
return
if player:
launch_player(player, [url])
return
from .processor.ffmpeg import has_ffmpeg_installed, ffmpeg_download_stream
assert has_ffmpeg_installed(), 'FFmpeg not installed.'
global output_filename
if output_filename:
dotPos = output_filename.rfind('.')
if dotPos > 0:
title = output_filename[:dotPos]
ext = output_filename[dotPos+1:]
else:
title = output_filename
title = tr(get_filename(title))
ffmpeg_download_stream(url, title, ext, params, output_dir, stream=stream)
def playlist_not_supported(name):
def f(*args, **kwargs):
raise NotImplementedError('Playlist is not supported for ' + name)
return f
def print_info(site_info, title, type, size, **kwargs):
if json_output:
json_output_.print_info(
site_info=site_info, title=title, type=type, size=size
)
return
if type:
type = type.lower()
if type in ['3gp']:
type = 'video/3gpp'
elif type in ['asf', 'wmv']:
type = 'video/x-ms-asf'
elif type in ['flv', 'f4v']:
type = 'video/x-flv'
elif type in ['mkv']:
type = 'video/x-matroska'
elif type in ['mp3']:
type = 'audio/mpeg'
elif type in ['mp4']:
type = 'video/mp4'
elif type in ['mov']:
type = 'video/quicktime'
elif type in ['ts']:
type = 'video/MP2T'
elif type in ['webm']:
type = 'video/webm'
elif type in ['jpg']:
type = 'image/jpeg'
elif type in ['png']:
type = 'image/png'
elif type in ['gif']:
type = 'image/gif'
if type in ['video/3gpp']:
type_info = '3GPP multimedia file (%s)' % type
elif type in ['video/x-flv', 'video/f4v']:
type_info = 'Flash video (%s)' % type
elif type in ['video/mp4', 'video/x-m4v']:
type_info = 'MPEG-4 video (%s)' % type
elif type in ['video/MP2T']:
type_info = 'MPEG-2 transport stream (%s)' % type
elif type in ['video/webm']:
type_info = 'WebM video (%s)' % type
# elif type in ['video/ogg']:
# type_info = 'Ogg video (%s)' % type
elif type in ['video/quicktime']:
type_info = 'QuickTime video (%s)' % type
elif type in ['video/x-matroska']:
type_info = 'Matroska video (%s)' % type
# elif type in ['video/x-ms-wmv']:
# type_info = 'Windows Media video (%s)' % type
elif type in ['video/x-ms-asf']:
type_info = 'Advanced Systems Format (%s)' % type
# elif type in ['video/mpeg']:
# type_info = 'MPEG video (%s)' % type
elif type in ['audio/mp4', 'audio/m4a']:
type_info = 'MPEG-4 audio (%s)' % type
elif type in ['audio/mpeg']:
type_info = 'MP3 (%s)' % type
elif type in ['audio/wav', 'audio/wave', 'audio/x-wav']:
type_info = 'Waveform Audio File Format ({})'.format(type)
elif type in ['image/jpeg']:
type_info = 'JPEG Image (%s)' % type
elif type in ['image/png']:
type_info = 'Portable Network Graphics (%s)' % type
elif type in ['image/gif']:
type_info = 'Graphics Interchange Format (%s)' % type
elif type in ['m3u8']:
if 'm3u8_type' in kwargs:
if kwargs['m3u8_type'] == 'master':
type_info = 'M3U8 Master {}'.format(type)
else:
type_info = 'M3U8 Playlist {}'.format(type)
else:
type_info = 'Unknown type (%s)' % type
maybe_print('Site: ', site_info)
maybe_print('Title: ', unescape_html(tr(title)))
print('Type: ', type_info)
if type != 'm3u8':
print(
'Size: ', round(size / 1048576, 2),
'MiB (' + str(size) + ' Bytes)'
)
if type == 'm3u8' and 'm3u8_url' in kwargs:
print('M3U8 Url: {}'.format(kwargs['m3u8_url']))
print()
def mime_to_container(mime):
mapping = {
'video/3gpp': '3gp',
'video/mp4': 'mp4',
'video/webm': 'webm',
'video/x-flv': 'flv',
}
if mime in mapping:
return mapping[mime]
else:
return mime.split('/')[1]
def parse_host(host):
if re.match(r'^(\d+)$', host) is not None:
return ("0.0.0.0", int(host))
if re.match(r'^(\w+)://', host) is None:
host = "//" + host
o = parse.urlparse(host)
hostname = o.hostname or "0.0.0.0"
port = o.port or 0
return (hostname, port)
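# Illustrative behavior (not part of the original source):
#   >>> parse_host('8080')                      # bare port number
#   ('0.0.0.0', 8080)
#   >>> parse_host('localhost:1080')            # scheme-less host:port
#   ('localhost', 1080)
#   >>> parse_host('socks5://127.0.0.1:1080')
#   ('127.0.0.1', 1080)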
def set_proxy(proxy):
proxy_handler = request.ProxyHandler({
'http': '%s:%s' % proxy,
'https': '%s:%s' % proxy,
})
opener = request.build_opener(proxy_handler)
request.install_opener(opener)
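# Usage sketch (illustrative values): note that set_proxy() expects a
# (host, port) tuple rather than a string, because of the '%s:%s' % proxy
# formatting above.
#   set_proxy(('127.0.0.1', 8118))  # route http/https through 127.0.0.1:8118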
def unset_proxy():
proxy_handler = request.ProxyHandler({})
opener = request.build_opener(proxy_handler)
request.install_opener(opener)
# DEPRECATED in favor of set_proxy() and unset_proxy()
def set_http_proxy(proxy):
if proxy is None: # Use system default setting
proxy_support = request.ProxyHandler()
elif proxy == '': # Don't use any proxy
proxy_support = request.ProxyHandler({})
else: # Use proxy
proxy_support = request.ProxyHandler(
{'http': '%s' % proxy, 'https': '%s' % proxy}
)
opener = request.build_opener(proxy_support)
request.install_opener(opener)
def print_more_compatible(*args, **kwargs):
    """Overload the default print function, as Python < 3.3 does not support the 'flush' keyword.

    Although the function name could be the same as print to get it overloaded automatically,
    I'd rather leave it with a different name and only overload it when importing, to cause less confusion.
    """
    import builtins as __builtin__
# nothing happens on py3.3 and later
if sys.version_info[:2] >= (3, 3):
return __builtin__.print(*args, **kwargs)
# in lower pyver (e.g. 3.2.x), remove 'flush' keyword and flush it as requested
doFlush = kwargs.pop('flush', False)
ret = __builtin__.print(*args, **kwargs)
if doFlush:
kwargs.get('file', sys.stdout).flush()
return ret
def download_main(download, download_playlist, urls, playlist, **kwargs):
for url in urls:
if re.match(r'https?://', url) is None:
url = 'http://' + url
if m3u8:
if output_filename:
title = output_filename
else:
title = "m3u8file"
            download_url_ffmpeg(url=url, title=title, ext='mp4', output_dir='.')
elif playlist:
download_playlist(url, **kwargs)
else:
download(url, **kwargs)
def load_cookies(cookiefile):
global cookies
if cookiefile.endswith('.txt'):
        # MozillaCookieJar incorrectly treats lines prefixed with '#HttpOnly_' as comments!
# do not use its load()
# see also:
# - https://docs.python.org/3/library/http.cookiejar.html#http.cookiejar.MozillaCookieJar
# - https://github.com/python/cpython/blob/4b219ce/Lib/http/cookiejar.py#L2014
# - https://curl.haxx.se/libcurl/c/CURLOPT_COOKIELIST.html#EXAMPLE
#cookies = cookiejar.MozillaCookieJar(cookiefile)
#cookies.load()
from http.cookiejar import Cookie
cookies = cookiejar.MozillaCookieJar()
now = time.time()
ignore_discard, ignore_expires = False, False
with open(cookiefile, 'r', encoding='utf-8') as f:
for line in f:
# last field may be absent, so keep any trailing tab
if line.endswith("\n"): line = line[:-1]
# skip comments and blank lines XXX what is $ for?
if (line.strip().startswith(("#", "$")) or
line.strip() == ""):
if not line.strip().startswith('#HttpOnly_'): # skip for #HttpOnly_
continue
domain, domain_specified, path, secure, expires, name, value = \
line.split("\t")
secure = (secure == "TRUE")
domain_specified = (domain_specified == "TRUE")
if name == "":
# cookies.txt regards 'Set-Cookie: foo' as a cookie
# with no name, whereas http.cookiejar regards it as a
# cookie with no value.
name = value
value = None
initial_dot = domain.startswith(".")
if not line.strip().startswith('#HttpOnly_'): # skip for #HttpOnly_
assert domain_specified == initial_dot
discard = False
if expires == "":
expires = None
discard = True
# assume path_specified is false
c = Cookie(0, name, value,
None, False,
domain, domain_specified, initial_dot,
path, False,
secure,
expires,
discard,
None,
None,
{})
if not ignore_discard and c.discard:
continue
if not ignore_expires and c.is_expired(now):
continue
cookies.set_cookie(c)
elif cookiefile.endswith(('.sqlite', '.sqlite3')):
import sqlite3, shutil, tempfile
temp_dir = tempfile.gettempdir()
temp_cookiefile = os.path.join(temp_dir, 'temp_cookiefile.sqlite')
shutil.copy2(cookiefile, temp_cookiefile)
cookies = cookiejar.MozillaCookieJar()
con = sqlite3.connect(temp_cookiefile)
cur = con.cursor()
cur.execute("""SELECT host, path, isSecure, expiry, name, value
FROM moz_cookies""")
for item in cur.fetchall():
c = cookiejar.Cookie(
0, item[4], item[5], None, False, item[0],
item[0].startswith('.'), item[0].startswith('.'),
item[1], False, item[2], item[3], item[3] == '', None,
None, {},
)
cookies.set_cookie(c)
else:
log.e('[error] unsupported cookies format')
# TODO: Chromium Cookies
# SELECT host_key, path, secure, expires_utc, name, encrypted_value
# FROM cookies
# http://n8henrie.com/2013/11/use-chromes-cookies-for-easier-downloading-with-python-requests/
def set_socks_proxy(proxy):
try:
import socks
if '@' in proxy:
proxy_info = proxy.split("@")
socks_proxy_addrs = proxy_info[1].split(':')
socks_proxy_auth = proxy_info[0].split(":")
socks.set_default_proxy(
socks.SOCKS5,
socks_proxy_addrs[0],
int(socks_proxy_addrs[1]),
True,
socks_proxy_auth[0],
socks_proxy_auth[1]
)
else:
socks_proxy_addrs = proxy.split(':')
socks.set_default_proxy(
socks.SOCKS5,
socks_proxy_addrs[0],
int(socks_proxy_addrs[1]),
)
socket.socket = socks.socksocket
def getaddrinfo(*args):
return [
(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))
]
socket.getaddrinfo = getaddrinfo
except ImportError:
        log.w(
            'Error importing PySocks library, socks proxy ignored. '
            'In order to use a socks proxy, please install PySocks.'
        )
def script_main(download, download_playlist, **kwargs):
logging.basicConfig(format='[%(levelname)s] %(message)s')
def print_version():
version = get_version(
kwargs['repo_path'] if 'repo_path' in kwargs else __version__
)
log.i(
'version {}, a tiny downloader that scrapes the web.'.format(
version
)
)
parser = argparse.ArgumentParser(
prog='you-get',
usage='you-get [OPTION]... URL...',
description='A tiny downloader that scrapes the web',
add_help=False,
)
parser.add_argument(
'-V', '--version', action='store_true',
help='Print version and exit'
)
parser.add_argument(
'-h', '--help', action='store_true',
help='Print this help message and exit'
)
dry_run_grp = parser.add_argument_group(
'Dry-run options', '(no actual downloading)'
)
dry_run_grp = dry_run_grp.add_mutually_exclusive_group()
dry_run_grp.add_argument(
'-i', '--info', action='store_true', help='Print extracted information'
)
dry_run_grp.add_argument(
'-u', '--url', action='store_true',
help='Print extracted information with URLs'
)
dry_run_grp.add_argument(
'--json', action='store_true',
help='Print extracted URLs in JSON format'
)
download_grp = parser.add_argument_group('Download options')
download_grp.add_argument(
'-n', '--no-merge', action='store_true', default=False,
help='Do not merge video parts'
)
download_grp.add_argument(
'--no-caption', action='store_true',
help='Do not download captions (subtitles, lyrics, danmaku, ...)'
)
download_grp.add_argument(
'--post', '--postfix', dest='postfix', action='store_true', default=False,
help='Postfix downloaded files with unique identifiers'
)
download_grp.add_argument(
'--pre', '--prefix', dest='prefix', metavar='PREFIX', default=None,
help='Prefix downloaded files with string'
)
download_grp.add_argument(
'-f', '--force', action='store_true', default=False,
help='Force overwriting existing files'
)
download_grp.add_argument(
'--skip-existing-file-size-check', action='store_true', default=False,
help='Skip existing file without checking file size'
)
download_grp.add_argument(
'-F', '--format', metavar='STREAM_ID',
help='Set video format to STREAM_ID'
)
download_grp.add_argument(
'-O', '--output-filename', metavar='FILE', help='Set output filename'
)
download_grp.add_argument(
'-o', '--output-dir', metavar='DIR', default='.',
help='Set output directory'
)
download_grp.add_argument(
'-p', '--player', metavar='PLAYER',
help='Stream extracted URL to a PLAYER'
)
download_grp.add_argument(
'-c', '--cookies', metavar='COOKIES_FILE',
help='Load cookies.txt or cookies.sqlite'
)
download_grp.add_argument(
'-t', '--timeout', metavar='SECONDS', type=int, default=600,
help='Set socket timeout'
)
download_grp.add_argument(
'-d', '--debug', action='store_true',
help='Show traceback and other debug info'
)
download_grp.add_argument(
'-I', '--input-file', metavar='FILE', type=argparse.FileType('r'),
help='Read non-playlist URLs from FILE'
)
download_grp.add_argument(
'-P', '--password', help='Set video visit password to PASSWORD'
)
download_grp.add_argument(
'-l', '--playlist', action='store_true',
help='Prefer to download a playlist'
)
playlist_grp = parser.add_argument_group('Playlist optional options')
playlist_grp.add_argument(
'--first', metavar='FIRST',
        help='Index of the first video to download in the playlist'
)
playlist_grp.add_argument(
'--last', metavar='LAST',
        help='Index of the last video to download in the playlist'
)
playlist_grp.add_argument(
'--size', '--page-size', metavar='PAGE_SIZE',
        help='Number of videos per page in the playlist'
)
download_grp.add_argument(
'-a', '--auto-rename', action='store_true', default=False,
        help='Automatically rename files that have the same name but different content'
)
download_grp.add_argument(
'-k', '--insecure', action='store_true', default=False,
        help='Ignore SSL errors'
)
proxy_grp = parser.add_argument_group('Proxy options')
proxy_grp = proxy_grp.add_mutually_exclusive_group()
proxy_grp.add_argument(
'-x', '--http-proxy', metavar='HOST:PORT',
help='Use an HTTP proxy for downloading'
)
proxy_grp.add_argument(
'-y', '--extractor-proxy', metavar='HOST:PORT',
help='Use an HTTP proxy for extracting only'
)
proxy_grp.add_argument(
'--no-proxy', action='store_true', help='Never use a proxy'
)
proxy_grp.add_argument(
'-s', '--socks-proxy', metavar='HOST:PORT or USERNAME:PASSWORD@HOST:PORT',
        help='Use a SOCKS5 proxy for downloading'
)
download_grp.add_argument('--stream', help=argparse.SUPPRESS)
download_grp.add_argument('--itag', help=argparse.SUPPRESS)
    download_grp.add_argument(
        '-m', '--m3u8', action='store_true', default=False,
        help='Download video using an m3u8 URL'
    )
parser.add_argument('URL', nargs='*', help=argparse.SUPPRESS)
args = parser.parse_args()
if args.help:
print_version()
parser.print_help()
sys.exit()
if args.version:
print_version()
sys.exit()
if args.debug:
# Set level of root logger to DEBUG
logging.getLogger().setLevel(logging.DEBUG)
global force
global skip_existing_file_size_check
global dry_run
global json_output
global player
global extractor_proxy
global output_filename
global auto_rename
global insecure
global m3u8
global postfix
global prefix
output_filename = args.output_filename
extractor_proxy = args.extractor_proxy
info_only = args.info
if args.force:
force = True
if args.skip_existing_file_size_check:
skip_existing_file_size_check = True
if args.auto_rename:
auto_rename = True
if args.url:
dry_run = True
if args.json:
json_output = True
        # to fix extractors that do not use VideoExtractor
dry_run = True
info_only = False
if args.cookies:
load_cookies(args.cookies)
if args.m3u8:
m3u8 = True
caption = True
stream_id = args.format or args.stream or args.itag
if args.no_caption:
caption = False
if args.player:
player = args.player
caption = False
if args.insecure:
# ignore ssl
insecure = True
postfix = args.postfix
prefix = args.prefix
if args.no_proxy:
set_http_proxy('')
else:
set_http_proxy(args.http_proxy)
if args.socks_proxy:
set_socks_proxy(args.socks_proxy)
URLs = []
if args.input_file:
logging.debug('you are trying to load urls from %s', args.input_file)
if args.playlist:
log.e(
"reading playlist from a file is unsupported "
"and won't make your life easier"
)
sys.exit(2)
URLs.extend(args.input_file.read().splitlines())
args.input_file.close()
URLs.extend(args.URL)
if not URLs:
parser.print_help()
sys.exit()
socket.setdefaulttimeout(args.timeout)
try:
extra = {'args': args}
if extractor_proxy:
extra['extractor_proxy'] = extractor_proxy
if stream_id:
extra['stream_id'] = stream_id
download_main(
download, download_playlist,
URLs, args.playlist,
output_dir=args.output_dir, merge=not args.no_merge,
info_only=info_only, json_output=json_output, caption=caption,
password=args.password,
**extra
)
except KeyboardInterrupt:
if args.debug:
raise
else:
sys.exit(1)
except UnicodeEncodeError:
if args.debug:
raise
log.e(
'[error] oops, the current environment does not seem to support '
'Unicode.'
)
log.e('please set it to a UTF-8-aware locale first,')
log.e(
'so as to save the video (with some Unicode characters) correctly.'
)
log.e('you can do it like this:')
log.e(' (Windows) % chcp 65001 ')
log.e(' (Linux) $ LC_CTYPE=en_US.UTF-8')
sys.exit(1)
except Exception:
if not args.debug:
log.e('[error] oops, something went wrong.')
log.e(
'don\'t panic, c\'est la vie. please try the following steps:'
)
log.e(' (1) Rule out any network problem.')
log.e(' (2) Make sure you-get is up-to-date.')
log.e(' (3) Check if the issue is already known, on')
log.e(' https://github.com/soimort/you-get/wiki/Known-Bugs')
log.e(' https://github.com/soimort/you-get/issues')
log.e(' (4) Run the command with \'--debug\' option,')
log.e(' and report this issue with the full output.')
else:
print_version()
log.i(args)
raise
sys.exit(1)
def google_search(url):
keywords = r1(r'https?://(.*)', url)
url = 'https://www.google.com/search?tbm=vid&q=%s' % parse.quote(keywords)
page = get_content(url, headers=fake_headers)
videos = re.findall(
r'(https://www\.youtube\.com/watch\?v=[\w-]+)', page
)
print('Best matched result:')
    return videos[0]
def url_to_module(url):
try:
video_host = r1(r'https?://([^/]+)/', url)
video_url = r1(r'https?://[^/]+(.*)', url)
assert video_host and video_url
except AssertionError:
url = google_search(url)
video_host = r1(r'https?://([^/]+)/', url)
video_url = r1(r'https?://[^/]+(.*)', url)
if video_host.endswith('.com.cn') or video_host.endswith('.ac.cn'):
video_host = video_host[:-3]
domain = r1(r'(\.[^.]+\.[^.]+)$', video_host) or video_host
assert domain, 'unsupported url: ' + url
# all non-ASCII code points must be quoted (percent-encoded UTF-8)
url = ''.join([ch if ord(ch) in range(128) else parse.quote(ch) for ch in url])
video_host = r1(r'https?://([^/]+)/', url)
video_url = r1(r'https?://[^/]+(.*)', url)
k = r1(r'([^.]+)', domain)
if k in SITES:
return (
import_module('.'.join(['you_get', 'extractors', SITES[k]])),
url
)
else:
try:
try:
location = get_location(url) # t.co isn't happy with fake_headers
except:
location = get_location(url, headers=fake_headers)
except:
location = get_location(url, headers=fake_headers, get_method='GET')
if location and location != url and not location.startswith('/'):
return url_to_module(location)
else:
return import_module('you_get.extractors.universal'), url
def any_download(url, **kwargs):
m, url = url_to_module(url)
m.download(url, **kwargs)
def any_download_playlist(url, **kwargs):
m, url = url_to_module(url)
m.download_playlist(url, **kwargs)
def main(**kwargs):
script_main(any_download, any_download_playlist, **kwargs) | --- +++ @@ -224,6 +224,16 @@
def match1(text, *patterns):
+ """Scans through a string for substrings matched some patterns (first-subgroups only).
+
+ Args:
+ text: A string to be scanned.
+ patterns: Arbitrary number of regex patterns.
+
+ Returns:
+ When only one pattern is given, returns a string (None if no match found).
+ When more than one pattern is given, returns a list of strings ([] if no match found).
+ """
if len(patterns) == 1:
pattern = patterns[0]
@@ -242,6 +252,15 @@
def matchall(text, patterns):
+ """Scans through a string for substrings matched some patterns.
+
+ Args:
+ text: A string to be scanned.
+ patterns: A list of regex patterns.
+
+ Returns:
+ A list of matched substrings; empty if none matched.
+ """
ret = []
for pattern in patterns:
@@ -272,6 +291,15 @@
def parse_query_param(url, param):
+ """Parses the query string of a URL and returns the value of a parameter.
+
+ Args:
+ url: A URL.
+ param: A string representing the name of the parameter.
+
+ Returns:
+ The value of the parameter.
+ """
try:
return parse.parse_qs(parse.urlparse(url).query)[param][0]
@@ -297,6 +325,8 @@
def ungzip(data):
+ """Decompresses data for Content-Encoding: gzip.
+ """
from io import BytesIO
import gzip
buffer = BytesIO(data)
@@ -305,6 +335,9 @@
def undeflate(data):
+ """Decompresses data for Content-Encoding: deflate.
+ (the zlib compression is used.)
+ """
import zlib
decompressobj = zlib.decompressobj(-zlib.MAX_WBITS)
return decompressobj.decompress(data)+decompressobj.flush()
@@ -416,6 +449,16 @@
def get_content(url, headers={}, decoded=True):
+ """Gets the content of a URL via sending a HTTP GET request.
+
+ Args:
+ url: A URL.
+ headers: Request headers used by the client.
+ decoded: Whether to decode the response body using UTF-8 or the charset specified in Content-Type.
+
+ Returns:
+ The content as a string.
+ """
logging.debug('get_content: %s' % url)
@@ -457,6 +500,16 @@
def post_content(url, headers={}, post_data={}, decoded=True, **kwargs):
+ """Post the content of a URL via sending a HTTP POST request.
+
+ Args:
+ url: A URL.
+ headers: Request headers used by the client.
+ decoded: Whether to decode the response body using UTF-8 or the charset specified in Content-Type.
+
+ Returns:
+ The content as a string.
+ """
if kwargs.get('post_data_raw'):
logging.debug('post_content: %s\npost_data_raw: %s' % (url, kwargs['post_data_raw']))
else:
@@ -1260,6 +1313,8 @@
def parse_host(host):
+ """Parses host name and port number from a string.
+ """
if re.match(r'^(\d+)$', host) is not None:
return ("0.0.0.0", int(host))
if re.match(r'^(\w+)://', host) is None:
@@ -1828,4 +1883,4 @@
def main(**kwargs):
- script_main(any_download, any_download_playlist, **kwargs)+ script_main(any_download, any_download_playlist, **kwargs)
| https://raw.githubusercontent.com/soimort/you-get/HEAD/src/you_get/common.py |
Generate descriptive docstrings automatically | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from ..common import *
from ..extractor import VideoExtractor
from json import loads
from urllib.parse import urlsplit
from os.path import dirname
import re
import base64
import time
import uuid
class MGTV(VideoExtractor):
name = "芒果 (MGTV)"
# Last updated: 2016-11-13
stream_types = [
{'id': 'fhd', 'container': 'ts', 'video_profile': '蓝光'},
{'id': 'hd', 'container': 'ts', 'video_profile': '超清'},
{'id': 'sd', 'container': 'ts', 'video_profile': '高清'},
{'id': 'ld', 'container': 'ts', 'video_profile': '标清'},
]
id_dic = {i['video_profile']: (i['id']) for i in stream_types}
did = str(uuid.uuid4())
ver = '0.3.0301'
pno = '1030'
def tk2(self):
return base64.urlsafe_b64encode(b'did=%s|ver=%s|pno=%s|clit=%d' % (
self.did.encode(), self.ver.encode(), self.pno.encode(), time.time())).decode('utf-8')[::-1]
info_endpoint = 'https://pcweb.api.mgtv.com/video/info?vid={video_id}'
player_endpoint = 'https://pcweb.api.mgtv.com/player/video?did={did}&tk2={tk2}&video_id={video_id}'
source_endpoint = 'https://pcweb.api.mgtv.com/player/getSource?tk2={tk2}&pm2={pm2}&video_id={video_id}'
playlist_endpoint = 'https://pcweb.api.mgtv.com/episode/list?video_id={video_id}&page={page}&size=30'
@staticmethod
def get_vid_from_url(url):
vid = match1(url, r'https?://www.mgtv.com/(?:b|l)/\d+/(\d+).html')
if not vid:
vid = match1(url, r'https?://www.mgtv.com/hz/bdpz/\d+/(\d+).html')
if not vid:
vid = match1(url, r'https?://www.mgtv.com/s/(\d+).html')
return vid
# ----------------------------------------------------------------------
def get_mgtv_real_url(self, url):
content = loads(get_content(url))
m3u_url = content['info']
split = urlsplit(m3u_url)
base_url = "{scheme}://{netloc}{path}/".format(scheme=split[0],
netloc=split[1],
path=dirname(split[2]))
content = get_content(content['info'],
headers={'Referer': self.url}) # get the REAL M3U url, maybe to be changed later?
segment_list = []
segments_size = 0
for i in content.split():
            if not i.startswith('#'):  # not the best way; it would be better to use the m3u8 package
segment_list.append(base_url + i)
# use ext-info for fast size calculate
elif i.startswith('#EXT-MGTV-File-SIZE:'):
segments_size += int(i[i.rfind(':') + 1:])
return m3u_url, segments_size, segment_list
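    # A hedged alternative to the manual parsing above: the third-party m3u8
    # package (an assumption -- it is not a dependency of this module) could
    # walk the segments, at the cost of losing the #EXT-MGTV-File-SIZE shortcut:
    #   import m3u8
    #   playlist = m3u8.loads(content)
    #   segment_list = [base_url + seg.uri for seg in playlist.segments]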
def download_playlist_by_url(self, url, **kwargs):
self.url = url
self.vid = self.get_vid_from_url(self.url)
content_playlist = get_content(self.playlist_endpoint.format(video_id=self.vid, page=1))
content_playlist = loads(content_playlist)
for ep in content_playlist['data']['list']:
self.download_by_url('https://www.mgtv.com' + ep['url'], **kwargs)
max_page = content_playlist['data']['total_page']
for page in range(2, max_page + 1):
content_playlist = get_content(self.playlist_endpoint.format(video_id=self.vid, page=page))
content_playlist = loads(content_playlist)
for ep in content_playlist['data']['list']:
self.download_by_url('https://www.mgtv.com' + ep['url'], **kwargs)
def prepare(self, **kwargs):
if self.url:
self.vid = self.get_vid_from_url(self.url)
content_info = get_content(self.info_endpoint.format(video_id=self.vid))
log.d(content_info)
content_info = loads(content_info)
self.title = content_info['data']['info']['videoName']
content_player = get_content(self.player_endpoint.format(did=self.did, video_id=self.vid, tk2=self.tk2()))
log.d(content_player)
content_player = loads(content_player)
pm2 = content_player['data']['atc']['pm2']
content_source = get_content(self.source_endpoint.format(video_id=self.vid, tk2=self.tk2(), pm2=pm2))
log.d(content_source)
content_source = loads(content_source)
domain = content_source['data']['stream_domain'][0]
# stream_available = [i['name'] for i in content['data']['stream']]
stream_available = {}
for i in content_source['data']['stream']:
stream_available[i['name']] = i['url']
for s in self.stream_types:
if s['video_profile'] in stream_available.keys():
quality_id = self.id_dic[s['video_profile']]
url = stream_available[s['video_profile']]
if url is None or url == '':
# skip invalid profile with empty url
continue
url = domain + re.sub(r'(\&arange\=\d+)', '', url) # Un-Hum
m3u8_url, m3u8_size, segment_list_this = self.get_mgtv_real_url(url)
stream_fileid_list = []
for i in segment_list_this:
stream_fileid_list.append(os.path.basename(i).split('.')[0])
# make pieces
pieces = []
for i in zip(stream_fileid_list, segment_list_this):
pieces.append({'fileid': i[0], 'segs': i[1], })
self.streams[quality_id] = {
'container': s['container'],
'video_profile': s['video_profile'],
'size': m3u8_size,
'pieces': pieces,
'm3u8_url': m3u8_url
}
if not kwargs['info_only']:
self.streams[quality_id]['src'] = segment_list_this
def extract(self, **kwargs):
if 'stream_id' in kwargs and kwargs['stream_id']:
# Extract the stream
stream_id = kwargs['stream_id']
if stream_id not in self.streams:
log.e('[Error] Invalid video format.')
log.e('Run \'-i\' command with no specific video format to view all available formats.')
exit(2)
else:
# Extract stream with the best quality
stream_id = self.streams_sorted[0]['id']
def download(self, **kwargs):
if 'stream_id' in kwargs and kwargs['stream_id']:
stream_id = kwargs['stream_id']
else:
stream_id = 'null'
# print video info only
if 'info_only' in kwargs and kwargs['info_only']:
if stream_id != 'null':
if 'index' not in kwargs:
self.p(stream_id)
else:
self.p_i(stream_id)
else:
# Display all available streams
if 'index' not in kwargs:
self.p([])
else:
stream_id = self.streams_sorted[0]['id'] if 'id' in self.streams_sorted[0] else \
self.streams_sorted[0]['itag']
self.p_i(stream_id)
# default to use the best quality
if stream_id == 'null':
stream_id = self.streams_sorted[0]['id']
stream_info = self.streams[stream_id]
if not kwargs['info_only']:
if player:
# with m3u8 format because some video player can process urls automatically (e.g. mpv)
launch_player(player, [stream_info['m3u8_url']])
else:
download_urls(stream_info['src'], self.title, stream_info['container'], stream_info['size'],
output_dir=kwargs['output_dir'],
merge=kwargs.get('merge', True),
headers={'Referer': self.url})
# av=stream_id in self.dash_streams)
site = MGTV()
download = site.download_by_url
download_playlist = site.download_playlist_by_url | --- +++ @@ -42,6 +42,8 @@
@staticmethod
def get_vid_from_url(url):
+ """Extracts video ID from URL.
+ """
vid = match1(url, r'https?://www.mgtv.com/(?:b|l)/\d+/(\d+).html')
if not vid:
vid = match1(url, r'https?://www.mgtv.com/hz/bdpz/\d+/(\d+).html')
@@ -51,6 +53,8 @@
# ----------------------------------------------------------------------
def get_mgtv_real_url(self, url):
+ """str->list of str
+ Give you the real URLs."""
content = loads(get_content(url))
m3u_url = content['info']
split = urlsplit(m3u_url)
@@ -195,4 +199,4 @@
site = MGTV()
download = site.download_by_url
-download_playlist = site.download_playlist_by_url+download_playlist = site.download_playlist_by_url
| https://raw.githubusercontent.com/soimort/you-get/HEAD/src/you_get/extractors/mgtv.py |
Add verbose docstrings with examples | #!/usr/bin/env python
from ..common import *
from ..extractor import VideoExtractor
try:
import dukpy
except ImportError:
log.e('Please install dukpy in order to extract videos from YouTube:')
log.e('$ pip install dukpy')
exit(0)
from urllib.parse import urlparse, parse_qs, urlencode
from xml.dom.minidom import parseString
class YouTube(VideoExtractor):
name = "YouTube"
# Non-DASH YouTube media encoding options, in descending quality order.
# http://en.wikipedia.org/wiki/YouTube#Quality_and_codecs. Retrieved July 17, 2014.
stream_types = [
{'itag': '38', 'container': 'MP4', 'video_resolution': '3072p',
'video_encoding': 'H.264', 'video_profile': 'High', 'video_bitrate': '3.5-5',
'audio_encoding': 'AAC', 'audio_bitrate': '192'},
#{'itag': '85', 'container': 'MP4', 'video_resolution': '1080p', 'video_encoding': 'H.264', 'video_profile': '3D', 'video_bitrate': '3-4', 'audio_encoding': 'AAC', 'audio_bitrate': '192'},
{'itag': '46', 'container': 'WebM', 'video_resolution': '1080p',
'video_encoding': 'VP8', 'video_profile': '', 'video_bitrate': '',
'audio_encoding': 'Vorbis', 'audio_bitrate': '192'},
{'itag': '37', 'container': 'MP4', 'video_resolution': '1080p',
'video_encoding': 'H.264', 'video_profile': 'High', 'video_bitrate': '3-4.3',
'audio_encoding': 'AAC', 'audio_bitrate': '192'},
#{'itag': '102', 'container': 'WebM', 'video_resolution': '720p', 'video_encoding': 'VP8', 'video_profile': '3D', 'video_bitrate': '', 'audio_encoding': 'Vorbis', 'audio_bitrate': '192'},
{'itag': '45', 'container': 'WebM', 'video_resolution': '720p',
'video_encoding': 'VP8', 'video_profile': '', 'video_bitrate': '2',
'audio_encoding': 'Vorbis', 'audio_bitrate': '192'},
#{'itag': '84', 'container': 'MP4', 'video_resolution': '720p', 'video_encoding': 'H.264', 'video_profile': '3D', 'video_bitrate': '2-3', 'audio_encoding': 'AAC', 'audio_bitrate': '192'},
{'itag': '22', 'container': 'MP4', 'video_resolution': '720p',
'video_encoding': 'H.264', 'video_profile': 'High', 'video_bitrate': '2-3',
'audio_encoding': 'AAC', 'audio_bitrate': '192'},
{'itag': '120', 'container': 'FLV', 'video_resolution': '720p',
'video_encoding': 'H.264', 'video_profile': 'Main@L3.1', 'video_bitrate': '2',
'audio_encoding': 'AAC', 'audio_bitrate': '128'}, # Live streaming only
{'itag': '44', 'container': 'WebM', 'video_resolution': '480p',
'video_encoding': 'VP8', 'video_profile': '', 'video_bitrate': '1',
'audio_encoding': 'Vorbis', 'audio_bitrate': '128'},
{'itag': '35', 'container': 'FLV', 'video_resolution': '480p',
'video_encoding': 'H.264', 'video_profile': 'Main', 'video_bitrate': '0.8-1',
'audio_encoding': 'AAC', 'audio_bitrate': '128'},
#{'itag': '101', 'container': 'WebM', 'video_resolution': '360p', 'video_encoding': 'VP8', 'video_profile': '3D', 'video_bitrate': '', 'audio_encoding': 'Vorbis', 'audio_bitrate': '192'},
#{'itag': '100', 'container': 'WebM', 'video_resolution': '360p', 'video_encoding': 'VP8', 'video_profile': '3D', 'video_bitrate': '', 'audio_encoding': 'Vorbis', 'audio_bitrate': '128'},
{'itag': '43', 'container': 'WebM', 'video_resolution': '360p',
'video_encoding': 'VP8', 'video_profile': '', 'video_bitrate': '0.5',
'audio_encoding': 'Vorbis', 'audio_bitrate': '128'},
{'itag': '34', 'container': 'FLV', 'video_resolution': '360p',
'video_encoding': 'H.264', 'video_profile': 'Main', 'video_bitrate': '0.5',
'audio_encoding': 'AAC', 'audio_bitrate': '128'},
#{'itag': '82', 'container': 'MP4', 'video_resolution': '360p', 'video_encoding': 'H.264', 'video_profile': '3D', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': '96'},
{'itag': '18', 'container': 'MP4', 'video_resolution': '360p',
'video_encoding': 'H.264', 'video_profile': 'Baseline', 'video_bitrate': '0.5',
'audio_encoding': 'AAC', 'audio_bitrate': '96'},
{'itag': '6', 'container': 'FLV', 'video_resolution': '270p',
'video_encoding': 'Sorenson H.263', 'video_profile': '', 'video_bitrate': '0.8',
'audio_encoding': 'MP3', 'audio_bitrate': '64'},
#{'itag': '83', 'container': 'MP4', 'video_resolution': '240p', 'video_encoding': 'H.264', 'video_profile': '3D', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': '96'},
{'itag': '13', 'container': '3GP', 'video_resolution': '',
'video_encoding': 'MPEG-4 Visual', 'video_profile': '', 'video_bitrate': '0.5',
'audio_encoding': 'AAC', 'audio_bitrate': ''},
{'itag': '5', 'container': 'FLV', 'video_resolution': '240p',
'video_encoding': 'Sorenson H.263', 'video_profile': '', 'video_bitrate': '0.25',
'audio_encoding': 'MP3', 'audio_bitrate': '64'},
{'itag': '36', 'container': '3GP', 'video_resolution': '240p',
'video_encoding': 'MPEG-4 Visual', 'video_profile': 'Simple', 'video_bitrate': '0.175',
'audio_encoding': 'AAC', 'audio_bitrate': '32'},
{'itag': '17', 'container': '3GP', 'video_resolution': '144p',
'video_encoding': 'MPEG-4 Visual', 'video_profile': 'Simple', 'video_bitrate': '0.05',
'audio_encoding': 'AAC', 'audio_bitrate': '24'},
]
def dethrottle(js, url):
def n_to_n(js, n):
# Examples:
# yma - https://www.youtube.com/s/player/84314bef/player_ias.vflset/en_US/base.js
# Xka - https://www.youtube.com/s/player/dc0c6770/player_ias.vflset/sv_SE/base.js
# jma - https://www.youtube.com/s/player/8d9f6215/player_ias.vflset/sv_SE/base.js
f1 = match1(js, r',[$\w]+\.length\|\|([$\w]+)\(""\)\)}};')
# Examples:
# Yla, ida - https://www.youtube.com/s/player/fb725ac8/player-plasma-ias-phone-sv_SE.vflset/base.js
# Hla, eda - https://www.youtube.com/s/player/2f238d39/player-plasma-ias-phone-en_US.vflset/base.js
# WyE, bE7, Gsn - https://www.youtube.com/s/player/3bb1f723/player-plasma-ias-phone-sv_SE.vflset/base.js
if not f1:
f0 = match1(js, r'\w=([$\w]+)\[0\]\(\w\),\w\.set\(\w,\w\)')
f1 = match1(js, r'%s=\[([$\w]+)\]' % f0)
f1def = match1(js, r'\W%s=(function\(\w+\).+?\)});' % re.escape(f1))
v1 = match1(f1def, r'if\(typeof ([$\w]+)==="undefined"\)')
v1def = match1(js, r'(var %s=[^;]+;)' % v1)
if not v1def:
v1def = ''
n = dukpy.evaljs('%s(%s)("%s")' % (v1def, f1def, n))
return n
u = urlparse(url)
qs = parse_qs(u.query)
n = n_to_n(js, qs['n'][0])
qs['n'] = [n]
return u._replace(query=urlencode(qs, doseq=True)).geturl()
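    # Illustrative query rewrite (hypothetical token values): dethrottle()
    # replaces the throttling token in the 'n' parameter with the value the
    # player JS computes, leaving the rest of the URL intact.
    #   'https://r1.example.com/videoplayback?expire=1&n=AAA'
    #   -> 'https://r1.example.com/videoplayback?expire=1&n=BBB'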
def s_to_sig(js, s):
# Examples:
# BPa - https://www.youtube.com/s/player/84314bef/player_ias.vflset/en_US/base.js
# Xva - https://www.youtube.com/s/player/dc0c6770/player_ias.vflset/sv_SE/base.js
js_code = ''
f1 = match1(js, r'=([$\w]+)\(decodeURIComponent\(')
f1def = match1(js, r'\W%s=function(\(\w+\)\{[^\{]+\})' % re.escape(f1))
f1def = re.sub(r'([$\w]+\.)([$\w]+\(\w+,\d+\))', r'\2', f1def) # remove . prefix
f1def = 'function %s%s' % (f1, f1def)
f2s = set(re.findall(r'([$\w]+)\(\w+,\d+\)', f1def)) # find all invoked function names
for f2 in f2s:
f2e = re.escape(f2)
f2def = re.search(r'[^$\w]%s:function\((\w+,\w+)\)(\{[^\{\}]+\})' % f2e, js)
if f2def:
f2def = 'function {}({}){}'.format(f2e, f2def.group(1), f2def.group(2))
else:
f2def = re.search(r'[^$\w]%s:function\((\w+)\)(\{[^\{\}]+\})' % f2e, js)
f2def = 'function {}({},b){}'.format(f2e, f2def.group(1), f2def.group(2))
js_code += f2def + ';'
js_code += f1def + ';%s("%s")' % (f1, s)
sig = dukpy.evaljs(js_code)
return sig
def chunk_by_range(url, size):
urls = []
chunk_size = 10485760
start, end = 0, chunk_size - 1
urls.append('%s&range=%s-%s' % (url, start, end))
while end + 1 < size: # processed size < expected size
start, end = end + 1, end + chunk_size
urls.append('%s&range=%s-%s' % (url, start, end))
return urls
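    # Worked example (hypothetical URL): a 25 MB resource is split into
    # 10 MiB chunks; the last range may overshoot the real size, which
    # servers clamp to the actual end of the file.
    #   >>> YouTube.chunk_by_range('https://example.com/v', 25000000)
    #   ['https://example.com/v&range=0-10485759',
    #    'https://example.com/v&range=10485760-20971519',
    #    'https://example.com/v&range=20971520-31457279']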
def get_url_from_vid(vid):
return 'https://youtu.be/{}'.format(vid)
def get_vid_from_url(url):
return match1(url, r'youtu\.be/([^?/]+)') or \
match1(url, r'youtube\.com/embed/([^/?]+)') or \
match1(url, r'youtube\.com/shorts/([^/?]+)') or \
match1(url, r'youtube\.com/v/([^/?]+)') or \
match1(url, r'youtube\.com/watch/([^/?]+)') or \
parse_query_param(url, 'v') or \
parse_query_param(parse_query_param(url, 'u'), 'v')
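    # Illustrative matches (not in the original source):
    #   >>> YouTube.get_vid_from_url('https://youtu.be/dQw4w9WgXcQ')
    #   'dQw4w9WgXcQ'
    #   >>> YouTube.get_vid_from_url('https://www.youtube.com/watch?v=dQw4w9WgXcQ')
    #   'dQw4w9WgXcQ'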
def get_playlist_id_from_url(url):
return parse_query_param(url, 'list') or \
parse_query_param(url, 'p')
def download_playlist_by_url(self, url, **kwargs):
self.url = url
playlist_id = self.__class__.get_playlist_id_from_url(self.url)
if playlist_id is None:
log.wtf('[Failed] Unsupported URL pattern.')
video_page = get_content('https://www.youtube.com/playlist?list=%s' % playlist_id)
playlist_json_serialized = match1(video_page, r'window\["ytInitialData"\]\s*=\s*(.+);', r'var\s+ytInitialData\s*=\s*([^;]+);')
if len(playlist_json_serialized) == 0:
log.wtf('[Failed] Unable to extract playlist data')
ytInitialData = json.loads(playlist_json_serialized[0])
tab0 = ytInitialData['contents']['twoColumnBrowseResultsRenderer']['tabs'][0]
itemSection0 = tab0['tabRenderer']['content']['sectionListRenderer']['contents'][0]
playlistVideoList0 = itemSection0['itemSectionRenderer']['contents'][0]
videos = playlistVideoList0['playlistVideoListRenderer']['contents']
self.title = re.search(r'<meta name="title" content="([^"]+)"', video_page).group(1)
self.p_playlist()
for index, video in enumerate(videos, 1):
vid = video['playlistVideoRenderer']['videoId']
try:
self.__class__().download_by_url(self.__class__.get_url_from_vid(vid), index=index, **kwargs)
except:
pass
# FIXME: show DASH stream sizes (by default) for playlist videos
def check_playability_response(self, ytInitialPlayerResponse):
STATUS_OK = "OK"
playerResponseStatus = ytInitialPlayerResponse["playabilityStatus"]["status"]
if playerResponseStatus != STATUS_OK:
try:
reason = ytInitialPlayerResponse["playabilityStatus"]['errorScreen']\
['playerErrorMessageRenderer']['reason']['runs'][0]['text']
reason += ' ' + ytInitialPlayerResponse["playabilityStatus"]['errorScreen']\
['playerErrorMessageRenderer']['subreason']['runs'][0]['text']
except:
reason = ytInitialPlayerResponse["playabilityStatus"].get("reason", "")
if reason:
log.wtf(f'Server refused to provide video details. Returned status: {playerResponseStatus}. Reason: {reason}')
else:
log.wtf(f'Server refused to provide video details. Returned status: {playerResponseStatus}.')
def prepare(self, **kwargs):
self.ua = 'Mozilla/5.0 (iPad; CPU OS 16_7_10 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.6 Mobile/15E148 Safari/604.1,gzip(gfe)'
assert self.url or self.vid
if not self.vid and self.url:
self.vid = self.__class__.get_vid_from_url(self.url)
if self.vid is None:
self.download_playlist_by_url(self.url, **kwargs)
exit(0)
if re.search(r'\Wlist=', self.url) and not kwargs.get('playlist'):
log.w('This video is from a playlist. (use --playlist to download all videos in the playlist.)')
# Extract from video page
logging.debug('Extracting from the video page...')
video_page = get_content('https://www.youtube.com/watch?v=%s' % self.vid, headers={'User-Agent': self.ua})
try:
jsUrl = re.search(r'([^"]*/base\.js)"', video_page).group(1)
except:
log.wtf('[Failed] Unable to find base.js on the video page')
self.html5player = 'https://www.youtube.com' + jsUrl
logging.debug('Retrieving the player code...')
self.js = get_content(self.html5player).replace('\n', ' ')
logging.debug('Loading ytInitialPlayerResponse...')
ytInitialPlayerResponse = json.loads(re.search(r'ytInitialPlayerResponse\s*=\s*([^\n]+?});(\n|</script>|var )', video_page).group(1))
self.check_playability_response(ytInitialPlayerResponse)
# Get the video title
self.title = ytInitialPlayerResponse["videoDetails"]["title"]
# Check the status
playabilityStatus = ytInitialPlayerResponse['playabilityStatus']
status = playabilityStatus['status']
logging.debug('status: %s' % status)
if status != 'OK':
# If cookies are loaded, status should be OK
try:
subreason = playabilityStatus['errorScreen']['playerErrorMessageRenderer']['subreason']['runs'][0]['text']
log.e('[Error] %s (%s)' % (playabilityStatus['reason'], subreason))
except:
log.e('[Error] %s' % playabilityStatus['reason'])
if status == 'LOGIN_REQUIRED':
log.e('View the video from a browser and export the cookies, then use --cookies to load cookies.')
exit(1)
stream_list = ytInitialPlayerResponse['streamingData']['formats']
for stream in stream_list:
logging.debug('Found format: itag=%s' % stream['itag'])
if 'signatureCipher' in stream:
logging.debug(' Parsing signatureCipher for itag=%s...' % stream['itag'])
qs = parse_qs(stream['signatureCipher'])
#logging.debug(qs)
sp = qs['sp'][0]
sig = self.__class__.s_to_sig(self.js, qs['s'][0])
url = qs['url'][0] + '&{}={}'.format(sp, sig)
elif 'url' in stream:
url = stream['url']
else:
log.wtf(' No signatureCipher or url for itag=%s' % stream['itag'])
url = self.__class__.dethrottle(self.js, url)
self.streams[str(stream['itag'])] = {
'itag': str(stream['itag']),
'url': url,
'quality': stream['quality'],
'type': stream['mimeType'],
'mime': stream['mimeType'].split(';')[0],
'container': mime_to_container(stream['mimeType'].split(';')[0]),
}
# FIXME: Prepare caption tracks
try:
caption_tracks = ytInitialPlayerResponse['captions']['playerCaptionsTracklistRenderer']['captionTracks']
for ct in caption_tracks:
ttsurl, lang = ct['baseUrl'], ct['languageCode']
if ttsurl.startswith('/'):
ttsurl = 'https://www.youtube.com' + ttsurl
tts_xml = parseString(get_content(ttsurl))
transcript = tts_xml.getElementsByTagName('transcript')[0]
texts = transcript.getElementsByTagName('text')
srt = ""; seq = 0
for text in texts:
if text.firstChild is None: continue # empty element
seq += 1
start = float(text.getAttribute('start'))
if text.getAttribute('dur'):
dur = float(text.getAttribute('dur'))
else: dur = 1.0 # could be ill-formed XML
finish = start + dur
m, s = divmod(start, 60); h, m = divmod(m, 60)
start = '{:0>2}:{:0>2}:{:06.3f}'.format(int(h), int(m), s).replace('.', ',')
m, s = divmod(finish, 60); h, m = divmod(m, 60)
finish = '{:0>2}:{:0>2}:{:06.3f}'.format(int(h), int(m), s).replace('.', ',')
content = unescape_html(text.firstChild.nodeValue)
srt += '%s\n' % str(seq)
srt += '%s --> %s\n' % (start, finish)
srt += '%s\n\n' % content
if 'kind' in ct:
self.caption_tracks[ct['vssId']] = srt # autogenerated
else:
self.caption_tracks[lang] = srt
except: pass
# Prepare DASH streams
if 'adaptiveFormats' in ytInitialPlayerResponse['streamingData']:
streams = ytInitialPlayerResponse['streamingData']['adaptiveFormats']
# FIXME: dead code?
# streams without contentLength got broken urls, just remove them (#2767)
streams = [stream for stream in streams if 'contentLength' in stream]
for stream in streams:
logging.debug('Found adaptiveFormat: itag=%s' % stream['itag'])
stream['itag'] = str(stream['itag'])
if 'qualityLabel' in stream:
stream['quality_label'] = stream['qualityLabel']
del stream['qualityLabel']
logging.debug(' quality_label: \t%s' % stream['quality_label'])
if 'width' in stream:
stream['size'] = '{}x{}'.format(stream['width'], stream['height'])
del stream['width']
del stream['height']
logging.debug(' size: \t%s' % stream['size'])
stream['type'] = stream['mimeType']
logging.debug(' type: \t%s' % stream['type'])
stream['clen'] = stream['contentLength']
stream['init'] = '{}-{}'.format(
stream['initRange']['start'],
stream['initRange']['end'])
stream['index'] = '{}-{}'.format(
stream['indexRange']['start'],
stream['indexRange']['end'])
del stream['mimeType']
del stream['contentLength']
del stream['initRange']
del stream['indexRange']
if 'signatureCipher' in stream:
logging.debug(' Parsing signatureCipher for itag=%s...' % stream['itag'])
qs = parse_qs(stream['signatureCipher'])
#logging.debug(qs)
sp = qs['sp'][0]
sig = self.__class__.s_to_sig(self.js, qs['s'][0])
url = qs['url'][0] + '&ratebypass=yes&{}={}'.format(sp, sig)
elif 'url' in stream:
url = stream['url']
else:
log.wtf('No signatureCipher or url for itag=%s' % stream['itag'])
url = self.__class__.dethrottle(self.js, url)
stream['url'] = url
for stream in streams: # audio
if stream['type'].startswith('audio/mp4'):
dash_mp4_a_url = stream['url']
dash_mp4_a_size = stream['clen']
elif stream['type'].startswith('audio/webm'):
dash_webm_a_url = stream['url']
dash_webm_a_size = stream['clen']
for stream in streams: # video
if 'size' in stream:
if stream['type'].startswith('video/mp4'):
mimeType = 'video/mp4'
dash_url = stream['url']
dash_size = stream['clen']
itag = stream['itag']
dash_urls = self.__class__.chunk_by_range(dash_url, int(dash_size))
dash_mp4_a_urls = self.__class__.chunk_by_range(dash_mp4_a_url, int(dash_mp4_a_size))
self.dash_streams[itag] = {
'quality': '%s (%s)' % (stream['size'], stream['quality_label']),
'itag': itag,
'type': mimeType,
'mime': mimeType,
'container': 'mp4',
'src': [dash_urls, dash_mp4_a_urls],
'size': int(dash_size) + int(dash_mp4_a_size)
}
elif stream['type'].startswith('video/webm'):
mimeType = 'video/webm'
dash_url = stream['url']
dash_size = stream['clen']
itag = stream['itag']
audio_url = None
audio_size = None
try:
audio_url = dash_webm_a_url
audio_size = int(dash_webm_a_size)
except UnboundLocalError as e:
audio_url = dash_mp4_a_url
audio_size = int(dash_mp4_a_size)
dash_urls = self.__class__.chunk_by_range(dash_url, int(dash_size))
audio_urls = self.__class__.chunk_by_range(audio_url, int(audio_size))
self.dash_streams[itag] = {
'quality': '%s (%s)' % (stream['size'], stream['quality_label']),
'itag': itag,
'type': mimeType,
'mime': mimeType,
'container': 'webm',
'src': [dash_urls, audio_urls],
'size': int(dash_size) + int(audio_size)
}
def extract(self, **kwargs):
if not self.streams_sorted:
# No stream is available
return
if 'stream_id' in kwargs and kwargs['stream_id']:
# Extract the stream
stream_id = kwargs['stream_id']
if stream_id not in self.streams and stream_id not in self.dash_streams:
log.e('[Error] Invalid video format.')
log.e('Run \'-i\' command with no specific video format to view all available formats.')
exit(2)
else:
# Extract stream with the best quality
stream_id = self.streams_sorted[0]['itag']
if stream_id in self.streams:
src = self.streams[stream_id]['url']
self.streams[stream_id]['src'] = [src]
self.streams[stream_id]['size'] = urls_size(self.streams[stream_id]['src'])
site = YouTube()
download = site.download_by_url
download_playlist = site.download_playlist_by_url | --- +++ @@ -142,6 +142,8 @@ return 'https://youtu.be/{}'.format(vid)
def get_vid_from_url(url):
+ """Extracts video ID from URL.
+ """
return match1(url, r'youtu\.be/([^?/]+)') or \
match1(url, r'youtube\.com/embed/([^/?]+)') or \
match1(url, r'youtube\.com/shorts/([^/?]+)') or \
@@ -151,6 +153,8 @@ parse_query_param(parse_query_param(url, 'u'), 'v')
def get_playlist_id_from_url(url):
+ """Extracts playlist ID from URL.
+ """
return parse_query_param(url, 'list') or \
parse_query_param(url, 'p')
@@ -434,4 +438,4 @@
site = YouTube()
download = site.download_by_url
-download_playlist = site.download_playlist_by_url
+download_playlist = site.download_playlist_by_url
| https://raw.githubusercontent.com/soimort/you-get/HEAD/src/you_get/extractors/youtube.py |
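
The SRT timestamp conversion in the youtube.py row above (the divmod chain plus the '.' -> ',' swap) is the part of the caption code that is easiest to get wrong, so here is a minimal, self-contained sketch of the same conversion; the helper name srt_timestamp is illustrative and not part of you-get:

def srt_timestamp(seconds):
    """Format a float second count as an SRT timestamp, e.g. 83.5 -> '00:01:23,500'."""
    m, s = divmod(seconds, 60)
    h, m = divmod(m, 60)
    # SRT uses a comma, not a dot, as the millisecond separator
    return '{:0>2}:{:0>2}:{:06.3f}'.format(int(h), int(m), s).replace('.', ',')

assert srt_timestamp(83.5) == '00:01:23,500'
assert srt_timestamp(3661.25) == '01:01:01,250'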
Generate docstrings with examples | #!/usr/bin/env python
# This file is Python 2 compliant.
from ..version import script_name
import os, sys
TERM = os.getenv('TERM', '')
IS_ANSI_TERMINAL = TERM in (
'eterm-color',
'linux',
'screen',
'vt100',
) or TERM.startswith('xterm')
# ANSI escape code
# See <http://en.wikipedia.org/wiki/ANSI_escape_code>
RESET = 0
BOLD = 1
UNDERLINE = 4
NEGATIVE = 7
NO_BOLD = 21
NO_UNDERLINE = 24
POSITIVE = 27
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
LIGHT_GRAY = 37
DEFAULT = 39
BLACK_BACKGROUND = 40
RED_BACKGROUND = 41
GREEN_BACKGROUND = 42
YELLOW_BACKGROUND = 43
BLUE_BACKGROUND = 44
MAGENTA_BACKGROUND = 45
CYAN_BACKGROUND = 46
LIGHT_GRAY_BACKGROUND = 47
DEFAULT_BACKGROUND = 49
DARK_GRAY = 90 # xterm
LIGHT_RED = 91 # xterm
LIGHT_GREEN = 92 # xterm
LIGHT_YELLOW = 93 # xterm
LIGHT_BLUE = 94 # xterm
LIGHT_MAGENTA = 95 # xterm
LIGHT_CYAN = 96 # xterm
WHITE = 97 # xterm
DARK_GRAY_BACKGROUND = 100 # xterm
LIGHT_RED_BACKGROUND = 101 # xterm
LIGHT_GREEN_BACKGROUND = 102 # xterm
LIGHT_YELLOW_BACKGROUND = 103 # xterm
LIGHT_BLUE_BACKGROUND = 104 # xterm
LIGHT_MAGENTA_BACKGROUND = 105 # xterm
LIGHT_CYAN_BACKGROUND = 106 # xterm
WHITE_BACKGROUND = 107 # xterm
def sprint(text, *colors):
return "\33[{}m{content}\33[{}m".format(";".join([str(color) for color in colors]), RESET, content=text) if IS_ANSI_TERMINAL and colors else text
def println(text, *colors):
sys.stdout.write(sprint(text, *colors) + "\n")
def print_err(text, *colors):
sys.stderr.write(sprint(text, *colors) + "\n")
def print_log(text, *colors):
sys.stderr.write(sprint("{}: {}".format(script_name, text), *colors) + "\n")
def i(message):
print_log(message)
def d(message):
print_log(message, BLUE)
def w(message):
print_log(message, YELLOW)
def e(message, exit_code=None):
print_log(message, YELLOW, BOLD)
if exit_code is not None:
sys.exit(exit_code)
def wtf(message, exit_code=1):
print_log(message, RED, BOLD)
if exit_code is not None:
sys.exit(exit_code)
def yes_or_no(message):
ans = str(input('%s (y/N) ' % message)).lower().strip()
return ans == 'y' | --- +++ @@ -58,36 +58,45 @@ WHITE_BACKGROUND = 107 # xterm
def sprint(text, *colors):
+ """Format text with color or other effects into ANSI escaped string."""
return "\33[{}m{content}\33[{}m".format(";".join([str(color) for color in colors]), RESET, content=text) if IS_ANSI_TERMINAL and colors else text
def println(text, *colors):
+ """Print text to standard output."""
sys.stdout.write(sprint(text, *colors) + "\n")
def print_err(text, *colors):
+ """Print text to standard error."""
sys.stderr.write(sprint(text, *colors) + "\n")
def print_log(text, *colors):
+ """Print a log message to standard error."""
sys.stderr.write(sprint("{}: {}".format(script_name, text), *colors) + "\n")
def i(message):
+ """Print a normal log message."""
print_log(message)
def d(message):
+ """Print a debug log message."""
print_log(message, BLUE)
def w(message):
+ """Print a warning log message."""
print_log(message, YELLOW)
def e(message, exit_code=None):
+ """Print an error log message."""
print_log(message, YELLOW, BOLD)
if exit_code is not None:
sys.exit(exit_code)
def wtf(message, exit_code=1):
+ """What a Terrible Failure!"""
print_log(message, RED, BOLD)
if exit_code is not None:
sys.exit(exit_code)
def yes_or_no(message):
ans = str(input('%s (y/N) ' % message)).lower().strip()
-    return ans == 'y'
+    return ans == 'y'
| https://raw.githubusercontent.com/soimort/you-get/HEAD/src/you_get/util/log.py |
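
The sprint helper in the log.py row above builds colored output by joining SGR codes with ';' inside a single escape sequence. A minimal sketch of that pattern, with the IS_ANSI_TERMINAL guard omitted for brevity:

RESET, BOLD, RED = 0, 1, 31

def sprint(text, *colors):
    # "\33[1;31m" enables bold red; "\33[0m" resets all attributes
    return "\33[{}m{}\33[{}m".format(";".join(str(c) for c in colors), text, RESET)

assert sprint("hi", BOLD, RED) == "\33[1;31mhi\33[0m"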
Document functions with clear intent | #!/usr/bin/env python
__all__ = ['wanmen_download', 'wanmen_download_by_course', 'wanmen_download_by_course_topic', 'wanmen_download_by_course_topic_part']
from ..common import *
from .bokecc import bokecc_download_by_id
from json import loads
##Helper functions
def _wanmen_get_json_api_content_by_courseID(courseID):
return loads(get_content('http://api.wanmen.org/course/getCourseNested/{courseID}'.format(courseID = courseID)))
def _wanmen_get_title_by_json_topic_part(json_content, tIndex, pIndex):
return '_'.join([json_content[0]['name'],
json_content[0]['Topics'][tIndex]['name'],
json_content[0]['Topics'][tIndex]['Parts'][pIndex]['name']])
def _wanmen_get_boke_id_by_json_topic_part(json_content, tIndex, pIndex):
return json_content[0]['Topics'][tIndex]['Parts'][pIndex]['ccVideoLink']
##Parsers
def wanmen_download_by_course(json_api_content, output_dir='.', merge=True, info_only=False, **kwargs):
for tIndex in range(len(json_api_content[0]['Topics'])):
for pIndex in range(len(json_api_content[0]['Topics'][tIndex]['Parts'])):
wanmen_download_by_course_topic_part(json_api_content,
tIndex,
pIndex,
output_dir=output_dir,
merge=merge,
info_only=info_only,
**kwargs)
def wanmen_download_by_course_topic(json_api_content, tIndex, output_dir='.', merge=True, info_only=False, **kwargs):
for pIndex in range(len(json_api_content[0]['Topics'][tIndex]['Parts'])):
wanmen_download_by_course_topic_part(json_api_content,
tIndex,
pIndex,
output_dir=output_dir,
merge=merge,
info_only=info_only,
**kwargs)
def wanmen_download_by_course_topic_part(json_api_content, tIndex, pIndex, output_dir='.', merge=True, info_only=False, **kwargs):
html = json_api_content
title = _wanmen_get_title_by_json_topic_part(html,
tIndex,
pIndex)
bokeccID = _wanmen_get_boke_id_by_json_topic_part(html,
tIndex,
pIndex)
bokecc_download_by_id(vid = bokeccID, title = title, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)
##Main entrance
def wanmen_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
if not 'wanmen.org' in url:
log.wtf('You are at the wrong place dude. This is for WanMen University!')
raise
courseID = int(match1(url, r'course\/(\d+)'))
assert courseID > 0 #without courseID we cannot do anything
tIndex = int(match1(url, r'tIndex=(\d+)'))
pIndex = int(match1(url, r'pIndex=(\d+)'))
json_api_content = _wanmen_get_json_api_content_by_courseID(courseID)
if pIndex: #only download ONE single part
assert tIndex >= 0
wanmen_download_by_course_topic_part(json_api_content, tIndex, pIndex,
output_dir = output_dir,
merge = merge,
info_only = info_only)
elif tIndex: #download a topic
wanmen_download_by_course_topic(json_api_content, tIndex,
output_dir = output_dir,
merge = merge,
info_only = info_only)
else: #download the whole course
wanmen_download_by_course(json_api_content,
output_dir = output_dir,
merge = merge,
info_only = info_only)
site_info = "WanMen University"
download = wanmen_download
download_playlist = wanmen_download_by_course | --- +++ @@ -9,10 +9,16 @@
##Helper functions
def _wanmen_get_json_api_content_by_courseID(courseID):
+ """int->JSON
+
+ Return a parsed JSON tree of WanMen's API."""
return loads(get_content('http://api.wanmen.org/course/getCourseNested/{courseID}'.format(courseID = courseID)))
def _wanmen_get_title_by_json_topic_part(json_content, tIndex, pIndex):
+ """JSON, int, int, int->str
+
+ Get a proper title with courseid+topicID+partID."""
return '_'.join([json_content[0]['name'],
json_content[0]['Topics'][tIndex]['name'],
@@ -20,12 +26,19 @@
def _wanmen_get_boke_id_by_json_topic_part(json_content, tIndex, pIndex):
+ """JSON, int, int, int->str
+
+ Get one BokeCC video ID with courseid+topicID+partID."""
return json_content[0]['Topics'][tIndex]['Parts'][pIndex]['ccVideoLink']
##Parsers
def wanmen_download_by_course(json_api_content, output_dir='.', merge=True, info_only=False, **kwargs):
+ """int->None
+
+ Download a WHOLE course.
+ Reuse the API call to save time."""
for tIndex in range(len(json_api_content[0]['Topics'])):
for pIndex in range(len(json_api_content[0]['Topics'][tIndex]['Parts'])):
@@ -39,6 +52,10 @@
def wanmen_download_by_course_topic(json_api_content, tIndex, output_dir='.', merge=True, info_only=False, **kwargs):
+ """int, int->None
+
+ Download a TOPIC of a course.
+ Reuse the API call to save time."""
for pIndex in range(len(json_api_content[0]['Topics'][tIndex]['Parts'])):
wanmen_download_by_course_topic_part(json_api_content,
@@ -50,6 +67,9 @@ **kwargs)
def wanmen_download_by_course_topic_part(json_api_content, tIndex, pIndex, output_dir='.', merge=True, info_only=False, **kwargs):
+ """int, int, int->None
+
+ Download ONE PART of the course."""
html = json_api_content
| https://raw.githubusercontent.com/soimort/you-get/HEAD/src/you_get/extractors/wanmen.py |
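
The dispatch in wanmen_download above hinges on match1 pulling courseID, tIndex and pIndex out of the URL. A sketch with a stand-in match1 and a hypothetical URL; note that int(match1(url, r'tIndex=(\d+)')) raises TypeError when the query parameter is absent, so the single-topic and single-part branches only work for URLs that actually carry those parameters:

import re

def match1(text, pattern):
    # stand-in for you-get's match1: first capture group, or None on no match
    m = re.search(pattern, text)
    return m.group(1) if m else None

url = 'http://www.wanmen.org/courses/course/123?tIndex=3&pIndex=2'  # hypothetical
assert match1(url, r'course\/(\d+)') == '123'
assert match1(url, r'tIndex=(\d+)') == '3'
assert match1(url, r'pIndex=(\d+)') == '2'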
Add inline docstrings for readability | #!/usr/bin/env python3
import torch
from typeguard import check_argument_types
def initialize(model: torch.nn.Module, init: str):
assert check_argument_types()
print("init with", init)
# weight init
for p in model.parameters():
if p.dim() > 1:
if init == "xavier_uniform":
torch.nn.init.xavier_uniform_(p.data)
elif init == "xavier_normal":
torch.nn.init.xavier_normal_(p.data)
elif init == "kaiming_uniform":
torch.nn.init.kaiming_uniform_(p.data, nonlinearity="relu")
elif init == "kaiming_normal":
torch.nn.init.kaiming_normal_(p.data, nonlinearity="relu")
else:
raise ValueError("Unknown initialization: " + init)
# bias init
for name, p in model.named_parameters():
if ".bias" in name and p.dim() == 1:
p.data.zero_() | --- +++ @@ -1,10 +1,22 @@ #!/usr/bin/env python3
+"""Initialize modules for espnet2 neural networks."""
import torch
from typeguard import check_argument_types
def initialize(model: torch.nn.Module, init: str):
+ """Initialize weights of a neural network module.
+
+ Parameters are initialized using the given method or distribution.
+
+    Custom initialization routines can be implemented in submodules
+    as a function `espnet_initialization_fn` within the custom module.
+
+ Args:
+ model: Target.
+ init: Method of initialization.
+ """
assert check_argument_types()
print("init with", init)
@@ -24,4 +36,4 @@ # bias init
for name, p in model.named_parameters():
if ".bias" in name and p.dim() == 1:
-            p.data.zero_()
+            p.data.zero_()
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/AR/utils/initialize.py |
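
initialize() in the row above works in two passes: matrix-shaped parameters (p.dim() > 1) get the chosen init, then 1-D ".bias" parameters are zeroed. A minimal sketch of the same two-phase scheme on a single layer, assuming only PyTorch:

import torch

layer = torch.nn.Linear(8, 4)
torch.nn.init.xavier_uniform_(layer.weight)  # dim > 1: the weight matrix
layer.bias.data.zero_()                      # dim == 1: the bias vector
assert torch.all(layer.bias == 0)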
Write docstrings including parameters and return values | # Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
# LICENSE is in incl_licenses directory.
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
if "sinc" in dir(torch):
sinc = torch.sinc
else:
# This code is adopted from adefossez's julius.core.sinc under the MIT License
# https://adefossez.github.io/julius/julius/core.html
# LICENSE is in incl_licenses directory.
def sinc(x: torch.Tensor):
return torch.where(
x == 0,
torch.tensor(1.0, device=x.device, dtype=x.dtype),
torch.sin(math.pi * x) / math.pi / x,
)
# This code is adopted from adefossez's julius.lowpass.LowPassFilters under the MIT License
# https://adefossez.github.io/julius/julius/lowpass.html
# LICENSE is in incl_licenses directory.
def kaiser_sinc_filter1d(cutoff, half_width, kernel_size): # return filter [1,1,kernel_size]
even = kernel_size % 2 == 0
half_size = kernel_size // 2
# For kaiser window
delta_f = 4 * half_width
A = 2.285 * (half_size - 1) * math.pi * delta_f + 7.95
if A > 50.0:
beta = 0.1102 * (A - 8.7)
elif A >= 21.0:
beta = 0.5842 * (A - 21) ** 0.4 + 0.07886 * (A - 21.0)
else:
beta = 0.0
window = torch.kaiser_window(kernel_size, beta=beta, periodic=False)
# ratio = 0.5/cutoff -> 2 * cutoff = 1 / ratio
if even:
time = torch.arange(-half_size, half_size) + 0.5
else:
time = torch.arange(kernel_size) - half_size
if cutoff == 0:
filter_ = torch.zeros_like(time)
else:
filter_ = 2 * cutoff * window * sinc(2 * cutoff * time)
"""
Normalize filter to have sum = 1, otherwise we will have a small leakage of the constant component in the input signal.
"""
filter_ /= filter_.sum()
filter = filter_.view(1, 1, kernel_size)
return filter
class LowPassFilter1d(nn.Module):
def __init__(
self,
cutoff=0.5,
half_width=0.6,
stride: int = 1,
padding: bool = True,
padding_mode: str = "replicate",
kernel_size: int = 12,
):
super().__init__()
if cutoff < -0.0:
raise ValueError("Minimum cutoff must be larger than zero.")
if cutoff > 0.5:
raise ValueError("A cutoff above 0.5 does not make sense.")
self.kernel_size = kernel_size
self.even = kernel_size % 2 == 0
self.pad_left = kernel_size // 2 - int(self.even)
self.pad_right = kernel_size // 2
self.stride = stride
self.padding = padding
self.padding_mode = padding_mode
filter = kaiser_sinc_filter1d(cutoff, half_width, kernel_size)
self.register_buffer("filter", filter)
# Input [B, C, T]
def forward(self, x):
_, C, _ = x.shape
if self.padding:
x = F.pad(x, (self.pad_left, self.pad_right), mode=self.padding_mode)
out = F.conv1d(x, self.filter.expand(C, -1, -1), stride=self.stride, groups=C)
return out | --- +++ @@ -13,6 +13,10 @@ # https://adefossez.github.io/julius/julius/core.html
# LICENSE is in incl_licenses directory.
def sinc(x: torch.Tensor):
+ """
+ Implementation of sinc, i.e. sin(pi * x) / (pi * x)
+ __Warning__: Different to julius.sinc, the input is multiplied by `pi`!
+ """
return torch.where(
x == 0,
torch.tensor(1.0, device=x.device, dtype=x.dtype),
@@ -66,6 +70,9 @@ padding_mode: str = "replicate",
kernel_size: int = 12,
):
+ """
+    kernel_size should be an even number for the StyleGAN3 setup; in this implementation, an odd number is also possible.
+ """
super().__init__()
if cutoff < -0.0:
raise ValueError("Minimum cutoff must be larger than zero.")
@@ -89,4 +96,4 @@ x = F.pad(x, (self.pad_left, self.pad_right), mode=self.padding_mode)
out = F.conv1d(x, self.filter.expand(C, -1, -1), stride=self.stride, groups=C)
-        return out
+        return out
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/BigVGAN/alias_free_activation/torch/filter.py |
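
kaiser_sinc_filter1d in the row above derives the Kaiser window beta from the estimated attenuation A with the standard piecewise rule. The same rule extracted into a tiny helper (the name kaiser_beta is illustrative):

def kaiser_beta(A):
    # standard Kaiser-window beta selection from stopband attenuation A (in dB)
    if A > 50.0:
        return 0.1102 * (A - 8.7)
    elif A >= 21.0:
        return 0.5842 * (A - 21.0) ** 0.4 + 0.07886 * (A - 21.0)
    return 0.0

print(round(kaiser_beta(60.0), 3))  # 5.653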
Provide clean and structured docstrings | # Copyright (c) 2024 NVIDIA CORPORATION.
# Licensed under the MIT license.
# Adapted from https://github.com/jik876/hifi-gan under the MIT license.
# LICENSE is in incl_licenses directory.
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Conv2d
from torch.nn.utils import weight_norm, spectral_norm
from torchaudio.transforms import Spectrogram, Resample
from env import AttrDict
from utils import get_padding
import typing
from typing import List, Tuple
class DiscriminatorP(torch.nn.Module):
def __init__(
self,
h: AttrDict,
period: List[int],
kernel_size: int = 5,
stride: int = 3,
use_spectral_norm: bool = False,
):
super().__init__()
self.period = period
self.d_mult = h.discriminator_channel_mult
norm_f = weight_norm if not use_spectral_norm else spectral_norm
self.convs = nn.ModuleList(
[
norm_f(
Conv2d(
1,
int(32 * self.d_mult),
(kernel_size, 1),
(stride, 1),
padding=(get_padding(5, 1), 0),
)
),
norm_f(
Conv2d(
int(32 * self.d_mult),
int(128 * self.d_mult),
(kernel_size, 1),
(stride, 1),
padding=(get_padding(5, 1), 0),
)
),
norm_f(
Conv2d(
int(128 * self.d_mult),
int(512 * self.d_mult),
(kernel_size, 1),
(stride, 1),
padding=(get_padding(5, 1), 0),
)
),
norm_f(
Conv2d(
int(512 * self.d_mult),
int(1024 * self.d_mult),
(kernel_size, 1),
(stride, 1),
padding=(get_padding(5, 1), 0),
)
),
norm_f(
Conv2d(
int(1024 * self.d_mult),
int(1024 * self.d_mult),
(kernel_size, 1),
1,
padding=(2, 0),
)
),
]
)
self.conv_post = norm_f(Conv2d(int(1024 * self.d_mult), 1, (3, 1), 1, padding=(1, 0)))
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]:
fmap = []
# 1d to 2d
b, c, t = x.shape
if t % self.period != 0: # pad first
n_pad = self.period - (t % self.period)
x = F.pad(x, (0, n_pad), "reflect")
t = t + n_pad
x = x.view(b, c, t // self.period, self.period)
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, 0.1)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
class MultiPeriodDiscriminator(torch.nn.Module):
def __init__(self, h: AttrDict):
super().__init__()
self.mpd_reshapes = h.mpd_reshapes
print(f"mpd_reshapes: {self.mpd_reshapes}")
self.discriminators = nn.ModuleList(
[DiscriminatorP(h, rs, use_spectral_norm=h.use_spectral_norm) for rs in self.mpd_reshapes]
)
def forward(
self, y: torch.Tensor, y_hat: torch.Tensor
) -> Tuple[
List[torch.Tensor],
List[torch.Tensor],
List[List[torch.Tensor]],
List[List[torch.Tensor]],
]:
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
class DiscriminatorR(nn.Module):
def __init__(self, cfg: AttrDict, resolution: List[List[int]]):
super().__init__()
self.resolution = resolution
assert len(self.resolution) == 3, f"MRD layer requires list with len=3, got {self.resolution}"
self.lrelu_slope = 0.1
norm_f = weight_norm if cfg.use_spectral_norm == False else spectral_norm
if hasattr(cfg, "mrd_use_spectral_norm"):
print(f"[INFO] overriding MRD use_spectral_norm as {cfg.mrd_use_spectral_norm}")
norm_f = weight_norm if cfg.mrd_use_spectral_norm == False else spectral_norm
self.d_mult = cfg.discriminator_channel_mult
if hasattr(cfg, "mrd_channel_mult"):
print(f"[INFO] overriding mrd channel multiplier as {cfg.mrd_channel_mult}")
self.d_mult = cfg.mrd_channel_mult
self.convs = nn.ModuleList(
[
norm_f(nn.Conv2d(1, int(32 * self.d_mult), (3, 9), padding=(1, 4))),
norm_f(
nn.Conv2d(
int(32 * self.d_mult),
int(32 * self.d_mult),
(3, 9),
stride=(1, 2),
padding=(1, 4),
)
),
norm_f(
nn.Conv2d(
int(32 * self.d_mult),
int(32 * self.d_mult),
(3, 9),
stride=(1, 2),
padding=(1, 4),
)
),
norm_f(
nn.Conv2d(
int(32 * self.d_mult),
int(32 * self.d_mult),
(3, 9),
stride=(1, 2),
padding=(1, 4),
)
),
norm_f(
nn.Conv2d(
int(32 * self.d_mult),
int(32 * self.d_mult),
(3, 3),
padding=(1, 1),
)
),
]
)
self.conv_post = norm_f(nn.Conv2d(int(32 * self.d_mult), 1, (3, 3), padding=(1, 1)))
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]:
fmap = []
x = self.spectrogram(x)
x = x.unsqueeze(1)
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, self.lrelu_slope)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
def spectrogram(self, x: torch.Tensor) -> torch.Tensor:
n_fft, hop_length, win_length = self.resolution
x = F.pad(
x,
(int((n_fft - hop_length) / 2), int((n_fft - hop_length) / 2)),
mode="reflect",
)
x = x.squeeze(1)
x = torch.stft(
x,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
center=False,
return_complex=True,
)
x = torch.view_as_real(x) # [B, F, TT, 2]
mag = torch.norm(x, p=2, dim=-1) # [B, F, TT]
return mag
class MultiResolutionDiscriminator(nn.Module):
def __init__(self, cfg, debug=False):
super().__init__()
self.resolutions = cfg.resolutions
assert len(self.resolutions) == 3, (
f"MRD requires list of list with len=3, each element having a list with len=3. Got {self.resolutions}"
)
self.discriminators = nn.ModuleList([DiscriminatorR(cfg, resolution) for resolution in self.resolutions])
def forward(
self, y: torch.Tensor, y_hat: torch.Tensor
) -> Tuple[
List[torch.Tensor],
List[torch.Tensor],
List[List[torch.Tensor]],
List[List[torch.Tensor]],
]:
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
y_d_r, fmap_r = d(x=y)
y_d_g, fmap_g = d(x=y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
# Method based on descript-audio-codec: https://github.com/descriptinc/descript-audio-codec
# Modified code adapted from https://github.com/gemelo-ai/vocos under the MIT license.
# LICENSE is in incl_licenses directory.
class DiscriminatorB(nn.Module):
def __init__(
self,
window_length: int,
channels: int = 32,
hop_factor: float = 0.25,
bands: Tuple[Tuple[float, float], ...] = (
(0.0, 0.1),
(0.1, 0.25),
(0.25, 0.5),
(0.5, 0.75),
(0.75, 1.0),
),
):
super().__init__()
self.window_length = window_length
self.hop_factor = hop_factor
self.spec_fn = Spectrogram(
n_fft=window_length,
hop_length=int(window_length * hop_factor),
win_length=window_length,
power=None,
)
n_fft = window_length // 2 + 1
bands = [(int(b[0] * n_fft), int(b[1] * n_fft)) for b in bands]
self.bands = bands
convs = lambda: nn.ModuleList(
[
weight_norm(nn.Conv2d(2, channels, (3, 9), (1, 1), padding=(1, 4))),
weight_norm(nn.Conv2d(channels, channels, (3, 9), (1, 2), padding=(1, 4))),
weight_norm(nn.Conv2d(channels, channels, (3, 9), (1, 2), padding=(1, 4))),
weight_norm(nn.Conv2d(channels, channels, (3, 9), (1, 2), padding=(1, 4))),
weight_norm(nn.Conv2d(channels, channels, (3, 3), (1, 1), padding=(1, 1))),
]
)
self.band_convs = nn.ModuleList([convs() for _ in range(len(self.bands))])
self.conv_post = weight_norm(nn.Conv2d(channels, 1, (3, 3), (1, 1), padding=(1, 1)))
def spectrogram(self, x: torch.Tensor) -> List[torch.Tensor]:
# Remove DC offset
x = x - x.mean(dim=-1, keepdims=True)
# Peak normalize the volume of input audio
x = 0.8 * x / (x.abs().max(dim=-1, keepdim=True)[0] + 1e-9)
x = self.spec_fn(x)
x = torch.view_as_real(x)
x = x.permute(0, 3, 2, 1) # [B, F, T, C] -> [B, C, T, F]
# Split into bands
x_bands = [x[..., b[0] : b[1]] for b in self.bands]
return x_bands
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]:
x_bands = self.spectrogram(x.squeeze(1))
fmap = []
x = []
for band, stack in zip(x_bands, self.band_convs):
for i, layer in enumerate(stack):
band = layer(band)
band = torch.nn.functional.leaky_relu(band, 0.1)
if i > 0:
fmap.append(band)
x.append(band)
x = torch.cat(x, dim=-1)
x = self.conv_post(x)
fmap.append(x)
return x, fmap
# Method based on descript-audio-codec: https://github.com/descriptinc/descript-audio-codec
# Modified code adapted from https://github.com/gemelo-ai/vocos under the MIT license.
# LICENSE is in incl_licenses directory.
class MultiBandDiscriminator(nn.Module):
def __init__(
self,
h,
):
super().__init__()
# fft_sizes (list[int]): Tuple of window lengths for FFT. Defaults to [2048, 1024, 512] if not set in h.
self.fft_sizes = h.get("mbd_fft_sizes", [2048, 1024, 512])
self.discriminators = nn.ModuleList([DiscriminatorB(window_length=w) for w in self.fft_sizes])
def forward(
self, y: torch.Tensor, y_hat: torch.Tensor
) -> Tuple[
List[torch.Tensor],
List[torch.Tensor],
List[List[torch.Tensor]],
List[List[torch.Tensor]],
]:
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for d in self.discriminators:
y_d_r, fmap_r = d(x=y)
y_d_g, fmap_g = d(x=y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
# Adapted from https://github.com/open-mmlab/Amphion/blob/main/models/vocoders/gan/discriminator/mssbcqtd.py under the MIT license.
# LICENSE is in incl_licenses directory.
class DiscriminatorCQT(nn.Module):
def __init__(self, cfg: AttrDict, hop_length: int, n_octaves: int, bins_per_octave: int):
super().__init__()
self.cfg = cfg
self.filters = cfg["cqtd_filters"]
self.max_filters = cfg["cqtd_max_filters"]
self.filters_scale = cfg["cqtd_filters_scale"]
self.kernel_size = (3, 9)
self.dilations = cfg["cqtd_dilations"]
self.stride = (1, 2)
self.in_channels = cfg["cqtd_in_channels"]
self.out_channels = cfg["cqtd_out_channels"]
self.fs = cfg["sampling_rate"]
self.hop_length = hop_length
self.n_octaves = n_octaves
self.bins_per_octave = bins_per_octave
# Lazy-load
from nnAudio import features
self.cqt_transform = features.cqt.CQT2010v2(
sr=self.fs * 2,
hop_length=self.hop_length,
n_bins=self.bins_per_octave * self.n_octaves,
bins_per_octave=self.bins_per_octave,
output_format="Complex",
pad_mode="constant",
)
self.conv_pres = nn.ModuleList()
for _ in range(self.n_octaves):
self.conv_pres.append(
nn.Conv2d(
self.in_channels * 2,
self.in_channels * 2,
kernel_size=self.kernel_size,
padding=self.get_2d_padding(self.kernel_size),
)
)
self.convs = nn.ModuleList()
self.convs.append(
nn.Conv2d(
self.in_channels * 2,
self.filters,
kernel_size=self.kernel_size,
padding=self.get_2d_padding(self.kernel_size),
)
)
in_chs = min(self.filters_scale * self.filters, self.max_filters)
for i, dilation in enumerate(self.dilations):
out_chs = min((self.filters_scale ** (i + 1)) * self.filters, self.max_filters)
self.convs.append(
weight_norm(
nn.Conv2d(
in_chs,
out_chs,
kernel_size=self.kernel_size,
stride=self.stride,
dilation=(dilation, 1),
padding=self.get_2d_padding(self.kernel_size, (dilation, 1)),
)
)
)
in_chs = out_chs
out_chs = min(
(self.filters_scale ** (len(self.dilations) + 1)) * self.filters,
self.max_filters,
)
self.convs.append(
weight_norm(
nn.Conv2d(
in_chs,
out_chs,
kernel_size=(self.kernel_size[0], self.kernel_size[0]),
padding=self.get_2d_padding((self.kernel_size[0], self.kernel_size[0])),
)
)
)
self.conv_post = weight_norm(
nn.Conv2d(
out_chs,
self.out_channels,
kernel_size=(self.kernel_size[0], self.kernel_size[0]),
padding=self.get_2d_padding((self.kernel_size[0], self.kernel_size[0])),
)
)
self.activation = torch.nn.LeakyReLU(negative_slope=0.1)
self.resample = Resample(orig_freq=self.fs, new_freq=self.fs * 2)
self.cqtd_normalize_volume = self.cfg.get("cqtd_normalize_volume", False)
if self.cqtd_normalize_volume:
print(
"[INFO] cqtd_normalize_volume set to True. Will apply DC offset removal & peak volume normalization in CQTD!"
)
def get_2d_padding(
self,
kernel_size: typing.Tuple[int, int],
dilation: typing.Tuple[int, int] = (1, 1),
):
return (
((kernel_size[0] - 1) * dilation[0]) // 2,
((kernel_size[1] - 1) * dilation[1]) // 2,
)
def forward(self, x: torch.tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]:
fmap = []
if self.cqtd_normalize_volume:
# Remove DC offset
x = x - x.mean(dim=-1, keepdims=True)
# Peak normalize the volume of input audio
x = 0.8 * x / (x.abs().max(dim=-1, keepdim=True)[0] + 1e-9)
x = self.resample(x)
z = self.cqt_transform(x)
z_amplitude = z[:, :, :, 0].unsqueeze(1)
z_phase = z[:, :, :, 1].unsqueeze(1)
z = torch.cat([z_amplitude, z_phase], dim=1)
z = torch.permute(z, (0, 1, 3, 2)) # [B, C, W, T] -> [B, C, T, W]
latent_z = []
for i in range(self.n_octaves):
latent_z.append(
self.conv_pres[i](
z[
:,
:,
:,
i * self.bins_per_octave : (i + 1) * self.bins_per_octave,
]
)
)
latent_z = torch.cat(latent_z, dim=-1)
for i, l in enumerate(self.convs):
latent_z = l(latent_z)
latent_z = self.activation(latent_z)
fmap.append(latent_z)
latent_z = self.conv_post(latent_z)
return latent_z, fmap
class MultiScaleSubbandCQTDiscriminator(nn.Module):
def __init__(self, cfg: AttrDict):
super().__init__()
self.cfg = cfg
# Using get with defaults
self.cfg["cqtd_filters"] = self.cfg.get("cqtd_filters", 32)
self.cfg["cqtd_max_filters"] = self.cfg.get("cqtd_max_filters", 1024)
self.cfg["cqtd_filters_scale"] = self.cfg.get("cqtd_filters_scale", 1)
self.cfg["cqtd_dilations"] = self.cfg.get("cqtd_dilations", [1, 2, 4])
self.cfg["cqtd_in_channels"] = self.cfg.get("cqtd_in_channels", 1)
self.cfg["cqtd_out_channels"] = self.cfg.get("cqtd_out_channels", 1)
# Multi-scale params to loop over
self.cfg["cqtd_hop_lengths"] = self.cfg.get("cqtd_hop_lengths", [512, 256, 256])
self.cfg["cqtd_n_octaves"] = self.cfg.get("cqtd_n_octaves", [9, 9, 9])
self.cfg["cqtd_bins_per_octaves"] = self.cfg.get("cqtd_bins_per_octaves", [24, 36, 48])
self.discriminators = nn.ModuleList(
[
DiscriminatorCQT(
self.cfg,
hop_length=self.cfg["cqtd_hop_lengths"][i],
n_octaves=self.cfg["cqtd_n_octaves"][i],
bins_per_octave=self.cfg["cqtd_bins_per_octaves"][i],
)
for i in range(len(self.cfg["cqtd_hop_lengths"]))
]
)
def forward(
self, y: torch.Tensor, y_hat: torch.Tensor
) -> Tuple[
List[torch.Tensor],
List[torch.Tensor],
List[List[torch.Tensor]],
List[List[torch.Tensor]],
]:
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for disc in self.discriminators:
y_d_r, fmap_r = disc(y)
y_d_g, fmap_g = disc(y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
class CombinedDiscriminator(nn.Module):
def __init__(self, list_discriminator: List[nn.Module]):
super().__init__()
self.discrimiantor = nn.ModuleList(list_discriminator)
def forward(
self, y: torch.Tensor, y_hat: torch.Tensor
) -> Tuple[
List[torch.Tensor],
List[torch.Tensor],
List[List[torch.Tensor]],
List[List[torch.Tensor]],
]:
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for disc in self.discrimiantor:
y_d_r, y_d_g, fmap_r, fmap_g = disc(y, y_hat)
y_d_rs.extend(y_d_r)
fmap_rs.extend(fmap_r)
y_d_gs.extend(y_d_g)
fmap_gs.extend(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs | --- +++ @@ -348,6 +348,10 @@ self,
h,
):
+ """
+    Multi-band multi-scale STFT discriminator, with the architecture based on https://github.com/descriptinc/descript-audio-codec,
+ and the modified code adapted from https://github.com/gemelo-ai/vocos.
+ """
super().__init__()
# fft_sizes (list[int]): Tuple of window lengths for FFT. Defaults to [2048, 1024, 512] if not set in h.
self.fft_sizes = h.get("mbd_fft_sizes", [2048, 1024, 512])
@@ -589,6 +593,10 @@
class CombinedDiscriminator(nn.Module):
+ """
+    Wrapper for chaining multiple discriminator architectures.
+    Example: combine MBD and CQTD as a single class.
+ """
def __init__(self, list_discriminator: List[nn.Module]):
super().__init__()
@@ -614,4 +622,4 @@ y_d_gs.extend(y_d_g)
fmap_gs.extend(fmap_g)
-        return y_d_rs, y_d_gs, fmap_rs, fmap_gs
+        return y_d_rs, y_d_gs, fmap_rs, fmap_gs
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/BigVGAN/discriminators.py |
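
DiscriminatorP.forward in the row above folds a 1-D waveform into 2-D by padding T up to a multiple of the period and viewing the result as [B, C, T//period, period]. A minimal check of just that reshape (the real code skips padding when T is already a multiple of the period):

import torch
import torch.nn.functional as F

period = 5
x = torch.randn(1, 1, 13)
n_pad = period - (x.shape[-1] % period)   # 13 -> 15 needs 2 samples of padding
x = F.pad(x, (0, n_pad), "reflect")
x = x.view(1, 1, x.shape[-1] // period, period)
assert x.shape == (1, 1, 3, 5)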
Provide docstrings following PEP 257 | # modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/transformer.py
import copy
import numbers
from functools import partial
from typing import Any
from typing import Callable
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
import torch
from AR.modules.activation import MultiheadAttention
from AR.modules.scaling import BalancedDoubleSwish
from torch import nn
from torch import Tensor
from torch.nn import functional as F
_shape_t = Union[int, List[int], torch.Size]
class LayerNorm(nn.Module):
__constants__ = ["normalized_shape", "eps", "elementwise_affine"]
normalized_shape: Tuple[int, ...]
eps: float
elementwise_affine: bool
def __init__(
self,
normalized_shape: _shape_t,
eps: float = 1e-5,
elementwise_affine: bool = True,
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super(LayerNorm, self).__init__()
if isinstance(normalized_shape, numbers.Integral):
# mypy error: incompatible types in assignment
normalized_shape = (normalized_shape,) # type: ignore[assignment]
self.normalized_shape = tuple(normalized_shape) # type: ignore[arg-type]
self.eps = eps
self.elementwise_affine = elementwise_affine
if self.elementwise_affine:
self.weight = nn.Parameter(torch.empty(self.normalized_shape, **factory_kwargs))
self.bias = nn.Parameter(torch.empty(self.normalized_shape, **factory_kwargs))
else:
self.register_parameter("weight", None)
self.register_parameter("bias", None)
self.reset_parameters()
def reset_parameters(self) -> None:
if self.elementwise_affine:
nn.init.ones_(self.weight)
nn.init.zeros_(self.bias)
def forward(self, input: Tensor, embedding: Any = None) -> Tensor:
if isinstance(input, tuple):
input, embedding = input
return (
F.layer_norm(
input,
self.normalized_shape,
self.weight,
self.bias,
self.eps,
),
embedding,
)
assert embedding is None
return F.layer_norm(input, self.normalized_shape, self.weight, self.bias, self.eps)
def extra_repr(self) -> str:
return "{normalized_shape}, eps={eps}, elementwise_affine={elementwise_affine}".format(**self.__dict__)
class IdentityNorm(nn.Module):
def __init__(
self,
d_model: int,
eps: float = 1e-5,
device=None,
dtype=None,
) -> None:
super(IdentityNorm, self).__init__()
def forward(self, input: Tensor, embedding: Any = None) -> Tensor:
if isinstance(input, tuple):
return input
assert embedding is None
return input
class TransformerEncoder(nn.Module):
__constants__ = ["norm"]
def __init__(self, encoder_layer, num_layers, norm=None):
super(TransformerEncoder, self).__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(
self,
src: Tensor,
mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
return_layer_states: bool = False,
cache=None,
) -> Tensor:
if return_layer_states:
layer_states = [] # layers' output
output = src
for mod in self.layers:
output = mod(
output,
src_mask=mask,
src_key_padding_mask=src_key_padding_mask,
cache=cache,
)
layer_states.append(output[0])
if self.norm is not None:
output = self.norm(output)
return layer_states, output
output = src
for mod in self.layers:
output = mod(
output,
src_mask=mask,
src_key_padding_mask=src_key_padding_mask,
cache=cache,
)
if self.norm is not None:
output = self.norm(output)
return output
class TransformerEncoderLayer(nn.Module):
__constants__ = ["batch_first", "norm_first"]
def __init__(
self,
d_model: int,
nhead: int,
dim_feedforward: int = 2048,
dropout: float = 0.1,
activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
batch_first: bool = False,
norm_first: bool = False,
device=None,
dtype=None,
linear1_self_attention_cls: nn.Module = nn.Linear,
linear2_self_attention_cls: nn.Module = nn.Linear,
linear1_feedforward_cls: nn.Module = nn.Linear,
linear2_feedforward_cls: nn.Module = nn.Linear,
layer_norm_cls: nn.Module = LayerNorm,
layer_norm_eps: float = 1e-5,
adaptive_layer_norm=False,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super(TransformerEncoderLayer, self).__init__()
# print(233333333333,d_model,nhead)
# import os
# os._exit(2333333)
self.self_attn = MultiheadAttention(
d_model, # 512 16
nhead,
dropout=dropout,
batch_first=batch_first,
linear1_cls=linear1_self_attention_cls,
linear2_cls=linear2_self_attention_cls,
**factory_kwargs,
)
# Implementation of Feedforward model
self.linear1 = linear1_feedforward_cls(d_model, dim_feedforward, **factory_kwargs)
self.dropout = nn.Dropout(dropout)
self.linear2 = linear2_feedforward_cls(dim_feedforward, d_model, **factory_kwargs)
self.norm_first = norm_first
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
# Legacy string support for activation function.
if isinstance(activation, str):
activation = _get_activation_fn(activation)
elif isinstance(activation, partial):
activation = activation(d_model)
elif activation == BalancedDoubleSwish:
activation = BalancedDoubleSwish(d_model)
# # We can't test self.activation in forward() in TorchScript,
# # so stash some information about it instead.
# if activation is F.relu or isinstance(activation, torch.nn.ReLU):
# self.activation_relu_or_gelu = 1
# elif activation is F.gelu or isinstance(activation, torch.nn.GELU):
# self.activation_relu_or_gelu = 2
# else:
# self.activation_relu_or_gelu = 0
self.activation = activation
norm1 = layer_norm_cls(d_model, eps=layer_norm_eps, **factory_kwargs)
if layer_norm_cls == IdentityNorm:
norm2 = BalancedBasicNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
else:
norm2 = layer_norm_cls(d_model, eps=layer_norm_eps, **factory_kwargs)
if adaptive_layer_norm:
self.norm1 = AdaptiveLayerNorm(d_model, norm1)
self.norm2 = AdaptiveLayerNorm(d_model, norm2)
else:
self.norm1 = norm1
self.norm2 = norm2
def __setstate__(self, state):
super(TransformerEncoderLayer, self).__setstate__(state)
if not hasattr(self, "activation"):
self.activation = F.relu
def forward(
self,
src: Tensor,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
cache=None,
) -> Tensor:
x, stage_embedding = src, None
is_src_tuple = False
if isinstance(src, tuple):
x, stage_embedding = src
is_src_tuple = True
if src_key_padding_mask is not None:
_skpm_dtype = src_key_padding_mask.dtype
if _skpm_dtype != torch.bool and not torch.is_floating_point(src_key_padding_mask):
raise AssertionError("only bool and floating types of key_padding_mask are supported")
if self.norm_first:
x = x + self._sa_block(
self.norm1(x, stage_embedding),
src_mask,
src_key_padding_mask,
cache=cache,
)
x = x + self._ff_block(self.norm2(x, stage_embedding))
else:
x = self.norm1(
x + self._sa_block(x, src_mask, src_key_padding_mask, cache=cache),
stage_embedding,
)
x = self.norm2(x + self._ff_block(x), stage_embedding)
if is_src_tuple:
return (x, stage_embedding)
return x
# self-attention block
def _sa_block(
self,
x: Tensor,
attn_mask: Optional[Tensor],
key_padding_mask: Optional[Tensor],
cache=None,
) -> Tensor:
# print(x.shape,attn_mask.shape,key_padding_mask)
# torch.Size([1, 188, 512]) torch.Size([188, 188]) None
# import os
# os._exit(23333)
x = self.self_attn(
x,
x,
x,
attn_mask=attn_mask,
key_padding_mask=key_padding_mask,
need_weights=False,
cache=cache,
)[0]
return self.dropout1(x)
# feed forward block
def _ff_block(self, x: Tensor) -> Tensor:
x = self.linear2(self.dropout(self.activation(self.linear1(x))))
return self.dropout2(x)
class AdaptiveLayerNorm(nn.Module):
def __init__(self, d_model, norm) -> None:
super(AdaptiveLayerNorm, self).__init__()
self.project_layer = nn.Linear(d_model, 2 * d_model)
self.norm = norm
self.d_model = d_model
self.eps = self.norm.eps
def forward(self, input: Tensor, embedding: Tensor = None) -> Tensor:
if isinstance(input, tuple):
input, embedding = input
weight, bias = torch.split(
self.project_layer(embedding),
split_size_or_sections=self.d_model,
dim=-1,
)
return (weight * self.norm(input) + bias, embedding)
weight, bias = torch.split(
self.project_layer(embedding),
split_size_or_sections=self.d_model,
dim=-1,
)
return weight * self.norm(input) + bias
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) | --- +++ @@ -95,6 +95,23 @@
class TransformerEncoder(nn.Module):
+ r"""TransformerEncoder is a stack of N encoder layers. Users can build the
+ BERT(https://arxiv.org/abs/1810.04805) model with corresponding parameters.
+
+ Args:
+ encoder_layer: an instance of the TransformerEncoderLayer() class (required).
+ num_layers: the number of sub-encoder-layers in the encoder (required).
+ norm: the layer normalization component (optional).
+ enable_nested_tensor: if True, input will automatically convert to nested tensor
+ (and convert back on output). This will improve the overall performance of
+ TransformerEncoder when padding rate is high. Default: ``True`` (enabled).
+
+ Examples::
+ >>> encoder_layer = TransformerEncoderLayer(d_model=512, nhead=8)
+ >>> transformer_encoder = TransformerEncoder(encoder_layer, num_layers=6)
+ >>> src = torch.rand(10, 32, 512)
+ >>> out = transformer_encoder(src)
+ """
__constants__ = ["norm"]
@@ -112,6 +129,17 @@ return_layer_states: bool = False,
cache=None,
) -> Tensor:
+ r"""Pass the input through the encoder layers in turn.
+
+ Args:
+ src: the sequence to the encoder (required).
+ mask: the mask for the src sequence (optional).
+ src_key_padding_mask: the mask for the src keys per batch (optional).
+ return_layer_states: return layers' state (optional).
+
+ Shape:
+ see the docs in Transformer class.
+ """
if return_layer_states:
layer_states = [] # layers' output
output = src
@@ -233,6 +261,16 @@ src_key_padding_mask: Optional[Tensor] = None,
cache=None,
) -> Tensor:
+ r"""Pass the input through the encoder layer.
+
+ Args:
+ src: the sequence to the encoder layer (required).
+ src_mask: the mask for the src sequence (optional).
+ src_key_padding_mask: the mask for the src keys per batch (optional).
+
+ Shape:
+ see the docs in Transformer class.
+ """
x, stage_embedding = src, None
is_src_tuple = False
if isinstance(src, tuple):
@@ -293,6 +331,7 @@
class AdaptiveLayerNorm(nn.Module):
+ r"""Adaptive Layer Normalization"""
def __init__(self, d_model, norm) -> None:
super(AdaptiveLayerNorm, self).__init__()
@@ -320,4 +359,4 @@
def _get_clones(module, N):
-    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
+    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/AR/modules/transformer.py |
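
AdaptiveLayerNorm in the row above conditions a normalization on an embedding: one Linear maps the embedding to a concatenated (weight, bias) pair that is split along the last dimension. A minimal sketch of that modulation, assuming only PyTorch:

import torch

d_model = 4
project = torch.nn.Linear(d_model, 2 * d_model)   # embedding -> (weight, bias)
norm = torch.nn.LayerNorm(d_model)

x = torch.randn(2, 3, d_model)
emb = torch.randn(2, 3, d_model)
weight, bias = torch.split(project(emb), d_model, dim=-1)
out = weight * norm(x) + bias
assert out.shape == x.shape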
Write docstrings for this repository | # Copyright 2022 Xiaomi Corp. (authors: Daniel Povey)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from typing import Optional
from typing import Tuple
import torch
import torch.nn as nn
from torch import Tensor
class DoubleSwishFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, x: Tensor) -> Tensor:
requires_grad = x.requires_grad
x_dtype = x.dtype
if x.dtype == torch.float16:
x = x.to(torch.float32)
s = torch.sigmoid(x - 1.0)
y = x * s
if requires_grad:
deriv = y * (1 - s) + s
# notes on derivative of x * sigmoid(x - 1):
# https://www.wolframalpha.com/input?i=d%2Fdx+%28x+*+sigmoid%28x-1%29%29
            # min \simeq -0.043638. Take floor as -0.043637 so it's a lower bound.
# max \simeq 1.1990. Take ceil to be 1.2 so it's an upper bound.
# the combination of "+ torch.rand_like(deriv)" and casting to torch.uint8 (which
            # floors) should be expectation-preserving.
floor = -0.043637
ceil = 1.2
d_scaled = (deriv - floor) * (255.0 / (ceil - floor)) + torch.rand_like(deriv)
if __name__ == "__main__":
# for self-testing only.
assert d_scaled.min() >= 0.0
assert d_scaled.max() < 256.0
d_int = d_scaled.to(torch.uint8)
ctx.save_for_backward(d_int)
if x.dtype == torch.float16 or torch.is_autocast_enabled():
y = y.to(torch.float16)
return y
@staticmethod
def backward(ctx, y_grad: Tensor) -> Tensor:
(d,) = ctx.saved_tensors
# the same constants as used in forward pass.
floor = -0.043637
ceil = 1.2
d = d * ((ceil - floor) / 255.0) + floor
return y_grad * d
class DoubleSwish(torch.nn.Module):
def forward(self, x: Tensor) -> Tensor:
if torch.jit.is_scripting() or torch.jit.is_tracing():
return x * torch.sigmoid(x - 1.0)
return DoubleSwishFunction.apply(x)
class ActivationBalancerFunction(torch.autograd.Function):
@staticmethod
def forward(
ctx,
x: Tensor,
scale_factor: Tensor,
sign_factor: Optional[Tensor],
channel_dim: int,
) -> Tensor:
if channel_dim < 0:
channel_dim += x.ndim
ctx.channel_dim = channel_dim
xgt0 = x > 0
if sign_factor is None:
ctx.save_for_backward(xgt0, scale_factor)
else:
ctx.save_for_backward(xgt0, scale_factor, sign_factor)
return x
@staticmethod
def backward(ctx, x_grad: Tensor) -> Tuple[Tensor, None, None, None]:
if len(ctx.saved_tensors) == 3:
xgt0, scale_factor, sign_factor = ctx.saved_tensors
for _ in range(ctx.channel_dim, x_grad.ndim - 1):
scale_factor = scale_factor.unsqueeze(-1)
sign_factor = sign_factor.unsqueeze(-1)
factor = sign_factor + scale_factor * (xgt0.to(x_grad.dtype) - 0.5)
else:
xgt0, scale_factor = ctx.saved_tensors
for _ in range(ctx.channel_dim, x_grad.ndim - 1):
scale_factor = scale_factor.unsqueeze(-1)
factor = scale_factor * (xgt0.to(x_grad.dtype) - 0.5)
neg_delta_grad = x_grad.abs() * factor
return (
x_grad - neg_delta_grad,
None,
None,
None,
)
def _compute_scale_factor(
x: Tensor,
channel_dim: int,
min_abs: float,
max_abs: float,
gain_factor: float,
max_factor: float,
) -> Tensor:
if channel_dim < 0:
channel_dim += x.ndim
sum_dims = [d for d in range(x.ndim) if d != channel_dim]
x_abs_mean = torch.mean(x.abs(), dim=sum_dims).to(torch.float32)
if min_abs == 0.0:
below_threshold = 0.0
else:
        # below_threshold is 0 if x_abs_mean > min_abs; it can be at most max_factor
        # if x_abs_mean < min_abs.
below_threshold = ((min_abs - x_abs_mean) * (gain_factor / min_abs)).clamp(min=0, max=max_factor)
above_threshold = ((x_abs_mean - max_abs) * (gain_factor / max_abs)).clamp(min=0, max=max_factor)
return below_threshold - above_threshold
def _compute_sign_factor(
x: Tensor,
channel_dim: int,
min_positive: float,
max_positive: float,
gain_factor: float,
max_factor: float,
) -> Tensor:
if channel_dim < 0:
channel_dim += x.ndim
sum_dims = [d for d in range(x.ndim) if d != channel_dim]
proportion_positive = torch.mean((x > 0).to(torch.float32), dim=sum_dims)
if min_positive == 0.0:
factor1 = 0.0
else:
# 0 if proportion_positive >= min_positive, else can be
# as large as max_factor.
factor1 = ((min_positive - proportion_positive) * (gain_factor / min_positive)).clamp_(min=0, max=max_factor)
if max_positive == 1.0:
factor2 = 0.0
else:
# 0 if self.proportion_positive <= max_positive, else can be
# as large as -max_factor.
factor2 = ((proportion_positive - max_positive) * (gain_factor / (1.0 - max_positive))).clamp_(
min=0, max=max_factor
)
sign_factor = factor1 - factor2
# require min_positive != 0 or max_positive != 1:
assert not isinstance(sign_factor, float)
return sign_factor
class ActivationBalancer(torch.nn.Module):
def __init__(
self,
num_channels: int,
channel_dim: int,
min_positive: float = 0.05,
max_positive: float = 0.95,
max_factor: float = 0.04,
sign_gain_factor: float = 0.01,
scale_gain_factor: float = 0.02,
min_abs: float = 0.2,
max_abs: float = 100.0,
min_prob: float = 0.1,
):
super(ActivationBalancer, self).__init__()
self.num_channels = num_channels
self.channel_dim = channel_dim
self.min_positive = min_positive
self.max_positive = max_positive
self.max_factor = max_factor
self.min_abs = min_abs
self.max_abs = max_abs
self.min_prob = min_prob
self.sign_gain_factor = sign_gain_factor
self.scale_gain_factor = scale_gain_factor
# count measures how many times the forward() function has been called.
# We occasionally sync this to a tensor called `count`, that exists to
# make sure it is synced to disk when we load and save the model.
self.cpu_count = 0
self.register_buffer("count", torch.tensor(0, dtype=torch.int64))
def forward(self, x: Tensor) -> Tensor:
if torch.jit.is_scripting() or not x.requires_grad or torch.jit.is_tracing():
return _no_op(x)
count = self.cpu_count
self.cpu_count += 1
if random.random() < 0.01:
# Occasionally sync self.cpu_count with self.count.
# count affects the decay of 'prob'. don't do this on every iter,
# because syncing with the GPU is slow.
self.cpu_count = max(self.cpu_count, self.count.item())
self.count.fill_(self.cpu_count)
# the prob of doing some work exponentially decreases from 0.5 till it hits
# a floor at min_prob (==0.1, by default)
prob = max(self.min_prob, 0.5 ** (1 + (count / 4000.0)))
if random.random() < prob:
sign_gain_factor = 0.5
if self.min_positive != 0.0 or self.max_positive != 1.0:
sign_factor = _compute_sign_factor(
x,
self.channel_dim,
self.min_positive,
self.max_positive,
gain_factor=self.sign_gain_factor / prob,
max_factor=self.max_factor,
)
else:
sign_factor = None
scale_factor = _compute_scale_factor(
x.detach(),
self.channel_dim,
min_abs=self.min_abs,
max_abs=self.max_abs,
gain_factor=self.scale_gain_factor / prob,
max_factor=self.max_factor,
)
return ActivationBalancerFunction.apply(
x,
scale_factor,
sign_factor,
self.channel_dim,
)
else:
return _no_op(x)
def BalancedDoubleSwish(d_model, channel_dim=-1, max_abs=10.0, min_prob=0.25) -> nn.Sequential:
balancer = ActivationBalancer(d_model, channel_dim=channel_dim, max_abs=max_abs, min_prob=min_prob)
return nn.Sequential(
balancer,
DoubleSwish(),
) | --- +++ @@ -23,6 +23,20 @@
class DoubleSwishFunction(torch.autograd.Function):
+ """
+ double_swish(x) = x * torch.sigmoid(x-1)
+ This is a definition, originally motivated by its close numerical
+ similarity to swish(swish(x)), where swish(x) = x * sigmoid(x).
+
+ Memory-efficient derivative computation:
+ double_swish(x) = x * s, where s(x) = torch.sigmoid(x-1)
+ double_swish'(x) = d/dx double_swish(x) = x * s'(x) + x' * s(x) = x * s'(x) + s(x).
+ Now, s'(x) = s(x) * (1-s(x)).
+ double_swish'(x) = x * s'(x) + s(x).
+ = x * s(x) * (1-s(x)) + s(x).
+ = double_swish(x) * (1-s(x)) + s(x)
+ ... so we just need to remember s(x) but not x itself.
+ """
@staticmethod
def forward(ctx, x: Tensor) -> Tensor:
@@ -67,6 +81,9 @@
class DoubleSwish(torch.nn.Module):
def forward(self, x: Tensor) -> Tensor:
+ """Return double-swish activation function which is an approximation to Swish(Swish(x)),
+ that we approximate closely with x * sigmoid(x-1).
+ """
if torch.jit.is_scripting() or torch.jit.is_tracing():
return x * torch.sigmoid(x - 1.0)
return DoubleSwishFunction.apply(x)
@@ -172,6 +189,44 @@
class ActivationBalancer(torch.nn.Module):
+ """
+ Modifies the backpropped derivatives of a function to try to encourage, for
+ each channel, that it is positive at least a proportion `threshold` of the
+ time. It does this by multiplying negative derivative values by up to
+ (1+max_factor), and positive derivative values by up to (1-max_factor),
+ interpolated from 1 at the threshold to those extremal values when none
+ of the inputs are positive.
+
+ Args:
+ num_channels: the number of channels
+ channel_dim: the dimension/axis corresponding to the channel, e.g.
+ -1, 0, 1, 2; will be interpreted as an offset from x.ndim if negative.
+ min_positive: the minimum, per channel, of the proportion of the time
+ that (x > 0), below which we start to modify the derivatives.
+ max_positive: the maximum, per channel, of the proportion of the time
+ that (x > 0), above which we start to modify the derivatives.
+ max_factor: the maximum factor by which we modify the derivatives for
+ either the sign constraint or the magnitude constraint;
+           e.g. with max_factor=0.02, the derivatives would be multiplied by
+ values in the range [0.98..1.02].
+ sign_gain_factor: determines the 'gain' with which we increase the
+ change in gradient once the constraints on min_positive and max_positive
+ are violated.
+ scale_gain_factor: determines the 'gain' with which we increase the
+ change in gradient once the constraints on min_abs and max_abs
+ are violated.
+ min_abs: the minimum average-absolute-value difference from the mean
+ value per channel, which we allow, before we start to modify
+ the derivatives to prevent this.
+ max_abs: the maximum average-absolute-value difference from the mean
+ value per channel, which we allow, before we start to modify
+ the derivatives to prevent this.
+ min_prob: determines the minimum probability with which we modify the
+ gradients for the {min,max}_positive and {min,max}_abs constraints,
+ on each forward(). This is done randomly to prevent all layers
+ from doing it at the same time. Early in training we may use
+ higher probabilities than this; it will decay to this value.
+ """
def __init__(
self,
@@ -255,8 +310,11 @@
def BalancedDoubleSwish(d_model, channel_dim=-1, max_abs=10.0, min_prob=0.25) -> nn.Sequential:
+ """
+ ActivationBalancer -> DoubleSwish
+ """
balancer = ActivationBalancer(d_model, channel_dim=channel_dim, max_abs=max_abs, min_prob=min_prob)
return nn.Sequential(
balancer,
DoubleSwish(),
-    )
+    )
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/AR/modules/scaling.py |
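
The DoubleSwishFunction docstring added in the row above derives double_swish'(x) = y * (1 - s) + s with s = sigmoid(x - 1) and y = x * s. A quick numerical check of that identity against autograd:

import torch

x = torch.linspace(-4.0, 4.0, 9, requires_grad=True)
y = x * torch.sigmoid(x - 1.0)
y.sum().backward()

with torch.no_grad():
    s = torch.sigmoid(x - 1.0)
    deriv = (x * s) * (1 - s) + s   # y * (1 - s) + s
assert torch.allclose(x.grad, deriv, atol=1e-6)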
Create documentation strings for testing functions | # Copyright (c) 2024 NVIDIA CORPORATION.
# Licensed under the MIT license.
# Adapted from https://github.com/jik876/hifi-gan under the MIT license.
# LICENSE is in incl_licenses directory.
import os
import json
from pathlib import Path
from typing import Optional, Union, Dict
import torch
import torch.nn as nn
from torch.nn import Conv1d, ConvTranspose1d
from torch.nn.utils import weight_norm, remove_weight_norm
from . import activations
from .utils0 import init_weights, get_padding
from .alias_free_activation.torch.act import Activation1d as TorchActivation1d
from .env import AttrDict
from huggingface_hub import PyTorchModelHubMixin, hf_hub_download
def load_hparams_from_json(path) -> AttrDict:
with open(path) as f:
data = f.read()
return AttrDict(json.loads(data))
class AMPBlock1(torch.nn.Module):
def __init__(
self,
h: AttrDict,
channels: int,
kernel_size: int = 3,
dilation: tuple = (1, 3, 5),
activation: str = None,
):
super().__init__()
self.h = h
self.convs1 = nn.ModuleList(
[
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
stride=1,
dilation=d,
padding=get_padding(kernel_size, d),
)
)
for d in dilation
]
)
self.convs1.apply(init_weights)
self.convs2 = nn.ModuleList(
[
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
stride=1,
dilation=1,
padding=get_padding(kernel_size, 1),
)
)
for _ in range(len(dilation))
]
)
self.convs2.apply(init_weights)
self.num_layers = len(self.convs1) + len(self.convs2) # Total number of conv layers
# Select which Activation1d, lazy-load cuda version to ensure backward compatibility
if self.h.get("use_cuda_kernel", False):
from .alias_free_activation.cuda.activation1d import (
Activation1d as CudaActivation1d,
)
Activation1d = CudaActivation1d
else:
Activation1d = TorchActivation1d
# Activation functions
if activation == "snake":
self.activations = nn.ModuleList(
[
Activation1d(activation=activations.Snake(channels, alpha_logscale=h.snake_logscale))
for _ in range(self.num_layers)
]
)
elif activation == "snakebeta":
self.activations = nn.ModuleList(
[
Activation1d(activation=activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale))
for _ in range(self.num_layers)
]
)
else:
raise NotImplementedError(
"activation incorrectly specified. check the config file and look for 'activation'."
)
def forward(self, x):
acts1, acts2 = self.activations[::2], self.activations[1::2]
for c1, c2, a1, a2 in zip(self.convs1, self.convs2, acts1, acts2):
xt = a1(x)
xt = c1(xt)
xt = a2(xt)
xt = c2(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs1:
remove_weight_norm(l)
for l in self.convs2:
remove_weight_norm(l)
class AMPBlock2(torch.nn.Module):
def __init__(
self,
h: AttrDict,
channels: int,
kernel_size: int = 3,
dilation: tuple = (1, 3, 5),
activation: str = None,
):
super().__init__()
self.h = h
self.convs = nn.ModuleList(
[
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
stride=1,
dilation=d,
padding=get_padding(kernel_size, d),
)
)
for d in dilation
]
)
self.convs.apply(init_weights)
self.num_layers = len(self.convs) # Total number of conv layers
# Select which Activation1d, lazy-load cuda version to ensure backward compatibility
if self.h.get("use_cuda_kernel", False):
from .alias_free_activation.cuda.activation1d import (
Activation1d as CudaActivation1d,
)
Activation1d = CudaActivation1d
else:
Activation1d = TorchActivation1d
# Activation functions
if activation == "snake":
self.activations = nn.ModuleList(
[
Activation1d(activation=activations.Snake(channels, alpha_logscale=h.snake_logscale))
for _ in range(self.num_layers)
]
)
elif activation == "snakebeta":
self.activations = nn.ModuleList(
[
Activation1d(activation=activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale))
for _ in range(self.num_layers)
]
)
else:
raise NotImplementedError(
"activation incorrectly specified. check the config file and look for 'activation'."
)
def forward(self, x):
for c, a in zip(self.convs, self.activations):
xt = a(x)
xt = c(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs:
remove_weight_norm(l)
class BigVGAN(
torch.nn.Module,
PyTorchModelHubMixin,
# library_name="bigvgan",
# repo_url="https://github.com/NVIDIA/BigVGAN",
# docs_url="https://github.com/NVIDIA/BigVGAN/blob/main/README.md",
# pipeline_tag="audio-to-audio",
# license="mit",
# tags=["neural-vocoder", "audio-generation", "arxiv:2206.04658"],
):
def __init__(self, h: AttrDict, use_cuda_kernel: bool = False):
super().__init__()
self.h = h
self.h["use_cuda_kernel"] = use_cuda_kernel
# Select which Activation1d, lazy-load cuda version to ensure backward compatibility
if self.h.get("use_cuda_kernel", False):
from .alias_free_activation.cuda.activation1d import (
Activation1d as CudaActivation1d,
)
Activation1d = CudaActivation1d
else:
Activation1d = TorchActivation1d
self.num_kernels = len(h.resblock_kernel_sizes)
self.num_upsamples = len(h.upsample_rates)
# Pre-conv
self.conv_pre = weight_norm(Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3))
# Define which AMPBlock to use. BigVGAN uses AMPBlock1 as default
if h.resblock == "1":
resblock_class = AMPBlock1
elif h.resblock == "2":
resblock_class = AMPBlock2
else:
raise ValueError(f"Incorrect resblock class specified in hyperparameters. Got {h.resblock}")
# Transposed conv-based upsamplers. does not apply anti-aliasing
self.ups = nn.ModuleList()
for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
self.ups.append(
nn.ModuleList(
[
weight_norm(
ConvTranspose1d(
h.upsample_initial_channel // (2**i),
h.upsample_initial_channel // (2 ** (i + 1)),
k,
u,
padding=(k - u) // 2,
)
)
]
)
)
# Residual blocks using anti-aliased multi-periodicity composition modules (AMP)
self.resblocks = nn.ModuleList()
for i in range(len(self.ups)):
ch = h.upsample_initial_channel // (2 ** (i + 1))
for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):
self.resblocks.append(resblock_class(h, ch, k, d, activation=h.activation))
# Post-conv
activation_post = (
activations.Snake(ch, alpha_logscale=h.snake_logscale)
if h.activation == "snake"
else (activations.SnakeBeta(ch, alpha_logscale=h.snake_logscale) if h.activation == "snakebeta" else None)
)
if activation_post is None:
raise NotImplementedError(
"activation incorrectly specified. check the config file and look for 'activation'."
)
self.activation_post = Activation1d(activation=activation_post)
# Whether to use bias for the final conv_post. Default to True for backward compatibility
self.use_bias_at_final = h.get("use_bias_at_final", True)
self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3, bias=self.use_bias_at_final))
# Weight initialization
for i in range(len(self.ups)):
self.ups[i].apply(init_weights)
self.conv_post.apply(init_weights)
# Final tanh activation. Defaults to True for backward compatibility
self.use_tanh_at_final = h.get("use_tanh_at_final", True)
def forward(self, x):
# Pre-conv
x = self.conv_pre(x)
for i in range(self.num_upsamples):
# Upsampling
for i_up in range(len(self.ups[i])):
x = self.ups[i][i_up](x)
# AMP blocks
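            # Each stage feeds the same x through num_kernels parallel resblocks and
            # averages their outputs (multi-receptive-field fusion, as in HiFi-GAN).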
xs = None
for j in range(self.num_kernels):
if xs is None:
xs = self.resblocks[i * self.num_kernels + j](x)
else:
xs += self.resblocks[i * self.num_kernels + j](x)
x = xs / self.num_kernels
# Post-conv
x = self.activation_post(x)
x = self.conv_post(x)
# Final tanh activation
if self.use_tanh_at_final:
x = torch.tanh(x)
else:
x = torch.clamp(x, min=-1.0, max=1.0) # Bound the output to [-1, 1]
return x
def remove_weight_norm(self):
try:
# print("Removing weight norm...")
for l in self.ups:
for l_i in l:
remove_weight_norm(l_i)
for l in self.resblocks:
l.remove_weight_norm()
remove_weight_norm(self.conv_pre)
remove_weight_norm(self.conv_post)
except ValueError:
print("[INFO] Model already removed weight norm. Skipping!")
pass
# Additional methods for huggingface_hub support
def _save_pretrained(self, save_directory: Path) -> None:
model_path = save_directory / "bigvgan_generator.pt"
torch.save({"generator": self.state_dict()}, model_path)
config_path = save_directory / "config.json"
with open(config_path, "w") as config_file:
json.dump(self.h, config_file, indent=4)
@classmethod
def _from_pretrained(
cls,
*,
model_id: str,
revision: str,
cache_dir: str,
force_download: bool,
proxies: Optional[Dict],
resume_download: bool,
local_files_only: bool,
token: Union[str, bool, None],
map_location: str = "cpu", # Additional argument
strict: bool = False, # Additional argument
use_cuda_kernel: bool = False,
**model_kwargs,
):
# Download and load hyperparameters (h) used by BigVGAN
if os.path.isdir(model_id):
# print("Loading config.json from local directory")
config_file = os.path.join(model_id, "config.json")
else:
config_file = hf_hub_download(
repo_id=model_id,
filename="config.json",
revision=revision,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
token=token,
local_files_only=local_files_only,
)
h = load_hparams_from_json(config_file)
# instantiate BigVGAN using h
if use_cuda_kernel:
print(
"[WARNING] You have specified use_cuda_kernel=True during BigVGAN.from_pretrained(). Only inference is supported (training is not implemented)!"
)
print(
"[WARNING] You need nvcc and ninja installed in your system that matches your PyTorch build is using to build the kernel. If not, the model will fail to initialize or generate incorrect waveform!"
)
print(
"[WARNING] For detail, see the official GitHub repository: https://github.com/NVIDIA/BigVGAN?tab=readme-ov-file#using-custom-cuda-kernel-for-synthesis"
)
model = cls(h, use_cuda_kernel=use_cuda_kernel)
# Download and load pretrained generator weight
if os.path.isdir(model_id):
# print("Loading weights from local directory")
model_file = os.path.join(model_id, "bigvgan_generator.pt")
else:
# print(f"Loading weights from {model_id}")
model_file = hf_hub_download(
repo_id=model_id,
filename="bigvgan_generator.pt",
revision=revision,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
token=token,
local_files_only=local_files_only,
)
checkpoint_dict = torch.load(model_file, map_location=map_location)
try:
model.load_state_dict(checkpoint_dict["generator"])
except RuntimeError:
print(
"[INFO] the pretrained checkpoint does not contain weight norm. Loading the checkpoint after removing weight norm!"
)
model.remove_weight_norm()
model.load_state_dict(checkpoint_dict["generator"])
return model | --- +++ @@ -29,6 +29,17 @@
class AMPBlock1(torch.nn.Module):
+ """
+ AMPBlock applies Snake / SnakeBeta activation functions with trainable parameters that control periodicity, defined for each layer.
+    AMPBlock1 includes an additional set of Conv1d layers, self.convs2, each with a fixed dilation=1, one following each layer in self.convs1
+
+ Args:
+ h (AttrDict): Hyperparameters.
+ channels (int): Number of convolution channels.
+ kernel_size (int): Size of the convolution kernel. Default is 3.
+ dilation (tuple): Dilation rates for the convolutions. Each dilation layer has two convolutions. Default is (1, 3, 5).
+ activation (str): Activation function type. Should be either 'snake' or 'snakebeta'. Default is None.
+ """
def __init__(
self,
@@ -127,6 +138,17 @@
class AMPBlock2(torch.nn.Module):
+ """
+ AMPBlock applies Snake / SnakeBeta activation functions with trainable parameters that control periodicity, defined for each layer.
+ Unlike AMPBlock1, AMPBlock2 does not contain extra Conv1d layers with fixed dilation=1
+
+ Args:
+ h (AttrDict): Hyperparameters.
+ channels (int): Number of convolution channels.
+ kernel_size (int): Size of the convolution kernel. Default is 3.
+        dilation (tuple): Dilation rates for the convolutions. Each dilation rate has one convolution. Default is (1, 3, 5).
+ activation (str): Activation function type. Should be either 'snake' or 'snakebeta'. Default is None.
+ """
def __init__(
self,
@@ -211,6 +233,18 @@ # license="mit",
# tags=["neural-vocoder", "audio-generation", "arxiv:2206.04658"],
):
+ """
+ BigVGAN is a neural vocoder model that applies anti-aliased periodic activation for residual blocks (resblocks).
+ New in BigVGAN-v2: it can optionally use optimized CUDA kernels for AMP (anti-aliased multi-periodicity) blocks.
+
+ Args:
+ h (AttrDict): Hyperparameters.
+ use_cuda_kernel (bool): If set to True, loads optimized CUDA kernels for AMP. This should be used for inference only, as training is not supported with CUDA kernels.
+
+ Note:
+ - The `use_cuda_kernel` parameter should be used for inference only, as training with CUDA kernels is not supported.
+ - Ensure that the activation function is correctly specified in the hyperparameters (h.activation).
+ """
def __init__(self, h: AttrDict, use_cuda_kernel: bool = False):
super().__init__()
@@ -336,6 +370,7 @@
# Additional methods for huggingface_hub support
def _save_pretrained(self, save_directory: Path) -> None:
+ """Save weights and config.json from a Pytorch model to a local directory."""
model_path = save_directory / "bigvgan_generator.pt"
torch.save({"generator": self.state_dict()}, model_path)
@@ -361,6 +396,7 @@ use_cuda_kernel: bool = False,
**model_kwargs,
):
+ """Load Pytorch pretrained weights and return the loaded model."""
# Download and load hyperparameters (h) used by BigVGAN
if os.path.isdir(model_id):
@@ -422,4 +458,4 @@ model.remove_weight_norm()
model.load_state_dict(checkpoint_dict["generator"])
-        return model
+        return model
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/BigVGAN/bigvgan.py |
Add inline docstrings for readability | # modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/activation.py
from typing import Optional, Tuple
import torch
from torch import Tensor
from torch.nn import Linear, Module
from torch.nn import functional as F
from torch.nn.init import constant_, xavier_normal_, xavier_uniform_
from torch.nn.modules.linear import NonDynamicallyQuantizableLinear
from torch.nn.parameter import Parameter
from AR.modules.patched_mha_with_cache import multi_head_attention_forward_patched
F.multi_head_attention_forward = multi_head_attention_forward_patched
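# Note: the assignment above monkey-patches torch.nn.functional process-wide, so any
# later caller of F.multi_head_attention_forward gets the patched version (which
# accepts the extra `cache` keyword used by forward() below).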
class MultiheadAttention(Module):
__constants__ = ["batch_first"]
bias_k: Optional[torch.Tensor]
bias_v: Optional[torch.Tensor]
def __init__(
self,
embed_dim,
num_heads,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
kdim=None,
vdim=None,
batch_first=False,
linear1_cls=Linear,
linear2_cls=Linear,
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.batch_first = batch_first
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
if add_bias_kv:
self.bias_k = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
self.bias_v = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
else:
self.bias_k = self.bias_v = None
if linear1_cls == Linear:
if not self._qkv_same_embed_dim:
self.q_proj_weight = Parameter(
torch.empty((embed_dim, embed_dim), **factory_kwargs),
)
self.k_proj_weight = Parameter(
torch.empty((embed_dim, self.kdim), **factory_kwargs),
)
self.v_proj_weight = Parameter(
torch.empty((embed_dim, self.vdim), **factory_kwargs),
)
self.register_parameter("in_proj_weight", None)
else:
self.in_proj_weight = Parameter(
torch.empty((3 * embed_dim, embed_dim), **factory_kwargs),
)
self.register_parameter("q_proj_weight", None)
self.register_parameter("k_proj_weight", None)
self.register_parameter("v_proj_weight", None)
if bias:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim, **factory_kwargs))
else:
self.register_parameter("in_proj_bias", None)
self.out_proj = NonDynamicallyQuantizableLinear(
embed_dim,
embed_dim,
bias=bias,
**factory_kwargs,
)
self._reset_parameters()
else:
if not self._qkv_same_embed_dim:
raise NotImplementedError
else:
self.in_proj_linear = linear1_cls(
embed_dim,
3 * embed_dim,
bias=bias,
**factory_kwargs,
)
self.in_proj_weight = self.in_proj_linear.weight
self.register_parameter("q_proj_weight", None)
self.register_parameter("k_proj_weight", None)
self.register_parameter("v_proj_weight", None)
if bias:
self.in_proj_bias = self.in_proj_linear.bias
else:
self.register_parameter("in_proj_bias", None)
self.out_proj = linear2_cls(
embed_dim,
embed_dim,
bias=bias,
**factory_kwargs,
)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
self.add_zero_attn = add_zero_attn
def _reset_parameters(self):
if self._qkv_same_embed_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.0)
constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
def __setstate__(self, state):
# Support loading old MultiheadAttention checkpoints generated by v1.1.0
if "_qkv_same_embed_dim" not in state:
state["_qkv_same_embed_dim"] = True
super(MultiheadAttention, self).__setstate__(state)
def forward(
self,
query: Tensor,
key: Tensor,
value: Tensor,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
average_attn_weights: bool = True,
cache=None,
) -> Tuple[Tensor, Optional[Tensor]]:
is_batched = query.dim() == 3
if key_padding_mask is not None:
_kpm_dtype = key_padding_mask.dtype
if _kpm_dtype != torch.bool and not torch.is_floating_point(
key_padding_mask,
):
raise AssertionError("only bool and floating types of key_padding_mask are supported")
why_not_fast_path = ""
if not is_batched:
why_not_fast_path = f"input not batched; expected query.dim() of 3 but got {query.dim()}"
elif query is not key or key is not value:
# When lifting this restriction, don't forget to either
# enforce that the dtypes all match or test cases where
# they don't!
why_not_fast_path = "non-self attention was used (query, key, and value are not the same Tensor)"
elif self.in_proj_bias is not None and query.dtype != self.in_proj_bias.dtype:
why_not_fast_path = (
f"dtypes of query ({query.dtype}) and self.in_proj_bias ({self.in_proj_bias.dtype}) don't match"
)
elif self.in_proj_weight is not None and query.dtype != self.in_proj_weight.dtype:
# this case will fail anyway, but at least they'll get a useful error message.
why_not_fast_path = (
f"dtypes of query ({query.dtype}) and self.in_proj_weight ({self.in_proj_weight.dtype}) don't match"
)
elif self.training:
why_not_fast_path = "training is enabled"
elif not self.batch_first:
why_not_fast_path = "batch_first was not True"
elif self.bias_k is not None:
why_not_fast_path = "self.bias_k was not None"
elif self.bias_v is not None:
why_not_fast_path = "self.bias_v was not None"
elif self.dropout:
why_not_fast_path = f"dropout was {self.dropout}, required zero"
elif self.add_zero_attn:
why_not_fast_path = "add_zero_attn was enabled"
elif not self._qkv_same_embed_dim:
why_not_fast_path = "_qkv_same_embed_dim was not True"
elif attn_mask is not None:
why_not_fast_path = "attn_mask was not None"
elif query.is_nested and key_padding_mask is not None:
why_not_fast_path = "key_padding_mask is not supported with NestedTensor input"
elif self.num_heads % 2 == 1:
why_not_fast_path = "num_heads is odd"
elif torch.is_autocast_enabled():
why_not_fast_path = "autocast is enabled"
if not why_not_fast_path:
tensor_args = (
query,
key,
value,
self.in_proj_weight,
self.in_proj_bias,
self.out_proj.weight,
self.out_proj.bias,
)
# We have to use list comprehensions below because TorchScript does not support
# generator expressions.
if torch.overrides.has_torch_function(tensor_args):
why_not_fast_path = "some Tensor argument has_torch_function"
elif not all([(x is None or x.is_cuda or "cpu" in str(x.device)) for x in tensor_args]):
why_not_fast_path = "some Tensor argument is neither CUDA nor CPU"
elif torch.is_grad_enabled() and any([x is not None and x.requires_grad for x in tensor_args]):
why_not_fast_path = "grad is enabled and at least one of query or the input/output projection weights or biases requires_grad"
if not why_not_fast_path:
return torch._native_multi_head_attention(
query,
key,
value,
self.embed_dim,
self.num_heads,
self.in_proj_weight,
self.in_proj_bias,
self.out_proj.weight,
self.out_proj.bias,
key_padding_mask if key_padding_mask is not None else attn_mask,
need_weights,
average_attn_weights,
1 if key_padding_mask is not None else 0 if attn_mask is not None else None,
)
any_nested = query.is_nested or key.is_nested or value.is_nested
assert not any_nested, (
"MultiheadAttention does not support NestedTensor outside of its fast path. "
+ f"The fast path was not hit because {why_not_fast_path}"
)
if self.batch_first and is_batched:
# make sure that the transpose op does not affect the "is" property
if key is value:
if query is key:
query = key = value = query.transpose(1, 0)
else:
query, key = [x.transpose(1, 0) for x in (query, key)]
value = key
else:
query, key, value = [x.transpose(1, 0) for x in (query, key, value)]
if not self._qkv_same_embed_dim:
attn_output, attn_output_weights = F.multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.num_heads,
self.in_proj_weight,
self.in_proj_bias,
self.bias_k,
self.bias_v,
self.add_zero_attn,
self.dropout,
self.out_proj.weight,
self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask,
need_weights=need_weights,
attn_mask=attn_mask,
use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight,
k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight,
average_attn_weights=average_attn_weights,
cache=cache,
)
else:
attn_output, attn_output_weights = F.multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.num_heads,
self.in_proj_weight,
self.in_proj_bias,
self.bias_k,
self.bias_v,
self.add_zero_attn,
self.dropout,
self.out_proj.weight,
self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask,
need_weights=need_weights,
attn_mask=attn_mask,
average_attn_weights=average_attn_weights,
cache=cache,
)
if self.batch_first and is_batched:
return attn_output.transpose(1, 0), attn_output_weights
else:
return attn_output, attn_output_weights | --- +++ @@ -15,6 +15,61 @@
class MultiheadAttention(Module):
+ r"""Allows the model to jointly attend to information
+ from different representation subspaces as described in the paper:
+ `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_.
+
+ Multi-Head Attention is defined as:
+
+ .. math::
+ \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
+
+ where :math:`head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`.
+
+ ``forward()`` will use a special optimized implementation if all of the following
+ conditions are met:
+
+ - self attention is being computed (i.e., ``query``, ``key``, and ``value`` are the same tensor. This
+ restriction will be loosened in the future.)
+ - Either autograd is disabled (using ``torch.inference_mode`` or ``torch.no_grad``) or no tensor argument ``requires_grad``
+ - training is disabled (using ``.eval()``)
+ - dropout is 0
+ - ``add_bias_kv`` is ``False``
+ - ``add_zero_attn`` is ``False``
+ - ``batch_first`` is ``True`` and the input is batched
+ - ``kdim`` and ``vdim`` are equal to ``embed_dim``
+ - at most one of ``key_padding_mask`` or ``attn_mask`` is passed
+ - if a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ is passed, neither ``key_padding_mask``
+ nor ``attn_mask`` is passed
+
+ If the optimized implementation is in use, a
+ `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ can be passed for
+ ``query``/``key``/``value`` to represent padding more efficiently than using a
+ padding mask. In this case, a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_
+ will be returned, and an additional speedup proportional to the fraction of the input
+ that is padding can be expected.
+
+ Args:
+ embed_dim: Total dimension of the model.
+ num_heads: Number of parallel attention heads. Note that ``embed_dim`` will be split
+ across ``num_heads`` (i.e. each head will have dimension ``embed_dim // num_heads``).
+ dropout: Dropout probability on ``attn_output_weights``. Default: ``0.0`` (no dropout).
+ bias: If specified, adds bias to input / output projection layers. Default: ``True``.
+ add_bias_kv: If specified, adds bias to the key and value sequences at dim=0. Default: ``False``.
+ add_zero_attn: If specified, adds a new batch of zeros to the key and value sequences at dim=1.
+ Default: ``False``.
+ kdim: Total number of features for keys. Default: ``None`` (uses ``kdim=embed_dim``).
+ vdim: Total number of features for values. Default: ``None`` (uses ``vdim=embed_dim``).
+ batch_first: If ``True``, then the input and output tensors are provided
+ as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
+
+ Examples::
+
+ >>> # xdoctest: +SKIP
+ >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
+ >>> attn_output, attn_output_weights = multihead_attn(query, key, value)
+
+ """
__constants__ = ["batch_first"]
bias_k: Optional[torch.Tensor]
@@ -157,6 +212,54 @@ average_attn_weights: bool = True,
cache=None,
) -> Tuple[Tensor, Optional[Tensor]]:
+ r"""
+ Args:
+ query: Query embeddings of shape :math:`(L, E_q)` for unbatched input, :math:`(L, N, E_q)` when ``batch_first=False``
+ or :math:`(N, L, E_q)` when ``batch_first=True``, where :math:`L` is the target sequence length,
+ :math:`N` is the batch size, and :math:`E_q` is the query embedding dimension ``embed_dim``.
+ Queries are compared against key-value pairs to produce the output.
+ See "Attention Is All You Need" for more details.
+ key: Key embeddings of shape :math:`(S, E_k)` for unbatched input, :math:`(S, N, E_k)` when ``batch_first=False``
+ or :math:`(N, S, E_k)` when ``batch_first=True``, where :math:`S` is the source sequence length,
+ :math:`N` is the batch size, and :math:`E_k` is the key embedding dimension ``kdim``.
+ See "Attention Is All You Need" for more details.
+ value: Value embeddings of shape :math:`(S, E_v)` for unbatched input, :math:`(S, N, E_v)` when
+ ``batch_first=False`` or :math:`(N, S, E_v)` when ``batch_first=True``, where :math:`S` is the source
+ sequence length, :math:`N` is the batch size, and :math:`E_v` is the value embedding dimension ``vdim``.
+ See "Attention Is All You Need" for more details.
+ key_padding_mask: If specified, a mask of shape :math:`(N, S)` indicating which elements within ``key``
+ to ignore for the purpose of attention (i.e. treat as "padding"). For unbatched `query`, shape should be :math:`(S)`.
+ Binary and byte masks are supported.
+ For a binary mask, a ``True`` value indicates that the corresponding ``key`` value will be ignored for
+ the purpose of attention. For a float mask, it will be directly added to the corresponding ``key`` value.
+ need_weights: If specified, returns ``attn_output_weights`` in addition to ``attn_outputs``.
+ Default: ``True``.
+ attn_mask: If specified, a 2D or 3D mask preventing attention to certain positions. Must be of shape
+ :math:`(L, S)` or :math:`(N\cdot\text{num\_heads}, L, S)`, where :math:`N` is the batch size,
+ :math:`L` is the target sequence length, and :math:`S` is the source sequence length. A 2D mask will be
+ broadcasted across the batch while a 3D mask allows for a different mask for each entry in the batch.
+ Binary, byte, and float masks are supported. For a binary mask, a ``True`` value indicates that the
+ corresponding position is not allowed to attend. For a byte mask, a non-zero value indicates that the
+ corresponding position is not allowed to attend. For a float mask, the mask values will be added to
+ the attention weight.
+ average_attn_weights: If true, indicates that the returned ``attn_weights`` should be averaged across
+ heads. Otherwise, ``attn_weights`` are provided separately per head. Note that this flag only has an
+ effect when ``need_weights=True``. Default: ``True`` (i.e. average weights across heads)
+
+ Outputs:
+ - **attn_output** - Attention outputs of shape :math:`(L, E)` when input is unbatched,
+ :math:`(L, N, E)` when ``batch_first=False`` or :math:`(N, L, E)` when ``batch_first=True``,
+ where :math:`L` is the target sequence length, :math:`N` is the batch size, and :math:`E` is the
+ embedding dimension ``embed_dim``.
+ - **attn_output_weights** - Only returned when ``need_weights=True``. If ``average_attn_weights=True``,
+ returns attention weights averaged across heads of shape :math:`(L, S)` when input is unbatched or
+ :math:`(N, L, S)`, where :math:`N` is the batch size, :math:`L` is the target sequence length, and
+ :math:`S` is the source sequence length. If ``average_attn_weights=False``, returns attention weights per
+ head of shape :math:`(\text{num\_heads}, L, S)` when input is unbatched or :math:`(N, \text{num\_heads}, L, S)`.
+
+ .. note::
+ `batch_first` argument is ignored for unbatched inputs.
+ """
is_batched = query.dim() == 3
if key_padding_mask is not None:
_kpm_dtype = key_padding_mask.dtype
@@ -307,4 +410,4 @@ if self.batch_first and is_batched:
return attn_output.transpose(1, 0), attn_output_weights
else:
-            return attn_output, attn_output_weights
+            return attn_output, attn_output_weights
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/AR/modules/activation.py |
Document this module using docstrings | # Copyright 2022 Xiaomi Corp. (authors: Daniel Povey)
#
# See ../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import logging
from collections import defaultdict
from typing import List, Tuple
import torch
from torch import Tensor
from torch.optim import Optimizer
class BatchedOptimizer(Optimizer):
def __init__(self, params, defaults):
super(BatchedOptimizer, self).__init__(params, defaults)
@contextlib.contextmanager
def batched_params(self, param_group, group_params_names):
batches = defaultdict(list) # `batches` maps from tuple (dtype_as_str,*shape) to list of nn.Parameter
        batches_names = defaultdict(list)  # `batches_names` maps from tuple (dtype_as_str,*shape) to list of str
assert len(param_group) == len(group_params_names)
for p, named_p in zip(param_group, group_params_names):
key = (str(p.dtype), *p.shape)
batches[key].append(p)
batches_names[key].append(named_p)
batches_names_keys = list(batches_names.keys())
sorted_idx = sorted(range(len(batches_names)), key=lambda i: batches_names_keys[i])
batches_names = [batches_names[batches_names_keys[idx]] for idx in sorted_idx]
batches = [batches[batches_names_keys[idx]] for idx in sorted_idx]
stacked_params_dict = dict()
# turn batches into a list, in deterministic order.
# tuples will contain tuples of (stacked_param, state, stacked_params_names),
# one for each batch in `batches`.
tuples = []
for batch, batch_names in zip(batches, batches_names):
p = batch[0]
# we arbitrarily store the state in the
# state corresponding to the 1st parameter in the
# group. class Optimizer will take care of saving/loading state.
state = self.state[p]
p_stacked = torch.stack(batch)
grad = torch.stack([torch.zeros_like(p) if p.grad is None else p.grad for p in batch])
p_stacked.grad = grad
            stacked_params_dict[(str(p.dtype), *p.shape)] = p_stacked  # keyed as in `batches` above
tuples.append((p_stacked, state, batch_names))
yield tuples # <-- calling code will do the actual optimization here!
for (stacked_params, _state, _names), batch in zip(tuples, batches):
for i, p in enumerate(batch): # batch is list of Parameter
p.copy_(stacked_params[i])
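# A minimal sketch of the calling pattern (the demo function, toy parameters, and
# their names are illustrative, not part of the original file); ScaledAdam, defined
# below, serves as the concrete BatchedOptimizer.
def _demo_batched_params():
    a = torch.nn.Parameter(torch.zeros(3))
    b = torch.nn.Parameter(torch.zeros(3))
    opt = ScaledAdam([a, b], lr=0.03, parameters_names=[["a", "b"]])
    with torch.no_grad():  # step() also runs under no_grad; in-place writes require it
        with opt.batched_params(opt.param_groups[0]["params"], ["a", "b"]) as batches:
            for p_stacked, _state, _names in batches:
                p_stacked.add_(1.0)  # a and b share dtype/shape -> one (2, 3) batch
    assert a[0].item() == 1.0  # the edit was copied back to the real parameter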
class ScaledAdam(BatchedOptimizer):
def __init__(
self,
params,
lr=3e-02,
clipping_scale=None,
betas=(0.9, 0.98),
scalar_lr_scale=0.1,
eps=1.0e-08,
param_min_rms=1.0e-05,
param_max_rms=3.0,
scalar_max=10.0,
size_update_period=4,
clipping_update_period=100,
parameters_names=None,
show_dominant_parameters=True,
):
assert parameters_names is not None, (
"Please prepare parameters_names,which is a List[List[str]]. Each List[str] is for a groupand each str is for a parameter"
)
defaults = dict(
lr=lr,
clipping_scale=clipping_scale,
betas=betas,
scalar_lr_scale=scalar_lr_scale,
eps=eps,
param_min_rms=param_min_rms,
param_max_rms=param_max_rms,
scalar_max=scalar_max,
size_update_period=size_update_period,
clipping_update_period=clipping_update_period,
)
super(ScaledAdam, self).__init__(params, defaults)
assert len(self.param_groups) == len(parameters_names)
self.parameters_names = parameters_names
self.show_dominant_parameters = show_dominant_parameters
def __setstate__(self, state):
super(ScaledAdam, self).__setstate__(state)
@torch.no_grad()
def step(self, closure=None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
batch = True
for group, group_params_names in zip(self.param_groups, self.parameters_names):
with self.batched_params(group["params"], group_params_names) as batches:
# batches is list of pairs (stacked_param, state). stacked_param is like
# a regular parameter, and will have a .grad, but the 1st dim corresponds to
# a stacking dim, it is not a real dim.
if len(batches[0][1]) == 0: # if len(first state) == 0: not yet initialized
clipping_scale = 1
else:
clipping_scale = self._get_clipping_scale(group, batches)
for p, state, _ in batches:
# Perform optimization step.
# grad is not going to be None, we handled that when creating the batches.
grad = p.grad
if grad.is_sparse:
raise RuntimeError("ScaledAdam optimizer does not support sparse gradients")
# State initialization
if len(state) == 0:
self._init_state(group, p, state)
self._step_one_batch(group, p, state, clipping_scale)
return loss
def _init_state(self, group: dict, p: Tensor, state: dict):
size_update_period = group["size_update_period"]
state["step"] = 0
kwargs = {"device": p.device, "dtype": p.dtype}
# 'delta' implements conventional momentum. There are
# several different kinds of update going on, so rather than
# compute "exp_avg" like in Adam, we store and decay a
# parameter-change "delta", which combines all forms of
# update. this is equivalent to how it's done in Adam,
# except for the first few steps.
state["delta"] = torch.zeros_like(p, memory_format=torch.preserve_format)
batch_size = p.shape[0]
        numel = p.numel() // batch_size  # elements per real parameter (excluding the batch dim)
if numel > 1:
# "param_rms" just periodically records the scalar root-mean-square value of
# the parameter tensor.
# it has a shape like (batch_size, 1, 1, 1, 1)
param_rms = (p**2).mean(dim=list(range(1, p.ndim)), keepdim=True).sqrt()
state["param_rms"] = param_rms
state["scale_exp_avg_sq"] = torch.zeros_like(param_rms)
state["scale_grads"] = torch.zeros(size_update_period, *param_rms.shape, **kwargs)
# exp_avg_sq is the weighted sum of scaled gradients. as in Adam.
state["exp_avg_sq"] = torch.zeros_like(p, memory_format=torch.preserve_format)
def _get_clipping_scale(self, group: dict, tuples: List[Tuple[Tensor, dict, List[str]]]) -> float:
assert len(tuples) >= 1
clipping_scale = group["clipping_scale"]
(first_p, first_state, _) = tuples[0]
step = first_state["step"]
if clipping_scale is None or step == 0:
# no clipping. return early on step == 0 because the other
# parameters' state won't have been initialized yet.
return 1.0
clipping_update_period = group["clipping_update_period"]
tot_sumsq = torch.tensor(0.0, device=first_p.device)
for p, state, param_names in tuples:
grad = p.grad
if grad.is_sparse:
raise RuntimeError("ScaledAdam optimizer does not support sparse gradients")
if p.numel() == p.shape[0]: # a batch of scalars
tot_sumsq += (grad**2).sum() # sum() to change shape [1] to []
else:
tot_sumsq += ((grad * state["param_rms"]) ** 2).sum()
tot_norm = tot_sumsq.sqrt()
if "model_norms" not in first_state:
first_state["model_norms"] = torch.zeros(clipping_update_period, device=p.device)
first_state["model_norms"][step % clipping_update_period] = tot_norm
if step % clipping_update_period == 0:
# Print some stats.
# We don't reach here if step == 0 because we would have returned
# above.
sorted_norms = first_state["model_norms"].sort()[0].to("cpu")
quartiles = []
for n in range(0, 5):
index = min(
clipping_update_period - 1,
(clipping_update_period // 4) * n,
)
quartiles.append(sorted_norms[index].item())
median = quartiles[2]
threshold = clipping_scale * median
first_state["model_norm_threshold"] = threshold
percent_clipped = (
first_state["num_clipped"] * 100.0 / clipping_update_period if "num_clipped" in first_state else 0.0
)
first_state["num_clipped"] = 0
quartiles = " ".join(["%.3e" % x for x in quartiles])
logging.info(
f"Clipping_scale={clipping_scale}, grad-norm quartiles {quartiles}, threshold={threshold:.3e}, percent-clipped={percent_clipped:.1f}"
)
if step < clipping_update_period:
return 1.0 # We have not yet estimated a norm to clip to.
else:
try:
model_norm_threshold = first_state["model_norm_threshold"]
except KeyError:
logging.info(
"Warning: model_norm_threshold not in state: possibly you changed config when restarting, adding clipping_scale option?"
)
return 1.0
ans = min(1.0, (model_norm_threshold / (tot_norm + 1.0e-20)).item())
if ans < 1.0:
first_state["num_clipped"] += 1
if ans < 0.1:
logging.warning(f"Scaling gradients by {ans}, model_norm_threshold={model_norm_threshold}")
if self.show_dominant_parameters:
assert p.shape[0] == len(param_names)
self._show_gradient_dominating_parameter(tuples, tot_sumsq)
return ans
def _show_gradient_dominating_parameter(self, tuples: List[Tuple[Tensor, dict, List[str]]], tot_sumsq: Tensor):
all_sumsq_orig = {}
for p, state, batch_param_names in tuples:
# p is a stacked batch parameters.
batch_grad = p.grad
if p.numel() == p.shape[0]: # a batch of scalars
batch_sumsq_orig = batch_grad**2
                # Dummy values used by the following `zip` statement.
batch_rms_orig = torch.ones(p.shape[0])
else:
batch_rms_orig = state["param_rms"]
batch_sumsq_orig = ((batch_grad * batch_rms_orig) ** 2).sum(dim=list(range(1, batch_grad.ndim)))
for name, sumsq_orig, rms, grad in zip(
batch_param_names,
batch_sumsq_orig,
batch_rms_orig,
batch_grad,
):
proportion_orig = sumsq_orig / tot_sumsq
all_sumsq_orig[name] = (proportion_orig, sumsq_orig, rms, grad)
assert torch.isclose(
sum([value[0] for value in all_sumsq_orig.values()]).cpu(),
torch.tensor(1.0),
)
sorted_by_proportion = {
k: v
for k, v in sorted(
all_sumsq_orig.items(),
key=lambda item: item[1][0],
reverse=True,
)
}
dominant_param_name = next(iter(sorted_by_proportion))
(
dominant_proportion,
dominant_sumsq,
dominant_rms,
dominant_grad,
) = sorted_by_proportion[dominant_param_name]
logging.info(
f"Parameter Dominating tot_sumsq {dominant_param_name}"
f" with proportion {dominant_proportion:.2f},"
f" where dominant_sumsq=(grad_sumsq*orig_rms_sq)"
f"={dominant_sumsq:.3e},"
f" grad_sumsq = {(dominant_grad**2).sum():.3e},"
f" orig_rms_sq={(dominant_rms**2).item():.3e}"
)
def _step_one_batch(self, group: dict, p: Tensor, state: dict, clipping_scale: float):
lr = group["lr"]
size_update_period = group["size_update_period"]
beta1 = group["betas"][0]
grad = p.grad
if clipping_scale != 1.0:
grad = grad * clipping_scale
step = state["step"]
delta = state["delta"]
delta.mul_(beta1)
batch_size = p.shape[0]
numel = p.numel() // batch_size
if numel > 1:
# Update the size/scale of p, and set param_rms
scale_grads = state["scale_grads"]
scale_grads[step % size_update_period] = (p * grad).sum(dim=list(range(1, p.ndim)), keepdim=True)
if step % size_update_period == size_update_period - 1:
param_rms = state["param_rms"] # shape: (batch_size, 1, 1, ..)
param_rms.copy_((p**2).mean(dim=list(range(1, p.ndim)), keepdim=True).sqrt())
if step > 0:
# self._size_update() learns the overall scale on the
# parameter, by shrinking or expanding it.
self._size_update(group, scale_grads, p, state)
if numel == 1:
# For parameters with 1 element we just use regular Adam.
# Updates delta.
self._step_scalar(group, p, state)
else:
self._step(group, p, state)
state["step"] = step + 1
def _size_update(
self,
group: dict,
scale_grads: Tensor,
p: Tensor,
state: dict,
) -> None:
param_rms = state["param_rms"]
beta1, beta2 = group["betas"]
size_lr = group["lr"] * group["scalar_lr_scale"]
param_min_rms = group["param_min_rms"]
param_max_rms = group["param_max_rms"]
eps = group["eps"]
step = state["step"]
batch_size = p.shape[0]
size_update_period = scale_grads.shape[0]
# correct beta2 for the size update period: we will have
# faster decay at this level.
beta2_corr = beta2**size_update_period
scale_exp_avg_sq = state["scale_exp_avg_sq"] # shape: (batch_size, 1, 1, ..)
scale_exp_avg_sq.mul_(beta2_corr).add_(
(scale_grads**2).mean(dim=0), # mean over dim `size_update_period`
alpha=1 - beta2_corr,
) # shape is (batch_size, 1, 1, ...)
# The 1st time we reach here is when size_step == 1.
size_step = (step + 1) // size_update_period
bias_correction2 = 1 - beta2_corr**size_step
# we don't bother with bias_correction1; this will help prevent divergence
# at the start of training.
denom = scale_exp_avg_sq.sqrt() + eps
scale_step = -size_lr * (bias_correction2**0.5) * scale_grads.sum(dim=0) / denom
is_too_small = param_rms < param_min_rms
is_too_large = param_rms > param_max_rms
# when the param gets too small, just don't shrink it any further.
scale_step.masked_fill_(is_too_small, 0.0)
# when it gets too large, stop it from getting any larger.
scale_step.masked_fill_(is_too_large, -size_lr * size_update_period)
delta = state["delta"]
# the factor of (1-beta1) relates to momentum.
delta.add_(p * scale_step, alpha=(1 - beta1))
def _step(self, group: dict, p: Tensor, state: dict):
grad = p.grad
lr = group["lr"]
beta1, beta2 = group["betas"]
eps = group["eps"]
param_min_rms = group["param_min_rms"]
step = state["step"]
exp_avg_sq = state["exp_avg_sq"]
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
this_step = state["step"] - (state["zero_step"] if "zero_step" in state else 0)
bias_correction2 = 1 - beta2 ** (this_step + 1)
if bias_correction2 < 0.99:
# note: not in-place.
exp_avg_sq = exp_avg_sq * (1.0 / bias_correction2)
denom = exp_avg_sq.sqrt()
denom += eps
grad = grad / denom
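        # The "scaled" part of ScaledAdam: the step size is proportional to the
        # parameter's own rms (clamped from below by param_min_rms), so larger
        # tensors take correspondingly larger steps.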
alpha = -lr * (1 - beta1) * state["param_rms"].clamp(min=param_min_rms)
delta = state["delta"]
delta.add_(grad * alpha)
p.add_(delta)
def _step_scalar(self, group: dict, p: Tensor, state: dict):
beta1, beta2 = group["betas"]
scalar_max = group["scalar_max"]
eps = group["eps"]
lr = group["lr"] * group["scalar_lr_scale"]
grad = p.grad
exp_avg_sq = state["exp_avg_sq"] # shape: (batch_size,)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
# bias_correction2 is like in Adam. Don't bother with bias_correction1;
# slower update at the start will help stability anyway.
bias_correction2 = 1 - beta2 ** (state["step"] + 1)
denom = (exp_avg_sq / bias_correction2).sqrt() + eps
delta = state["delta"]
delta.add_(grad / denom, alpha=-lr * (1 - beta1))
p.clamp_(min=-scalar_max, max=scalar_max)
p.add_(delta) | --- +++ @@ -24,12 +24,52 @@
class BatchedOptimizer(Optimizer):
+ """
+ This class adds to class Optimizer the capability to optimize parameters in batches:
+ it will stack the parameters and their grads for you so the optimizer can work
+ on tensors with an extra leading dimension. This is intended for speed with GPUs,
+ as it reduces the number of kernels launched in the optimizer.
+
+ Args:
+ params:
+ """
def __init__(self, params, defaults):
super(BatchedOptimizer, self).__init__(params, defaults)
@contextlib.contextmanager
def batched_params(self, param_group, group_params_names):
+ """
+ This function returns (technically, yields) a list of
+ of tuples (p, state), where
+ p is a `fake` parameter that is stacked (over axis 0) from real parameters
+ that share the same shape, and its gradient is also stacked;
+ `state` is the state corresponding to this batch of parameters
+ (it will be physically located in the "state" for one of the real
+ parameters, the last one that has any particular shape and dtype).
+
+ This function is decorated as a context manager so that it can
+ write parameters back to their "real" locations.
+
+ The idea is, instead of doing:
+ <code>
+ for p in group["params"]:
+ state = self.state[p]
+ ...
+ </code>
+ you can do:
+ <code>
+ with self.batched_params(group["params"]) as batches:
+ for p, state, p_names in batches:
+ ...
+ </code>
+
+ Args:
+ group: a parameter group, which is a list of parameters; should be
+ one of self.param_groups.
+ group_params_names: name for each parameter in group,
+ which is List[str].
+ """
batches = defaultdict(list) # `batches` maps from tuple (dtype_as_str,*shape) to list of nn.Parameter
        batches_names = defaultdict(list)  # `batches_names` maps from tuple (dtype_as_str,*shape) to list of str
@@ -71,6 +111,47 @@
class ScaledAdam(BatchedOptimizer):
+ """
+ Implements 'Scaled Adam', a variant of Adam where we scale each parameter's update
+ proportional to the norm of that parameter; and also learn the scale of the parameter,
+ in log space, subject to upper and lower limits (as if we had factored each parameter as
+ param = underlying_param * log_scale.exp())
+
+
+ Args:
+ params: The parameters or param_groups to optimize (like other Optimizer subclasses)
+ lr: The learning rate. We will typically use a learning rate schedule that starts
+ at 0.03 and decreases over time, i.e. much higher than other common
+ optimizers.
+ clipping_scale: (e.g. 2.0)
+ A scale for gradient-clipping: if specified, the normalized gradients
+ over the whole model will be clipped to have 2-norm equal to
+ `clipping_scale` times the median 2-norm over the most recent period
+ of `clipping_update_period` minibatches. By "normalized gradients",
+ we mean after multiplying by the rms parameter value for this tensor
+ [for non-scalars]; this is appropriate because our update is scaled
+ by this quantity.
+ betas: beta1,beta2 are momentum constants for regular momentum, and moving sum-sq grad.
+            Must satisfy 0 < beta1 <= beta2 < 1.
+ scalar_lr_scale: A scaling factor on the learning rate, that we use to update the
+            scale of each parameter tensor and scalar parameters of the model.
+ If each parameter were decomposed
+ as p * p_scale.exp(), where (p**2).mean().sqrt() == 1.0, scalar_lr_scale
+            would be the scaling factor on the learning rate of p_scale.
+ eps: A general-purpose epsilon to prevent division by zero
+ param_min_rms: Minimum root-mean-square value of parameter tensor, for purposes of
+ learning the scale on the parameters (we'll constrain the rms of each non-scalar
+ parameter tensor to be >= this value)
+ param_max_rms: Maximum root-mean-square value of parameter tensor, for purposes of
+ learning the scale on the parameters (we'll constrain the rms of each non-scalar
+ parameter tensor to be <= this value)
+ scalar_max: Maximum absolute value for scalar parameters (applicable if your
+ model has any parameters with numel() == 1).
+ size_update_period: The periodicity, in steps, with which we update the size (scale)
+ of the parameter tensor. This is provided to save a little time
+ in the update.
+ clipping_update_period: if clipping_scale is specified, this is the period
+ """
def __init__(
self,
@@ -114,6 +195,12 @@
@torch.no_grad()
def step(self, closure=None):
+ """Performs a single optimization step.
+
+ Arguments:
+ closure (callable, optional): A closure that reevaluates the model
+ and returns the loss.
+ """
loss = None
if closure is not None:
with torch.enable_grad():
@@ -147,6 +234,17 @@ return loss
def _init_state(self, group: dict, p: Tensor, state: dict):
+ """
+ Initializes state dict for parameter 'p'. Assumes that dim 0 of tensor p
+ is actually the batch dimension, corresponding to batched-together
+ parameters of a given shape.
+
+
+ Args:
+ group: Dict to look up configuration values.
+ p: The parameter that we are initializing the state for
+ state: Dict from string to whatever state we are initializing
+ """
size_update_period = group["size_update_period"]
state["step"] = 0
@@ -179,6 +277,19 @@ state["exp_avg_sq"] = torch.zeros_like(p, memory_format=torch.preserve_format)
def _get_clipping_scale(self, group: dict, tuples: List[Tuple[Tensor, dict, List[str]]]) -> float:
+ """
+ Returns a scalar factor <= 1.0 that dictates gradient clipping, i.e. we will scale the gradients
+ by this amount before applying the rest of the update.
+
+ Args:
+ group: the parameter group, an item in self.param_groups
+ tuples: a list of tuples of (param, state, param_names)
+ where param is a batched set of parameters,
+ with a .grad (1st dim is batch dim)
+ and state is the state-dict where optimization parameters are kept.
+                param_names is a List[str], where each str is the name of a parameter
+                in the batched set of parameters "param".
+ """
assert len(tuples) >= 1
clipping_scale = group["clipping_scale"]
(first_p, first_state, _) = tuples[0]
@@ -250,6 +361,19 @@ return ans
def _show_gradient_dominating_parameter(self, tuples: List[Tuple[Tensor, dict, List[str]]], tot_sumsq: Tensor):
+ """
+        Show information about the parameter that dominates tot_sumsq.
+
+ Args:
+ tuples: a list of tuples of (param, state, param_names)
+ where param is a batched set of parameters,
+ with a .grad (1st dim is batch dim)
+ and state is the state-dict where optimization parameters are kept.
+                param_names is a List[str], where each str is the name of a parameter
+                in the batched set of parameters "param".
+ tot_sumsq: sumsq of all parameters. Though it's could be calculated
+ from tuples, we still pass it to save some time.
+ """
all_sumsq_orig = {}
for p, state, batch_param_names in tuples:
# p is a stacked batch parameters.
@@ -300,6 +424,15 @@ )
def _step_one_batch(self, group: dict, p: Tensor, state: dict, clipping_scale: float):
+ """
+ Do the step for one parameter, which is actually going to be a batch of
+ `real` parameters, with dim 0 as the batch dim.
+ Args:
+ group: dict to look up configuration values
+ p: parameter to update (actually multiple parameters stacked together
+ as a batch)
+ state: state-dict for p, to look up the optimizer state
+ """
lr = group["lr"]
size_update_period = group["size_update_period"]
beta1 = group["betas"][0]
@@ -341,6 +474,19 @@ p: Tensor,
state: dict,
) -> None:
+ """
+ Called only where p.numel() > 1, this updates the scale of the parameter.
+ If we imagine: p = underlying_param * scale.exp(), and we are doing
+ gradient descent on underlying param and on scale, this function does the update
+ on `scale`.
+
+ Args:
+ group: dict to look up configuration values
+ scale_grads: a tensor of shape (size_update_period, batch_size, 1, 1,...) containing
+ grads w.r.t. the scales.
+ p: The parameter to update
+ state: The state-dict of p
+ """
param_rms = state["param_rms"]
beta1, beta2 = group["betas"]
@@ -384,6 +530,18 @@ delta.add_(p * scale_step, alpha=(1 - beta1))
def _step(self, group: dict, p: Tensor, state: dict):
+ """
+ This function does the core update of self.step(), in the case where the members of
+ the batch have more than 1 element.
+
+ Args:
+ group: A dict which will be used to look up configuration values
+ p: The parameter to be updated
+ state: The state-dict corresponding to parameter p
+
+ This function modifies p.
+ """
grad = p.grad
lr = group["lr"]
beta1, beta2 = group["betas"]
@@ -411,6 +569,10 @@ p.add_(delta)
def _step_scalar(self, group: dict, p: Tensor, state: dict):
+ """
+ A simplified form of the core update for scalar tensors, where we cannot get a good
+ estimate of the parameter rms.
+ """
beta1, beta2 = group["betas"]
scalar_max = group["scalar_max"]
eps = group["eps"]
@@ -428,4 +590,4 @@ delta = state["delta"]
delta.add_(grad / denom, alpha=-lr * (1 - beta1))
p.clamp_(min=-scalar_max, max=scalar_max)
-        p.add_(delta)
+        p.add_(delta)
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/AR/modules/optim.py |
Help me write clear docstrings | # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/utils.py
# reference: https://github.com/lifeiteng/vall-e
from typing import Optional, Tuple
import torch
import torch.nn.functional as F
def sequence_mask(length, max_length=None):
if max_length is None:
max_length = length.max()
x = torch.arange(max_length, dtype=length.dtype, device=length.device)
return x.unsqueeze(0) < length.unsqueeze(1)
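# Quick illustration (toy lengths): True marks valid positions, one row per sequence.
# >>> sequence_mask(torch.tensor([1, 3]))
# tensor([[ True, False, False],
#         [ True,  True,  True]])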
def make_pad_mask(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:
assert lengths.ndim == 1, lengths.ndim
max_len = max(max_len, lengths.max())
n = lengths.size(0)
seq_range = torch.arange(0, max_len, device=lengths.device)
    expanded_lengths = seq_range.unsqueeze(0).expand(n, max_len)
    return expanded_lengths >= lengths.unsqueeze(-1)
def make_pad_mask_left(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:
assert lengths.ndim == 1, lengths.ndim
max_len = max(max_len, lengths.max())
n = lengths.size(0)
seq_range = torch.arange(0, max_len, device=lengths.device)
    expanded_lengths = seq_range.unsqueeze(0).repeat(n, 1)
    expanded_lengths -= (max_len - lengths).unsqueeze(-1)
    return expanded_lengths < 0
# https://github.com/microsoft/unilm/blob/master/xtune/src/transformers/modeling_utils.py
def top_k_top_p_filtering(
logits,
top_k=0,
top_p=1.0,
filter_value=-float("Inf"),
min_tokens_to_keep=1,
):
if top_k > 0:
top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1)) # Safety check
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p < 1.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold (token with 0 are kept)
sorted_indices_to_remove = cumulative_probs > top_p
if min_tokens_to_keep > 1:
# Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
sorted_indices_to_remove[..., :min_tokens_to_keep] = 0
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# scatter sorted tensors to original indexing
indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
logits[indices_to_remove] = filter_value
return logits
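# Worked example (toy logits, arbitrary top_k): with top_k=2 only the two largest
# logits survive; the rest are set to -inf before any softmax/sampling.
# >>> top_k_top_p_filtering(torch.tensor([[1.0, 3.0, 2.0, 0.5]]), top_k=2)
# tensor([[-inf, 3., 2., -inf]])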
def topk_sampling(logits, top_k=10, top_p=1.0, temperature=1.0):
# temperature: (`optional`) float
    # The value used to modulate the next token probabilities. Must be strictly positive. Defaults to 1.0.
    # top_k: (`optional`) int
    # The number of highest-probability vocabulary tokens to keep for top-k filtering. Between 1 and infinity. Defaults to 50.
    # top_p: (`optional`) float
    # The cumulative probability of the highest-probability vocabulary tokens to keep for nucleus sampling. Must be between 0 and 1. Defaults to 1.
# Temperature (higher temperature => more likely to sample low probability tokens)
if temperature != 1.0:
logits = logits / temperature
# Top-p/top-k filtering
logits = top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
# Sample
token = torch.multinomial(F.softmax(logits, dim=-1), num_samples=1)
return token
def multinomial_sample_one_no_sync(
probs_sort,
): # Does multinomial sampling without a cuda synchronization
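    # Exponential-race form of the Gumbel-max trick: with q_i ~ Exponential(1),
    # argmax_i(probs_i / q_i) is an exact sample from Categorical(probs) (probs need
    # not be normalized), and argmax avoids the host-device sync torch.multinomial incurs.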
q = torch.empty_like(probs_sort).exponential_(1)
return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int)
def logits_to_probs(
logits,
previous_tokens: Optional[torch.Tensor] = None,
temperature: float = 1.0,
top_k: Optional[int] = None,
top_p: Optional[int] = None,
repetition_penalty: float = 1.0,
):
if previous_tokens is not None and repetition_penalty != 1.0:
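        # Repetition penalty as in CTRL (Keskar et al., 2019): push logits of already
        # generated tokens away from selection by dividing positive scores and
        # multiplying negative ones, so both move toward rejection for penalty > 1.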
previous_tokens = previous_tokens.long()
score = torch.gather(logits, dim=1, index=previous_tokens)
score = torch.where(
score < 0,
score * repetition_penalty,
score / repetition_penalty,
)
logits.scatter_(dim=1, index=previous_tokens, src=score)
if top_p is not None and top_p < 1.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(torch.nn.functional.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > top_p
sorted_indices_to_remove[:, 0] = False # keep at least one option
indices_to_remove = sorted_indices_to_remove.scatter(
dim=1,
index=sorted_indices,
src=sorted_indices_to_remove,
)
logits = logits.masked_fill(indices_to_remove, -float("Inf"))
logits = logits / max(temperature, 1e-5)
if top_k is not None:
v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
pivot = v[:, -1].unsqueeze(-1)
logits = torch.where(logits < pivot, -float("Inf"), logits)
probs = torch.nn.functional.softmax(logits, dim=-1)
return probs
def sample(
logits,
previous_tokens: Optional[torch.Tensor] = None,
**sampling_kwargs,
) -> Tuple[torch.Tensor, torch.Tensor]:
probs = logits_to_probs(logits=logits, previous_tokens=previous_tokens, **sampling_kwargs)
idx_next = multinomial_sample_one_no_sync(probs)
return idx_next, probs
def dpo_loss(
policy_chosen_logps: torch.FloatTensor,
policy_rejected_logps: torch.FloatTensor,
reference_chosen_logps: torch.FloatTensor,
reference_rejected_logps: torch.FloatTensor,
beta: float,
reference_free: bool = False,
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
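    # DPO loss (Rafailov et al., 2023): -log sigmoid(beta * (chosen log-ratio minus
    # rejected log-ratio)), each ratio taken between the policy and the frozen
    # reference model; reference_free drops the reference term entirely.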
pi_logratios = policy_chosen_logps - policy_rejected_logps
ref_logratios = reference_chosen_logps - reference_rejected_logps
if reference_free:
ref_logratios = 0
logits = pi_logratios - ref_logratios
losses = -F.logsigmoid(beta * logits)
chosen_rewards = beta * (policy_chosen_logps - reference_chosen_logps).detach()
rejected_rewards = beta * (policy_rejected_logps - reference_rejected_logps).detach()
return losses.mean(), chosen_rewards, rejected_rewards
def get_batch_logps(
logits_target: torch.FloatTensor,
logits_reject: torch.FloatTensor,
labels_target: torch.LongTensor,
labels_reject: torch.LongTensor,
average_log_prob: bool = False,
) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
    # Gather the per-token log-probabilities of the given labels under each model's logits.
per_token_logps_target = torch.gather(
logits_target.log_softmax(-1), dim=2, index=labels_target.unsqueeze(2)
).squeeze(2)
per_token_logps_reject = torch.gather(
logits_reject.log_softmax(-1), dim=2, index=labels_reject.unsqueeze(2)
).squeeze(2)
return per_token_logps_target.sum(-1), per_token_logps_reject.sum(-1)
def make_reject_y(y_o, y_lens):
def repeat_P(y):
range_idx, _ = torch.randint(0, len(y), size=(2,)).sort()
pre = y[: range_idx[0]]
shf = y[range_idx[1] :]
range_text = y[range_idx[0] : range_idx[1]]
new_y = torch.cat([pre, range_text, range_text, shf])
return new_y
def lost_P(y):
range_idx, _ = torch.randint(0, len(y), size=(2,)).sort()
pre = y[: range_idx[0]]
shf = y[range_idx[1] :]
range_text = y[range_idx[0] : range_idx[1]]
new_y = torch.cat([pre, shf])
return new_y
bs = len(y_lens)
reject_y = []
reject_y_lens = []
for b in range(bs):
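        # NOTE: torch.randint(0, 1, ...) samples from [0, 1), i.e. it always returns 0,
        # so as written only repeat_P() is ever applied; the lost_P() branch below is
        # unreachable.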
process_item_idx = torch.randint(0, 1, size=(1,))[0]
if process_item_idx == 0:
new_y = repeat_P(y_o[b])
reject_y.append(new_y)
reject_y_lens.append(len(new_y))
elif process_item_idx == 1:
new_y = lost_P(y_o[b])
reject_y.append(new_y)
reject_y_lens.append(len(new_y))
max_length = max(reject_y_lens)
for b in range(bs):
pad_length = max_length - reject_y_lens[b]
reject_y[b] = torch.cat([reject_y[b], torch.zeros(pad_length, dtype=y_o.dtype, device=y_o.device)], dim=0)
reject_y = torch.stack(reject_y, dim=0)
reject_y_lens = torch.tensor(reject_y_lens, device=y_lens.device)
return reject_y, reject_y_lens | --- +++ @@ -14,6 +14,24 @@
def make_pad_mask(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:
+ """
+ Args:
+ lengths:
+ A 1-D tensor containing sentence lengths.
+ max_len:
+ The length of masks.
+ Returns:
+ Return a 2-D bool tensor, where masked positions
+ are filled with `True` and non-masked positions are
+ filled with `False`.
+
+ >>> lengths = torch.tensor([1, 3, 2, 5])
+ >>> make_pad_mask(lengths)
+ tensor([[False, True, True, True, True],
+ [False, False, False, True, True],
+ [False, False, True, True, True],
+ [False, False, False, False, False]])
+ """
assert lengths.ndim == 1, lengths.ndim
max_len = max(max_len, lengths.max())
n = lengths.size(0)
@@ -24,6 +42,28 @@
def make_pad_mask_left(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:
+ """
+ Args:
+ lengths:
+ A 1-D tensor containing sentence lengths.
+ max_len:
+ The length of masks.
+ Returns:
+ Return a 2-D bool tensor, where masked positions
+ are filled with `True` and non-masked positions are
+ filled with `False`.
+
+ >>> lengths = torch.tensor([1, 3, 2, 5])
+ >>> make_pad_mask_left(lengths)
+ tensor(
+ [
+ [True, True, False],
+ [True, False, False],
+ [True, True, False],
+ ...
+ ]
+ )
+ """
assert lengths.ndim == 1, lengths.ndim
max_len = max(max_len, lengths.max())
n = lengths.size(0)
@@ -42,6 +82,15 @@ filter_value=-float("Inf"),
min_tokens_to_keep=1,
):
+ """Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
+ Args:
+ logits: logits distribution shape (batch size, vocabulary size)
+ if top_k > 0: keep only top k tokens with highest probability (top-k filtering).
+ if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
+ Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
+ Make sure we keep at least min_tokens_to_keep per batch example in the output
+ From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
+ """
if top_k > 0:
top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1)) # Safety check
# Remove all tokens with a probability less than the last token of the top-k
@@ -230,4 +279,4 @@ reject_y = torch.stack(reject_y, dim=0)
reject_y_lens = torch.tensor(reject_y_lens, device=y_lens.device)
- return reject_y, reject_y_lens
+ return reject_y, reject_y_lens
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/AR/models/utils.py |
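Editorial aside (not part of the dataset row above): the top-k filtering that the added docstring describes can be checked in isolation. A minimal sketch, assuming only torch and re-implementing just the top-k branch:

import torch

def topk_mask(logits: torch.Tensor, top_k: int) -> torch.Tensor:
    # keep the top_k logits per row; everything else becomes -inf
    v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
    return logits.masked_fill(logits < v[:, -1:], float("-inf"))

probs = torch.softmax(topk_mask(torch.randn(2, 8), top_k=3), dim=-1)
assert (probs > 0).sum(dim=-1).eq(3).all()  # exactly 3 tokens survive per row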
Write docstrings that follow conventions | # Implementation adapted from https://github.com/EdwardDixon/snake under the MIT license.
# LICENSE is in incl_licenses directory.
import torch
from torch import nn, sin, pow
from torch.nn import Parameter
class Snake(nn.Module):
def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False):
super(Snake, self).__init__()
self.in_features = in_features
# Initialize alpha
self.alpha_logscale = alpha_logscale
if self.alpha_logscale: # Log scale alphas initialized to zeros
self.alpha = Parameter(torch.zeros(in_features) * alpha)
else: # Linear scale alphas initialized to ones
self.alpha = Parameter(torch.ones(in_features) * alpha)
self.alpha.requires_grad = alpha_trainable
self.no_div_by_zero = 0.000000001
def forward(self, x):
alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # Line up with x to [B, C, T]
if self.alpha_logscale:
alpha = torch.exp(alpha)
x = x + (1.0 / (alpha + self.no_div_by_zero)) * pow(sin(x * alpha), 2)
return x
class SnakeBeta(nn.Module):
def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False):
super(SnakeBeta, self).__init__()
self.in_features = in_features
# Initialize alpha
self.alpha_logscale = alpha_logscale
if self.alpha_logscale: # Log scale alphas initialized to zeros
self.alpha = Parameter(torch.zeros(in_features) * alpha)
self.beta = Parameter(torch.zeros(in_features) * alpha)
else: # Linear scale alphas initialized to ones
self.alpha = Parameter(torch.ones(in_features) * alpha)
self.beta = Parameter(torch.ones(in_features) * alpha)
self.alpha.requires_grad = alpha_trainable
self.beta.requires_grad = alpha_trainable
self.no_div_by_zero = 0.000000001
def forward(self, x):
alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # Line up with x to [B, C, T]
beta = self.beta.unsqueeze(0).unsqueeze(-1)
if self.alpha_logscale:
alpha = torch.exp(alpha)
beta = torch.exp(beta)
x = x + (1.0 / (beta + self.no_div_by_zero)) * pow(sin(x * alpha), 2)
return x | --- +++ @@ -7,8 +7,31 @@
class Snake(nn.Module):
+ """
+ Implementation of a sine-based periodic activation function
+ Shape:
+ - Input: (B, C, T)
+ - Output: (B, C, T), same shape as the input
+ Parameters:
+ - alpha - trainable parameter
+ References:
+ - This activation function is from this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
+ https://arxiv.org/abs/2006.08195
+ Examples:
+ >>> a1 = Snake(256)
+ >>> x = torch.randn(256)
+ >>> x = a1(x)
+ """
def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False):
+ """
+ Initialization.
+ INPUT:
+ - in_features: shape of the input
+ - alpha: trainable parameter
+ alpha is initialized to 1 by default, higher values = higher-frequency.
+ alpha will be trained along with the rest of your model.
+ """
super(Snake, self).__init__()
self.in_features = in_features
@@ -24,6 +47,11 @@ self.no_div_by_zero = 0.000000001
def forward(self, x):
+ """
+ Forward pass of the function.
+ Applies the function to the input elementwise.
+ Snake ∶= x + 1/a * sin^2 (xa)
+ """
alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # Line up with x to [B, C, T]
if self.alpha_logscale:
alpha = torch.exp(alpha)
@@ -33,8 +61,34 @@
class SnakeBeta(nn.Module):
+ """
+ A modified Snake function which uses separate parameters for the magnitude of the periodic components
+ Shape:
+ - Input: (B, C, T)
+ - Output: (B, C, T), same shape as the input
+ Parameters:
+ - alpha - trainable parameter that controls frequency
+ - beta - trainable parameter that controls magnitude
+ References:
+ - This activation function is a modified version based on this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
+ https://arxiv.org/abs/2006.08195
+ Examples:
+ >>> a1 = SnakeBeta(256)
+ >>> x = torch.randn(256)
+ >>> x = a1(x)
+ """
def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False):
+ """
+ Initialization.
+ INPUT:
+ - in_features: shape of the input
+ - alpha - trainable parameter that controls frequency
+ - beta - trainable parameter that controls magnitude
+ alpha is initialized to 1 by default, higher values = higher-frequency.
+ beta is initialized to 1 by default, higher values = higher-magnitude.
+ alpha will be trained along with the rest of your model.
+ """
super(SnakeBeta, self).__init__()
self.in_features = in_features
@@ -53,6 +107,11 @@ self.no_div_by_zero = 0.000000001
def forward(self, x):
+ """
+ Forward pass of the function.
+ Applies the function to the input elementwise.
+ SnakeBeta ∶= x + 1/b * sin^2 (xa)
+ """
alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # Line up with x to [B, C, T]
beta = self.beta.unsqueeze(0).unsqueeze(-1)
if self.alpha_logscale:
@@ -60,4 +119,4 @@ beta = torch.exp(beta)
x = x + (1.0 / (beta + self.no_div_by_zero)) * pow(sin(x * alpha), 2)
- return x
+ return x
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/BigVGAN/activations.py |
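Editorial aside: a self-contained sketch of the Snake activation documented in the diff above; assumes only torch and uses a trainable alpha as in the row's class (the class name here is illustrative, not the author's):

import torch
from torch import nn, sin, pow

class SnakeSketch(nn.Module):
    def __init__(self, in_features: int, alpha: float = 1.0):
        super().__init__()
        self.alpha = nn.Parameter(torch.ones(in_features) * alpha)
    def forward(self, x):
        a = self.alpha.unsqueeze(0).unsqueeze(-1)  # align to (B, C, T)
        return x + (1.0 / (a + 1e-9)) * pow(sin(x * a), 2)

act = SnakeSketch(in_features=4)
y = act(torch.randn(2, 4, 16))  # (B, C, T) in, same shape out
assert y.shape == (2, 4, 16)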
Annotate my code with docstrings |
from __future__ import annotations
import math
from typing import Optional
import torch
import torch.nn.functional as F
import torchaudio
from librosa.filters import mel as librosa_mel_fn
from torch import nn
from x_transformers.x_transformers import apply_rotary_pos_emb
# raw wav to mel spec
mel_basis_cache = {}
hann_window_cache = {}
def get_bigvgan_mel_spectrogram(
waveform,
n_fft=1024,
n_mel_channels=100,
target_sample_rate=24000,
hop_length=256,
win_length=1024,
fmin=0,
fmax=None,
center=False,
): # Copy from https://github.com/NVIDIA/BigVGAN/tree/main
device = waveform.device
key = f"{n_fft}_{n_mel_channels}_{target_sample_rate}_{hop_length}_{win_length}_{fmin}_{fmax}_{device}"
if key not in mel_basis_cache:
mel = librosa_mel_fn(sr=target_sample_rate, n_fft=n_fft, n_mels=n_mel_channels, fmin=fmin, fmax=fmax)
mel_basis_cache[key] = torch.from_numpy(mel).float().to(device) # TODO: why do they need .float()?
hann_window_cache[key] = torch.hann_window(win_length).to(device)
mel_basis = mel_basis_cache[key]
hann_window = hann_window_cache[key]
padding = (n_fft - hop_length) // 2
waveform = torch.nn.functional.pad(waveform.unsqueeze(1), (padding, padding), mode="reflect").squeeze(1)
spec = torch.stft(
waveform,
n_fft,
hop_length=hop_length,
win_length=win_length,
window=hann_window,
center=center,
pad_mode="reflect",
normalized=False,
onesided=True,
return_complex=True,
)
spec = torch.sqrt(torch.view_as_real(spec).pow(2).sum(-1) + 1e-9)
mel_spec = torch.matmul(mel_basis, spec)
mel_spec = torch.log(torch.clamp(mel_spec, min=1e-5))
return mel_spec
def get_vocos_mel_spectrogram(
waveform,
n_fft=1024,
n_mel_channels=100,
target_sample_rate=24000,
hop_length=256,
win_length=1024,
):
mel_stft = torchaudio.transforms.MelSpectrogram(
sample_rate=target_sample_rate,
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
n_mels=n_mel_channels,
power=1,
center=True,
normalized=False,
norm=None,
).to(waveform.device)
if len(waveform.shape) == 3:
waveform = waveform.squeeze(1) # 'b 1 nw -> b nw'
assert len(waveform.shape) == 2
mel = mel_stft(waveform)
mel = mel.clamp(min=1e-5).log()
return mel
class MelSpec(nn.Module):
def __init__(
self,
n_fft=1024,
hop_length=256,
win_length=1024,
n_mel_channels=100,
target_sample_rate=24_000,
mel_spec_type="vocos",
):
super().__init__()
assert mel_spec_type in ["vocos", "bigvgan"], "We only support two mel extraction backends: vocos or bigvgan"
self.n_fft = n_fft
self.hop_length = hop_length
self.win_length = win_length
self.n_mel_channels = n_mel_channels
self.target_sample_rate = target_sample_rate
if mel_spec_type == "vocos":
self.extractor = get_vocos_mel_spectrogram
elif mel_spec_type == "bigvgan":
self.extractor = get_bigvgan_mel_spectrogram
self.register_buffer("dummy", torch.tensor(0), persistent=False)
def forward(self, wav):
if self.dummy.device != wav.device:
self.to(wav.device)
mel = self.extractor(
waveform=wav,
n_fft=self.n_fft,
n_mel_channels=self.n_mel_channels,
target_sample_rate=self.target_sample_rate,
hop_length=self.hop_length,
win_length=self.win_length,
)
return mel
# sinusoidal position embedding
class SinusPositionEmbedding(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x, scale=1000):
device = x.device
half_dim = self.dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, device=device).float() * -emb)
emb = scale * x.unsqueeze(1) * emb.unsqueeze(0)
emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
return emb
# convolutional position embedding
class ConvPositionEmbedding(nn.Module):
def __init__(self, dim, kernel_size=31, groups=16):
super().__init__()
assert kernel_size % 2 != 0
self.conv1d = nn.Sequential(
nn.Conv1d(dim, dim, kernel_size, groups=groups, padding=kernel_size // 2),
nn.Mish(),
nn.Conv1d(dim, dim, kernel_size, groups=groups, padding=kernel_size // 2),
nn.Mish(),
)
def forward(self, x: float["b n d"], mask: bool["b n"] | None = None): # noqa: F722
if mask is not None:
mask = mask[..., None]
x = x.masked_fill(~mask, 0.0)
x = x.permute(0, 2, 1)
x = self.conv1d(x)
out = x.permute(0, 2, 1)
if mask is not None:
out = out.masked_fill(~mask, 0.0)
return out
# rotary positional embedding related
def precompute_freqs_cis(dim: int, end: int, theta: float = 10000.0, theta_rescale_factor=1.0):
# proposed by reddit user bloc97, to rescale rotary embeddings to longer sequence length without fine-tuning
# has some connection to NTK literature
# https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/
# https://github.com/lucidrains/rotary-embedding-torch/blob/main/rotary_embedding_torch/rotary_embedding_torch.py
theta *= theta_rescale_factor ** (dim / (dim - 2))
freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
t = torch.arange(end, device=freqs.device) # type: ignore
freqs = torch.outer(t, freqs).float() # type: ignore
freqs_cos = torch.cos(freqs) # real part
freqs_sin = torch.sin(freqs) # imaginary part
return torch.cat([freqs_cos, freqs_sin], dim=-1)
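# Hedged usage sketch (editorial illustration, not part of the original file):
#   freqs_cis = precompute_freqs_cis(dim=64, end=4096)  # shape (4096, 64): [cos | sin]
#   rope = freqs_cis[:seq_len]                          # slice per batch at call time
# theta_rescale_factor > 1.0 stretches the base period for longer contexts (NTK-style).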
def get_pos_embed_indices(start, length, max_pos, scale=1.0):
# length = length if isinstance(length, int) else length.max()
scale = scale * torch.ones_like(start, dtype=torch.float32) # in case scale is a scalar
pos = (
start.unsqueeze(1)
+ (torch.arange(length, device=start.device, dtype=torch.float32).unsqueeze(0) * scale.unsqueeze(1)).long()
)
# clamp positions for overly long sequences to avoid out-of-range indexing.
pos = torch.where(pos < max_pos, pos, max_pos - 1)
return pos
# Global Response Normalization layer (Instance Normalization ?)
class GRN(nn.Module):
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.zeros(1, 1, dim))
self.beta = nn.Parameter(torch.zeros(1, 1, dim))
def forward(self, x):
Gx = torch.norm(x, p=2, dim=1, keepdim=True)
Nx = Gx / (Gx.mean(dim=-1, keepdim=True) + 1e-6)
return self.gamma * (x * Nx) + self.beta + x
# ConvNeXt-V2 Block https://github.com/facebookresearch/ConvNeXt-V2/blob/main/models/convnextv2.py
# ref: https://github.com/bfs18/e2_tts/blob/main/rfwave/modules.py#L108
class ConvNeXtV2Block(nn.Module):
def __init__(
self,
dim: int,
intermediate_dim: int,
dilation: int = 1,
):
super().__init__()
padding = (dilation * (7 - 1)) // 2
self.dwconv = nn.Conv1d(
dim, dim, kernel_size=7, padding=padding, groups=dim, dilation=dilation
) # depthwise conv
self.norm = nn.LayerNorm(dim, eps=1e-6)
self.pwconv1 = nn.Linear(dim, intermediate_dim) # pointwise/1x1 convs, implemented with linear layers
self.act = nn.GELU()
self.grn = GRN(intermediate_dim)
self.pwconv2 = nn.Linear(intermediate_dim, dim)
def forward(self, x: torch.Tensor) -> torch.Tensor:
residual = x
x = x.transpose(1, 2) # b n d -> b d n
x = self.dwconv(x)
x = x.transpose(1, 2) # b d n -> b n d
x = self.norm(x)
x = self.pwconv1(x)
x = self.act(x)
x = self.grn(x)
x = self.pwconv2(x)
return residual + x
# AdaLayerNormZero
# return with modulated x for attn input, and params for later mlp modulation
class AdaLayerNormZero(nn.Module):
def __init__(self, dim):
super().__init__()
self.silu = nn.SiLU()
self.linear = nn.Linear(dim, dim * 6)
self.norm = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
def forward(self, x, emb=None):
emb = self.linear(self.silu(emb))
shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = torch.chunk(emb, 6, dim=1)
x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
# AdaLayerNormZero for final layer
# return only with modulated x for attn input, cuz no more mlp modulation
class AdaLayerNormZero_Final(nn.Module):
def __init__(self, dim):
super().__init__()
self.silu = nn.SiLU()
self.linear = nn.Linear(dim, dim * 2)
self.norm = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
def forward(self, x, emb):
emb = self.linear(self.silu(emb))
scale, shift = torch.chunk(emb, 2, dim=1)
x = self.norm(x) * (1 + scale)[:, None, :] + shift[:, None, :]
return x
# FeedForward
class FeedForward(nn.Module):
def __init__(self, dim, dim_out=None, mult=4, dropout=0.0, approximate: str = "none"):
super().__init__()
inner_dim = int(dim * mult)
dim_out = dim_out if dim_out is not None else dim
activation = nn.GELU(approximate=approximate)
project_in = nn.Sequential(nn.Linear(dim, inner_dim), activation)
self.ff = nn.Sequential(project_in, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out))
def forward(self, x):
return self.ff(x)
# Attention with possible joint part
# modified from diffusers/src/diffusers/models/attention_processor.py
class Attention(nn.Module):
def __init__(
self,
processor: JointAttnProcessor | AttnProcessor,
dim: int,
heads: int = 8,
dim_head: int = 64,
dropout: float = 0.0,
context_dim: Optional[int] = None, # if not None -> joint attention
context_pre_only=None,
):
super().__init__()
if not hasattr(F, "scaled_dot_product_attention"):
raise ImportError("Attention equires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
self.processor = processor
self.dim = dim
self.heads = heads
self.inner_dim = dim_head * heads
self.dropout = dropout
self.context_dim = context_dim
self.context_pre_only = context_pre_only
self.to_q = nn.Linear(dim, self.inner_dim)
self.to_k = nn.Linear(dim, self.inner_dim)
self.to_v = nn.Linear(dim, self.inner_dim)
if self.context_dim is not None:
self.to_k_c = nn.Linear(context_dim, self.inner_dim)
self.to_v_c = nn.Linear(context_dim, self.inner_dim)
if self.context_pre_only is not None:
self.to_q_c = nn.Linear(context_dim, self.inner_dim)
self.to_out = nn.ModuleList([])
self.to_out.append(nn.Linear(self.inner_dim, dim))
self.to_out.append(nn.Dropout(dropout))
if self.context_pre_only is not None and not self.context_pre_only:
self.to_out_c = nn.Linear(self.inner_dim, dim)
def forward(
self,
x: float["b n d"], # noised input x # noqa: F722
c: float["b n d"] = None, # context c # noqa: F722
mask: bool["b n"] | None = None, # noqa: F722
rope=None, # rotary position embedding for x
c_rope=None, # rotary position embedding for c
) -> torch.Tensor:
if c is not None:
return self.processor(self, x, c=c, mask=mask, rope=rope, c_rope=c_rope)
else:
return self.processor(self, x, mask=mask, rope=rope)
# Attention processor
# from torch.nn.attention import SDPBackend
# torch.backends.cuda.enable_flash_sdp(True)
class AttnProcessor:
def __init__(self):
pass
def __call__(
self,
attn: Attention,
x: float["b n d"], # noised input x # noqa: F722
mask: bool["b n"] | None = None, # noqa: F722
rope=None, # rotary position embedding
) -> torch.FloatTensor:
batch_size = x.shape[0]
# `sample` projections.
query = attn.to_q(x)
key = attn.to_k(x)
value = attn.to_v(x)
# apply rotary position embedding
if rope is not None:
freqs, xpos_scale = rope
q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale**-1.0) if xpos_scale is not None else (1.0, 1.0)
query = apply_rotary_pos_emb(query, freqs, q_xpos_scale)
key = apply_rotary_pos_emb(key, freqs, k_xpos_scale)
# attention
inner_dim = key.shape[-1]
head_dim = inner_dim // attn.heads
query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
# mask. e.g. inference got a batch with different target durations, mask out the padding
if mask is not None:
attn_mask = mask
attn_mask = attn_mask.unsqueeze(1).unsqueeze(1) # 'b n -> b 1 1 n'
# print(3433333333,attn_mask.shape)
attn_mask = attn_mask.expand(batch_size, attn.heads, query.shape[-2], key.shape[-2])
else:
attn_mask = None
# with torch.nn.attention.sdpa_kernel(backends=[SDPBackend.EFFICIENT_ATTENTION]):
# with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=True):
# with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=False):
# print(torch.backends.cuda.flash_sdp_enabled())
# print(torch.backends.cuda.mem_efficient_sdp_enabled())
# print(torch.backends.cuda.math_sdp_enabled())
x = F.scaled_dot_product_attention(query, key, value, attn_mask=attn_mask, dropout_p=0.0, is_causal=False)
x = x.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
x = x.to(query.dtype)
# linear proj
x = attn.to_out[0](x)
# dropout
x = attn.to_out[1](x)
if mask is not None:
mask = mask.unsqueeze(-1)
x = x.masked_fill(~mask, 0.0)
return x
# Joint Attention processor for MM-DiT
# modified from diffusers/src/diffusers/models/attention_processor.py
class JointAttnProcessor:
def __init__(self):
pass
def __call__(
self,
attn: Attention,
x: float["b n d"], # noised input x # noqa: F722
c: float["b nt d"] = None, # context c, here text # noqa: F722
mask: bool["b n"] | None = None, # noqa: F722
rope=None, # rotary position embedding for x
c_rope=None, # rotary position embedding for c
) -> torch.FloatTensor:
residual = x
batch_size = c.shape[0]
# `sample` projections.
query = attn.to_q(x)
key = attn.to_k(x)
value = attn.to_v(x)
# `context` projections.
c_query = attn.to_q_c(c)
c_key = attn.to_k_c(c)
c_value = attn.to_v_c(c)
# apply rope for context and noised input independently
if rope is not None:
freqs, xpos_scale = rope
q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale**-1.0) if xpos_scale is not None else (1.0, 1.0)
query = apply_rotary_pos_emb(query, freqs, q_xpos_scale)
key = apply_rotary_pos_emb(key, freqs, k_xpos_scale)
if c_rope is not None:
freqs, xpos_scale = c_rope
q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale**-1.0) if xpos_scale is not None else (1.0, 1.0)
c_query = apply_rotary_pos_emb(c_query, freqs, q_xpos_scale)
c_key = apply_rotary_pos_emb(c_key, freqs, k_xpos_scale)
# attention
query = torch.cat([query, c_query], dim=1)
key = torch.cat([key, c_key], dim=1)
value = torch.cat([value, c_value], dim=1)
inner_dim = key.shape[-1]
head_dim = inner_dim // attn.heads
query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
# mask. e.g. inference got a batch with different target durations, mask out the padding
if mask is not None:
attn_mask = F.pad(mask, (0, c.shape[1]), value=True) # no mask for c (text)
attn_mask = attn_mask.unsqueeze(1).unsqueeze(1) # 'b n -> b 1 1 n'
attn_mask = attn_mask.expand(batch_size, attn.heads, query.shape[-2], key.shape[-2])
else:
attn_mask = None
x = F.scaled_dot_product_attention(query, key, value, attn_mask=attn_mask, dropout_p=0.0, is_causal=False)
x = x.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
x = x.to(query.dtype)
# Split the attention outputs.
x, c = (
x[:, : residual.shape[1]],
x[:, residual.shape[1] :],
)
# linear proj
x = attn.to_out[0](x)
# dropout
x = attn.to_out[1](x)
if not attn.context_pre_only:
c = attn.to_out_c(c)
if mask is not None:
mask = mask.unsqueeze(-1)
x = x.masked_fill(~mask, 0.0)
# c = c.masked_fill(~mask, 0.) # no mask for c (text)
return x, c
# DiT Block
class DiTBlock(nn.Module):
def __init__(self, dim, heads, dim_head, ff_mult=4, dropout=0.1):
super().__init__()
self.attn_norm = AdaLayerNormZero(dim)
self.attn = Attention(
processor=AttnProcessor(),
dim=dim,
heads=heads,
dim_head=dim_head,
dropout=dropout,
)
self.ff_norm = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
self.ff = FeedForward(dim=dim, mult=ff_mult, dropout=dropout, approximate="tanh")
def forward(self, x, t, mask=None, rope=None): # x: noised input, t: time embedding
# pre-norm & modulation for attention input
norm, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.attn_norm(x, emb=t)
# attention
attn_output = self.attn(x=norm, mask=mask, rope=rope)
# process attention output for input x
x = x + gate_msa.unsqueeze(1) * attn_output
norm = self.ff_norm(x) * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
ff_output = self.ff(norm)
x = x + gate_mlp.unsqueeze(1) * ff_output
return x
# MMDiT Block https://arxiv.org/abs/2403.03206
class MMDiTBlock(nn.Module):
def __init__(self, dim, heads, dim_head, ff_mult=4, dropout=0.1, context_pre_only=False):
super().__init__()
self.context_pre_only = context_pre_only
self.attn_norm_c = AdaLayerNormZero_Final(dim) if context_pre_only else AdaLayerNormZero(dim)
self.attn_norm_x = AdaLayerNormZero(dim)
self.attn = Attention(
processor=JointAttnProcessor(),
dim=dim,
heads=heads,
dim_head=dim_head,
dropout=dropout,
context_dim=dim,
context_pre_only=context_pre_only,
)
if not context_pre_only:
self.ff_norm_c = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
self.ff_c = FeedForward(dim=dim, mult=ff_mult, dropout=dropout, approximate="tanh")
else:
self.ff_norm_c = None
self.ff_c = None
self.ff_norm_x = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
self.ff_x = FeedForward(dim=dim, mult=ff_mult, dropout=dropout, approximate="tanh")
def forward(self, x, c, t, mask=None, rope=None, c_rope=None): # x: noised input, c: context, t: time embedding
# pre-norm & modulation for attention input
if self.context_pre_only:
norm_c = self.attn_norm_c(c, t)
else:
norm_c, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.attn_norm_c(c, emb=t)
norm_x, x_gate_msa, x_shift_mlp, x_scale_mlp, x_gate_mlp = self.attn_norm_x(x, emb=t)
# attention
x_attn_output, c_attn_output = self.attn(x=norm_x, c=norm_c, mask=mask, rope=rope, c_rope=c_rope)
# process attention output for context c
if self.context_pre_only:
c = None
else: # if not last layer
c = c + c_gate_msa.unsqueeze(1) * c_attn_output
norm_c = self.ff_norm_c(c) * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None]
c_ff_output = self.ff_c(norm_c)
c = c + c_gate_mlp.unsqueeze(1) * c_ff_output
# process attention output for input x
x = x + x_gate_msa.unsqueeze(1) * x_attn_output
norm_x = self.ff_norm_x(x) * (1 + x_scale_mlp[:, None]) + x_shift_mlp[:, None]
x_ff_output = self.ff_x(norm_x)
x = x + x_gate_mlp.unsqueeze(1) * x_ff_output
return c, x
# time step conditioning embedding
class TimestepEmbedding(nn.Module):
def __init__(self, dim, freq_embed_dim=256):
super().__init__()
self.time_embed = SinusPositionEmbedding(freq_embed_dim)
self.time_mlp = nn.Sequential(nn.Linear(freq_embed_dim, dim), nn.SiLU(), nn.Linear(dim, dim))
def forward(self, timestep: float["b"]): # noqa: F821
time_hidden = self.time_embed(timestep)
time_hidden = time_hidden.to(timestep.dtype)
time = self.time_mlp(time_hidden) # b d
return time | --- +++ @@ -1,3 +1,11 @@
+"""
+ein notation:
+b - batch
+n - sequence
+nt - text sequence
+nw - raw wave length
+d - dimension
+"""
from __future__ import annotations
@@ -576,6 +584,14 @@
class MMDiTBlock(nn.Module):
+ r"""
+ modified from diffusers/src/diffusers/models/attention.py
+
+ notes.
+ _c: context related. text, cond, etc. (left part in sd3 fig2.b)
+ _x: noised input related. (right part)
+ context_pre_only: last layer only does prenorm + modulation, since there is no further ffn
+ """
def __init__(self, dim, heads, dim_head, ff_mult=4, dropout=0.1, context_pre_only=False):
super().__init__()
@@ -647,4 +663,4 @@ time_hidden = self.time_embed(timestep)
time_hidden = time_hidden.to(timestep.dtype)
time = self.time_mlp(time_hidden) # b d
- return time
+ return time
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/f5_tts/model/modules.py |
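Editorial aside: the sinusoidal time embedding from the row above can be exercised standalone. A minimal sketch assuming only torch and math (it mirrors SinusPositionEmbedding.forward; the function name is illustrative):

import math
import torch

def sinus_pos_emb(x: torch.Tensor, dim: int = 8, scale: float = 1000.0) -> torch.Tensor:
    half = dim // 2
    emb = math.log(10000) / (half - 1)
    emb = torch.exp(torch.arange(half).float() * -emb)
    emb = scale * x.unsqueeze(1) * emb.unsqueeze(0)
    return torch.cat((emb.sin(), emb.cos()), dim=-1)  # (len(x), dim)

t = torch.tensor([0.0, 0.5, 1.0])  # e.g. flow-matching timesteps in [0, 1]
assert sinus_pos_emb(t).shape == (3, 8)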
Write docstrings for data processing functions | # Copyright (c) 2024 NVIDIA CORPORATION.
# Licensed under the MIT license.
# Adapted from https://github.com/jik876/hifi-gan under the MIT license.
# LICENSE is in incl_licenses directory.
import torch
import torch.nn as nn
from librosa.filters import mel as librosa_mel_fn
from scipy import signal
import typing
from typing import List, Tuple
from collections import namedtuple
import math
import functools
# Adapted from https://github.com/descriptinc/descript-audio-codec/blob/main/dac/nn/loss.py under the MIT license.
# LICENSE is in incl_licenses directory.
class MultiScaleMelSpectrogramLoss(nn.Module):
def __init__(
self,
sampling_rate: int,
n_mels: List[int] = [5, 10, 20, 40, 80, 160, 320],
window_lengths: List[int] = [32, 64, 128, 256, 512, 1024, 2048],
loss_fn: typing.Callable = nn.L1Loss(),
clamp_eps: float = 1e-5,
mag_weight: float = 0.0,
log_weight: float = 1.0,
pow: float = 1.0,
weight: float = 1.0,
match_stride: bool = False,
mel_fmin: List[float] = [0, 0, 0, 0, 0, 0, 0],
mel_fmax: List[float] = [None, None, None, None, None, None, None],
window_type: str = "hann",
):
super().__init__()
self.sampling_rate = sampling_rate
STFTParams = namedtuple(
"STFTParams",
["window_length", "hop_length", "window_type", "match_stride"],
)
self.stft_params = [
STFTParams(
window_length=w,
hop_length=w // 4,
match_stride=match_stride,
window_type=window_type,
)
for w in window_lengths
]
self.n_mels = n_mels
self.loss_fn = loss_fn
self.clamp_eps = clamp_eps
self.log_weight = log_weight
self.mag_weight = mag_weight
self.weight = weight
self.mel_fmin = mel_fmin
self.mel_fmax = mel_fmax
self.pow = pow
@staticmethod
@functools.lru_cache(None)
def get_window(
window_type,
window_length,
):
return signal.get_window(window_type, window_length)
@staticmethod
@functools.lru_cache(None)
def get_mel_filters(sr, n_fft, n_mels, fmin, fmax):
return librosa_mel_fn(sr=sr, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax)
def mel_spectrogram(
self,
wav,
n_mels,
fmin,
fmax,
window_length,
hop_length,
match_stride,
window_type,
):
B, C, T = wav.shape
if match_stride:
assert hop_length == window_length // 4, "For match_stride, hop must equal n_fft // 4"
right_pad = math.ceil(T / hop_length) * hop_length - T
pad = (window_length - hop_length) // 2
else:
right_pad = 0
pad = 0
wav = torch.nn.functional.pad(wav, (pad, pad + right_pad), mode="reflect")
window = self.get_window(window_type, window_length)
window = torch.from_numpy(window).to(wav.device).float()
stft = torch.stft(
wav.reshape(-1, T),
n_fft=window_length,
hop_length=hop_length,
window=window,
return_complex=True,
center=True,
)
_, nf, nt = stft.shape
stft = stft.reshape(B, C, nf, nt)
if match_stride:
"""
Drop first two and last two frames, which are added, because of padding. Now num_frames * hop_length = num_samples.
"""
stft = stft[..., 2:-2]
magnitude = torch.abs(stft)
nf = magnitude.shape[2]
mel_basis = self.get_mel_filters(self.sampling_rate, 2 * (nf - 1), n_mels, fmin, fmax)
mel_basis = torch.from_numpy(mel_basis).to(wav.device)
mel_spectrogram = magnitude.transpose(2, -1) @ mel_basis.T
mel_spectrogram = mel_spectrogram.transpose(-1, 2)
return mel_spectrogram
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
loss = 0.0
for n_mels, fmin, fmax, s in zip(self.n_mels, self.mel_fmin, self.mel_fmax, self.stft_params):
kwargs = {
"n_mels": n_mels,
"fmin": fmin,
"fmax": fmax,
"window_length": s.window_length,
"hop_length": s.hop_length,
"match_stride": s.match_stride,
"window_type": s.window_type,
}
x_mels = self.mel_spectrogram(x, **kwargs)
y_mels = self.mel_spectrogram(y, **kwargs)
x_logmels = torch.log(x_mels.clamp(min=self.clamp_eps).pow(self.pow)) / torch.log(torch.tensor(10.0))
y_logmels = torch.log(y_mels.clamp(min=self.clamp_eps).pow(self.pow)) / torch.log(torch.tensor(10.0))
loss += self.log_weight * self.loss_fn(x_logmels, y_logmels)
loss += self.mag_weight * self.loss_fn(x_mels, y_mels)  # magnitude term compares raw (non-log) mels
return loss
# Loss functions
def feature_loss(fmap_r: List[List[torch.Tensor]], fmap_g: List[List[torch.Tensor]]) -> torch.Tensor:
loss = 0
for dr, dg in zip(fmap_r, fmap_g):
for rl, gl in zip(dr, dg):
loss += torch.mean(torch.abs(rl - gl))
return loss * 2 # This equates to lambda=2.0 for the feature matching loss
def discriminator_loss(
disc_real_outputs: List[torch.Tensor], disc_generated_outputs: List[torch.Tensor]
) -> Tuple[torch.Tensor, List[torch.Tensor], List[torch.Tensor]]:
loss = 0
r_losses = []
g_losses = []
for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
r_loss = torch.mean((1 - dr) ** 2)
g_loss = torch.mean(dg**2)
loss += r_loss + g_loss
r_losses.append(r_loss.item())
g_losses.append(g_loss.item())
return loss, r_losses, g_losses
def generator_loss(
disc_outputs: List[torch.Tensor],
) -> Tuple[torch.Tensor, List[torch.Tensor]]:
loss = 0
gen_losses = []
for dg in disc_outputs:
l = torch.mean((1 - dg) ** 2)
gen_losses.append(l)
loss += l
return loss, gen_losses | --- +++ @@ -20,6 +20,33 @@ # Adapted from https://github.com/descriptinc/descript-audio-codec/blob/main/dac/nn/loss.py under the MIT license.
# LICENSE is in incl_licenses directory.
class MultiScaleMelSpectrogramLoss(nn.Module):
+ """Compute distance between mel spectrograms. Can be used
+ in a multi-scale way.
+
+ Parameters
+ ----------
+ n_mels : List[int]
+ Number of mels per STFT, by default [5, 10, 20, 40, 80, 160, 320],
+ window_lengths : List[int], optional
+ Length of each window of each STFT, by default [32, 64, 128, 256, 512, 1024, 2048]
+ loss_fn : typing.Callable, optional
+ How to compare each loss, by default nn.L1Loss()
+ clamp_eps : float, optional
+ Clamp on the log magnitude, below, by default 1e-5
+ mag_weight : float, optional
+ Weight of raw magnitude portion of loss, by default 0.0 (no amplification on mag part)
+ log_weight : float, optional
+ Weight of log magnitude portion of loss, by default 1.0
+ pow : float, optional
+ Power to raise magnitude to before taking log, by default 1.0
+ weight : float, optional
+ Weight of this loss, by default 1.0
+ match_stride : bool, optional
+ Whether to match the stride of convolutional layers, by default False
+
+ Implementation copied from: https://github.com/descriptinc/lyrebird-audiotools/blob/961786aa1a9d628cca0c0486e5885a457fe70c1a/audiotools/metrics/spectral.py
+ Additional code copied and modified from https://github.com/descriptinc/audiotools/blob/master/audiotools/core/audio_signal.py
+ """
def __init__(
self,
@@ -88,6 +115,10 @@ match_stride,
window_type,
):
+ """
+ Mirrors AudioSignal.mel_spectrogram used by BigVGAN-v2 training from:
+ https://github.com/descriptinc/audiotools/blob/master/audiotools/core/audio_signal.py
+ """
B, C, T = wav.shape
if match_stride:
@@ -129,6 +160,21 @@ return mel_spectrogram
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
+ """Computes mel loss between an estimate and a reference
+ signal.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Estimate signal
+ y : torch.Tensor
+ Reference signal
+
+ Returns
+ -------
+ torch.Tensor
+ Mel loss.
+ """
loss = 0.0
for n_mels, fmin, fmax, s in zip(self.n_mels, self.mel_fmin, self.mel_fmax, self.stft_params):
@@ -189,4 +235,4 @@ gen_losses.append(l)
loss += l
- return loss, gen_losses
+ return loss, gen_losses
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/BigVGAN/loss.py |
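Editorial aside: a minimal sketch of the LSGAN-style discriminator/generator losses defined in the row above, with hypothetical multi-scale discriminator outputs:

import torch

disc_real = [torch.tensor([0.9, 1.1]), torch.tensor([1.0])]   # hypothetical outputs
disc_fake = [torch.tensor([0.2, -0.1]), torch.tensor([0.0])]

d_loss = sum(torch.mean((1 - dr) ** 2) + torch.mean(dg ** 2)
             for dr, dg in zip(disc_real, disc_fake))  # drive real -> 1, fake -> 0
g_loss = sum(torch.mean((1 - dg) ** 2) for dg in disc_fake)  # drive fake -> 1
print(float(d_loss), float(g_loss))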
Add docstrings including usage examples | import math
from typing import Tuple
import torch
import torchaudio
from torch import Tensor
__all__ = [
"get_mel_banks",
"inverse_mel_scale",
"inverse_mel_scale_scalar",
"mel_scale",
"mel_scale_scalar",
"spectrogram",
"fbank",
"mfcc",
"vtln_warp_freq",
"vtln_warp_mel_freq",
]
# numeric_limits<float>::epsilon() 1.1920928955078125e-07
EPSILON = torch.tensor(torch.finfo(torch.float).eps)
# 1 milliseconds = 0.001 seconds
MILLISECONDS_TO_SECONDS = 0.001
# window types
HAMMING = "hamming"
HANNING = "hanning"
POVEY = "povey"
RECTANGULAR = "rectangular"
BLACKMAN = "blackman"
WINDOWS = [HAMMING, HANNING, POVEY, RECTANGULAR, BLACKMAN]
def _get_epsilon(device, dtype):
return EPSILON.to(device=device, dtype=dtype)
def _next_power_of_2(x: int) -> int:
return 1 if x == 0 else 2 ** (x - 1).bit_length()
def _get_strided(waveform: Tensor, window_size: int, window_shift: int, snip_edges: bool) -> Tensor:
assert waveform.dim() == 1
num_samples = waveform.size(0)
strides = (window_shift * waveform.stride(0), waveform.stride(0))
if snip_edges:
if num_samples < window_size:
return torch.empty((0, 0), dtype=waveform.dtype, device=waveform.device)
else:
m = 1 + (num_samples - window_size) // window_shift
else:
reversed_waveform = torch.flip(waveform, [0])
m = (num_samples + (window_shift // 2)) // window_shift
pad = window_size // 2 - window_shift // 2
pad_right = reversed_waveform
if pad > 0:
# torch.nn.functional.pad returns [2,1,0,1,2] for 'reflect'
# but we want [2, 1, 0, 0, 1, 2]
pad_left = reversed_waveform[-pad:]
waveform = torch.cat((pad_left, waveform, pad_right), dim=0)
else:
# pad is negative so we want to trim the waveform at the front
waveform = torch.cat((waveform[-pad:], pad_right), dim=0)
sizes = (m, window_size)
return waveform.as_strided(sizes, strides)
def _feature_window_function(
window_type: str,
window_size: int,
blackman_coeff: float,
device: torch.device,
dtype: int,
) -> Tensor:
if window_type == HANNING:
return torch.hann_window(window_size, periodic=False, device=device, dtype=dtype)
elif window_type == HAMMING:
return torch.hamming_window(window_size, periodic=False, alpha=0.54, beta=0.46, device=device, dtype=dtype)
elif window_type == POVEY:
# like hanning but goes to zero at edges
return torch.hann_window(window_size, periodic=False, device=device, dtype=dtype).pow(0.85)
elif window_type == RECTANGULAR:
return torch.ones(window_size, device=device, dtype=dtype)
elif window_type == BLACKMAN:
a = 2 * math.pi / (window_size - 1)
window_function = torch.arange(window_size, device=device, dtype=dtype)
# can't use torch.blackman_window as they use different coefficients
return (
blackman_coeff
- 0.5 * torch.cos(a * window_function)
+ (0.5 - blackman_coeff) * torch.cos(2 * a * window_function)
).to(device=device, dtype=dtype)
else:
raise Exception("Invalid window type " + window_type)
def _get_log_energy(strided_input: Tensor, epsilon: Tensor, energy_floor: float) -> Tensor:
device, dtype = strided_input.device, strided_input.dtype
log_energy = torch.max(strided_input.pow(2).sum(1), epsilon).log() # size (m)
if energy_floor == 0.0:
return log_energy
return torch.max(log_energy, torch.tensor(math.log(energy_floor), device=device, dtype=dtype))
def _get_waveform_and_window_properties(
waveform: Tensor,
channel: int,
sample_frequency: float,
frame_shift: float,
frame_length: float,
round_to_power_of_two: bool,
preemphasis_coefficient: float,
) -> Tuple[Tensor, int, int, int]:
channel = max(channel, 0)
assert channel < waveform.size(0), "Invalid channel {} for size {}".format(channel, waveform.size(0))
waveform = waveform[channel, :] # size (n)
window_shift = int(sample_frequency * frame_shift * MILLISECONDS_TO_SECONDS)
window_size = int(sample_frequency * frame_length * MILLISECONDS_TO_SECONDS)
padded_window_size = _next_power_of_2(window_size) if round_to_power_of_two else window_size
assert 2 <= window_size <= len(waveform), "choose a window size {} that is [2, {}]".format(
window_size, len(waveform)
)
assert 0 < window_shift, "`window_shift` must be greater than 0"
assert padded_window_size % 2 == 0, (
"the padded `window_size` must be divisible by two. use `round_to_power_of_two` or change `frame_length`"
)
assert 0.0 <= preemphasis_coefficient <= 1.0, "`preemphasis_coefficient` must be between [0,1]"
assert sample_frequency > 0, "`sample_frequency` must be greater than zero"
return waveform, window_shift, window_size, padded_window_size
def _get_window(
waveform: Tensor,
padded_window_size: int,
window_size: int,
window_shift: int,
window_type: str,
blackman_coeff: float,
snip_edges: bool,
raw_energy: bool,
energy_floor: float,
dither: float,
remove_dc_offset: bool,
preemphasis_coefficient: float,
) -> Tuple[Tensor, Tensor]:
device, dtype = waveform.device, waveform.dtype
epsilon = _get_epsilon(device, dtype)
# size (m, window_size)
strided_input = _get_strided(waveform, window_size, window_shift, snip_edges)
if dither != 0.0:
rand_gauss = torch.randn(strided_input.shape, device=device, dtype=dtype)
strided_input = strided_input + rand_gauss * dither
if remove_dc_offset:
# Subtract each row/frame by its mean
row_means = torch.mean(strided_input, dim=1).unsqueeze(1) # size (m, 1)
strided_input = strided_input - row_means
if raw_energy:
# Compute the log energy of each row/frame before applying preemphasis and
# window function
signal_log_energy = _get_log_energy(strided_input, epsilon, energy_floor) # size (m)
if preemphasis_coefficient != 0.0:
# strided_input[i,j] -= preemphasis_coefficient * strided_input[i, max(0, j-1)] for all i,j
offset_strided_input = torch.nn.functional.pad(strided_input.unsqueeze(0), (1, 0), mode="replicate").squeeze(
0
) # size (m, window_size + 1)
strided_input = strided_input - preemphasis_coefficient * offset_strided_input[:, :-1]
# Apply window_function to each row/frame
window_function = _feature_window_function(window_type, window_size, blackman_coeff, device, dtype).unsqueeze(
0
) # size (1, window_size)
strided_input = strided_input * window_function # size (m, window_size)
# Pad columns with zero until we reach size (m, padded_window_size)
if padded_window_size != window_size:
padding_right = padded_window_size - window_size
strided_input = torch.nn.functional.pad(
strided_input.unsqueeze(0), (0, padding_right), mode="constant", value=0
).squeeze(0)
# Compute energy after window function (not the raw one)
if not raw_energy:
signal_log_energy = _get_log_energy(strided_input, epsilon, energy_floor) # size (m)
return strided_input, signal_log_energy
def _subtract_column_mean(tensor: Tensor, subtract_mean: bool) -> Tensor:
# subtracts the column mean of the tensor size (m, n) if subtract_mean=True
# it returns size (m, n)
if subtract_mean:
col_means = torch.mean(tensor, dim=0).unsqueeze(0)
tensor = tensor - col_means
return tensor
def spectrogram(
waveform: Tensor,
blackman_coeff: float = 0.42,
channel: int = -1,
dither: float = 0.0,
energy_floor: float = 1.0,
frame_length: float = 25.0,
frame_shift: float = 10.0,
min_duration: float = 0.0,
preemphasis_coefficient: float = 0.97,
raw_energy: bool = True,
remove_dc_offset: bool = True,
round_to_power_of_two: bool = True,
sample_frequency: float = 16000.0,
snip_edges: bool = True,
subtract_mean: bool = False,
window_type: str = POVEY,
) -> Tensor:
device, dtype = waveform.device, waveform.dtype
epsilon = _get_epsilon(device, dtype)
waveform, window_shift, window_size, padded_window_size = _get_waveform_and_window_properties(
waveform, channel, sample_frequency, frame_shift, frame_length, round_to_power_of_two, preemphasis_coefficient
)
if len(waveform) < min_duration * sample_frequency:
# signal is too short
return torch.empty(0)
strided_input, signal_log_energy = _get_window(
waveform,
padded_window_size,
window_size,
window_shift,
window_type,
blackman_coeff,
snip_edges,
raw_energy,
energy_floor,
dither,
remove_dc_offset,
preemphasis_coefficient,
)
# size (m, padded_window_size // 2 + 1, 2)
fft = torch.fft.rfft(strided_input)
# Convert the FFT into a power spectrum
power_spectrum = torch.max(fft.abs().pow(2.0), epsilon).log() # size (m, padded_window_size // 2 + 1)
power_spectrum[:, 0] = signal_log_energy
power_spectrum = _subtract_column_mean(power_spectrum, subtract_mean)
return power_spectrum
def inverse_mel_scale_scalar(mel_freq: float) -> float:
return 700.0 * (math.exp(mel_freq / 1127.0) - 1.0)
def inverse_mel_scale(mel_freq: Tensor) -> Tensor:
return 700.0 * ((mel_freq / 1127.0).exp() - 1.0)
def mel_scale_scalar(freq: float) -> float:
return 1127.0 * math.log(1.0 + freq / 700.0)
def mel_scale(freq: Tensor) -> Tensor:
return 1127.0 * (1.0 + freq / 700.0).log()
def vtln_warp_freq(
vtln_low_cutoff: float,
vtln_high_cutoff: float,
low_freq: float,
high_freq: float,
vtln_warp_factor: float,
freq: Tensor,
) -> Tensor:
assert vtln_low_cutoff > low_freq, "be sure to set the vtln_low option higher than low_freq"
assert vtln_high_cutoff < high_freq, "be sure to set the vtln_high option lower than high_freq [or negative]"
l = vtln_low_cutoff * max(1.0, vtln_warp_factor)
h = vtln_high_cutoff * min(1.0, vtln_warp_factor)
scale = 1.0 / vtln_warp_factor
Fl = scale * l # F(l)
Fh = scale * h # F(h)
assert l > low_freq and h < high_freq
# slope of left part of the 3-piece linear function
scale_left = (Fl - low_freq) / (l - low_freq)
# [slope of center part is just "scale"]
# slope of right part of the 3-piece linear function
scale_right = (high_freq - Fh) / (high_freq - h)
res = torch.empty_like(freq)
outside_low_high_freq = torch.lt(freq, low_freq) | torch.gt(freq, high_freq) # freq < low_freq || freq > high_freq
before_l = torch.lt(freq, l) # freq < l
before_h = torch.lt(freq, h) # freq < h
after_h = torch.ge(freq, h) # freq >= h
# order of operations matter here (since there is overlapping frequency regions)
res[after_h] = high_freq + scale_right * (freq[after_h] - high_freq)
res[before_h] = scale * freq[before_h]
res[before_l] = low_freq + scale_left * (freq[before_l] - low_freq)
res[outside_low_high_freq] = freq[outside_low_high_freq]
return res
def vtln_warp_mel_freq(
vtln_low_cutoff: float,
vtln_high_cutoff: float,
low_freq,
high_freq: float,
vtln_warp_factor: float,
mel_freq: Tensor,
) -> Tensor:
return mel_scale(
vtln_warp_freq(
vtln_low_cutoff, vtln_high_cutoff, low_freq, high_freq, vtln_warp_factor, inverse_mel_scale(mel_freq)
)
)
def get_mel_banks(
num_bins: int,
window_length_padded: int,
sample_freq: float,
low_freq: float,
high_freq: float,
vtln_low: float,
vtln_high: float,
vtln_warp_factor: float,
device=None,
dtype=None,
) -> Tuple[Tensor, Tensor]:
assert num_bins > 3, "Must have at least 3 mel bins"
assert window_length_padded % 2 == 0
num_fft_bins = window_length_padded / 2
nyquist = 0.5 * sample_freq
if high_freq <= 0.0:
high_freq += nyquist
assert (0.0 <= low_freq < nyquist) and (0.0 < high_freq <= nyquist) and (low_freq < high_freq), (
"Bad values in options: low-freq {} and high-freq {} vs. nyquist {}".format(low_freq, high_freq, nyquist)
)
# fft-bin width [think of it as Nyquist-freq / half-window-length]
fft_bin_width = sample_freq / window_length_padded
mel_low_freq = mel_scale_scalar(low_freq)
mel_high_freq = mel_scale_scalar(high_freq)
# divide by num_bins+1 in next line because of end-effects where the bins
# spread out to the sides.
mel_freq_delta = (mel_high_freq - mel_low_freq) / (num_bins + 1)
if vtln_high < 0.0:
vtln_high += nyquist
assert vtln_warp_factor == 1.0 or (
(low_freq < vtln_low < high_freq) and (0.0 < vtln_high < high_freq) and (vtln_low < vtln_high)
), "Bad values in options: vtln-low {} and vtln-high {}, versus low-freq {} and high-freq {}".format(
vtln_low, vtln_high, low_freq, high_freq
)
bin = torch.arange(num_bins).unsqueeze(1)
left_mel = mel_low_freq + bin * mel_freq_delta # size(num_bins, 1)
center_mel = mel_low_freq + (bin + 1.0) * mel_freq_delta # size(num_bins, 1)
right_mel = mel_low_freq + (bin + 2.0) * mel_freq_delta # size(num_bins, 1)
if vtln_warp_factor != 1.0:
left_mel = vtln_warp_mel_freq(vtln_low, vtln_high, low_freq, high_freq, vtln_warp_factor, left_mel)
center_mel = vtln_warp_mel_freq(vtln_low, vtln_high, low_freq, high_freq, vtln_warp_factor, center_mel)
right_mel = vtln_warp_mel_freq(vtln_low, vtln_high, low_freq, high_freq, vtln_warp_factor, right_mel)
# center_freqs = inverse_mel_scale(center_mel) # size (num_bins)
# size(1, num_fft_bins)
mel = mel_scale(fft_bin_width * torch.arange(num_fft_bins)).unsqueeze(0)
# size (num_bins, num_fft_bins)
up_slope = (mel - left_mel) / (center_mel - left_mel)
down_slope = (right_mel - mel) / (right_mel - center_mel)
if vtln_warp_factor == 1.0:
# left_mel < center_mel < right_mel so we can min the two slopes and clamp negative values
bins = torch.max(torch.zeros(1), torch.min(up_slope, down_slope))
else:
# warping can move the order of left_mel, center_mel, right_mel anywhere
bins = torch.zeros_like(up_slope)
up_idx = torch.gt(mel, left_mel) & torch.le(mel, center_mel) # left_mel < mel <= center_mel
down_idx = torch.gt(mel, center_mel) & torch.lt(mel, right_mel) # center_mel < mel < right_mel
bins[up_idx] = up_slope[up_idx]
bins[down_idx] = down_slope[down_idx]
return bins.to(device=device, dtype=dtype) # , center_freqs
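# Hedged usage sketch (editorial illustration, not part of the original file):
#   banks = get_mel_banks(23, 512, 16000.0, 20.0, 0.0, 100.0, -500.0, 1.0)
#   banks.shape == (23, 256)  # (num_bins, window_length_padded // 2); high_freq=0.0 maps to Nyquist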
cache = {}
def fbank(
waveform: Tensor,
blackman_coeff: float = 0.42,
channel: int = -1,
dither: float = 0.0,
energy_floor: float = 1.0,
frame_length: float = 25.0,
frame_shift: float = 10.0,
high_freq: float = 0.0,
htk_compat: bool = False,
low_freq: float = 20.0,
min_duration: float = 0.0,
num_mel_bins: int = 23,
preemphasis_coefficient: float = 0.97,
raw_energy: bool = True,
remove_dc_offset: bool = True,
round_to_power_of_two: bool = True,
sample_frequency: float = 16000.0,
snip_edges: bool = True,
subtract_mean: bool = False,
use_energy: bool = False,
use_log_fbank: bool = True,
use_power: bool = True,
vtln_high: float = -500.0,
vtln_low: float = 100.0,
vtln_warp: float = 1.0,
window_type: str = POVEY,
) -> Tensor:
device, dtype = waveform.device, waveform.dtype
waveform, window_shift, window_size, padded_window_size = _get_waveform_and_window_properties(
waveform, channel, sample_frequency, frame_shift, frame_length, round_to_power_of_two, preemphasis_coefficient
)
if len(waveform) < min_duration * sample_frequency:
# signal is too short
return torch.empty(0, device=device, dtype=dtype)
# strided_input, size (m, padded_window_size) and signal_log_energy, size (m)
strided_input, signal_log_energy = _get_window(
waveform,
padded_window_size,
window_size,
window_shift,
window_type,
blackman_coeff,
snip_edges,
raw_energy,
energy_floor,
dither,
remove_dc_offset,
preemphasis_coefficient,
)
# size (m, padded_window_size // 2 + 1)
spectrum = torch.fft.rfft(strided_input).abs()
if use_power:
spectrum = spectrum.pow(2.0)
# size (num_mel_bins, padded_window_size // 2)
# print(num_mel_bins, padded_window_size, sample_frequency, low_freq, high_freq, vtln_low, vtln_high, vtln_warp)
cache_key = "%s-%s-%s-%s-%s-%s-%s-%s-%s-%s" % (
num_mel_bins,
padded_window_size,
sample_frequency,
low_freq,
high_freq,
vtln_low,
vtln_high,
vtln_warp,
device,
dtype,
)
if cache_key not in cache:
mel_energies = get_mel_banks(
num_mel_bins,
padded_window_size,
sample_frequency,
low_freq,
high_freq,
vtln_low,
vtln_high,
vtln_warp,
device,
dtype,
)
cache[cache_key] = mel_energies
else:
mel_energies = cache[cache_key]
# pad right column with zeros and add dimension, size (num_mel_bins, padded_window_size // 2 + 1)
mel_energies = torch.nn.functional.pad(mel_energies, (0, 1), mode="constant", value=0)
# sum with mel filterbanks over the power spectrum, size (m, num_mel_bins)
mel_energies = torch.mm(spectrum, mel_energies.T)
if use_log_fbank:
# avoid log of zero (which should be prevented anyway by dithering)
mel_energies = torch.max(mel_energies, _get_epsilon(device, dtype)).log()
# if use_energy then add it as the last column for htk_compat == true else first column
if use_energy:
signal_log_energy = signal_log_energy.unsqueeze(1) # size (m, 1)
# returns size (m, num_mel_bins + 1)
if htk_compat:
mel_energies = torch.cat((mel_energies, signal_log_energy), dim=1)
else:
mel_energies = torch.cat((signal_log_energy, mel_energies), dim=1)
mel_energies = _subtract_column_mean(mel_energies, subtract_mean)
return mel_energies
def _get_dct_matrix(num_ceps: int, num_mel_bins: int) -> Tensor:
# returns a dct matrix of size (num_mel_bins, num_ceps)
# size (num_mel_bins, num_mel_bins)
dct_matrix = torchaudio.functional.create_dct(num_mel_bins, num_mel_bins, "ortho")
# kaldi expects the first cepstral to be weighted sum of factor sqrt(1/num_mel_bins)
# this would be the first column in the dct_matrix for torchaudio as it expects a
# right multiply (which would be the first column of the kaldi's dct_matrix as kaldi
# expects a left multiply e.g. dct_matrix * vector).
dct_matrix[:, 0] = math.sqrt(1 / float(num_mel_bins))
dct_matrix = dct_matrix[:, :num_ceps]
return dct_matrix
def _get_lifter_coeffs(num_ceps: int, cepstral_lifter: float) -> Tensor:
# returns size (num_ceps)
# Compute liftering coefficients (scaling on cepstral coeffs)
# coeffs are numbered slightly differently from HTK: the zeroth index is C0, which is not affected.
i = torch.arange(num_ceps)
return 1.0 + 0.5 * cepstral_lifter * torch.sin(math.pi * i / cepstral_lifter)
def mfcc(
waveform: Tensor,
blackman_coeff: float = 0.42,
cepstral_lifter: float = 22.0,
channel: int = -1,
dither: float = 0.0,
energy_floor: float = 1.0,
frame_length: float = 25.0,
frame_shift: float = 10.0,
high_freq: float = 0.0,
htk_compat: bool = False,
low_freq: float = 20.0,
num_ceps: int = 13,
min_duration: float = 0.0,
num_mel_bins: int = 23,
preemphasis_coefficient: float = 0.97,
raw_energy: bool = True,
remove_dc_offset: bool = True,
round_to_power_of_two: bool = True,
sample_frequency: float = 16000.0,
snip_edges: bool = True,
subtract_mean: bool = False,
use_energy: bool = False,
vtln_high: float = -500.0,
vtln_low: float = 100.0,
vtln_warp: float = 1.0,
window_type: str = POVEY,
) -> Tensor:
assert num_ceps <= num_mel_bins, "num_ceps cannot be larger than num_mel_bins: %d vs %d" % (num_ceps, num_mel_bins)
device, dtype = waveform.device, waveform.dtype
# The mel_energies should not be squared (use_power=True), not have mean subtracted
# (subtract_mean=False), and use log (use_log_fbank=True).
# size (m, num_mel_bins + use_energy)
feature = fbank(
waveform=waveform,
blackman_coeff=blackman_coeff,
channel=channel,
dither=dither,
energy_floor=energy_floor,
frame_length=frame_length,
frame_shift=frame_shift,
high_freq=high_freq,
htk_compat=htk_compat,
low_freq=low_freq,
min_duration=min_duration,
num_mel_bins=num_mel_bins,
preemphasis_coefficient=preemphasis_coefficient,
raw_energy=raw_energy,
remove_dc_offset=remove_dc_offset,
round_to_power_of_two=round_to_power_of_two,
sample_frequency=sample_frequency,
snip_edges=snip_edges,
subtract_mean=False,
use_energy=use_energy,
use_log_fbank=True,
use_power=True,
vtln_high=vtln_high,
vtln_low=vtln_low,
vtln_warp=vtln_warp,
window_type=window_type,
)
if use_energy:
# size (m)
signal_log_energy = feature[:, num_mel_bins if htk_compat else 0]
# offset is 0 if htk_compat==True else 1
mel_offset = int(not htk_compat)
feature = feature[:, mel_offset : (num_mel_bins + mel_offset)]
# size (num_mel_bins, num_ceps)
dct_matrix = _get_dct_matrix(num_ceps, num_mel_bins).to(dtype=dtype, device=device)
# size (m, num_ceps)
feature = feature.matmul(dct_matrix)
if cepstral_lifter != 0.0:
# size (1, num_ceps)
lifter_coeffs = _get_lifter_coeffs(num_ceps, cepstral_lifter).unsqueeze(0)
feature *= lifter_coeffs.to(device=device, dtype=dtype)
# if use_energy then replace the last column for htk_compat == true else first column
if use_energy:
feature[:, 0] = signal_log_energy
if htk_compat:
energy = feature[:, 0].unsqueeze(1) # size (m, 1)
feature = feature[:, 1:] # size (m, num_ceps - 1)
if not use_energy:
# scale on C0 (actually removing a scale we previously added that's
# part of one common definition of the cosine transform.)
energy *= math.sqrt(2)
feature = torch.cat((feature, energy), dim=1)
feature = _subtract_column_mean(feature, subtract_mean)
return feature | --- +++ @@ -37,10 +37,25 @@
def _next_power_of_2(x: int) -> int:
+ r"""Returns the smallest power of 2 that is greater than x"""
return 1 if x == 0 else 2 ** (x - 1).bit_length()
def _get_strided(waveform: Tensor, window_size: int, window_shift: int, snip_edges: bool) -> Tensor:
+ r"""Given a waveform (1D tensor of size ``num_samples``), it returns a 2D tensor (m, ``window_size``)
+ representing how the window is shifted along the waveform. Each row is a frame.
+
+ Args:
+ waveform (Tensor): Tensor of size ``num_samples``
+ window_size (int): Frame length
+ window_shift (int): Frame shift
+ snip_edges (bool): If True, end effects will be handled by outputting only frames that completely fit
+ in the file, and the number of frames depends on the frame_length. If False, the number of frames
+ depends only on the frame_shift, and we reflect the data at the ends.
+
+ Returns:
+ Tensor: 2D tensor of size (m, ``window_size``) where each row is a frame
+ """
assert waveform.dim() == 1
num_samples = waveform.size(0)
strides = (window_shift * waveform.stride(0), waveform.stride(0))
@@ -75,6 +90,7 @@ device: torch.device,
dtype: int,
) -> Tensor:
+ r"""Returns a window function with the given type and size"""
if window_type == HANNING:
return torch.hann_window(window_size, periodic=False, device=device, dtype=dtype)
elif window_type == HAMMING:
@@ -98,6 +114,7 @@
def _get_log_energy(strided_input: Tensor, epsilon: Tensor, energy_floor: float) -> Tensor:
+ r"""Returns the log energy of size (m) for a strided_input (m,*)"""
device, dtype = strided_input.device, strided_input.dtype
log_energy = torch.max(strided_input.pow(2).sum(1), epsilon).log() # size (m)
if energy_floor == 0.0:
@@ -114,6 +131,7 @@ round_to_power_of_two: bool,
preemphasis_coefficient: float,
) -> Tuple[Tensor, int, int, int]:
+ r"""Gets the waveform and window properties"""
channel = max(channel, 0)
assert channel < waveform.size(0), "Invalid channel {} for size {}".format(channel, waveform.size(0))
waveform = waveform[channel, :] # size (n)
@@ -147,6 +165,11 @@ remove_dc_offset: bool,
preemphasis_coefficient: float,
) -> Tuple[Tensor, Tensor]:
+ r"""Gets a window and its log energy
+
+ Returns:
+ (Tensor, Tensor): strided_input of size (m, ``padded_window_size``) and signal_log_energy of size (m)
+ """
device, dtype = waveform.device, waveform.dtype
epsilon = _get_epsilon(device, dtype)
@@ -221,6 +244,40 @@ subtract_mean: bool = False,
window_type: str = POVEY,
) -> Tensor:
+ r"""Create a spectrogram from a raw audio signal. This matches the input/output of Kaldi's
+ compute-spectrogram-feats.
+
+ Args:
+ waveform (Tensor): Tensor of audio of size (c, n) where c is in the range [0,2)
+ blackman_coeff (float, optional): Constant coefficient for generalized Blackman window. (Default: ``0.42``)
+ channel (int, optional): Channel to extract (-1 -> expect mono, 0 -> left, 1 -> right) (Default: ``-1``)
+ dither (float, optional): Dithering constant (0.0 means no dither). If you turn this off, you should set
+ the energy_floor option, e.g. to 1.0 or 0.1 (Default: ``0.0``)
+ energy_floor (float, optional): Floor on energy (absolute, not relative) in Spectrogram computation. Caution:
+ this floor is applied to the zeroth component, representing the total signal energy. The floor on the
+ individual spectrogram elements is fixed at std::numeric_limits<float>::epsilon(). (Default: ``1.0``)
+ frame_length (float, optional): Frame length in milliseconds (Default: ``25.0``)
+ frame_shift (float, optional): Frame shift in milliseconds (Default: ``10.0``)
+ min_duration (float, optional): Minimum duration of segments to process (in seconds). (Default: ``0.0``)
+ preemphasis_coefficient (float, optional): Coefficient for use in signal preemphasis (Default: ``0.97``)
+ raw_energy (bool, optional): If True, compute energy before preemphasis and windowing (Default: ``True``)
+ remove_dc_offset (bool, optional): Subtract mean from waveform on each frame (Default: ``True``)
+ round_to_power_of_two (bool, optional): If True, round window size to power of two by zero-padding input
+ to FFT. (Default: ``True``)
+ sample_frequency (float, optional): Waveform data sample frequency (must match the waveform file, if
+ specified there) (Default: ``16000.0``)
+ snip_edges (bool, optional): If True, end effects will be handled by outputting only frames that completely fit
+ in the file, and the number of frames depends on the frame_length. If False, the number of frames
+ depends only on the frame_shift, and we reflect the data at the ends. (Default: ``True``)
+ subtract_mean (bool, optional): Subtract mean of each feature file [CMS]; not recommended to do
+ it this way. (Default: ``False``)
+ window_type (str, optional): Type of window ('hamming'|'hanning'|'povey'|'rectangular'|'blackman')
+ (Default: ``'povey'``)
+
+ Returns:
+ Tensor: A spectrogram identical to what Kaldi would output. The shape is
+ (m, ``padded_window_size // 2 + 1``) where m is calculated in _get_strided
+ """
device, dtype = waveform.device, waveform.dtype
epsilon = _get_epsilon(device, dtype)
@@ -282,6 +339,42 @@ vtln_warp_factor: float,
freq: Tensor,
) -> Tensor:
+ r"""This computes a VTLN warping function that is not the same as HTK's one,
+ but has similar inputs (this function has the advantage of never producing
+ empty bins).
+
+ This function computes a warp function F(freq), defined between low_freq
+ and high_freq inclusive, with the following properties:
+ F(low_freq) == low_freq
+ F(high_freq) == high_freq
+ The function is continuous and piecewise linear with two inflection
+ points.
+ The lower inflection point (measured in terms of the unwarped
+ frequency) is at frequency l, determined as described below.
+ The higher inflection point is at a frequency h, determined as
+ described below.
+ If l <= f <= h, then F(f) = f/vtln_warp_factor.
+ If the higher inflection point (measured in terms of the unwarped
+ frequency) is at h, then max(h, F(h)) == vtln_high_cutoff.
+ Since (by the last point) F(h) == h/vtln_warp_factor, then
+ max(h, h/vtln_warp_factor) == vtln_high_cutoff, so
+ h = vtln_high_cutoff / max(1, 1/vtln_warp_factor).
+ = vtln_high_cutoff * min(1, vtln_warp_factor).
+ If the lower inflection point (measured in terms of the unwarped
+ frequency) is at l, then min(l, F(l)) == vtln_low_cutoff
+ This implies that l = vtln_low_cutoff / min(1, 1/vtln_warp_factor)
+ = vtln_low_cutoff * max(1, vtln_warp_factor)
+ Args:
+ vtln_low_cutoff (float): Lower frequency cutoffs for VTLN
+ vtln_high_cutoff (float): Upper frequency cutoffs for VTLN
+ low_freq (float): Lower frequency cutoffs in mel computation
+ high_freq (float): Upper frequency cutoffs in mel computation
+ vtln_warp_factor (float): Vtln warp factor
+ freq (Tensor): given frequency in Hz
+
+ Returns:
+ Tensor: Freq after vtln warp
+ """
assert vtln_low_cutoff > low_freq, "be sure to set the vtln_low option higher than low_freq"
assert vtln_high_cutoff < high_freq, "be sure to set the vtln_high option lower than high_freq [or negative]"
l = vtln_low_cutoff * max(1.0, vtln_warp_factor)
@@ -321,6 +414,18 @@ vtln_warp_factor: float,
mel_freq: Tensor,
) -> Tensor:
+ r"""
+ Args:
+ vtln_low_cutoff (float): Lower frequency cutoffs for VTLN
+ vtln_high_cutoff (float): Upper frequency cutoffs for VTLN
+ low_freq (float): Lower frequency cutoffs in mel computation
+ high_freq (float): Upper frequency cutoffs in mel computation
+ vtln_warp_factor (float): Vtln warp factor
+ mel_freq (Tensor): Given frequency in Mel
+
+ Returns:
+ Tensor: ``mel_freq`` after vtln warp
+ """
return mel_scale(
vtln_warp_freq(
vtln_low_cutoff, vtln_high_cutoff, low_freq, high_freq, vtln_warp_factor, inverse_mel_scale(mel_freq)
@@ -340,6 +445,12 @@ device=None,
dtype=None,
) -> Tuple[Tensor, Tensor]:
+ """
+ Returns:
+ (Tensor, Tensor): The tuple consists of ``bins`` (which is
+ melbank of size (``num_bins``, ``num_fft_bins``)) and ``center_freqs`` (which is
+ center frequencies of bins of size (``num_bins``)).
+ """
assert num_bins > 3, "Must have at least 3 mel bins"
assert window_length_padded % 2 == 0
num_fft_bins = window_length_padded / 2
@@ -433,6 +544,53 @@ vtln_warp: float = 1.0,
window_type: str = POVEY,
) -> Tensor:
+ r"""Create a fbank from a raw audio signal. This matches the input/output of Kaldi's
+ compute-fbank-feats.
+
+ Args:
+ waveform (Tensor): Tensor of audio of size (c, n) where c is in the range [0,2)
+ blackman_coeff (float, optional): Constant coefficient for generalized Blackman window. (Default: ``0.42``)
+ channel (int, optional): Channel to extract (-1 -> expect mono, 0 -> left, 1 -> right) (Default: ``-1``)
+ dither (float, optional): Dithering constant (0.0 means no dither). If you turn this off, you should set
+ the energy_floor option, e.g. to 1.0 or 0.1 (Default: ``0.0``)
+ energy_floor (float, optional): Floor on energy (absolute, not relative) in Spectrogram computation. Caution:
+ this floor is applied to the zeroth component, representing the total signal energy. The floor on the
+ individual spectrogram elements is fixed at std::numeric_limits<float>::epsilon(). (Default: ``1.0``)
+ frame_length (float, optional): Frame length in milliseconds (Default: ``25.0``)
+ frame_shift (float, optional): Frame shift in milliseconds (Default: ``10.0``)
+ high_freq (float, optional): High cutoff frequency for mel bins (if <= 0, offset from Nyquist)
+ (Default: ``0.0``)
+ htk_compat (bool, optional): If true, put energy last. Warning: not sufficient to get HTK compatible features
+ (need to change other parameters). (Default: ``False``)
+ low_freq (float, optional): Low cutoff frequency for mel bins (Default: ``20.0``)
+ min_duration (float, optional): Minimum duration of segments to process (in seconds). (Default: ``0.0``)
+ num_mel_bins (int, optional): Number of triangular mel-frequency bins (Default: ``23``)
+ preemphasis_coefficient (float, optional): Coefficient for use in signal preemphasis (Default: ``0.97``)
+ raw_energy (bool, optional): If True, compute energy before preemphasis and windowing (Default: ``True``)
+ remove_dc_offset (bool, optional): Subtract mean from waveform on each frame (Default: ``True``)
+ round_to_power_of_two (bool, optional): If True, round window size to power of two by zero-padding input
+ to FFT. (Default: ``True``)
+ sample_frequency (float, optional): Waveform data sample frequency (must match the waveform file, if
+ specified there) (Default: ``16000.0``)
+ snip_edges (bool, optional): If True, end effects will be handled by outputting only frames that completely fit
+ in the file, and the number of frames depends on the frame_length. If False, the number of frames
+ depends only on the frame_shift, and we reflect the data at the ends. (Default: ``True``)
+ subtract_mean (bool, optional): Subtract mean of each feature file [CMS]; not recommended to do
+ it this way. (Default: ``False``)
+ use_energy (bool, optional): Add an extra dimension with energy to the FBANK output. (Default: ``False``)
+ use_log_fbank (bool, optional): If true, produce log-filterbank, else produce linear. (Default: ``True``)
+ use_power (bool, optional): If true, use power, else use magnitude. (Default: ``True``)
+ vtln_high (float, optional): High inflection point in piecewise linear VTLN warping function (if
+ negative, offset from high-mel-freq) (Default: ``-500.0``)
+ vtln_low (float, optional): Low inflection point in piecewise linear VTLN warping function (Default: ``100.0``)
+ vtln_warp (float, optional): Vtln warp factor (only applicable if vtln_map not specified) (Default: ``1.0``)
+ window_type (str, optional): Type of window ('hamming'|'hanning'|'povey'|'rectangular'|'blackman')
+ (Default: ``'povey'``)
+
+ Returns:
+ Tensor: A fbank identical to what Kaldi would output. The shape is (m, ``num_mel_bins + use_energy``)
+ where m is calculated in _get_strided
+ """
device, dtype = waveform.device, waveform.dtype
waveform, window_shift, window_size, padded_window_size = _get_waveform_and_window_properties(
@@ -567,6 +725,53 @@ vtln_warp: float = 1.0,
window_type: str = POVEY,
) -> Tensor:
+ r"""Create a mfcc from a raw audio signal. This matches the input/output of Kaldi's
+ compute-mfcc-feats.
+
+ Args:
+ waveform (Tensor): Tensor of audio of size (c, n) where c is in the range [0,2)
+ blackman_coeff (float, optional): Constant coefficient for generalized Blackman window. (Default: ``0.42``)
+ cepstral_lifter (float, optional): Constant that controls scaling of MFCCs (Default: ``22.0``)
+ channel (int, optional): Channel to extract (-1 -> expect mono, 0 -> left, 1 -> right) (Default: ``-1``)
+ dither (float, optional): Dithering constant (0.0 means no dither). If you turn this off, you should set
+ the energy_floor option, e.g. to 1.0 or 0.1 (Default: ``0.0``)
+ energy_floor (float, optional): Floor on energy (absolute, not relative) in Spectrogram computation. Caution:
+ this floor is applied to the zeroth component, representing the total signal energy. The floor on the
+ individual spectrogram elements is fixed at std::numeric_limits<float>::epsilon(). (Default: ``1.0``)
+ frame_length (float, optional): Frame length in milliseconds (Default: ``25.0``)
+ frame_shift (float, optional): Frame shift in milliseconds (Default: ``10.0``)
+ high_freq (float, optional): High cutoff frequency for mel bins (if <= 0, offset from Nyquist)
+ (Default: ``0.0``)
+ htk_compat (bool, optional): If true, put energy last. Warning: not sufficient to get HTK compatible
+ features (need to change other parameters). (Default: ``False``)
+ low_freq (float, optional): Low cutoff frequency for mel bins (Default: ``20.0``)
+ num_ceps (int, optional): Number of cepstra in MFCC computation (including C0) (Default: ``13``)
+ min_duration (float, optional): Minimum duration of segments to process (in seconds). (Default: ``0.0``)
+ num_mel_bins (int, optional): Number of triangular mel-frequency bins (Default: ``23``)
+ preemphasis_coefficient (float, optional): Coefficient for use in signal preemphasis (Default: ``0.97``)
+ raw_energy (bool, optional): If True, compute energy before preemphasis and windowing (Default: ``True``)
+ remove_dc_offset (bool, optional): Subtract mean from waveform on each frame (Default: ``True``)
+ round_to_power_of_two (bool, optional): If True, round window size to power of two by zero-padding input
+ to FFT. (Default: ``True``)
+ sample_frequency (float, optional): Waveform data sample frequency (must match the waveform file, if
+ specified there) (Default: ``16000.0``)
+ snip_edges (bool, optional): If True, end effects will be handled by outputting only frames that completely fit
+ in the file, and the number of frames depends on the frame_length. If False, the number of frames
+ depends only on the frame_shift, and we reflect the data at the ends. (Default: ``True``)
+ subtract_mean (bool, optional): Subtract mean of each feature file [CMS]; not recommended to do
+ it this way. (Default: ``False``)
+ use_energy (bool, optional): Add an extra dimension with energy to the FBANK output. (Default: ``False``)
+ vtln_high (float, optional): High inflection point in piecewise linear VTLN warping function (if
+ negative, offset from high-mel-freq) (Default: ``-500.0``)
+ vtln_low (float, optional): Low inflection point in piecewise linear VTLN warping function (Default: ``100.0``)
+ vtln_warp (float, optional): Vtln warp factor (only applicable if vtln_map not specified) (Default: ``1.0``)
+ window_type (str, optional): Type of window ('hamming'|'hanning'|'povey'|'rectangular'|'blackman')
+ (Default: ``"povey"``)
+
+ Returns:
+ Tensor: A mfcc identical to what Kaldi would output. The shape is (m, ``num_ceps``)
+ where m is calculated in _get_strided
+ """
assert num_ceps <= num_mel_bins, "num_ceps cannot be larger than num_mel_bins: %d vs %d" % (num_ceps, num_mel_bins)
device, dtype = waveform.device, waveform.dtype
@@ -636,4 +841,4 @@ feature = torch.cat((feature, energy), dim=1)
feature = _subtract_column_mean(feature, subtract_mean)
- return feature
+ return feature
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/eres2net/kaldi.py |
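A quick usage sketch of the three Kaldi-compatible entry points documented in the diff above. Only argument names taken from the docstrings are used; the import path is an assumption, since the module's location depends on how the repository is laid out.

import torch

import kaldi  # hypothetical import path; point this at the kaldi.py above

# One second of mono 16 kHz audio; the docstrings expect shape (c, n)
waveform = torch.randn(1, 16000)

spec = kaldi.spectrogram(waveform, sample_frequency=16000.0)
fb = kaldi.fbank(waveform, num_mel_bins=23, sample_frequency=16000.0)
mf = kaldi.mfcc(waveform, num_ceps=13, num_mel_bins=23, sample_frequency=16000.0)

print(spec.shape)  # (m, padded_window_size // 2 + 1)
print(fb.shape)    # (m, num_mel_bins), plus one column if use_energy=True
print(mf.shape)    # (m, num_ceps)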
Add return value explanations in docstrings | # Copyright (c) 2024 NVIDIA CORPORATION.
# Licensed under the MIT license.
# Adapted from https://github.com/jik876/hifi-gan under the MIT license.
# LICENSE is in incl_licenses directory.
import math
import os
import random
import torch
import torch.utils.data
import numpy as np
import librosa
from librosa.filters import mel as librosa_mel_fn
import pathlib
from tqdm import tqdm
from typing import List, Tuple, Optional
from .env import AttrDict
MAX_WAV_VALUE = 32767.0 # NOTE: 32768.0 - 1 to prevent int16 overflow (results in popping sound in corner cases)
def dynamic_range_compression(x, C=1, clip_val=1e-5):
return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
def dynamic_range_decompression(x, C=1):
return np.exp(x) / C
def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
return torch.log(torch.clamp(x, min=clip_val) * C)
def dynamic_range_decompression_torch(x, C=1):
return torch.exp(x) / C
def spectral_normalize_torch(magnitudes):
return dynamic_range_compression_torch(magnitudes)
def spectral_de_normalize_torch(magnitudes):
return dynamic_range_decompression_torch(magnitudes)
mel_basis_cache = {}
hann_window_cache = {}
def mel_spectrogram(
y: torch.Tensor,
n_fft: int,
num_mels: int,
sampling_rate: int,
hop_size: int,
win_size: int,
fmin: int,
fmax: int = None,
center: bool = False,
) -> torch.Tensor:
if torch.min(y) < -1.0:
print(f"[WARNING] Min value of input waveform signal is {torch.min(y)}")
if torch.max(y) > 1.0:
print(f"[WARNING] Max value of input waveform signal is {torch.max(y)}")
device = y.device
key = f"{n_fft}_{num_mels}_{sampling_rate}_{hop_size}_{win_size}_{fmin}_{fmax}_{device}"
if key not in mel_basis_cache:
mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
mel_basis_cache[key] = torch.from_numpy(mel).float().to(device)
hann_window_cache[key] = torch.hann_window(win_size).to(device)
mel_basis = mel_basis_cache[key]
hann_window = hann_window_cache[key]
padding = (n_fft - hop_size) // 2
y = torch.nn.functional.pad(y.unsqueeze(1), (padding, padding), mode="reflect").squeeze(1)
spec = torch.stft(
y,
n_fft,
hop_length=hop_size,
win_length=win_size,
window=hann_window,
center=center,
pad_mode="reflect",
normalized=False,
onesided=True,
return_complex=True,
)
spec = torch.sqrt(torch.view_as_real(spec).pow(2).sum(-1) + 1e-9)
mel_spec = torch.matmul(mel_basis, spec)
mel_spec = spectral_normalize_torch(mel_spec)
return mel_spec
def get_mel_spectrogram(wav, h):
return mel_spectrogram(
wav,
h.n_fft,
h.num_mels,
h.sampling_rate,
h.hop_size,
h.win_size,
h.fmin,
h.fmax,
)
def get_dataset_filelist(a):
training_files = []
validation_files = []
list_unseen_validation_files = []
with open(a.input_training_file, "r", encoding="utf-8") as fi:
training_files = [
os.path.join(a.input_wavs_dir, x.split("|")[0] + ".wav") for x in fi.read().split("\n") if len(x) > 0
]
print(f"first training file: {training_files[0]}")
with open(a.input_validation_file, "r", encoding="utf-8") as fi:
validation_files = [
os.path.join(a.input_wavs_dir, x.split("|")[0] + ".wav") for x in fi.read().split("\n") if len(x) > 0
]
print(f"first validation file: {validation_files[0]}")
for i in range(len(a.list_input_unseen_validation_file)):
with open(a.list_input_unseen_validation_file[i], "r", encoding="utf-8") as fi:
unseen_validation_files = [
os.path.join(a.list_input_unseen_wavs_dir[i], x.split("|")[0] + ".wav")
for x in fi.read().split("\n")
if len(x) > 0
]
print(f"first unseen {i}th validation fileset: {unseen_validation_files[0]}")
list_unseen_validation_files.append(unseen_validation_files)
return training_files, validation_files, list_unseen_validation_files
class MelDataset(torch.utils.data.Dataset):
def __init__(
self,
training_files: List[str],
hparams: AttrDict,
segment_size: int,
n_fft: int,
num_mels: int,
hop_size: int,
win_size: int,
sampling_rate: int,
fmin: int,
fmax: Optional[int],
split: bool = True,
shuffle: bool = True,
device: str = None,
fmax_loss: Optional[int] = None,
fine_tuning: bool = False,
base_mels_path: str = None,
is_seen: bool = True,
):
self.audio_files = training_files
random.seed(1234)
if shuffle:
random.shuffle(self.audio_files)
self.hparams = hparams
self.is_seen = is_seen
if self.is_seen:
self.name = pathlib.Path(self.audio_files[0]).parts[0]
else:
self.name = "-".join(pathlib.Path(self.audio_files[0]).parts[:2]).strip("/")
self.segment_size = segment_size
self.sampling_rate = sampling_rate
self.split = split
self.n_fft = n_fft
self.num_mels = num_mels
self.hop_size = hop_size
self.win_size = win_size
self.fmin = fmin
self.fmax = fmax
self.fmax_loss = fmax_loss
self.device = device
self.fine_tuning = fine_tuning
self.base_mels_path = base_mels_path
print("[INFO] checking dataset integrity...")
for i in tqdm(range(len(self.audio_files))):
assert os.path.exists(self.audio_files[i]), f"{self.audio_files[i]} not found"
def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor, str, torch.Tensor]:
try:
filename = self.audio_files[index]
# Use librosa.load that ensures loading waveform into mono with [-1, 1] float values
# Audio is ndarray with shape [T_time]. Disable auto-resampling here to minimize overhead
# The on-the-fly resampling during training will be done only for the obtained random chunk
audio, source_sampling_rate = librosa.load(filename, sr=None, mono=True)
# Main logic that uses <mel, audio> pair for training BigVGAN
if not self.fine_tuning:
if self.split: # Training step
# Obtain randomized audio chunk
if source_sampling_rate != self.sampling_rate:
# Adjust segment size to crop if the source sr is different
target_segment_size = math.ceil(self.segment_size * (source_sampling_rate / self.sampling_rate))
else:
target_segment_size = self.segment_size
# Compute upper bound index for the random chunk
random_chunk_upper_bound = max(0, audio.shape[0] - target_segment_size)
# Crop or pad audio to obtain random chunk with target_segment_size
if audio.shape[0] >= target_segment_size:
audio_start = random.randint(0, random_chunk_upper_bound)
audio = audio[audio_start : audio_start + target_segment_size]
else:
audio = np.pad(
audio,
(0, target_segment_size - audio.shape[0]),
mode="constant",
)
# Resample audio chunk to self.sampling rate
if source_sampling_rate != self.sampling_rate:
audio = librosa.resample(
audio,
orig_sr=source_sampling_rate,
target_sr=self.sampling_rate,
)
if audio.shape[0] > self.segment_size:
# trim last elements to match self.segment_size (e.g., 16385 for 44khz downsampled to 24khz -> 16384)
audio = audio[: self.segment_size]
else: # Validation step
# Resample full audio clip to target sampling rate
if source_sampling_rate != self.sampling_rate:
audio = librosa.resample(
audio,
orig_sr=source_sampling_rate,
target_sr=self.sampling_rate,
)
# Trim last elements to match audio length to self.hop_size * n for evaluation
if (audio.shape[0] % self.hop_size) != 0:
audio = audio[: -(audio.shape[0] % self.hop_size)]
# BigVGAN is trained using volume-normalized waveform
audio = librosa.util.normalize(audio) * 0.95
# Cast ndarray to torch tensor
audio = torch.FloatTensor(audio)
audio = audio.unsqueeze(0) # [B(1), self.segment_size]
# Compute mel spectrogram corresponding to audio
mel = mel_spectrogram(
audio,
self.n_fft,
self.num_mels,
self.sampling_rate,
self.hop_size,
self.win_size,
self.fmin,
self.fmax,
center=False,
) # [B(1), self.num_mels, self.segment_size // self.hop_size]
# Fine-tuning logic that uses pre-computed mel. Example: Using TTS model-generated mel as input
else:
# For fine-tuning, assert that the waveform is in the defined sampling_rate
# Fine-tuning won't support on-the-fly resampling to be fool-proof (the dataset should have been prepared properly)
assert source_sampling_rate == self.sampling_rate, (
f"For fine_tuning, waveform must be in the spcified sampling rate {self.sampling_rate}, got {source_sampling_rate}"
)
# Cast ndarray to torch tensor
audio = torch.FloatTensor(audio)
audio = audio.unsqueeze(0) # [B(1), T_time]
# Load pre-computed mel from disk
mel = np.load(
os.path.join(
self.base_mels_path,
os.path.splitext(os.path.split(filename)[-1])[0] + ".npy",
)
)
mel = torch.from_numpy(mel)
if len(mel.shape) < 3:
mel = mel.unsqueeze(0) # ensure [B, C, T]
if self.split:
frames_per_seg = math.ceil(self.segment_size / self.hop_size)
if audio.size(1) >= self.segment_size:
mel_start = random.randint(0, mel.size(2) - frames_per_seg - 1)
mel = mel[:, :, mel_start : mel_start + frames_per_seg]
audio = audio[
:,
mel_start * self.hop_size : (mel_start + frames_per_seg) * self.hop_size,
]
# Pad pre-computed mel and audio to matching lengths so fine-tuning proceeds without error.
# NOTE: this may introduce a single-frame misalignment of the <pre-computed mel, audio>
# To remove possible misalignment, it is recommended to prepare the <pre-computed mel, audio> pair where the audio length is the integer multiple of self.hop_size
mel = torch.nn.functional.pad(mel, (0, frames_per_seg - mel.size(2)), "constant")
audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), "constant")
# Compute mel_loss used by spectral regression objective. Uses self.fmax_loss instead (usually None)
mel_loss = mel_spectrogram(
audio,
self.n_fft,
self.num_mels,
self.sampling_rate,
self.hop_size,
self.win_size,
self.fmin,
self.fmax_loss,
center=False,
) # [B(1), self.num_mels, self.segment_size // self.hop_size]
# Shape sanity checks
assert (
audio.shape[1] == mel.shape[2] * self.hop_size and audio.shape[1] == mel_loss.shape[2] * self.hop_size
), (
f"Audio length must be mel frame length * hop_size. Got audio shape {audio.shape} mel shape {mel.shape} mel_loss shape {mel_loss.shape}"
)
return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze())
# If it encounters error during loading the data, skip this sample and load random other sample to the batch
except Exception as e:
if self.fine_tuning:
raise e # Terminate training if it is fine-tuning. The dataset should have been prepared properly.
else:
print(f"[WARNING] Failed to load waveform, skipping! filename: {filename} Error: {e}")
return self[random.randrange(len(self))]
def __len__(self):
return len(self.audio_files) | --- +++ @@ -59,6 +59,24 @@ fmax: int = None,
center: bool = False,
) -> torch.Tensor:
+ """
+ Calculate the mel spectrogram of an input signal.
+ This function uses slaney norm for the librosa mel filterbank (using librosa.filters.mel) and uses Hann window for STFT (using torch.stft).
+
+ Args:
+ y (torch.Tensor): Input signal.
+ n_fft (int): FFT size.
+ num_mels (int): Number of mel bins.
+ sampling_rate (int): Sampling rate of the input signal.
+ hop_size (int): Hop size for STFT.
+ win_size (int): Window size for STFT.
+ fmin (int): Minimum frequency for mel filterbank.
+ fmax (int): Maximum frequency for mel filterbank. If None, defaults to half the sampling rate (fmax = sr / 2.0) inside librosa_mel_fn
+ center (bool): Whether to pad the input to center the frames. Default is False.
+
+ Returns:
+ torch.Tensor: Mel spectrogram.
+ """
if torch.min(y) < -1.0:
print(f"[WARNING] Min value of input waveform signal is {torch.min(y)}")
if torch.max(y) > 1.0:
@@ -99,6 +117,16 @@
def get_mel_spectrogram(wav, h):
+ """
+ Generate mel spectrogram from a waveform using given hyperparameters.
+
+ Args:
+ wav (torch.Tensor): Input waveform.
+ h: Hyperparameters object with attributes n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax.
+
+ Returns:
+ torch.Tensor: Mel spectrogram.
+ """
return mel_spectrogram(
wav,
h.n_fft,
@@ -339,4 +367,4 @@ return self[random.randrange(len(self))]
def __len__(self):
- return len(self.audio_files)
+ return len(self.audio_files)
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/BigVGAN/meldataset.py |
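A minimal sketch of the two mel entry points whose docstrings were added above. The import paths and the AttrDict construction are assumptions; the hyperparameter values are illustrative, not necessarily the ones any BigVGAN checkpoint was trained with.

import torch

from meldataset import mel_spectrogram, get_mel_spectrogram  # hypothetical path
from env import AttrDict  # wraps a plain dict with attribute access

wav = torch.randn(1, 24000).clamp(-1.0, 1.0)  # [B, T_time], values in [-1, 1]

mel = mel_spectrogram(wav, n_fft=1024, num_mels=100, sampling_rate=24000,
                      hop_size=256, win_size=1024, fmin=0, fmax=None)
print(mel.shape)  # [B, num_mels, n_frames]

h = AttrDict({"n_fft": 1024, "num_mels": 100, "sampling_rate": 24000,
              "hop_size": 256, "win_size": 1024, "fmin": 0, "fmax": None})
assert torch.equal(get_mel_spectrogram(wav, h), mel)  # same computation, two entry points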
Create docstrings for reusable components | # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/data/bucket_sampler.py
# reference: https://github.com/lifeiteng/vall-e
import itertools
import math
import random
from random import shuffle
from typing import Iterator, Optional, TypeVar
import torch
import torch.distributed as dist
from torch.utils.data import Dataset, Sampler
__all__ = [
"DistributedBucketSampler",
]
T_co = TypeVar("T_co", covariant=True)
class DistributedBucketSampler(Sampler[T_co]):
def __init__(
self,
dataset: Dataset,
num_replicas: Optional[int] = None,
rank: Optional[int] = None,
shuffle: bool = True,
seed: int = 0,
drop_last: bool = False,
batch_size: int = 32,
) -> None:
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size() if torch.cuda.is_available() else 1
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank() if torch.cuda.is_available() else 0
if torch.cuda.is_available():
torch.cuda.set_device(rank)
if rank >= num_replicas or rank < 0:
raise ValueError("Invalid rank {}, rank should be in the interval [0, {}]".format(rank, num_replicas - 1))
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.drop_last = drop_last
# If the dataset length is evenly divisible by # of replicas, then there
# is no need to drop any data, since the dataset will be split equally.
if self.drop_last and len(self.dataset) % self.num_replicas != 0: # type: ignore[arg-type]
# Split to nearest available length that is evenly divisible.
# This is to ensure each rank receives the same amount of data when
# using this Sampler.
self.num_samples = math.ceil(
(len(self.dataset) - self.num_replicas) / self.num_replicas, # type: ignore[arg-type]
)
else:
self.num_samples = math.ceil(
len(self.dataset) / self.num_replicas,
) # type: ignore[arg-type]
self.total_size = self.num_samples * self.num_replicas
self.shuffle = shuffle
self.seed = seed
self.batch_size = batch_size
self.id_with_length = self._get_sample_lengths()
self.id_buckets = self.make_buckets(bucket_width=2.0)
def _get_sample_lengths(self):
id_with_lengths = []
for i in range(len(self.dataset)):
id_with_lengths.append((i, self.dataset.get_sample_length(i)))
id_with_lengths.sort(key=lambda x: x[1])
return id_with_lengths
def make_buckets(self, bucket_width: float = 2.0):
buckets = []
cur = []
max_sec = bucket_width
for id, sec in self.id_with_length:
if sec < max_sec:
cur.append(id)
else:
buckets.append(cur)
cur = [id]
max_sec += bucket_width
if len(cur) > 0:
buckets.append(cur)
return buckets
def __iter__(self) -> Iterator[T_co]:
if self.shuffle:
# deterministically shuffle based on epoch and seed
g = torch.Generator()
g.manual_seed(self.seed + self.epoch)
random.seed(self.epoch + self.seed)
shuffled_bucket = []
for buc in self.id_buckets:
buc_copy = buc.copy()
shuffle(buc_copy)
shuffled_bucket.append(buc_copy)
grouped_batch_size = self.batch_size * self.num_replicas
shuffled_bucket = list(itertools.chain(*shuffled_bucket))
n_batch = int(math.ceil(len(shuffled_bucket) / grouped_batch_size))
batches = [shuffled_bucket[b * grouped_batch_size : (b + 1) * grouped_batch_size] for b in range(n_batch)]
shuffle(batches)
indices = list(itertools.chain(*batches))
else:
# type: ignore[arg-type]
indices = list(range(len(self.dataset)))
if not self.drop_last:
# add extra samples to make it evenly divisible
padding_size = self.total_size - len(indices)
if padding_size <= len(indices):
indices += indices[:padding_size]
else:
indices += (indices * math.ceil(padding_size / len(indices)))[:padding_size]
else:
# remove tail of data to make it evenly divisible.
indices = indices[: self.total_size]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank : self.total_size : self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self) -> int:
return self.num_samples
def set_epoch(self, epoch: int) -> None:
self.epoch = epoch | --- +++ @@ -18,6 +18,13 @@
class DistributedBucketSampler(Sampler[T_co]):
+ r"""
+ Sort the dataset by input length, divide samples into duration buckets,
+ shuffle within each bucket, group the result into batches, and shuffle
+ the batches.
+ """
def __init__(
self,
@@ -131,4 +138,12 @@ return self.num_samples
def set_epoch(self, epoch: int) -> None:
- self.epoch = epoch
+ r"""
+ Sets the epoch for this sampler. When :attr:`shuffle=True`, this ensures all replicas
+ use a different random ordering for each epoch. Otherwise, the next iteration of this
+ sampler will yield the same ordering.
+
+ Args:
+ epoch (int): Epoch number.
+ """
+ self.epoch = epoch
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/AR/data/bucket_sampler.py |
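A single-process sketch of the sampler documented above. The import path and the toy dataset are assumptions; the real contract is only that the dataset implements get_sample_length(i), which the sampler calls while building its duration buckets.

import torch
from torch.utils.data import Dataset

from bucket_sampler import DistributedBucketSampler  # hypothetical import path


class ToyDataset(Dataset):
    def __init__(self, seconds):
        self.seconds = seconds  # per-sample duration in seconds

    def __len__(self):
        return len(self.seconds)

    def __getitem__(self, i):
        return torch.zeros(int(self.seconds[i] * 100))

    def get_sample_length(self, i):  # required by DistributedBucketSampler
        return self.seconds[i]


ds = ToyDataset([0.5, 1.2, 3.1, 2.2, 0.9, 4.0, 1.8, 2.9])
sampler = DistributedBucketSampler(ds, num_replicas=1, rank=0, batch_size=4)
for epoch in range(2):
    sampler.set_epoch(epoch)  # deterministic reshuffle each epoch
    print(list(sampler))      # length-bucketed, batch-shuffled indices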
Provide docstrings following PEP 257 | # Copyright 3D-Speaker (https://github.com/alibaba-damo-academy/3D-Speaker). All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import torch
import torch.nn as nn
class TAP(nn.Module):
def __init__(self, **kwargs):
super(TAP, self).__init__()
def forward(self, x):
pooling_mean = x.mean(dim=-1)
# To be compatible with 2D input
pooling_mean = pooling_mean.flatten(start_dim=1)
return pooling_mean
class TSDP(nn.Module):
def __init__(self, **kwargs):
super(TSDP, self).__init__()
def forward(self, x):
# The last dimension is the temporal axis
pooling_std = torch.sqrt(torch.var(x, dim=-1) + 1e-8)
pooling_std = pooling_std.flatten(start_dim=1)
return pooling_std
class TSTP(nn.Module):
def __init__(self, **kwargs):
super(TSTP, self).__init__()
def forward(self, x):
# The last dimension is the temporal axis
pooling_mean = x.mean(dim=-1)
pooling_std = torch.sqrt(torch.var(x, dim=-1) + 1e-8)
pooling_mean = pooling_mean.flatten(start_dim=1)
pooling_std = pooling_std.flatten(start_dim=1)
stats = torch.cat((pooling_mean, pooling_std), 1)
return stats
class ASTP(nn.Module):
def __init__(self, in_dim, bottleneck_dim=128, global_context_att=False):
super(ASTP, self).__init__()
self.global_context_att = global_context_att
# Use Conv1d with stride == 1 rather than Linear, then we don't
# need to transpose inputs.
if global_context_att:
self.linear1 = nn.Conv1d(in_dim * 3, bottleneck_dim, kernel_size=1) # equals W and b in the paper
else:
self.linear1 = nn.Conv1d(in_dim, bottleneck_dim, kernel_size=1) # equals W and b in the paper
self.linear2 = nn.Conv1d(bottleneck_dim, in_dim, kernel_size=1) # equals V and k in the paper
def forward(self, x):
if len(x.shape) == 4:
x = x.reshape(x.shape[0], x.shape[1] * x.shape[2], x.shape[3])
assert len(x.shape) == 3
if self.global_context_att:
context_mean = torch.mean(x, dim=-1, keepdim=True).expand_as(x)
context_std = torch.sqrt(torch.var(x, dim=-1, keepdim=True) + 1e-10).expand_as(x)
x_in = torch.cat((x, context_mean, context_std), dim=1)
else:
x_in = x
# DON'T use ReLU here! ReLU can make convergence difficult.
alpha = torch.tanh(self.linear1(x_in)) # alpha = F.relu(self.linear1(x_in))
alpha = torch.softmax(self.linear2(alpha), dim=2)
mean = torch.sum(alpha * x, dim=2)
var = torch.sum(alpha * (x**2), dim=2) - mean**2
std = torch.sqrt(var.clamp(min=1e-10))
return torch.cat([mean, std], dim=1) | --- +++ @@ -1,12 +1,16 @@ # Copyright 3D-Speaker (https://github.com/alibaba-damo-academy/3D-Speaker). All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
+"""This implementation is adapted from https://github.com/wenet-e2e/wespeaker."""
import torch
import torch.nn as nn
class TAP(nn.Module):
+ """
+ Temporal average pooling, only first-order mean is considered
+ """
def __init__(self, **kwargs):
super(TAP, self).__init__()
@@ -19,6 +23,9 @@
class TSDP(nn.Module):
+ """
+ Temporal standard deviation pooling, only second-order std is considered
+ """
def __init__(self, **kwargs):
super(TSDP, self).__init__()
@@ -31,6 +38,11 @@
class TSTP(nn.Module):
+ """
+ Temporal statistics pooling, concatenate mean and std, which is used in
+ x-vector
+ Comment: simple concatenation can not make full use of both statistics
+ """
def __init__(self, **kwargs):
super(TSTP, self).__init__()
@@ -47,6 +59,9 @@
class ASTP(nn.Module):
+ """Attentive statistics pooling: Channel- and context-dependent
+ statistics pooling, first used in ECAPA_TDNN.
+ """
def __init__(self, in_dim, bottleneck_dim=128, global_context_att=False):
super(ASTP, self).__init__()
@@ -61,6 +76,11 @@ self.linear2 = nn.Conv1d(bottleneck_dim, in_dim, kernel_size=1) # equals V and k in the paper
def forward(self, x):
+ """
+ x: a 3-dimensional tensor in tdnn-based architecture (B,F,T)
+ or a 4-dimensional tensor in resnet architecture (B,C,F,T)
+ 0-dim: batch-dimension, last-dim: time-dimension (frame-dimension)
+ """
if len(x.shape) == 4:
x = x.reshape(x.shape[0], x.shape[1] * x.shape[2], x.shape[3])
assert len(x.shape) == 3
@@ -78,4 +98,4 @@ mean = torch.sum(alpha * x, dim=2)
var = torch.sum(alpha * (x**2), dim=2) - mean**2
std = torch.sqrt(var.clamp(min=1e-10))
- return torch.cat([mean, std], dim=1)
+ return torch.cat([mean, std], dim=1)
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/eres2net/pooling_layers.py |
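A quick shape check for the four pooling layers documented above, assuming the module imports as below. Input follows the (B, F, T) TDNN convention noted in ASTP's forward docstring.

import torch

from pooling_layers import TAP, TSDP, TSTP, ASTP  # hypothetical import path

x = torch.randn(4, 80, 200)  # batch of 4, 80-dim features, 200 frames

print(TAP()(x).shape)            # torch.Size([4, 80])   mean only
print(TSDP()(x).shape)           # torch.Size([4, 80])   std only
print(TSTP()(x).shape)           # torch.Size([4, 160])  mean and std concatenated
print(ASTP(in_dim=80)(x).shape)  # torch.Size([4, 160])  attentive mean and std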
Create docstrings for all classes and functions | import gc
import math
import os
import random
import sys
import time
import traceback
from copy import deepcopy
import torchaudio
from tqdm import tqdm
now_dir = os.getcwd()
sys.path.append(now_dir)
import os
from typing import List, Tuple, Union
import ffmpeg
import librosa
import numpy as np
import torch
import torch.nn.functional as F
import yaml
from AR.models.t2s_lightning_module import Text2SemanticLightningModule
from BigVGAN.bigvgan import BigVGAN
from feature_extractor.cnhubert import CNHubert
from module.mel_processing import mel_spectrogram_torch, spectrogram_torch
from module.models import SynthesizerTrn, SynthesizerTrnV3, Generator
from peft import LoraConfig, get_peft_model
from process_ckpt import get_sovits_version_from_path_fast, load_sovits_new
from transformers import AutoModelForMaskedLM, AutoTokenizer
from tools.audio_sr import AP_BWE
from tools.i18n.i18n import I18nAuto, scan_language_list
from TTS_infer_pack.text_segmentation_method import splits
from TTS_infer_pack.TextPreprocessor import TextPreprocessor
from sv import SV
resample_transform_dict = {}
def resample(audio_tensor, sr0, sr1, device):
global resample_transform_dict
key = "%s-%s-%s" % (sr0, sr1, str(device))
if key not in resample_transform_dict:
resample_transform_dict[key] = torchaudio.transforms.Resample(sr0, sr1).to(device)
return resample_transform_dict[key](audio_tensor)
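# Example: resample() memoizes one torchaudio Resample transform per
# (sr0, sr1, device) triple, so repeated calls reuse the cached kernels:
#   audio_24k = resample(audio_48k, 48000, 24000, torch.device("cpu"))
# (audio_48k here stands for any [channels, samples] float tensor)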
language = os.environ.get("language", "Auto")
language = sys.argv[-1] if sys.argv[-1] in scan_language_list() else language
i18n = I18nAuto(language=language)
spec_min = -12
spec_max = 2
def norm_spec(x):
return (x - spec_min) / (spec_max - spec_min) * 2 - 1
def denorm_spec(x):
return (x + 1) / 2 * (spec_max - spec_min) + spec_min
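# Note: norm_spec and denorm_spec are inverse linear maps between
# [spec_min, spec_max] and [-1, 1], so a round trip recovers the input:
#   assert torch.allclose(denorm_spec(norm_spec(mel)), mel)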
mel_fn = lambda x: mel_spectrogram_torch(
x,
**{
"n_fft": 1024,
"win_size": 1024,
"hop_size": 256,
"num_mels": 100,
"sampling_rate": 24000,
"fmin": 0,
"fmax": None,
"center": False,
},
)
mel_fn_v4 = lambda x: mel_spectrogram_torch(
x,
**{
"n_fft": 1280,
"win_size": 1280,
"hop_size": 320,
"num_mels": 100,
"sampling_rate": 32000,
"fmin": 0,
"fmax": None,
"center": False,
},
)
def speed_change(input_audio: np.ndarray, speed: float, sr: int):
# Convert the NumPy array to a raw PCM byte stream
raw_audio = input_audio.astype(np.int16).tobytes()
# Set up the ffmpeg input stream
input_stream = ffmpeg.input("pipe:", format="s16le", acodec="pcm_s16le", ar=str(sr), ac=1)
# Apply the tempo (speed) change
output_stream = input_stream.filter("atempo", speed)
# Write the output stream to a pipe
out, _ = output_stream.output("pipe:", format="s16le", acodec="pcm_s16le").run(
input=raw_audio, capture_stdout=True, capture_stderr=True
)
# Decode the pipe output back into a NumPy array
processed_audio = np.frombuffer(out, np.int16)
return processed_audio
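# Example (illustrative values): speed_change expects int16-scaled samples,
# and the speed factor must lie in ffmpeg atempo's supported range:
#   pcm16 = (wav_float * 32767).astype(np.int16)  # wav_float assumed in [-1, 1]
#   faster = speed_change(pcm16, 1.25, sr=32000)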
class DictToAttrRecursive(dict):
def __init__(self, input_dict):
super().__init__(input_dict)
for key, value in input_dict.items():
if isinstance(value, dict):
value = DictToAttrRecursive(value)
self[key] = value
setattr(self, key, value)
def __getattr__(self, item):
try:
return self[item]
except KeyError:
raise AttributeError(f"Attribute {item} not found")
def __setattr__(self, key, value):
if isinstance(value, dict):
value = DictToAttrRecursive(value)
super(DictToAttrRecursive, self).__setitem__(key, value)
super().__setattr__(key, value)
def __delattr__(self, item):
try:
del self[item]
except KeyError:
raise AttributeError(f"Attribute {item} not found")
class NO_PROMPT_ERROR(Exception):
pass
# configs/tts_infer.yaml
"""
custom:
bert_base_path: GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large
cnhuhbert_base_path: GPT_SoVITS/pretrained_models/chinese-hubert-base
device: cpu
is_half: false
t2s_weights_path: GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt
vits_weights_path: GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth
version: v2
v1:
bert_base_path: GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large
cnhuhbert_base_path: GPT_SoVITS/pretrained_models/chinese-hubert-base
device: cpu
is_half: false
t2s_weights_path: GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt
vits_weights_path: GPT_SoVITS/pretrained_models/s2G488k.pth
version: v1
v2:
bert_base_path: GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large
cnhuhbert_base_path: GPT_SoVITS/pretrained_models/chinese-hubert-base
device: cpu
is_half: false
t2s_weights_path: GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt
vits_weights_path: GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth
version: v2
v3:
bert_base_path: GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large
cnhuhbert_base_path: GPT_SoVITS/pretrained_models/chinese-hubert-base
device: cpu
is_half: false
t2s_weights_path: GPT_SoVITS/pretrained_models/s1v3.ckpt
vits_weights_path: GPT_SoVITS/pretrained_models/s2Gv3.pth
version: v3
v4:
bert_base_path: GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large
cnhuhbert_base_path: GPT_SoVITS/pretrained_models/chinese-hubert-base
device: cpu
is_half: false
t2s_weights_path: GPT_SoVITS/pretrained_models/s1v3.ckpt
version: v4
vits_weights_path: GPT_SoVITS/pretrained_models/gsv-v4-pretrained/s2Gv4.pth
"""
def set_seed(seed: int):
seed = int(seed)
seed = seed if seed != -1 else random.randint(0, 2**32 - 1)
print(f"Set seed to {seed}")
os.environ["PYTHONHASHSEED"] = str(seed)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
try:
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
# torch.backends.cudnn.enabled = True
# Enabling TF32 affects precision, so it is disabled here
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
except:
pass
return seed
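# Example: set_seed(-1) draws a random seed and returns the value actually
# used, so it can be logged and replayed:
#   used_seed = set_seed(-1)   # log this value
#   set_seed(used_seed)        # reproduces the same RNG initialization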
class TTS_Config:
default_configs = {
"v1": {
"device": "cpu",
"is_half": False,
"version": "v1",
"t2s_weights_path": "GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt",
"vits_weights_path": "GPT_SoVITS/pretrained_models/s2G488k.pth",
"cnhuhbert_base_path": "GPT_SoVITS/pretrained_models/chinese-hubert-base",
"bert_base_path": "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large",
},
"v2": {
"device": "cpu",
"is_half": False,
"version": "v2",
"t2s_weights_path": "GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt",
"vits_weights_path": "GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth",
"cnhuhbert_base_path": "GPT_SoVITS/pretrained_models/chinese-hubert-base",
"bert_base_path": "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large",
},
"v3": {
"device": "cpu",
"is_half": False,
"version": "v3",
"t2s_weights_path": "GPT_SoVITS/pretrained_models/s1v3.ckpt",
"vits_weights_path": "GPT_SoVITS/pretrained_models/s2Gv3.pth",
"cnhuhbert_base_path": "GPT_SoVITS/pretrained_models/chinese-hubert-base",
"bert_base_path": "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large",
},
"v4": {
"device": "cpu",
"is_half": False,
"version": "v4",
"t2s_weights_path": "GPT_SoVITS/pretrained_models/s1v3.ckpt",
"vits_weights_path": "GPT_SoVITS/pretrained_models/gsv-v4-pretrained/s2Gv4.pth",
"cnhuhbert_base_path": "GPT_SoVITS/pretrained_models/chinese-hubert-base",
"bert_base_path": "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large",
},
"v2Pro": {
"device": "cpu",
"is_half": False,
"version": "v2Pro",
"t2s_weights_path": "GPT_SoVITS/pretrained_models/s1v3.ckpt",
"vits_weights_path": "GPT_SoVITS/pretrained_models/v2Pro/s2Gv2Pro.pth",
"cnhuhbert_base_path": "GPT_SoVITS/pretrained_models/chinese-hubert-base",
"bert_base_path": "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large",
},
"v2ProPlus": {
"device": "cpu",
"is_half": False,
"version": "v2ProPlus",
"t2s_weights_path": "GPT_SoVITS/pretrained_models/s1v3.ckpt",
"vits_weights_path": "GPT_SoVITS/pretrained_models/v2Pro/s2Gv2ProPlus.pth",
"cnhuhbert_base_path": "GPT_SoVITS/pretrained_models/chinese-hubert-base",
"bert_base_path": "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large",
},
}
configs: dict = None
v1_languages: list = ["auto", "en", "zh", "ja", "all_zh", "all_ja"]
v2_languages: list = ["auto", "auto_yue", "en", "zh", "ja", "yue", "ko", "all_zh", "all_ja", "all_yue", "all_ko"]
languages: list = v2_languages
mute_tokens: dict = {
"v1" : 486,
"v2" : 486,
"v2Pro": 486,
"v2ProPlus": 486,
"v3" : 486,
"v4" : 486,
}
mute_emb_sim_matrix: torch.Tensor = None
# "all_zh",#全部按中文识别
# "en",#全部按英文识别#######不变
# "all_ja",#全部按日文识别
# "all_yue",#全部按中文识别
# "all_ko",#全部按韩文识别
# "zh",#按中英混合识别####不变
# "ja",#按日英混合识别####不变
# "yue",#按粤英混合识别####不变
# "ko",#按韩英混合识别####不变
# "auto",#多语种启动切分识别语种
# "auto_yue",#多语种启动切分识别语种
def __init__(self, configs: Union[dict, str] = None):
# Set the default config file path
configs_base_path: str = "GPT_SoVITS/configs/"
os.makedirs(configs_base_path, exist_ok=True)
self.configs_path: str = os.path.join(configs_base_path, "tts_infer.yaml")
if configs in ["", None]:
if not os.path.exists(self.configs_path):
self.save_configs()
print(f"Create default config file at {self.configs_path}")
configs: dict = deepcopy(self.default_configs)
if isinstance(configs, str):
self.configs_path = configs
configs: dict = self._load_configs(self.configs_path)
assert isinstance(configs, dict)
configs_ = deepcopy(self.default_configs)
configs_.update(configs)
self.configs: dict = configs_.get("custom", configs_["v2"])
self.default_configs = deepcopy(configs_)
self.device = self.configs.get("device", torch.device("cpu"))
if "cuda" in str(self.device) and not torch.cuda.is_available():
print("Warning: CUDA is not available, set device to CPU.")
self.device = torch.device("cpu")
self.is_half = self.configs.get("is_half", False)
if str(self.device) == "cpu" and self.is_half:
print(f"Warning: Half precision is not supported on CPU, set is_half to False.")
self.is_half = False
version = self.configs.get("version", None)
self.version = version
assert self.version in ["v1", "v2", "v3", "v4", "v2Pro", "v2ProPlus"], "Invalid version!"
self.t2s_weights_path = self.configs.get("t2s_weights_path", None)
self.vits_weights_path = self.configs.get("vits_weights_path", None)
self.bert_base_path = self.configs.get("bert_base_path", None)
self.cnhuhbert_base_path = self.configs.get("cnhuhbert_base_path", None)
self.languages = self.v1_languages if self.version == "v1" else self.v2_languages
self.use_vocoder: bool = False
if (self.t2s_weights_path in [None, ""]) or (not os.path.exists(self.t2s_weights_path)):
self.t2s_weights_path = self.default_configs[version]["t2s_weights_path"]
print(f"fall back to default t2s_weights_path: {self.t2s_weights_path}")
if (self.vits_weights_path in [None, ""]) or (not os.path.exists(self.vits_weights_path)):
self.vits_weights_path = self.default_configs[version]["vits_weights_path"]
print(f"fall back to default vits_weights_path: {self.vits_weights_path}")
if (self.bert_base_path in [None, ""]) or (not os.path.exists(self.bert_base_path)):
self.bert_base_path = self.default_configs[version]["bert_base_path"]
print(f"fall back to default bert_base_path: {self.bert_base_path}")
if (self.cnhuhbert_base_path in [None, ""]) or (not os.path.exists(self.cnhuhbert_base_path)):
self.cnhuhbert_base_path = self.default_configs[version]["cnhuhbert_base_path"]
print(f"fall back to default cnhuhbert_base_path: {self.cnhuhbert_base_path}")
self.update_configs()
self.max_sec = None
self.hz: int = 50
self.semantic_frame_rate: str = "25hz"
self.segment_size: int = 20480
self.filter_length: int = 2048
self.sampling_rate: int = 32000
self.hop_length: int = 640
self.win_length: int = 2048
self.n_speakers: int = 300
def _load_configs(self, configs_path: str) -> dict:
if os.path.exists(configs_path):
...
else:
print(i18n("路径不存在,使用默认配置"))
self.save_configs(configs_path)
with open(configs_path, "r", encoding="utf-8") as f:
configs = yaml.load(f, Loader=yaml.FullLoader)
return configs
def save_configs(self, configs_path: str = None) -> None:
configs = deepcopy(self.default_configs)
if self.configs is not None:
configs["custom"] = self.update_configs()
if configs_path is None:
configs_path = self.configs_path
with open(configs_path, "w") as f:
yaml.dump(configs, f)
def update_configs(self):
self.config = {
"device": str(self.device),
"is_half": self.is_half,
"version": self.version,
"t2s_weights_path": self.t2s_weights_path,
"vits_weights_path": self.vits_weights_path,
"bert_base_path": self.bert_base_path,
"cnhuhbert_base_path": self.cnhuhbert_base_path,
}
return self.config
def update_version(self, version: str) -> None:
self.version = version
self.languages = self.v1_languages if self.version == "v1" else self.v2_languages
def __str__(self):
self.configs = self.update_configs()
string = "TTS Config".center(100, "-") + "\n"
for k, v in self.configs.items():
string += f"{str(k).ljust(20)}: {str(v)}\n"
string += "-" * 100 + "\n"
return string
def __repr__(self):
return self.__str__()
def __hash__(self):
return hash(self.configs_path)
def __eq__(self, other):
return isinstance(other, TTS_Config) and self.configs_path == other.configs_path
class TTS:
def __init__(self, configs: Union[dict, str, TTS_Config]):
if isinstance(configs, TTS_Config):
self.configs = configs
else:
self.configs: TTS_Config = TTS_Config(configs)
self.t2s_model: Text2SemanticLightningModule = None
self.vits_model: Union[SynthesizerTrn, SynthesizerTrnV3] = None
self.bert_tokenizer: AutoTokenizer = None
self.bert_model: AutoModelForMaskedLM = None
self.cnhuhbert_model: CNHubert = None
self.vocoder = None
self.sr_model: AP_BWE = None
self.sv_model = None
self.sr_model_not_exist: bool = False
self.vocoder_configs: dict = {
"sr": None,
"T_ref": None,
"T_chunk": None,
"upsample_rate": None,
"overlapped_len": None,
}
self._init_models()
self.text_preprocessor: TextPreprocessor = TextPreprocessor(
self.bert_model, self.bert_tokenizer, self.configs.device
)
self.prompt_cache: dict = {
"ref_audio_path": None,
"prompt_semantic": None,
"refer_spec": [],
"prompt_text": None,
"prompt_lang": None,
"phones": None,
"bert_features": None,
"norm_text": None,
"aux_ref_audio_paths": [],
}
self.stop_flag: bool = False
self.precision: torch.dtype = torch.float16 if self.configs.is_half else torch.float32
def _init_models(
self,
):
self.init_t2s_weights(self.configs.t2s_weights_path)
self.init_vits_weights(self.configs.vits_weights_path)
self.init_bert_weights(self.configs.bert_base_path)
self.init_cnhuhbert_weights(self.configs.cnhuhbert_base_path)
# self.enable_half_precision(self.configs.is_half)
def init_cnhuhbert_weights(self, base_path: str):
print(f"Loading CNHuBERT weights from {base_path}")
self.cnhuhbert_model = CNHubert(base_path)
self.cnhuhbert_model = self.cnhuhbert_model.eval()
self.cnhuhbert_model = self.cnhuhbert_model.to(self.configs.device)
if self.configs.is_half and str(self.configs.device) != "cpu":
self.cnhuhbert_model = self.cnhuhbert_model.half()
def init_bert_weights(self, base_path: str):
print(f"Loading BERT weights from {base_path}")
self.bert_tokenizer = AutoTokenizer.from_pretrained(base_path)
self.bert_model = AutoModelForMaskedLM.from_pretrained(base_path)
self.bert_model = self.bert_model.eval()
self.bert_model = self.bert_model.to(self.configs.device)
if self.configs.is_half and str(self.configs.device) != "cpu":
self.bert_model = self.bert_model.half()
def init_vits_weights(self, weights_path: str):
self.configs.vits_weights_path = weights_path
version, model_version, if_lora_v3 = get_sovits_version_from_path_fast(weights_path)
if "Pro" in model_version:
self.init_sv_model()
path_sovits = self.configs.default_configs[model_version]["vits_weights_path"]
if if_lora_v3 and not os.path.exists(path_sovits):
info = path_sovits + i18n("SoVITS %s 底模缺失,无法加载相应 LoRA 权重" % model_version)
raise FileExistsError(info)
# dict_s2 = torch.load(weights_path, map_location=self.configs.device,weights_only=False)
dict_s2 = load_sovits_new(weights_path)
hps = dict_s2["config"]
hps["model"]["semantic_frame_rate"] = "25hz"
if "enc_p.text_embedding.weight" not in dict_s2["weight"]:
hps["model"]["version"] = "v2" # v3model,v2sybomls
elif dict_s2["weight"]["enc_p.text_embedding.weight"].shape[0] == 322:
hps["model"]["version"] = "v1"
else:
hps["model"]["version"] = "v2"
version = hps["model"]["version"]
v3v4set = {"v3", "v4"}
if model_version not in v3v4set:
if "Pro" not in model_version:
model_version = version
else:
hps["model"]["version"] = model_version
else:
hps["model"]["version"] = model_version
self.configs.filter_length = hps["data"]["filter_length"]
self.configs.segment_size = hps["train"]["segment_size"]
self.configs.sampling_rate = hps["data"]["sampling_rate"]
self.configs.hop_length = hps["data"]["hop_length"]
self.configs.win_length = hps["data"]["win_length"]
self.configs.n_speakers = hps["data"]["n_speakers"]
self.configs.semantic_frame_rate = hps["model"]["semantic_frame_rate"]
kwargs = hps["model"]
# print(f"self.configs.sampling_rate:{self.configs.sampling_rate}")
self.configs.update_version(model_version)
# print(f"model_version:{model_version}")
# print(f'hps["model"]["version"]:{hps["model"]["version"]}')
if model_version not in v3v4set:
vits_model = SynthesizerTrn(
self.configs.filter_length // 2 + 1,
self.configs.segment_size // self.configs.hop_length,
n_speakers=self.configs.n_speakers,
**kwargs,
)
self.configs.use_vocoder = False
else:
kwargs["version"] = model_version
vits_model = SynthesizerTrnV3(
self.configs.filter_length // 2 + 1,
self.configs.segment_size // self.configs.hop_length,
n_speakers=self.configs.n_speakers,
**kwargs,
)
self.configs.use_vocoder = True
self.init_vocoder(model_version)
if "pretrained" not in weights_path and hasattr(vits_model, "enc_q"):
del vits_model.enc_q
self.is_v2pro = model_version in {"v2Pro", "v2ProPlus"}
if not if_lora_v3:
print(
f"Loading VITS weights from {weights_path}. {vits_model.load_state_dict(dict_s2['weight'], strict=False)}"
)
else:
print(
f"Loading VITS pretrained weights from {weights_path}. {vits_model.load_state_dict(load_sovits_new(path_sovits)['weight'], strict=False)}"
)
lora_rank = dict_s2["lora_rank"]
lora_config = LoraConfig(
target_modules=["to_k", "to_q", "to_v", "to_out.0"],
r=lora_rank,
lora_alpha=lora_rank,
init_lora_weights=True,
)
vits_model.cfm = get_peft_model(vits_model.cfm, lora_config)
print(
f"Loading LoRA weights from {weights_path}. {vits_model.load_state_dict(dict_s2['weight'], strict=False)}"
)
vits_model.cfm = vits_model.cfm.merge_and_unload()
vits_model = vits_model.to(self.configs.device)
vits_model = vits_model.eval()
self.vits_model = vits_model
if self.configs.is_half and str(self.configs.device) != "cpu":
self.vits_model = self.vits_model.half()
self.configs.save_configs()
def init_t2s_weights(self, weights_path: str):
print(f"Loading Text2Semantic weights from {weights_path}")
self.configs.t2s_weights_path = weights_path
self.configs.save_configs()
self.configs.hz = 50
dict_s1 = torch.load(weights_path, map_location=self.configs.device, weights_only=False)
config = dict_s1["config"]
self.configs.max_sec = config["data"]["max_sec"]
t2s_model = Text2SemanticLightningModule(config, "****", is_train=False)
t2s_model.load_state_dict(dict_s1["weight"])
t2s_model = t2s_model.to(self.configs.device)
t2s_model = t2s_model.eval()
self.t2s_model = t2s_model
if self.configs.is_half and str(self.configs.device) != "cpu":
self.t2s_model = self.t2s_model.half()
codebook = t2s_model.model.ar_audio_embedding.weight.clone()
mute_emb = codebook[self.configs.mute_tokens[self.configs.version]].unsqueeze(0)
sim_matrix = F.cosine_similarity(mute_emb.float(), codebook.float(), dim=-1)
self.configs.mute_emb_sim_matrix = sim_matrix
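# Note: mute_emb_sim_matrix holds, for every entry in the semantic codebook,
# its cosine similarity to this version's "mute" token embedding; its length
# equals the codebook size, and sim_matrix[mute_token] == 1.0 by construction.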
def init_vocoder(self, version: str):
if version == "v3":
if self.vocoder is not None and self.vocoder.__class__.__name__ == "BigVGAN":
return
if self.vocoder is not None:
self.vocoder.cpu()
del self.vocoder
self.empty_cache()
self.vocoder = BigVGAN.from_pretrained(
"%s/GPT_SoVITS/pretrained_models/models--nvidia--bigvgan_v2_24khz_100band_256x" % (now_dir,),
use_cuda_kernel=False,
) # if True, RuntimeError: Ninja is required to load C++ extensions
# remove weight norm in the model and set to eval mode
self.vocoder.remove_weight_norm()
self.vocoder_configs["sr"] = 24000
self.vocoder_configs["T_ref"] = 468
self.vocoder_configs["T_chunk"] = 934
self.vocoder_configs["upsample_rate"] = 256
self.vocoder_configs["overlapped_len"] = 12
elif version == "v4":
if self.vocoder is not None and self.vocoder.__class__.__name__ == "Generator":
return
if self.vocoder is not None:
self.vocoder.cpu()
del self.vocoder
self.empty_cache()
self.vocoder = Generator(
initial_channel=100,
resblock="1",
resblock_kernel_sizes=[3, 7, 11],
resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
upsample_rates=[10, 6, 2, 2, 2],
upsample_initial_channel=512,
upsample_kernel_sizes=[20, 12, 4, 4, 4],
gin_channels=0,
is_bias=True,
)
self.vocoder.remove_weight_norm()
state_dict_g = torch.load(
"%s/GPT_SoVITS/pretrained_models/gsv-v4-pretrained/vocoder.pth" % (now_dir,),
map_location="cpu",
weights_only=False,
)
print("loading vocoder", self.vocoder.load_state_dict(state_dict_g))
self.vocoder_configs["sr"] = 48000
self.vocoder_configs["T_ref"] = 500
self.vocoder_configs["T_chunk"] = 1000
self.vocoder_configs["upsample_rate"] = 480
self.vocoder_configs["overlapped_len"] = 12
self.vocoder = self.vocoder.eval()
if self.configs.is_half:
self.vocoder = self.vocoder.half().to(self.configs.device)
else:
self.vocoder = self.vocoder.to(self.configs.device)
def init_sr_model(self):
if self.sr_model is not None:
return
try:
self.sr_model: AP_BWE = AP_BWE(self.configs.device, DictToAttrRecursive)
self.sr_model_not_exist = False
except FileNotFoundError:
print(i18n("你没有下载超分模型的参数,因此不进行超分。如想超分请先参照教程把文件下载好"))
self.sr_model_not_exist = True
def init_sv_model(self):
if self.sv_model is not None:
return
self.sv_model = SV(self.configs.device, self.configs.is_half)
def enable_half_precision(self, enable: bool = True, save: bool = True):
if str(self.configs.device) == "cpu" and enable:
print("Half precision is not supported on CPU.")
return
self.configs.is_half = enable
self.precision = torch.float16 if enable else torch.float32
if save:
self.configs.save_configs()
if enable:
if self.t2s_model is not None:
self.t2s_model = self.t2s_model.half()
if self.vits_model is not None:
self.vits_model = self.vits_model.half()
if self.bert_model is not None:
self.bert_model = self.bert_model.half()
if self.cnhuhbert_model is not None:
self.cnhuhbert_model = self.cnhuhbert_model.half()
if self.vocoder is not None:
self.vocoder = self.vocoder.half()
else:
if self.t2s_model is not None:
self.t2s_model = self.t2s_model.float()
if self.vits_model is not None:
self.vits_model = self.vits_model.float()
if self.bert_model is not None:
self.bert_model = self.bert_model.float()
if self.cnhuhbert_model is not None:
self.cnhuhbert_model = self.cnhuhbert_model.float()
if self.vocoder is not None:
self.vocoder = self.vocoder.float()
def set_device(self, device: torch.device, save: bool = True):
self.configs.device = device
if save:
self.configs.save_configs()
if self.t2s_model is not None:
self.t2s_model = self.t2s_model.to(device)
if self.vits_model is not None:
self.vits_model = self.vits_model.to(device)
if self.bert_model is not None:
self.bert_model = self.bert_model.to(device)
if self.cnhuhbert_model is not None:
self.cnhuhbert_model = self.cnhuhbert_model.to(device)
if self.vocoder is not None:
self.vocoder = self.vocoder.to(device)
if self.sr_model is not None:
self.sr_model = self.sr_model.to(device)
def set_ref_audio(self, ref_audio_path: str):
self._set_prompt_semantic(ref_audio_path)
self._set_ref_spec(ref_audio_path)
self._set_ref_audio_path(ref_audio_path)
def _set_ref_audio_path(self, ref_audio_path):
self.prompt_cache["ref_audio_path"] = ref_audio_path
def _set_ref_spec(self, ref_audio_path):
spec_audio = self._get_ref_spec(ref_audio_path)
if self.prompt_cache["refer_spec"] in [[], None]:
self.prompt_cache["refer_spec"] = [spec_audio]
else:
self.prompt_cache["refer_spec"][0] = spec_audio
def _get_ref_spec(self, ref_audio_path):
raw_audio, raw_sr = torchaudio.load(ref_audio_path)
raw_audio = raw_audio.to(self.configs.device).float()
self.prompt_cache["raw_audio"] = raw_audio
self.prompt_cache["raw_sr"] = raw_sr
if raw_sr != self.configs.sampling_rate:
audio = raw_audio.to(self.configs.device)
if audio.shape[0] == 2:
audio = audio.mean(0).unsqueeze(0)
audio = resample(audio, raw_sr, self.configs.sampling_rate, self.configs.device)
else:
audio = raw_audio.to(self.configs.device)
if audio.shape[0] == 2:
audio = audio.mean(0).unsqueeze(0)
maxx = audio.abs().max()
if maxx > 1:
audio /= min(2, maxx)
spec = spectrogram_torch(
audio,
self.configs.filter_length,
self.configs.sampling_rate,
self.configs.hop_length,
self.configs.win_length,
center=False,
)
if self.configs.is_half:
spec = spec.half()
        if self.is_v2pro:
audio = resample(audio, self.configs.sampling_rate, 16000, self.configs.device)
if self.configs.is_half:
audio = audio.half()
else:
audio = None
return spec, audio
def _set_prompt_semantic(self, ref_wav_path: str):
zero_wav = np.zeros(
int(self.configs.sampling_rate * 0.3),
dtype=np.float16 if self.configs.is_half else np.float32,
)
with torch.no_grad():
wav16k, sr = librosa.load(ref_wav_path, sr=16000)
if wav16k.shape[0] > 160000 or wav16k.shape[0] < 48000:
raise OSError(i18n("参考音频在3~10秒范围外,请更换!"))
wav16k = torch.from_numpy(wav16k)
zero_wav_torch = torch.from_numpy(zero_wav)
wav16k = wav16k.to(self.configs.device)
zero_wav_torch = zero_wav_torch.to(self.configs.device)
if self.configs.is_half:
wav16k = wav16k.half()
zero_wav_torch = zero_wav_torch.half()
wav16k = torch.cat([wav16k, zero_wav_torch])
hubert_feature = self.cnhuhbert_model.model(wav16k.unsqueeze(0))["last_hidden_state"].transpose(
1, 2
) # .float()
codes = self.vits_model.extract_latent(hubert_feature)
prompt_semantic = codes[0, 0].to(self.configs.device)
self.prompt_cache["prompt_semantic"] = prompt_semantic
def batch_sequences(self, sequences: List[torch.Tensor], axis: int = 0, pad_value: int = 0, max_length: int = None):
seq = sequences[0]
ndim = seq.dim()
if axis < 0:
axis += ndim
dtype: torch.dtype = seq.dtype
pad_value = torch.tensor(pad_value, dtype=dtype)
seq_lengths = [seq.shape[axis] for seq in sequences]
        if max_length is None:
            max_length = max(seq_lengths)
        else:
            max_length = max(max_length, max(seq_lengths))
padded_sequences = []
for seq, length in zip(sequences, seq_lengths):
padding = [0] * axis + [0, max_length - length] + [0] * (ndim - axis - 1)
padded_seq = torch.nn.functional.pad(seq, padding, value=pad_value)
padded_sequences.append(padded_seq)
batch = torch.stack(padded_sequences)
return batch
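    # Illustrative usage sketch (hypothetical values, not a claim about the pipeline):
    # padding two variable-length LongTensors along axis 0 with pad_value=0 stacks
    # them into a (2, 5) batch, e.g.
    #   batch_sequences([torch.ones(3, dtype=torch.long), torch.ones(5, dtype=torch.long)])
    #   -> tensor([[1, 1, 1, 0, 0],
    #              [1, 1, 1, 1, 1]])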
def to_batch(
self,
data: list,
prompt_data: dict = None,
batch_size: int = 5,
threshold: float = 0.75,
split_bucket: bool = True,
device: torch.device = torch.device("cpu"),
precision: torch.dtype = torch.float32,
):
_data: list = []
index_and_len_list = []
for idx, item in enumerate(data):
norm_text_len = len(item["norm_text"])
index_and_len_list.append([idx, norm_text_len])
batch_index_list = []
if split_bucket:
index_and_len_list.sort(key=lambda x: x[1])
index_and_len_list = np.array(index_and_len_list, dtype=np.int64)
batch_index_list_len = 0
pos = 0
while pos < index_and_len_list.shape[0]:
# batch_index_list.append(index_and_len_list[pos:min(pos+batch_size,len(index_and_len_list))])
pos_end = min(pos + batch_size, index_and_len_list.shape[0])
while pos < pos_end:
batch = index_and_len_list[pos:pos_end, 1].astype(np.float32)
score = batch[(pos_end - pos) // 2] / (batch.mean() + 1e-8)
if (score >= threshold) or (pos_end - pos == 1):
batch_index = index_and_len_list[pos:pos_end, 0].tolist()
batch_index_list_len += len(batch_index)
batch_index_list.append(batch_index)
pos = pos_end
break
pos_end = pos_end - 1
assert batch_index_list_len == len(data)
else:
for i in range(len(data)):
if i % batch_size == 0:
batch_index_list.append([])
batch_index_list[-1].append(i)
for batch_idx, index_list in enumerate(batch_index_list):
item_list = [data[idx] for idx in index_list]
phones_list = []
phones_len_list = []
# bert_features_list = []
all_phones_list = []
all_phones_len_list = []
all_bert_features_list = []
norm_text_batch = []
all_bert_max_len = 0
all_phones_max_len = 0
for item in item_list:
if prompt_data is not None:
all_bert_features = torch.cat([prompt_data["bert_features"], item["bert_features"]], 1).to(
dtype=precision, device=device
)
all_phones = torch.LongTensor(prompt_data["phones"] + item["phones"]).to(device)
phones = torch.LongTensor(item["phones"]).to(device)
# norm_text = prompt_data["norm_text"]+item["norm_text"]
else:
all_bert_features = item["bert_features"].to(dtype=precision, device=device)
phones = torch.LongTensor(item["phones"]).to(device)
all_phones = phones
# norm_text = item["norm_text"]
all_bert_max_len = max(all_bert_max_len, all_bert_features.shape[-1])
all_phones_max_len = max(all_phones_max_len, all_phones.shape[-1])
phones_list.append(phones)
phones_len_list.append(phones.shape[-1])
all_phones_list.append(all_phones)
all_phones_len_list.append(all_phones.shape[-1])
all_bert_features_list.append(all_bert_features)
norm_text_batch.append(item["norm_text"])
phones_batch = phones_list
all_phones_batch = all_phones_list
all_bert_features_batch = all_bert_features_list
max_len = max(all_bert_max_len, all_phones_max_len)
# phones_batch = self.batch_sequences(phones_list, axis=0, pad_value=0, max_length=max_len)
            #### Pad phones and bert_features directly. (The padding strategy affects what the T2S model generates, but does not directly affect the repetition probability; the masking strategy is the main factor behind repetition.)
# all_phones_batch = self.batch_sequences(all_phones_list, axis=0, pad_value=0, max_length=max_len)
# all_bert_features_batch = all_bert_features_list
# all_bert_features_batch = torch.zeros((len(all_bert_features_list), 1024, max_len), dtype=precision, device=device)
# for idx, item in enumerate(all_bert_features_list):
# all_bert_features_batch[idx, :, : item.shape[-1]] = item
            # #### Alternatively, embed the phones and project the bert_features first, then pad to the same length. (Same note as above: padding affects generation, while masking drives the repetition probability.)
# all_phones_list = [self.t2s_model.model.ar_text_embedding(item.to(self.t2s_model.device)) for item in all_phones_list]
# all_phones_list = [F.pad(item,(0,0,0,max_len-item.shape[0]),value=0) for item in all_phones_list]
# all_phones_batch = torch.stack(all_phones_list, dim=0)
# all_bert_features_list = [self.t2s_model.model.bert_proj(item.to(self.t2s_model.device).transpose(0, 1)) for item in all_bert_features_list]
# all_bert_features_list = [F.pad(item,(0,0,0,max_len-item.shape[0]), value=0) for item in all_bert_features_list]
# all_bert_features_batch = torch.stack(all_bert_features_list, dim=0)
batch = {
"phones": phones_batch,
"phones_len": torch.LongTensor(phones_len_list).to(device),
"all_phones": all_phones_batch,
"all_phones_len": torch.LongTensor(all_phones_len_list).to(device),
"all_bert_features": all_bert_features_batch,
"norm_text": norm_text_batch,
"max_len": max_len,
}
_data.append(batch)
return _data, batch_index_list
def recovery_order(self, data: list, batch_index_list: list) -> list:
length = len(sum(batch_index_list, []))
_data = [None] * length
for i, index_list in enumerate(batch_index_list):
for j, index in enumerate(index_list):
_data[index] = data[i][j]
return _data
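    # Minimal sketch of the reordering (illustration only, hypothetical values): with
    # batch_index_list = [[2, 0], [1]] and data = [["c", "a"], ["b"]],
    # recovery_order(data, batch_index_list) returns ["a", "b", "c"],
    # writing each item back to the position it held before bucket sorting.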
def stop(
self,
):
self.stop_flag = True
@torch.no_grad()
def run(self, inputs: dict):
########## variables initialization ###########
self.stop_flag: bool = False
text: str = inputs.get("text", "")
text_lang: str = inputs.get("text_lang", "")
ref_audio_path: str = inputs.get("ref_audio_path", "")
aux_ref_audio_paths: list = inputs.get("aux_ref_audio_paths", [])
prompt_text: str = inputs.get("prompt_text", "")
prompt_lang: str = inputs.get("prompt_lang", "")
top_k: int = inputs.get("top_k", 15)
top_p: float = inputs.get("top_p", 1)
temperature: float = inputs.get("temperature", 1)
text_split_method: str = inputs.get("text_split_method", "cut1")
batch_size = inputs.get("batch_size", 1)
batch_threshold = inputs.get("batch_threshold", 0.75)
speed_factor = inputs.get("speed_factor", 1.0)
split_bucket = inputs.get("split_bucket", True)
return_fragment = inputs.get("return_fragment", False)
fragment_interval = inputs.get("fragment_interval", 0.3)
seed = inputs.get("seed", -1)
seed = -1 if seed in ["", None] else seed
actual_seed = set_seed(seed)
parallel_infer = inputs.get("parallel_infer", True)
repetition_penalty = inputs.get("repetition_penalty", 1.35)
sample_steps = inputs.get("sample_steps", 32)
super_sampling = inputs.get("super_sampling", False)
streaming_mode = inputs.get("streaming_mode", False)
overlap_length = inputs.get("overlap_length", 2)
min_chunk_length = inputs.get("min_chunk_length", 16)
fixed_length_chunk = inputs.get("fixed_length_chunk", False)
        chunk_split_thershold = 0.0  # cosine-similarity threshold between a semantic token and the mute token; values above it count as valid split points.
if parallel_infer and not streaming_mode:
print(i18n("并行推理模式已开启"))
self.t2s_model.model.infer_panel = self.t2s_model.model.infer_panel_batch_infer
elif not parallel_infer and streaming_mode and not self.configs.use_vocoder:
print(i18n("流式推理模式已开启"))
self.t2s_model.model.infer_panel = self.t2s_model.model.infer_panel_naive
elif streaming_mode and self.configs.use_vocoder:
print(i18n("SoVits V3/4模型不支持流式推理模式,已自动回退到分段返回模式"))
streaming_mode = False
return_fragment = True
if parallel_infer:
self.t2s_model.model.infer_panel = self.t2s_model.model.infer_panel_batch_infer
else:
self.t2s_model.model.infer_panel = self.t2s_model.model.infer_panel_naive_batched
# self.t2s_model.model.infer_panel = self.t2s_model.model.infer_panel_naive
elif parallel_infer and streaming_mode:
print(i18n("不支持同时开启并行推理和流式推理模式,已自动关闭并行推理模式"))
parallel_infer = False
self.t2s_model.model.infer_panel = self.t2s_model.model.infer_panel_naive
else:
print(i18n("朴素推理模式已开启"))
self.t2s_model.model.infer_panel = self.t2s_model.model.infer_panel_naive_batched
if return_fragment and streaming_mode:
print(i18n("流式推理模式不支持分段返回,已自动关闭分段返回"))
return_fragment = False
if (return_fragment or streaming_mode) and split_bucket:
print(i18n("分段返回模式/流式推理模式不支持分桶处理,已自动关闭分桶处理"))
split_bucket = False
if split_bucket and speed_factor == 1.0 and not (self.configs.use_vocoder and parallel_infer):
print(i18n("分桶处理模式已开启"))
elif speed_factor != 1.0:
print(i18n("语速调节不支持分桶处理,已自动关闭分桶处理"))
split_bucket = False
elif self.configs.use_vocoder and parallel_infer:
print(i18n("当开启并行推理模式时,SoVits V3/4模型不支持分桶处理,已自动关闭分桶处理"))
split_bucket = False
else:
print(i18n("分桶处理模式已关闭"))
# if fragment_interval < 0.01:
# fragment_interval = 0.01
# print(i18n("分段间隔过小,已自动设置为0.01"))
no_prompt_text = False
if prompt_text in [None, ""]:
no_prompt_text = True
assert text_lang in self.configs.languages
if not no_prompt_text:
assert prompt_lang in self.configs.languages
if no_prompt_text and self.configs.use_vocoder:
raise NO_PROMPT_ERROR("prompt_text cannot be empty when using SoVITS_V3")
if ref_audio_path in [None, ""] and (
(self.prompt_cache["prompt_semantic"] is None) or (self.prompt_cache["refer_spec"] in [None, []])
):
raise ValueError(
"ref_audio_path cannot be empty, when the reference audio is not set using set_ref_audio()"
)
###### setting reference audio and prompt text preprocessing ########
t0 = time.perf_counter()
if (ref_audio_path is not None) and (
ref_audio_path != self.prompt_cache["ref_audio_path"]
or (self.is_v2pro and self.prompt_cache["refer_spec"][0][1] is None)
):
if not os.path.exists(ref_audio_path):
                raise ValueError(f"{ref_audio_path} does not exist")
self.set_ref_audio(ref_audio_path)
aux_ref_audio_paths = aux_ref_audio_paths if aux_ref_audio_paths is not None else []
paths = set(aux_ref_audio_paths) & set(self.prompt_cache["aux_ref_audio_paths"])
if not (len(list(paths)) == len(aux_ref_audio_paths) == len(self.prompt_cache["aux_ref_audio_paths"])):
self.prompt_cache["aux_ref_audio_paths"] = aux_ref_audio_paths
self.prompt_cache["refer_spec"] = [self.prompt_cache["refer_spec"][0]]
for path in aux_ref_audio_paths:
if path in [None, ""]:
continue
if not os.path.exists(path):
print(i18n("音频文件不存在,跳过:"), path)
continue
self.prompt_cache["refer_spec"].append(self._get_ref_spec(path))
if not no_prompt_text:
prompt_text = prompt_text.strip("\n")
if prompt_text[-1] not in splits:
prompt_text += "。" if prompt_lang != "en" else "."
print(i18n("实际输入的参考文本:"), prompt_text)
if self.prompt_cache["prompt_text"] != prompt_text:
phones, bert_features, norm_text = self.text_preprocessor.segment_and_extract_feature_for_text(
prompt_text, prompt_lang, self.configs.version
)
self.prompt_cache["prompt_text"] = prompt_text
self.prompt_cache["prompt_lang"] = prompt_lang
self.prompt_cache["phones"] = phones
self.prompt_cache["bert_features"] = bert_features
self.prompt_cache["norm_text"] = norm_text
###### text preprocessing ########
t1 = time.perf_counter()
data: list = None
if not (return_fragment or streaming_mode):
data = self.text_preprocessor.preprocess(text, text_lang, text_split_method, self.configs.version)
if len(data) == 0:
yield 16000, np.zeros(int(16000), dtype=np.int16)
return
batch_index_list: list = None
data, batch_index_list = self.to_batch(
data,
prompt_data=self.prompt_cache if not no_prompt_text else None,
batch_size=batch_size,
threshold=batch_threshold,
split_bucket=split_bucket,
device=self.configs.device,
precision=self.precision,
)
else:
print(f"############ {i18n('切分文本')} ############")
texts = self.text_preprocessor.pre_seg_text(text, text_lang, text_split_method)
data = []
for i in range(len(texts)):
if i % batch_size == 0:
data.append([])
data[-1].append(texts[i])
def make_batch(batch_texts):
batch_data = []
print(f"############ {i18n('提取文本Bert特征')} ############")
for text in tqdm(batch_texts):
phones, bert_features, norm_text = self.text_preprocessor.segment_and_extract_feature_for_text(
text, text_lang, self.configs.version
)
if phones is None:
continue
res = {
"phones": phones,
"bert_features": bert_features,
"norm_text": norm_text,
}
batch_data.append(res)
if len(batch_data) == 0:
return None
batch, _ = self.to_batch(
batch_data,
prompt_data=self.prompt_cache if not no_prompt_text else None,
batch_size=batch_size,
threshold=batch_threshold,
split_bucket=False,
device=self.configs.device,
precision=self.precision,
)
return batch[0]
t2 = time.perf_counter()
try:
            print("############ Inference ############")
###### inference ######
t_34 = 0.0
t_45 = 0.0
audio = []
is_first_package = True
output_sr = self.configs.sampling_rate if not self.configs.use_vocoder else self.vocoder_configs["sr"]
for item in data:
t3 = time.perf_counter()
if return_fragment or streaming_mode:
item = make_batch(item)
if item is None:
continue
batch_phones: List[torch.LongTensor] = item["phones"]
# batch_phones:torch.LongTensor = item["phones"]
batch_phones_len: torch.LongTensor = item["phones_len"]
all_phoneme_ids: torch.LongTensor = item["all_phones"]
all_phoneme_lens: torch.LongTensor = item["all_phones_len"]
all_bert_features: torch.LongTensor = item["all_bert_features"]
norm_text: str = item["norm_text"]
max_len = item["max_len"]
print(i18n("前端处理后的文本(每句):"), norm_text)
if no_prompt_text:
prompt = None
else:
prompt = (
self.prompt_cache["prompt_semantic"].expand(len(all_phoneme_ids), -1).to(self.configs.device)
)
refer_audio_spec = []
sv_emb = [] if self.is_v2pro else None
for spec, audio_tensor in self.prompt_cache["refer_spec"]:
spec = spec.to(dtype=self.precision, device=self.configs.device)
refer_audio_spec.append(spec)
if self.is_v2pro:
sv_emb.append(self.sv_model.compute_embedding3(audio_tensor))
if not streaming_mode:
print(f"############ {i18n('预测语义Token')} ############")
pred_semantic_list, idx_list = self.t2s_model.model.infer_panel(
all_phoneme_ids,
all_phoneme_lens,
prompt,
all_bert_features,
# prompt_phone_len=ph_offset,
top_k=top_k,
top_p=top_p,
temperature=temperature,
early_stop_num=self.configs.hz * self.configs.max_sec,
max_len=max_len,
repetition_penalty=repetition_penalty,
)
t4 = time.perf_counter()
t_34 += t4 - t3
batch_audio_fragment = []
                    # ## VITS parallel inference, method 1
# pred_semantic_list = [item[-idx:] for item, idx in zip(pred_semantic_list, idx_list)]
# pred_semantic_len = torch.LongTensor([item.shape[0] for item in pred_semantic_list]).to(self.configs.device)
# pred_semantic = self.batch_sequences(pred_semantic_list, axis=0, pad_value=0).unsqueeze(0)
# max_len = 0
# for i in range(0, len(batch_phones)):
# max_len = max(max_len, batch_phones[i].shape[-1])
# batch_phones = self.batch_sequences(batch_phones, axis=0, pad_value=0, max_length=max_len)
# batch_phones = batch_phones.to(self.configs.device)
# batch_audio_fragment = (self.vits_model.batched_decode(
# pred_semantic, pred_semantic_len, batch_phones, batch_phones_len,refer_audio_spec
# ))
print(f"############ {i18n('合成音频')} ############")
if not self.configs.use_vocoder:
if speed_factor == 1.0:
print(f"{i18n('并行合成中')}...")
                            # ## VITS parallel inference, method 2
pred_semantic_list = [item[-idx:] for item, idx in zip(pred_semantic_list, idx_list)]
upsample_rate = math.prod(self.vits_model.upsample_rates)
audio_frag_idx = [
pred_semantic_list[i].shape[0] * 2 * upsample_rate
for i in range(0, len(pred_semantic_list))
]
audio_frag_end_idx = [sum(audio_frag_idx[: i + 1]) for i in range(0, len(audio_frag_idx))]
all_pred_semantic = (
torch.cat(pred_semantic_list).unsqueeze(0).unsqueeze(0).to(self.configs.device)
)
_batch_phones = torch.cat(batch_phones).unsqueeze(0).to(self.configs.device)
_batch_audio_fragment = self.vits_model.decode(
all_pred_semantic, _batch_phones, refer_audio_spec, speed=speed_factor, sv_emb=sv_emb
).detach()[0, 0, :]
audio_frag_end_idx.insert(0, 0)
batch_audio_fragment = [
_batch_audio_fragment[audio_frag_end_idx[i - 1] : audio_frag_end_idx[i]]
for i in range(1, len(audio_frag_end_idx))
]
else:
                            # ## VITS sequential inference
for i, idx in enumerate(tqdm(idx_list)):
phones = batch_phones[i].unsqueeze(0).to(self.configs.device)
_pred_semantic = (
pred_semantic_list[i][-idx:].unsqueeze(0).unsqueeze(0)
                                )  # .unsqueeze(0)  # mq needs one extra unsqueeze
audio_fragment = self.vits_model.decode(
_pred_semantic, phones, refer_audio_spec, speed=speed_factor, sv_emb=sv_emb
).detach()[0, 0, :]
                                batch_audio_fragment.append(audio_fragment)  ### try reconstructing without the prompt part
else:
if parallel_infer:
print(f"{i18n('并行合成中')}...")
audio_fragments = self.using_vocoder_synthesis_batched_infer(
idx_list, pred_semantic_list, batch_phones, speed=speed_factor, sample_steps=sample_steps
)
batch_audio_fragment.extend(audio_fragments)
else:
for i, idx in enumerate(tqdm(idx_list)):
phones = batch_phones[i].unsqueeze(0).to(self.configs.device)
_pred_semantic = (
pred_semantic_list[i][-idx:].unsqueeze(0).unsqueeze(0)
                                )  # .unsqueeze(0)  # mq needs one extra unsqueeze
audio_fragment = self.using_vocoder_synthesis(
_pred_semantic, phones, speed=speed_factor, sample_steps=sample_steps
)
batch_audio_fragment.append(audio_fragment)
else:
# refer_audio_spec: torch.Tensor = [
# item.to(dtype=self.precision, device=self.configs.device)
# for item in self.prompt_cache["refer_spec"]
# ]
                    semantic_token_generator = self.t2s_model.model.infer_panel(
all_phoneme_ids[0].unsqueeze(0),
all_phoneme_lens,
prompt,
all_bert_features[0].unsqueeze(0),
top_k=top_k,
top_p=top_p,
temperature=temperature,
early_stop_num=self.configs.hz * self.configs.max_sec,
max_len=max_len,
repetition_penalty=repetition_penalty,
streaming_mode=True,
chunk_length=min_chunk_length,
mute_emb_sim_matrix=self.configs.mute_emb_sim_matrix if not fixed_length_chunk else None,
chunk_split_thershold=chunk_split_thershold,
)
t4 = time.perf_counter()
t_34 += t4 - t3
phones = batch_phones[0].unsqueeze(0).to(self.configs.device)
is_first_chunk = True
if not self.configs.use_vocoder:
# if speed_factor == 1.0:
# upsample_rate = math.prod(self.vits_model.upsample_rates)*(2 if self.vits_model.semantic_frame_rate == "25hz" else 1)
# else:
                        upsample_rate = math.prod(self.vits_model.upsample_rates) * ((2 if self.vits_model.semantic_frame_rate == "25hz" else 1) / speed_factor)
else:
# if speed_factor == 1.0:
# upsample_rate = self.vocoder_configs["upsample_rate"]*(3.875 if self.configs.version == "v3" else 4)
# else:
                        upsample_rate = self.vocoder_configs["upsample_rate"] * ((3.875 if self.configs.version == "v3" else 4) / speed_factor)
last_audio_chunk = None
# last_tokens = None
last_latent = None
previous_tokens = []
overlap_len = overlap_length
                    overlap_size = math.ceil(overlap_length * upsample_rate)
for semantic_tokens, is_final in semantic_token_generator:
if semantic_tokens is None and last_audio_chunk is not None:
yield self.audio_postprocess(
[[last_audio_chunk[-overlap_size:]]],
output_sr,
None,
speed_factor,
False,
0.0,
super_sampling if self.configs.use_vocoder and self.configs.version == "v3" else False,
)
break
_semantic_tokens = semantic_tokens
print(f"semantic_tokens shape:{semantic_tokens.shape}")
previous_tokens.append(semantic_tokens)
_semantic_tokens = torch.cat(previous_tokens, dim=-1)
if not is_first_chunk and semantic_tokens.shape[-1] < 10:
                            overlap_len = overlap_length + (10 - semantic_tokens.shape[-1])
else:
overlap_len = overlap_length
if not self.configs.use_vocoder:
token_padding_length = 0
# token_padding_length = int(phones.shape[-1]*2)-_semantic_tokens.shape[-1]
# if token_padding_length>0:
# _semantic_tokens = F.pad(_semantic_tokens, (0, token_padding_length), "constant", 486)
# else:
# token_padding_length = 0
audio_chunk, latent, latent_mask = self.vits_model.decode_streaming(
_semantic_tokens.unsqueeze(0),
phones, refer_audio_spec,
speed=speed_factor,
sv_emb=sv_emb,
                                result_length=semantic_tokens.shape[-1] + overlap_len if not is_first_chunk else None,
                                overlap_frames=last_latent[:, :, -overlap_len * (2 if self.vits_model.semantic_frame_rate == "25hz" else 1):]
                                if last_latent is not None
                                else None,
                                padding_length=token_padding_length,
)
                            audio_chunk = audio_chunk.detach()[0, 0, :]
else:
raise RuntimeError(i18n("SoVits V3/4模型不支持流式推理模式"))
                        if overlap_len > overlap_length:
                            audio_chunk = audio_chunk[-int((overlap_length + semantic_tokens.shape[-1]) * upsample_rate):]
audio_chunk_ = audio_chunk
if is_first_chunk and not is_final:
is_first_chunk = False
audio_chunk_ = audio_chunk_[:-overlap_size]
elif is_first_chunk and is_final:
is_first_chunk = False
elif not is_first_chunk and not is_final:
audio_chunk_ = self.sola_algorithm([last_audio_chunk, audio_chunk_], overlap_size)
audio_chunk_ = (
audio_chunk_[last_audio_chunk.shape[0]-overlap_size:-overlap_size] if not is_final \
else audio_chunk_[last_audio_chunk.shape[0]-overlap_size:]
)
last_latent = latent
last_audio_chunk = audio_chunk
yield self.audio_postprocess(
[[audio_chunk_]],
output_sr,
None,
speed_factor,
False,
0.0,
super_sampling if self.configs.use_vocoder and self.configs.version == "v3" else False,
)
if is_first_package:
print(f"first_package_delay: {time.perf_counter()-t0:.3f}")
is_first_package = False
yield output_sr, np.zeros(int(output_sr*fragment_interval), dtype=np.int16)
t5 = time.perf_counter()
t_45 += t5 - t4
if return_fragment:
print("%.3f\t%.3f\t%.3f\t%.3f" % (t1 - t0, t2 - t1, t4 - t3, t5 - t4))
yield self.audio_postprocess(
[batch_audio_fragment],
output_sr,
None,
speed_factor,
False,
fragment_interval,
super_sampling if self.configs.use_vocoder and self.configs.version == "v3" else False,
)
elif streaming_mode:...
else:
audio.append(batch_audio_fragment)
if self.stop_flag:
yield output_sr, np.zeros(int(output_sr), dtype=np.int16)
return
if not (return_fragment or streaming_mode):
print("%.3f\t%.3f\t%.3f\t%.3f" % (t1 - t0, t2 - t1, t_34, t_45))
if len(audio) == 0:
yield output_sr, np.zeros(int(output_sr), dtype=np.int16)
return
yield self.audio_postprocess(
audio,
output_sr,
batch_index_list,
speed_factor,
split_bucket,
fragment_interval,
super_sampling if self.configs.use_vocoder and self.configs.version == "v3" else False,
)
except Exception as e:
traceback.print_exc()
            # Must yield an empty audio clip here, otherwise GPU memory is not released.
yield 16000, np.zeros(int(16000), dtype=np.int16)
            # Reset the models, otherwise GPU memory is not fully released.
del self.t2s_model
del self.vits_model
self.t2s_model = None
self.vits_model = None
self.init_t2s_weights(self.configs.t2s_weights_path)
self.init_vits_weights(self.configs.vits_weights_path)
raise e
finally:
self.empty_cache()
def empty_cache(self):
try:
            gc.collect()  # trigger garbage collection so memory does not keep growing
if "cuda" in str(self.configs.device):
torch.cuda.empty_cache()
elif str(self.configs.device) == "mps":
torch.mps.empty_cache()
        except Exception:
pass
def audio_postprocess(
self,
audio: List[torch.Tensor],
sr: int,
batch_index_list: list = None,
speed_factor: float = 1.0,
split_bucket: bool = True,
fragment_interval: float = 0.3,
super_sampling: bool = False,
) -> Tuple[int, np.ndarray]:
        if fragment_interval > 0:
zero_wav = torch.zeros(
int(self.configs.sampling_rate * fragment_interval), dtype=self.precision, device=self.configs.device
)
for i, batch in enumerate(audio):
for j, audio_fragment in enumerate(batch):
                max_audio = torch.abs(audio_fragment).max()  # simple guard against 16-bit clipping
if max_audio > 1:
audio_fragment /= max_audio
                audio_fragment: torch.Tensor = torch.cat([audio_fragment, zero_wav], dim=0) if fragment_interval > 0 else audio_fragment
audio[i][j] = audio_fragment
if split_bucket:
audio = self.recovery_order(audio, batch_index_list)
else:
# audio = [item for batch in audio for item in batch]
audio = sum(audio, [])
audio = torch.cat(audio, dim=0)
if super_sampling:
print(f"############ {i18n('音频超采样')} ############")
t1 = time.perf_counter()
self.init_sr_model()
if not self.sr_model_not_exist:
audio, sr = self.sr_model(audio.unsqueeze(0), sr)
max_audio = np.abs(audio).max()
if max_audio > 1:
audio /= max_audio
audio = (audio * 32768).astype(np.int16)
t2 = time.perf_counter()
                print(f"Super-sampling took {t2 - t1:.3f}s")
else:
# audio = audio.float() * 32768
# audio = audio.to(dtype=torch.int16).clamp(-32768, 32767).cpu().numpy()
audio = audio.cpu().numpy()
audio = (audio * 32768).astype(np.int16)
# try:
# if speed_factor != 1.0:
# audio = speed_change(audio, speed=speed_factor, sr=int(sr))
# except Exception as e:
# print(f"Failed to change speed of audio: \n{e}")
return sr, audio
def using_vocoder_synthesis(
self, semantic_tokens: torch.Tensor, phones: torch.Tensor, speed: float = 1.0, sample_steps: int = 32
):
prompt_semantic_tokens = self.prompt_cache["prompt_semantic"].unsqueeze(0).unsqueeze(0).to(self.configs.device)
prompt_phones = torch.LongTensor(self.prompt_cache["phones"]).unsqueeze(0).to(self.configs.device)
raw_entry = self.prompt_cache["refer_spec"][0]
if isinstance(raw_entry, tuple):
raw_entry = raw_entry[0]
refer_audio_spec = raw_entry.to(dtype=self.precision, device=self.configs.device)
fea_ref, ge = self.vits_model.decode_encp(prompt_semantic_tokens, prompt_phones, refer_audio_spec)
ref_audio: torch.Tensor = self.prompt_cache["raw_audio"]
ref_sr = self.prompt_cache["raw_sr"]
ref_audio = ref_audio.to(self.configs.device).float()
if ref_audio.shape[0] == 2:
ref_audio = ref_audio.mean(0).unsqueeze(0)
# tgt_sr = self.vocoder_configs["sr"]
tgt_sr = 24000 if self.configs.version == "v3" else 32000
if ref_sr != tgt_sr:
ref_audio = resample(ref_audio, ref_sr, tgt_sr, self.configs.device)
mel2 = mel_fn(ref_audio) if self.configs.version == "v3" else mel_fn_v4(ref_audio)
mel2 = norm_spec(mel2)
T_min = min(mel2.shape[2], fea_ref.shape[2])
mel2 = mel2[:, :, :T_min]
fea_ref = fea_ref[:, :, :T_min]
T_ref = self.vocoder_configs["T_ref"]
T_chunk = self.vocoder_configs["T_chunk"]
if T_min > T_ref:
mel2 = mel2[:, :, -T_ref:]
fea_ref = fea_ref[:, :, -T_ref:]
T_min = T_ref
chunk_len = T_chunk - T_min
mel2 = mel2.to(self.precision)
fea_todo, ge = self.vits_model.decode_encp(semantic_tokens, phones, refer_audio_spec, ge, speed)
cfm_resss = []
idx = 0
        while True:
fea_todo_chunk = fea_todo[:, :, idx : idx + chunk_len]
if fea_todo_chunk.shape[-1] == 0:
break
idx += chunk_len
fea = torch.cat([fea_ref, fea_todo_chunk], 2).transpose(2, 1)
cfm_res = self.vits_model.cfm.inference(
fea, torch.LongTensor([fea.size(1)]).to(fea.device), mel2, sample_steps, inference_cfg_rate=0
)
cfm_res = cfm_res[:, :, mel2.shape[2] :]
mel2 = cfm_res[:, :, -T_min:]
fea_ref = fea_todo_chunk[:, :, -T_min:]
cfm_resss.append(cfm_res)
cfm_res = torch.cat(cfm_resss, 2)
cfm_res = denorm_spec(cfm_res)
with torch.inference_mode():
wav_gen = self.vocoder(cfm_res)
audio = wav_gen[0][0] # .cpu().detach().numpy()
return audio
def using_vocoder_synthesis_batched_infer(
self,
idx_list: List[int],
semantic_tokens_list: List[torch.Tensor],
batch_phones: List[torch.Tensor],
speed: float = 1.0,
sample_steps: int = 32,
) -> List[torch.Tensor]:
prompt_semantic_tokens = self.prompt_cache["prompt_semantic"].unsqueeze(0).unsqueeze(0).to(self.configs.device)
prompt_phones = torch.LongTensor(self.prompt_cache["phones"]).unsqueeze(0).to(self.configs.device)
raw_entry = self.prompt_cache["refer_spec"][0]
if isinstance(raw_entry, tuple):
raw_entry = raw_entry[0]
refer_audio_spec = raw_entry.to(dtype=self.precision, device=self.configs.device)
fea_ref, ge = self.vits_model.decode_encp(prompt_semantic_tokens, prompt_phones, refer_audio_spec)
ref_audio: torch.Tensor = self.prompt_cache["raw_audio"]
ref_sr = self.prompt_cache["raw_sr"]
ref_audio = ref_audio.to(self.configs.device).float()
if ref_audio.shape[0] == 2:
ref_audio = ref_audio.mean(0).unsqueeze(0)
# tgt_sr = self.vocoder_configs["sr"]
tgt_sr = 24000 if self.configs.version == "v3" else 32000
if ref_sr != tgt_sr:
ref_audio = resample(ref_audio, ref_sr, tgt_sr, self.configs.device)
mel2 = mel_fn(ref_audio) if self.configs.version == "v3" else mel_fn_v4(ref_audio)
mel2 = norm_spec(mel2)
T_min = min(mel2.shape[2], fea_ref.shape[2])
mel2 = mel2[:, :, :T_min]
fea_ref = fea_ref[:, :, :T_min]
T_ref = self.vocoder_configs["T_ref"]
T_chunk = self.vocoder_configs["T_chunk"]
if T_min > T_ref:
mel2 = mel2[:, :, -T_ref:]
fea_ref = fea_ref[:, :, -T_ref:]
T_min = T_ref
chunk_len = T_chunk - T_min
mel2 = mel2.to(self.precision)
# #### batched inference
overlapped_len = self.vocoder_configs["overlapped_len"]
feat_chunks = []
feat_lens = []
feat_list = []
for i, idx in enumerate(idx_list):
phones = batch_phones[i].unsqueeze(0).to(self.configs.device)
semantic_tokens = (
semantic_tokens_list[i][-idx:].unsqueeze(0).unsqueeze(0)
            )  # .unsqueeze(0)  # mq needs one extra unsqueeze
feat, _ = self.vits_model.decode_encp(semantic_tokens, phones, refer_audio_spec, ge, speed)
feat_list.append(feat)
feat_lens.append(feat.shape[2])
feats = torch.cat(feat_list, 2)
feats_padded = F.pad(feats, (overlapped_len, 0), "constant", 0)
pos = 0
padding_len = 0
while True:
if pos == 0:
chunk = feats_padded[:, :, pos : pos + chunk_len]
else:
pos = pos - overlapped_len
chunk = feats_padded[:, :, pos : pos + chunk_len]
pos += chunk_len
if chunk.shape[-1] == 0:
break
# padding for the last chunk
padding_len = chunk_len - chunk.shape[2]
if padding_len != 0:
chunk = F.pad(chunk, (0, padding_len), "constant", 0)
feat_chunks.append(chunk)
feat_chunks = torch.cat(feat_chunks, 0)
bs = feat_chunks.shape[0]
fea_ref = fea_ref.repeat(bs, 1, 1)
fea = torch.cat([fea_ref, feat_chunks], 2).transpose(2, 1)
pred_spec = self.vits_model.cfm.inference(
fea, torch.LongTensor([fea.size(1)]).to(fea.device), mel2, sample_steps, inference_cfg_rate=0
)
pred_spec = pred_spec[:, :, -chunk_len:]
dd = pred_spec.shape[1]
pred_spec = pred_spec.permute(1, 0, 2).contiguous().view(dd, -1).unsqueeze(0)
# pred_spec = pred_spec[..., :-padding_len]
pred_spec = denorm_spec(pred_spec)
with torch.no_grad():
wav_gen = self.vocoder(pred_spec)
audio = wav_gen[0][0] # .cpu().detach().numpy()
audio_fragments = []
upsample_rate = self.vocoder_configs["upsample_rate"]
pos = 0
while pos < audio.shape[-1]:
audio_fragment = audio[pos : pos + chunk_len * upsample_rate]
audio_fragments.append(audio_fragment)
pos += chunk_len * upsample_rate
audio = self.sola_algorithm(audio_fragments, overlapped_len * upsample_rate)
audio = audio[overlapped_len * upsample_rate : -padding_len * upsample_rate]
audio_fragments = []
for feat_len in feat_lens:
audio_fragment = audio[: feat_len * upsample_rate]
audio_fragments.append(audio_fragment)
audio = audio[feat_len * upsample_rate :]
return audio_fragments
def sola_algorithm(
self,
audio_fragments: List[torch.Tensor],
overlap_len: int,
        search_len: int = 320,
):
# overlap_len-=search_len
dtype = audio_fragments[0].dtype
for i in range(len(audio_fragments) - 1):
f1 = audio_fragments[i].float()
f2 = audio_fragments[i + 1].float()
w1 = f1[-overlap_len:]
            w2 = f2[: overlap_len + search_len]
# w2 = w2[-w2.shape[-1]//2:]
# assert w1.shape == w2.shape
            corr_norm = F.conv1d(w2.view(1, 1, -1), w1.view(1, 1, -1)).view(-1)
            corr_den = F.conv1d(w2.view(1, 1, -1) ** 2, torch.ones_like(w1).view(1, 1, -1)).view(-1) + 1e-8
            idx = (corr_norm / corr_den.sqrt()).argmax()
print(f"seg_idx: {idx}")
# idx = corr.argmax()
f1_ = f1[: -overlap_len]
audio_fragments[i] = f1_
f2_ = f2[idx:]
window = torch.hann_window((overlap_len) * 2, device=f1.device, dtype=f1.dtype)
f2_[: overlap_len] = (
window[: overlap_len] * f2_[: overlap_len]
+ window[overlap_len :] * f1[-overlap_len :]
)
# window = torch.sin(torch.arange((overlap_len - idx), device=f1.device) * np.pi / (overlap_len - idx))
# f2_[: (overlap_len - idx)] = (
# window * f2_[: (overlap_len - idx)]
# + (1-window) * f1[-(overlap_len - idx) :]
# )
audio_fragments[i + 1] = f2_
return torch.cat(audio_fragments, 0).to(dtype) | --- +++ @@ -689,6 +689,12 @@ self.sv_model = SV(self.configs.device, self.configs.is_half)
def enable_half_precision(self, enable: bool = True, save: bool = True):
+ """
+ Enable or disable half precision for the TTS models.
+ Args:
+ enable: bool, whether to enable half precision.
+ save: bool, whether to persist the updated config.
+ """
if str(self.configs.device) == "cpu" and enable:
print("Half precision is not supported on CPU.")
return
@@ -721,6 +727,11 @@ self.vocoder = self.vocoder.float()
def set_device(self, device: torch.device, save: bool = True):
+ """
+ Set the device used by all loaded models.
+ Args:
+ device: torch.device, the device to use for all models.
+ """
self.configs.device = device
if save:
self.configs.save_configs()
@@ -738,6 +749,12 @@ self.sr_model = self.sr_model.to(device)
def set_ref_audio(self, ref_audio_path: str):
+ """
+ Set the reference audio for the TTS model,
+ including the prompt_semantic and refer_spec.
+ Args:
+ ref_audio_path: str, the path of the reference audio.
+ """
self._set_prompt_semantic(ref_audio_path)
self._set_ref_spec(ref_audio_path)
self._set_ref_audio_path(ref_audio_path)
@@ -952,6 +969,16 @@ return _data, batch_index_list
def recovery_order(self, data: list, batch_index_list: list) -> list:
+ """
+ Recover the original order of the audio according to the batch_index_list.
+
+ Args:
+ data (List[List[torch.Tensor]]): the out-of-order audio.
+ batch_index_list (List[List[int]]): the batch index list.
+
+ Returns:
+ list (List[torch.Tensor]): the data in the original order.
+ """
length = len(sum(batch_index_list, []))
_data = [None] * length
for i, index_list in enumerate(batch_index_list):
@@ -962,10 +989,48 @@ def stop(
self,
):
+ """
+ Stop the inference process.
+ """
self.stop_flag = True
@torch.no_grad()
def run(self, inputs: dict):
+ """
+ Text to speech inference.
+
+ Args:
+ inputs (dict):
+ {
+ "text": "", # str.(required) text to be synthesized
+ "text_lang": "", # str.(required) language of the text to be synthesized
+ "ref_audio_path": "", # str.(required) reference audio path
+ "aux_ref_audio_paths": [], # list.(optional) auxiliary reference audio paths for multi-speaker tone fusion
+ "prompt_text": "", # str.(optional) prompt text for the reference audio
+ "prompt_lang": "", # str.(required) language of the prompt text for the reference audio
+ "top_k": 15, # int. top k sampling
+ "top_p": 1, # float. top p sampling
+ "temperature": 1, # float. temperature for sampling
+ "text_split_method": "cut1", # str. text split method, see text_segmentation_method.py for details.
+ "batch_size": 1, # int. batch size for inference
+ "batch_threshold": 0.75, # float. threshold for batch splitting.
+ "split_bucket": True, # bool. whether to split the batch into multiple buckets.
+ "speed_factor":1.0, # float. control the speed of the synthesized audio.
+ "fragment_interval":0.3, # float. to control the interval of the audio fragment.
+ "seed": -1, # int. random seed for reproducibility.
+ "parallel_infer": True, # bool. whether to use parallel inference.
+ "repetition_penalty": 1.35, # float. repetition penalty for T2S model.
+ "sample_steps": 32, # int. number of sampling steps for VITS model V3.
+ "super_sampling": False, # bool. whether to use super-sampling for audio when using VITS model V3.
+ "return_fragment": False, # bool. return the audio fragment step by step. (best quality, slowest response; the old streaming mode)
+ "streaming_mode": False, # bool. return the audio chunk by chunk. (medium quality, slow response)
+ "overlap_length": 2, # int. overlap length of semantic tokens for streaming mode.
+ "min_chunk_length": 16, # int. The minimum chunk length of semantic tokens for streaming mode. (affects audio chunk size)
+ "fixed_length_chunk": False, # bool. when enabled, gives a faster streaming response at the cost of audio quality.
+ }
+ Returns:
+ Tuple[int, np.ndarray]: sampling rate and audio data.
+ """
########## variables initialization ###########
self.stop_flag: bool = False
text: str = inputs.get("text", "")
@@ -1756,4 +1821,4 @@
audio_fragments[i + 1] = f2_
- return torch.cat(audio_fragments, 0).to(dtype)
+ return torch.cat(audio_fragments, 0).to(dtype)
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/TTS_infer_pack/TTS.py |
Create docstrings for each class method | # by https://github.com/Cosmo-klara
from __future__ import print_function
import re
import inflect
import unicodedata
# Replacement table for suffix measurement units
measurement_map = {
"m": ["meter", "meters"],
"km": ["kilometer", "kilometers"],
"km/h": ["kilometer per hour", "kilometers per hour"],
"ft": ["feet", "feet"],
"L": ["liter", "liters"],
"tbsp": ["tablespoon", "tablespoons"],
"tsp": ["teaspoon", "teaspoons"],
"h": ["hour", "hours"],
"min": ["minute", "minutes"],
"s": ["second", "seconds"],
"°C": ["degree celsius", "degrees celsius"],
"°F": ["degree fahrenheit", "degrees fahrenheit"],
}
# Recognize numbers of the 12,000 style
_inflect = inflect.engine()
# Convert numeric ordinal markers
_ordinal_number_re = re.compile(r"\b([0-9]+)\. ")
# Reportedly \d works a bit better than [0-9] for digit matching in regex
_comma_number_re = re.compile(r"([0-9][0-9\,]+[0-9])")
# Time recognition
_time_re = re.compile(r"\b([01]?[0-9]|2[0-3]):([0-5][0-9])\b")
# Suffix measurement-unit recognition
_measurement_re = re.compile(r"\b([0-9]+(\.[0-9]+)?(m|km|km/h|ft|L|tbsp|tsp|h|min|s|°C|°F))\b")
# Match £ before or after the amount (tried a single pattern for both sides, but it failed for unknown reasons)
_pounds_re_start = re.compile(r"£([0-9\.\,]*[0-9]+)")
_pounds_re_end = re.compile(r"([0-9\.\,]*[0-9]+)£")
# Match $ before or after the amount
_dollars_re_start = re.compile(r"\$([0-9\.\,]*[0-9]+)")
_dollars_re_end = re.compile(r"([0-9\.\,]*[0-9]+)\$")
# Decimal recognition
_decimal_number_re = re.compile(r"([0-9]+\.\s*[0-9]+)")
# Fraction recognition (form "3/4")
_fraction_re = re.compile(r"([0-9]+/[0-9]+)")
# Ordinal recognition
_ordinal_re = re.compile(r"[0-9]+(st|nd|rd|th)")
# Number handling
_number_re = re.compile(r"[0-9]+")
def _convert_ordinal(m):
ordinal = _inflect.ordinal(m.group(1))
return ordinal + ", "
def _remove_commas(m):
return m.group(1).replace(",", "")
def _expand_time(m):
hours, minutes = map(int, m.group(1, 2))
period = "a.m." if hours < 12 else "p.m."
if hours > 12:
hours -= 12
hour_word = _inflect.number_to_words(hours)
minute_word = _inflect.number_to_words(minutes) if minutes != 0 else ""
if minutes == 0:
return f"{hour_word} o'clock {period}"
else:
return f"{hour_word} {minute_word} {period}"
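# Quick sanity check of the conversion above (illustration only, hypothetical inputs):
#   _time_re.sub(_expand_time, "meet at 13:30") -> "meet at one thirty p.m."
#   _time_re.sub(_expand_time, "meet at 4:00")  -> "meet at four o'clock a.m."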
def _expand_measurement(m):
sign = m.group(3)
ptr = 1
    # Couldn't find a clean way to extract the number and didn't want to rework the regex; since 1.2 reads as plural anyway, just strip the "."
num = int(m.group(1).replace(sign, "").replace(".", ""))
decimal_part = m.group(2)
    # Closes a gap in the check above: e.g. the 0.1 case is excluded here
    if decimal_part is None and num == 1:
ptr = 0
return m.group(1).replace(sign, " " + measurement_map[sign][ptr])
def _expand_pounds(m):
match = m.group(1)
parts = match.split(".")
if len(parts) > 2:
return match + " pounds" # Unexpected format
pounds = int(parts[0]) if parts[0] else 0
pence = int(parts[1].ljust(2, "0")) if len(parts) > 1 and parts[1] else 0
if pounds and pence:
pound_unit = "pound" if pounds == 1 else "pounds"
penny_unit = "penny" if pence == 1 else "pence"
return "%s %s and %s %s" % (pounds, pound_unit, pence, penny_unit)
elif pounds:
pound_unit = "pound" if pounds == 1 else "pounds"
return "%s %s" % (pounds, pound_unit)
elif pence:
penny_unit = "penny" if pence == 1 else "pence"
return "%s %s" % (pence, penny_unit)
else:
return "zero pounds"
def _expand_dollars(m):
match = m.group(1)
parts = match.split(".")
if len(parts) > 2:
return match + " dollars" # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
cents = int(parts[1].ljust(2, "0")) if len(parts) > 1 and parts[1] else 0
if dollars and cents:
dollar_unit = "dollar" if dollars == 1 else "dollars"
cent_unit = "cent" if cents == 1 else "cents"
return "%s %s and %s %s" % (dollars, dollar_unit, cents, cent_unit)
elif dollars:
dollar_unit = "dollar" if dollars == 1 else "dollars"
return "%s %s" % (dollars, dollar_unit)
elif cents:
cent_unit = "cent" if cents == 1 else "cents"
return "%s %s" % (cents, cent_unit)
else:
return "zero dollars"
# Decimal handling
def _expand_decimal_number(m):
match = m.group(1)
parts = match.split(".")
words = []
    # Walk through each character of the fractional part
for char in parts[1]:
if char == ".":
words.append("point")
else:
words.append(char)
return parts[0] + " point " + " ".join(words)
# Fraction handling
def _expend_fraction(m):
match = m.group(0)
numerator, denominator = map(int, match.split("/"))
numerator_part = _inflect.number_to_words(numerator)
if denominator == 2:
if numerator == 1:
denominator_part = "half"
else:
denominator_part = "halves"
elif denominator == 1:
return f"{numerator_part}"
else:
denominator_part = _inflect.ordinal(_inflect.number_to_words(denominator))
if numerator > 1:
denominator_part += "s"
return f"{numerator_part} {denominator_part}"
def _expand_ordinal(m):
return _inflect.number_to_words(m.group(0))
def _expand_number(m):
num = int(m.group(0))
if num > 1000 and num < 3000:
if num == 2000:
return "two thousand"
elif num > 2000 and num < 2010:
return "two thousand " + _inflect.number_to_words(num % 100)
elif num % 100 == 0:
return _inflect.number_to_words(num // 100) + " hundred"
else:
return _inflect.number_to_words(num, andword="", zero="oh", group=2).replace(", ", " ")
else:
return _inflect.number_to_words(num, andword="")
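# Illustration of the year-style special cases above (hypothetical inputs, not in the dataset row):
#   "1999" -> "nineteen ninety-nine"   (group=2 reads the digits in pairs)
#   "2005" -> "two thousand five"
#   "1200" -> "twelve hundred"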
# Addition, subtraction, multiplication, division
RE_ASMD = re.compile(
r"((-?)((\d+)(\.\d+)?[⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]*)|(\.\d+[⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]*)|([A-Za-z][⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]*))\s+([\+\-\×÷=])\s+((-?)((\d+)(\.\d+)?[⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]*)|(\.\d+[⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]*)|([A-Za-z][⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]*))"
)
# RE_ASMD = re.compile(
# r"\b((-?)((\d+)(\.\d+)?[⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]*)|(\.\d+[⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]*)|([A-Za-z][⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]*))([\+\-\×÷=])((-?)((\d+)(\.\d+)?[⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]*)|(\.\d+[⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]*)|([A-Za-z][⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]*))\b"
# )
asmd_map = {"+": " plus ", "-": " minus ", "×": " times ", "÷": " divided by ", "=": " Equals "}
def replace_asmd(match) -> str:
result = match.group(1) + asmd_map[match.group(8)] + match.group(9)
return result
RE_INTEGER = re.compile(r"(?:^|\s+)(-)" r"(\d+)")
def replace_negative_num(match) -> str:
sign = match.group(1)
number = match.group(2)
sign: str = "negative " if sign else ""
result = f"{sign}{number}"
return result
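# Illustration (hypothetical input): RE_INTEGER rewrites a minus sign attached
# to a number, e.g.
#   RE_INTEGER.sub(replace_negative_num, "-5 items") -> "negative 5 items"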
def normalize(text):
text = re.sub(_ordinal_number_re, _convert_ordinal, text)
    # Handle arithmetic operations
    # Replaced: text = re.sub(r"(?<!\d)-|-(?!\d)", " minus ", text)
while RE_ASMD.search(text):
text = RE_ASMD.sub(replace_asmd, text)
text = RE_INTEGER.sub(replace_negative_num, text)
text = re.sub(_comma_number_re, _remove_commas, text)
text = re.sub(_time_re, _expand_time, text)
text = re.sub(_measurement_re, _expand_measurement, text)
text = re.sub(_pounds_re_start, _expand_pounds, text)
text = re.sub(_pounds_re_end, _expand_pounds, text)
text = re.sub(_dollars_re_start, _expand_dollars, text)
text = re.sub(_dollars_re_end, _expand_dollars, text)
text = re.sub(_decimal_number_re, _expand_decimal_number, text)
text = re.sub(_fraction_re, _expend_fraction, text)
text = re.sub(_ordinal_re, _expand_ordinal, text)
text = re.sub(_number_re, _expand_number, text)
text = "".join(
char for char in unicodedata.normalize("NFD", text) if unicodedata.category(char) != "Mn"
) # Strip accents
text = re.sub("%", " percent", text)
text = re.sub("[^ A-Za-z'.,?!\-]", "", text)
text = re.sub(r"(?i)i\.e\.", "that is", text)
text = re.sub(r"(?i)e\.g\.", "for example", text)
    # Added: split all-uppercase words letter by letter
text = re.sub(r"(?<!^)(?<![\s])([A-Z])", r" \1", text)
return text
if __name__ == "__main__":
    # It might be worth displaying the normalized result (read-only, or editable without changing the text actually passed to the TTS)
    # and letting the user confirm it before it is fed to the TTS, so they can catch non-standard input.
print(normalize("1. test ordinal number 1st"))
print(normalize("32.3$, $6.24, 1.1£, £7.14."))
print(normalize("3/23, 1/2, 3/2, 1/3, 6/1"))
print(normalize("1st, 22nd"))
print(normalize("a test 20h, 1.2s, 1L, 0.1km"))
print(normalize("a test of time 4:00, 13:00, 13:30"))
print(normalize("a test of temperature 4°F, 23°C, -19°C")) | --- +++ @@ -61,6 +61,13 @@
def _convert_ordinal(m):
+ """
+ Normalize ordinal markers, e.g.: 1. 2. 3. 4. 5. 6.
+ Examples:
+ input: "1. "
+ output: "1st"
+ The later _expand_ordinal pass then converts these into words like "first".
+ """
ordinal = _inflect.ordinal(m.group(1))
return ordinal + ", "
@@ -70,6 +77,13 @@
def _expand_time(m):
+ """
+ Convert 24-hour times into a 12-hour spoken representation.
+
+ Examples:
+ input: "13:00 / 4:00 / 13:30"
+ output: "one o'clock p.m. / four o'clock a.m. / one thirty p.m."
+ """
hours, minutes = map(int, m.group(1, 2))
period = "a.m." if hours < 12 else "p.m."
if hours > 12:
@@ -85,6 +99,10 @@
def _expand_measurement(m):
+ """
+ Handle common measurement-unit suffixes; currently supported: m, km, km/h, ft, L, tbsp, tsp, h, min, s, °C, °F
+ To extend, update _measurement_re and measurement_map
+ """
sign = m.group(3)
ptr = 1
# 想不到怎么方便的取数字,又懒得改正则,诶,1.2 反正也是复数读法,干脆直接去掉 "."
@@ -97,6 +115,9 @@
def _expand_pounds(m):
+ """
+ No authoritative reference found; handled the same way as dollars, so the two could be merged.
+ """
match = m.group(1)
parts = match.split(".")
if len(parts) > 2:
@@ -118,6 +139,12 @@
def _expand_dollars(m):
+ """
+ change: cents are capped at 100, so zero-padding is presumably needed
+ Example:
+ input: "32.3$ / $6.24"
+ output: "thirty-two dollars and thirty cents" / "six dollars and twenty-four cents"
+ """
match = m.group(1)
parts = match.split(".")
if len(parts) > 2:
@@ -140,6 +167,11 @@
# 小数的处理
def _expand_decimal_number(m):
+ """
+ Example:
+ input: "13.234"
+ output: "thirteen point two three four"
+ """
match = m.group(1)
parts = match.split(".")
words = []
@@ -154,6 +186,20 @@
# 分数的处理
def _expend_fraction(m):
+ """
+ Rule 1: read the numerator as a cardinal and the denominator as an ordinal.
+ Rule 2: if the numerator is greater than 1, the denominator ordinal takes its plural form.
+ Rule 3: when the denominator is 2, read it as "half"; if the numerator is greater than 1, use the plural "halves".
+ Examples:
+
+ | Written | Said |
+ |:---:|:---:|
+ | 1/3 | one third |
+ | 3/4 | three fourths |
+ | 5/6 | five sixths |
+ | 1/2 | one half |
+ | 3/2 | three halves |
+ """
match = m.group(0)
numerator, denominator = map(int, match.split("/"))
@@ -204,6 +250,12 @@
def replace_asmd(match) -> str:
+ """
+ Args:
+ match (re.Match)
+ Returns:
+ str
+ """
result = match.group(1) + asmd_map[match.group(8)] + match.group(9)
return result
@@ -212,6 +264,12 @@
def replace_negative_num(match) -> str:
+ """
+ Args:
+ match (re.Match)
+ Returns:
+ str
+ """
sign = match.group(1)
number = match.group(2)
sign: str = "negative " if sign else ""
@@ -221,6 +279,10 @@
def normalize(text):
+ """
+ !!! All processing assumes well-formed input !!!
+ New handling can be added by defining a regex and its corresponding handler function
+ """
text = re.sub(_ordinal_number_re, _convert_ordinal, text)
@@ -264,4 +326,4 @@ print(normalize("1st, 22nd"))
print(normalize("a test 20h, 1.2s, 1L, 0.1km"))
- print(normalize("a test of temperature 4°F, 23°C, -19°C"))
+ print(normalize("a test of temperature 4°F, 23°C, -19°C"))
- print(normalize("a test of temperature 4°F, 23°C, -19°C"))+ print(normalize("a test of temperature 4°F, 23°C, -19°C"))
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/text/en_normalization/expend.py |
Annotate my code with docstrings | import psutil
import os
def set_high_priority():
if os.name != "nt":
        return  # only effective on Windows
p = psutil.Process(os.getpid())
try:
p.nice(psutil.HIGH_PRIORITY_CLASS)
        print("Process priority set to High")
except psutil.AccessDenied:
        print("Insufficient permissions to change the priority (run as administrator)")
set_high_priority()
import json
import logging
import os
import re
import sys
import traceback
import warnings
import torch
import torchaudio
from text.LangSegmenter import LangSegmenter
logging.getLogger("markdown_it").setLevel(logging.ERROR)
logging.getLogger("urllib3").setLevel(logging.ERROR)
logging.getLogger("httpcore").setLevel(logging.ERROR)
logging.getLogger("httpx").setLevel(logging.ERROR)
logging.getLogger("asyncio").setLevel(logging.ERROR)
logging.getLogger("charset_normalizer").setLevel(logging.ERROR)
logging.getLogger("torchaudio._extension").setLevel(logging.ERROR)
logging.getLogger("multipart.multipart").setLevel(logging.ERROR)
warnings.simplefilter(action="ignore", category=FutureWarning)
version = model_version = os.environ.get("version", "v2")
from config import change_choices, get_weights_names, name2gpt_path, name2sovits_path
SoVITS_names, GPT_names = get_weights_names()
from config import pretrained_sovits_name
path_sovits_v3 = pretrained_sovits_name["v3"]
path_sovits_v4 = pretrained_sovits_name["v4"]
is_exist_s2gv3 = os.path.exists(path_sovits_v3)
is_exist_s2gv4 = os.path.exists(path_sovits_v4)
if os.path.exists("./weight.json"):
pass
else:
with open("./weight.json", "w", encoding="utf-8") as file:
json.dump({"GPT": {}, "SoVITS": {}}, file)
with open("./weight.json", "r", encoding="utf-8") as file:
weight_data = file.read()
weight_data = json.loads(weight_data)
gpt_path = os.environ.get("gpt_path", weight_data.get("GPT", {}).get(version, GPT_names[-1]))
sovits_path = os.environ.get("sovits_path", weight_data.get("SoVITS", {}).get(version, SoVITS_names[0]))
if isinstance(gpt_path, list):
gpt_path = gpt_path[0]
if isinstance(sovits_path, list):
sovits_path = sovits_path[0]
# print(2333333)
# print(os.environ["gpt_path"])
# print(gpt_path)
# print(GPT_names)
# print(weight_data)
# print(weight_data.get("GPT", {}))
# print(version)  ### there is no s2 v2pro in the GPT version list
# print(weight_data.get("GPT", {}).get(version, GPT_names[-1]))
cnhubert_base_path = os.environ.get("cnhubert_base_path", "GPT_SoVITS/pretrained_models/chinese-hubert-base")
bert_path = os.environ.get("bert_path", "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large")
infer_ttswebui = os.environ.get("infer_ttswebui", 9872)
infer_ttswebui = int(infer_ttswebui)
is_share = os.environ.get("is_share", "False")
is_share = eval(is_share)
if "_CUDA_VISIBLE_DEVICES" in os.environ:
os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"]
is_half = eval(os.environ.get("is_half", "True")) and torch.cuda.is_available()
# is_half=False
punctuation = set(["!", "?", "…", ",", ".", "-", " "])
import gradio as gr
import librosa
import numpy as np
from feature_extractor import cnhubert
from transformers import AutoModelForMaskedLM, AutoTokenizer
cnhubert.cnhubert_base_path = cnhubert_base_path
import random
from GPT_SoVITS.module.models import Generator, SynthesizerTrn, SynthesizerTrnV3
def set_seed(seed):
if seed == -1:
seed = random.randint(0, 1000000)
seed = int(seed)
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
# set_seed(42)
from time import time as ttime
from AR.models.t2s_lightning_module import Text2SemanticLightningModule
from peft import LoraConfig, get_peft_model
from text import cleaned_text_to_sequence
from text.cleaner import clean_text
from tools.assets import css, js, top_html
from tools.i18n.i18n import I18nAuto, scan_language_list
language = os.environ.get("language", "Auto")
language = sys.argv[-1] if sys.argv[-1] in scan_language_list() else language
i18n = I18nAuto(language=language)
# os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'  # ensure this can also be set when the inference UI is launched directly.
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
dict_language_v1 = {
    i18n("中文"): "all_zh",  # treat everything as Chinese
    i18n("英文"): "en",  # treat everything as English ####### unchanged
    i18n("日文"): "all_ja",  # treat everything as Japanese
    i18n("中英混合"): "zh",  # mixed Chinese and English #### unchanged
    i18n("日英混合"): "ja",  # mixed Japanese and English #### unchanged
    i18n("多语种混合"): "auto",  # multilingual: split and detect the language of each segment
}
dict_language_v2 = {
    i18n("中文"): "all_zh",  # treat everything as Chinese
    i18n("英文"): "en",  # treat everything as English ####### unchanged
    i18n("日文"): "all_ja",  # treat everything as Japanese
    i18n("粤语"): "all_yue",  # treat everything as Cantonese
    i18n("韩文"): "all_ko",  # treat everything as Korean
    i18n("中英混合"): "zh",  # mixed Chinese and English #### unchanged
    i18n("日英混合"): "ja",  # mixed Japanese and English #### unchanged
    i18n("粤英混合"): "yue",  # mixed Cantonese and English #### unchanged
    i18n("韩英混合"): "ko",  # mixed Korean and English #### unchanged
    i18n("多语种混合"): "auto",  # multilingual: split and detect the language of each segment
    i18n("多语种混合(粤语)"): "auto_yue",  # multilingual (Cantonese): split and detect the language of each segment
}
dict_language = dict_language_v1 if version == "v1" else dict_language_v2
tokenizer = AutoTokenizer.from_pretrained(bert_path)
bert_model = AutoModelForMaskedLM.from_pretrained(bert_path)
if is_half:
bert_model = bert_model.half().to(device)
else:
bert_model = bert_model.to(device)
def get_bert_feature(text, word2ph):
with torch.no_grad():
inputs = tokenizer(text, return_tensors="pt")
for i in inputs:
inputs[i] = inputs[i].to(device)
res = bert_model(**inputs, output_hidden_states=True)
res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()[1:-1]
assert len(word2ph) == len(text)
phone_level_feature = []
for i in range(len(word2ph)):
repeat_feature = res[i].repeat(word2ph[i], 1)
phone_level_feature.append(repeat_feature)
phone_level_feature = torch.cat(phone_level_feature, dim=0)
return phone_level_feature.T
class DictToAttrRecursive(dict):
def __init__(self, input_dict):
super().__init__(input_dict)
for key, value in input_dict.items():
if isinstance(value, dict):
value = DictToAttrRecursive(value)
self[key] = value
setattr(self, key, value)
def __getattr__(self, item):
try:
return self[item]
except KeyError:
raise AttributeError(f"Attribute {item} not found")
def __setattr__(self, key, value):
if isinstance(value, dict):
value = DictToAttrRecursive(value)
super(DictToAttrRecursive, self).__setitem__(key, value)
super().__setattr__(key, value)
def __delattr__(self, item):
try:
del self[item]
except KeyError:
raise AttributeError(f"Attribute {item} not found")
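# Usage sketch (illustration only, hypothetical values): DictToAttrRecursive
# exposes nested dict keys as attributes while keeping normal dict access, e.g.
#   cfg = DictToAttrRecursive({"model": {"hidden": 512}})
#   cfg.model.hidden        # -> 512
#   cfg["model"]["hidden"]  # -> 512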
ssl_model = cnhubert.get_model()
if is_half:
ssl_model = ssl_model.half().to(device)
else:
ssl_model = ssl_model.to(device)
### TODO: move these into process_ckpt and modify my_save (for saving SoVITS weights); GPT weight saving already uses my_save in process_ckpt
# symbol_version-model_version-if_lora_v3
from process_ckpt import get_sovits_version_from_path_fast, load_sovits_new
v3v4set = {"v3", "v4"}
def change_sovits_weights(sovits_path, prompt_language=None, text_language=None):
if "!" in sovits_path or "!" in sovits_path:
sovits_path = name2sovits_path[sovits_path]
global vq_model, hps, version, model_version, dict_language, if_lora_v3
version, model_version, if_lora_v3 = get_sovits_version_from_path_fast(sovits_path)
print(sovits_path, version, model_version, if_lora_v3)
is_exist = is_exist_s2gv3 if model_version == "v3" else is_exist_s2gv4
path_sovits = path_sovits_v3 if model_version == "v3" else path_sovits_v4
    if if_lora_v3 and not is_exist:
info = path_sovits + "SoVITS %s" % model_version + i18n("底模缺失,无法加载相应 LoRA 权重")
gr.Warning(info)
raise FileExistsError(info)
dict_language = dict_language_v1 if version == "v1" else dict_language_v2
if prompt_language is not None and text_language is not None:
if prompt_language in list(dict_language.keys()):
prompt_text_update, prompt_language_update = (
{"__type__": "update"},
{"__type__": "update", "value": prompt_language},
)
else:
prompt_text_update = {"__type__": "update", "value": ""}
prompt_language_update = {"__type__": "update", "value": i18n("中文")}
if text_language in list(dict_language.keys()):
text_update, text_language_update = {"__type__": "update"}, {"__type__": "update", "value": text_language}
else:
text_update = {"__type__": "update", "value": ""}
text_language_update = {"__type__": "update", "value": i18n("中文")}
if model_version in v3v4set:
visible_sample_steps = True
visible_inp_refs = False
else:
visible_sample_steps = False
visible_inp_refs = True
yield (
{"__type__": "update", "choices": list(dict_language.keys())},
{"__type__": "update", "choices": list(dict_language.keys())},
prompt_text_update,
prompt_language_update,
text_update,
text_language_update,
{
"__type__": "update",
"visible": visible_sample_steps,
"value": 32 if model_version == "v3" else 8,
"choices": [4, 8, 16, 32, 64, 128] if model_version == "v3" else [4, 8, 16, 32],
},
{"__type__": "update", "visible": visible_inp_refs},
{"__type__": "update", "value": False, "interactive": True if model_version not in v3v4set else False},
{"__type__": "update", "visible": True if model_version == "v3" else False},
{"__type__": "update", "value": i18n("模型加载中,请等待"), "interactive": False},
)
dict_s2 = load_sovits_new(sovits_path)
hps = dict_s2["config"]
hps = DictToAttrRecursive(hps)
hps.model.semantic_frame_rate = "25hz"
if "enc_p.text_embedding.weight" not in dict_s2["weight"]:
hps.model.version = "v2" # v3model,v2sybomls
elif dict_s2["weight"]["enc_p.text_embedding.weight"].shape[0] == 322:
hps.model.version = "v1"
else:
hps.model.version = "v2"
version = hps.model.version
# print("sovits版本:",hps.model.version)
if model_version not in v3v4set:
if "Pro" not in model_version:
model_version = version
else:
hps.model.version = model_version
vq_model = SynthesizerTrn(
hps.data.filter_length // 2 + 1,
hps.train.segment_size // hps.data.hop_length,
n_speakers=hps.data.n_speakers,
**hps.model,
)
else:
hps.model.version = model_version
vq_model = SynthesizerTrnV3(
hps.data.filter_length // 2 + 1,
hps.train.segment_size // hps.data.hop_length,
n_speakers=hps.data.n_speakers,
**hps.model,
)
if "pretrained" not in sovits_path:
try:
del vq_model.enc_q
except:
pass
if is_half == True:
vq_model = vq_model.half().to(device)
else:
vq_model = vq_model.to(device)
vq_model.eval()
if if_lora_v3 == False:
print("loading sovits_%s" % model_version, vq_model.load_state_dict(dict_s2["weight"], strict=False))
else:
path_sovits = path_sovits_v3 if model_version == "v3" else path_sovits_v4
print(
"loading sovits_%spretrained_G" % model_version,
vq_model.load_state_dict(load_sovits_new(path_sovits)["weight"], strict=False),
)
lora_rank = dict_s2["lora_rank"]
lora_config = LoraConfig(
target_modules=["to_k", "to_q", "to_v", "to_out.0"],
r=lora_rank,
lora_alpha=lora_rank,
init_lora_weights=True,
)
vq_model.cfm = get_peft_model(vq_model.cfm, lora_config)
print("loading sovits_%s_lora%s" % (model_version, lora_rank))
vq_model.load_state_dict(dict_s2["weight"], strict=False)
vq_model.cfm = vq_model.cfm.merge_and_unload()
# torch.save(vq_model.state_dict(),"merge_win.pth")
vq_model.eval()
yield (
{"__type__": "update", "choices": list(dict_language.keys())},
{"__type__": "update", "choices": list(dict_language.keys())},
prompt_text_update,
prompt_language_update,
text_update,
text_language_update,
{
"__type__": "update",
"visible": visible_sample_steps,
"value": 32 if model_version == "v3" else 8,
"choices": [4, 8, 16, 32, 64, 128] if model_version == "v3" else [4, 8, 16, 32],
},
{"__type__": "update", "visible": visible_inp_refs},
{"__type__": "update", "value": False, "interactive": True if model_version not in v3v4set else False},
{"__type__": "update", "visible": True if model_version == "v3" else False},
{"__type__": "update", "value": i18n("合成语音"), "interactive": True},
)
with open("./weight.json") as f:
data = f.read()
data = json.loads(data)
data["SoVITS"][version] = sovits_path
with open("./weight.json", "w") as f:
f.write(json.dumps(data))
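# drive the generator once at import time to load the initial SoVITS weights; any failure is ignored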
try:
next(change_sovits_weights(sovits_path))
except:
pass
def change_gpt_weights(gpt_path):
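"""Load GPT (text-to-semantic) weights from gpt_path and record the choice in weight.json."""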
if "!" in gpt_path or "!" in gpt_path:
gpt_path = name2gpt_path[gpt_path]
global hz, max_sec, t2s_model, config
hz = 50
dict_s1 = torch.load(gpt_path, map_location="cpu", weights_only=False)
config = dict_s1["config"]
max_sec = config["data"]["max_sec"]
t2s_model = Text2SemanticLightningModule(config, "****", is_train=False)
t2s_model.load_state_dict(dict_s1["weight"])
if is_half == True:
t2s_model = t2s_model.half()
t2s_model = t2s_model.to(device)
t2s_model.eval()
# total = sum([param.nelement() for param in t2s_model.parameters()])
# print("Number of parameter: %.2fM" % (total / 1e6))
with open("./weight.json") as f:
data = f.read()
data = json.loads(data)
data["GPT"][version] = gpt_path
with open("./weight.json", "w") as f:
f.write(json.dumps(data))
change_gpt_weights(gpt_path)
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
import torch
now_dir = os.getcwd()
def clean_hifigan_model():
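"""Release the HiFi-GAN vocoder from GPU memory; the clean_*/init_* helpers below keep at most one large auxiliary model resident at a time."""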
global hifigan_model
if hifigan_model:
hifigan_model = hifigan_model.cpu()
hifigan_model = None
try:
torch.cuda.empty_cache()
except:
pass
def clean_bigvgan_model():
global bigvgan_model
if bigvgan_model:
bigvgan_model = bigvgan_model.cpu()
bigvgan_model = None
try:
torch.cuda.empty_cache()
except:
pass
def clean_sv_cn_model():
global sv_cn_model
if sv_cn_model:
sv_cn_model.embedding_model = sv_cn_model.embedding_model.cpu()
sv_cn_model = None
try:
torch.cuda.empty_cache()
except:
pass
def init_bigvgan():
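"""Load the BigVGAN vocoder used by v3 models, freeing HiFi-GAN and the speaker-verification model first."""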
global bigvgan_model, hifigan_model, sv_cn_model
from BigVGAN import bigvgan
bigvgan_model = bigvgan.BigVGAN.from_pretrained(
"%s/GPT_SoVITS/pretrained_models/models--nvidia--bigvgan_v2_24khz_100band_256x" % (now_dir,),
use_cuda_kernel=False,
) # if True, RuntimeError: Ninja is required to load C++ extensions
# remove weight norm in the model and set to eval mode
bigvgan_model.remove_weight_norm()
bigvgan_model = bigvgan_model.eval()
clean_hifigan_model()
clean_sv_cn_model()
if is_half == True:
bigvgan_model = bigvgan_model.half().to(device)
else:
bigvgan_model = bigvgan_model.to(device)
def init_hifigan():
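"""Build the HiFi-GAN vocoder used by v4 models and load the pretrained v4 vocoder checkpoint, freeing BigVGAN and the speaker-verification model first."""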
global hifigan_model, bigvgan_model, sv_cn_model
hifigan_model = Generator(
initial_channel=100,
resblock="1",
resblock_kernel_sizes=[3, 7, 11],
resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
upsample_rates=[10, 6, 2, 2, 2],
upsample_initial_channel=512,
upsample_kernel_sizes=[20, 12, 4, 4, 4],
gin_channels=0,
is_bias=True,
)
hifigan_model.eval()
hifigan_model.remove_weight_norm()
state_dict_g = torch.load(
"%s/GPT_SoVITS/pretrained_models/gsv-v4-pretrained/vocoder.pth" % (now_dir,),
map_location="cpu",
weights_only=False,
)
print("loading vocoder", hifigan_model.load_state_dict(state_dict_g))
clean_bigvgan_model()
clean_sv_cn_model()
if is_half == True:
hifigan_model = hifigan_model.half().to(device)
else:
hifigan_model = hifigan_model.to(device)
from sv import SV
def init_sv_cn():
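"""Load the speaker-verification model used by v2Pro/v2ProPlus, freeing both vocoders first."""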
global hifigan_model, bigvgan_model, sv_cn_model
sv_cn_model = SV(device, is_half)
clean_bigvgan_model()
clean_hifigan_model()
bigvgan_model = hifigan_model = sv_cn_model = None
if model_version == "v3":
init_bigvgan()
if model_version == "v4":
init_hifigan()
if model_version in {"v2Pro", "v2ProPlus"}:
init_sv_cn()
resample_transform_dict = {}
def resample(audio_tensor, sr0, sr1, device):
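"""Resample audio_tensor from sr0 to sr1, caching one torchaudio Resample transform per (sr0, sr1, device) triple."""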
global resample_transform_dict
key = "%s-%s-%s" % (sr0, sr1, str(device))
if key not in resample_transform_dict:
resample_transform_dict[key] = torchaudio.transforms.Resample(sr0, sr1).to(device)
return resample_transform_dict[key](audio_tensor)
def get_spepc(hps, filename, dtype, device, is_v2pro=False):
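"""Load a reference wav, downmix stereo to mono, resample to the model rate, attenuate peaks above 1.0, and return its linear spectrogram (plus 16 kHz audio for the v2Pro speaker-verification path)."""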
# audio = load_audio(filename, int(hps.data.sampling_rate))
# audio, sampling_rate = librosa.load(filename, sr=int(hps.data.sampling_rate))
# audio = torch.FloatTensor(audio)
sr1 = int(hps.data.sampling_rate)
audio, sr0 = torchaudio.load(filename)
if sr0 != sr1:
audio = audio.to(device)
if audio.shape[0] == 2:
audio = audio.mean(0).unsqueeze(0)
audio = resample(audio, sr0, sr1, device)
else:
audio = audio.to(device)
if audio.shape[0] == 2:
audio = audio.mean(0).unsqueeze(0)
maxx = audio.abs().max()
if maxx > 1:
audio /= min(2, maxx)
spec = spectrogram_torch(
audio,
hps.data.filter_length,
hps.data.sampling_rate,
hps.data.hop_length,
hps.data.win_length,
center=False,
)
spec = spec.to(dtype)
if is_v2pro == True:
audio = resample(audio, sr1, 16000, device).to(dtype)
return spec, audio
def clean_text_inf(text, language, version):
language = language.replace("all_", "")
phones, word2ph, norm_text = clean_text(text, language, version)
phones = cleaned_text_to_sequence(phones, version)
return phones, word2ph, norm_text
dtype = torch.float16 if is_half == True else torch.float32
def get_bert_inf(phones, word2ph, norm_text, language):
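"""Return BERT features for one text segment: real features for Chinese, otherwise a zero tensor of shape (1024, n_phones)."""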
language = language.replace("all_", "")
if language == "zh":
bert = get_bert_feature(norm_text, word2ph).to(device) # .to(dtype)
else:
bert = torch.zeros(
(1024, len(phones)),
dtype=torch.float16 if is_half == True else torch.float32,
).to(device)
return bert
splits = {
",",
"。",
"?",
"!",
",",
".",
"?",
"!",
"~",
":",
":",
"—",
"…",
}
def get_first(text):
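"""Return the text preceding the first punctuation mark from splits."""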
pattern = "[" + "".join(re.escape(sep) for sep in splits) + "]"
text = re.split(pattern, text)[0].strip()
return text
from text import chinese
def get_phones_and_bert(text, language, version, final=False):
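"""Segment text by language, then compute phone ids, BERT features, and normalized text for the whole input. Edge case: if fewer than 6 phones result, a "." is prepended and the function retries once (final=True)."""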
text = re.sub(r' {2,}', ' ', text)
textlist = []
langlist = []
if language == "all_zh":
for tmp in LangSegmenter.getTexts(text,"zh"):
langlist.append(tmp["lang"])
textlist.append(tmp["text"])
elif language == "all_yue":
for tmp in LangSegmenter.getTexts(text,"zh"):
if tmp["lang"] == "zh":
tmp["lang"] = "yue"
langlist.append(tmp["lang"])
textlist.append(tmp["text"])
elif language == "all_ja":
for tmp in LangSegmenter.getTexts(text,"ja"):
langlist.append(tmp["lang"])
textlist.append(tmp["text"])
elif language == "all_ko":
for tmp in LangSegmenter.getTexts(text,"ko"):
langlist.append(tmp["lang"])
textlist.append(tmp["text"])
elif language == "en":
langlist.append("en")
textlist.append(text)
elif language == "auto":
for tmp in LangSegmenter.getTexts(text):
langlist.append(tmp["lang"])
textlist.append(tmp["text"])
elif language == "auto_yue":
for tmp in LangSegmenter.getTexts(text):
if tmp["lang"] == "zh":
tmp["lang"] = "yue"
langlist.append(tmp["lang"])
textlist.append(tmp["text"])
else:
for tmp in LangSegmenter.getTexts(text):
if langlist:
if (tmp["lang"] == "en" and langlist[-1] == "en") or (tmp["lang"] != "en" and langlist[-1] != "en"):
textlist[-1] += tmp["text"]
continue
if tmp["lang"] == "en":
langlist.append(tmp["lang"])
else:
# CJK ideographs cannot be told apart reliably, so trust the user's language choice
langlist.append(language)
textlist.append(tmp["text"])
print(textlist)
print(langlist)
phones_list = []
bert_list = []
norm_text_list = []
for i in range(len(textlist)):
lang = langlist[i]
phones, word2ph, norm_text = clean_text_inf(textlist[i], lang, version)
bert = get_bert_inf(phones, word2ph, norm_text, lang)
phones_list.append(phones)
norm_text_list.append(norm_text)
bert_list.append(bert)
bert = torch.cat(bert_list, dim=1)
phones = sum(phones_list, [])
norm_text = "".join(norm_text_list)
if not final and len(phones) < 6:
return get_phones_and_bert("." + text, language, version, final=True)
return phones, bert.to(dtype), norm_text
from module.mel_processing import mel_spectrogram_torch, spectrogram_torch
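# CFM mel-spectrograms are normalized into [-1, 1] using the fixed dynamic range below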
spec_min = -12
spec_max = 2
def norm_spec(x):
return (x - spec_min) / (spec_max - spec_min) * 2 - 1
def denorm_spec(x):
return (x + 1) / 2 * (spec_max - spec_min) + spec_min
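# v3 decodes 24 kHz mels (n_fft 1024, hop 256); v4 decodes 32 kHz mels (n_fft 1280, hop 320)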
mel_fn = lambda x: mel_spectrogram_torch(
x,
**{
"n_fft": 1024,
"win_size": 1024,
"hop_size": 256,
"num_mels": 100,
"sampling_rate": 24000,
"fmin": 0,
"fmax": None,
"center": False,
},
)
mel_fn_v4 = lambda x: mel_spectrogram_torch(
x,
**{
"n_fft": 1280,
"win_size": 1280,
"hop_size": 320,
"num_mels": 100,
"sampling_rate": 32000,
"fmin": 0,
"fmax": None,
"center": False,
},
)
def merge_short_text_in_array(texts, threshold):
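"""Greedily merge consecutive segments until each reaches at least threshold characters; a short trailing remainder is appended to the last chunk (or kept alone if it is the only one)."""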
if (len(texts)) < 2:
return texts
result = []
text = ""
for ele in texts:
text += ele
if len(text) >= threshold:
result.append(text)
text = ""
if len(text) > 0:
if len(result) == 0:
result.append(text)
else:
result[len(result) - 1] += text
return result
sr_model = None
def audio_sr(audio, sr):
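"""Lazily load the AP_BWE super-resolution model and upsample audio; if its checkpoint is missing, warn and return the input unchanged."""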
global sr_model
if sr_model == None:
from tools.audio_sr import AP_BWE
try:
sr_model = AP_BWE(device, DictToAttrRecursive)
except FileNotFoundError:
gr.Warning(i18n("你没有下载超分模型的参数,因此不进行超分。如想超分请先参照教程把文件下载好"))
return audio.cpu().detach().numpy(), sr
return sr_model(audio, sr)
# cache key parts: ref_wav_path + prompt_text + prompt_language + text (single segment) + text_language + top_k + top_p + temperature
# cache_tokens = {}  # no cache-eviction mechanism implemented yet
cache = {}
def get_tts_wav(
ref_wav_path,
prompt_text,
prompt_language,
text,
text_language,
how_to_cut=i18n("不切"),
top_k=20,
top_p=0.6,
temperature=0.6,
ref_free=False,
speed=1,
if_freeze=False,
inp_refs=None,
sample_steps=8,
if_sr=False,
pause_second=0.3,
):
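"""Main synthesis generator. Encodes the reference audio into semantic tokens, runs the GPT
model per text segment (cached when if_freeze is set), decodes with the SoVITS model (plus a
CFM + vocoder stage for v3/v4, and optional super-resolution for 24 kHz output), and yields a
(sample_rate, int16 waveform) tuple.
"""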
global cache
if ref_wav_path:
pass
else:
gr.Warning(i18n("请上传参考音频"))
if text:
pass
else:
gr.Warning(i18n("请填入推理文本"))
t = []
if prompt_text is None or len(prompt_text) == 0:
ref_free = True
if model_version in v3v4set:
ref_free = False  # s2v3 does not support ref_free yet
else:
if_sr = False
if model_version not in {"v3", "v4", "v2Pro", "v2ProPlus"}:
clean_bigvgan_model()
clean_hifigan_model()
clean_sv_cn_model()
t0 = ttime()
prompt_language = dict_language[prompt_language]
text_language = dict_language[text_language]
if not ref_free:
prompt_text = prompt_text.strip("\n")
if prompt_text[-1] not in splits:
prompt_text += "。" if prompt_language != "en" else "."
print(i18n("实际输入的参考文本:"), prompt_text)
text = text.strip("\n")
# if (text[0] not in splits and len(get_first(text)) < 4): text = "。" + text if text_language != "en" else "." + text
print(i18n("实际输入的目标文本:"), text)
zero_wav = np.zeros(
int(hps.data.sampling_rate * pause_second),
dtype=np.float16 if is_half == True else np.float32,
)
zero_wav_torch = torch.from_numpy(zero_wav)
if is_half == True:
zero_wav_torch = zero_wav_torch.half().to(device)
else:
zero_wav_torch = zero_wav_torch.to(device)
if not ref_free:
with torch.no_grad():
wav16k, sr = librosa.load(ref_wav_path, sr=16000)
if wav16k.shape[0] > 160000 or wav16k.shape[0] < 48000:
gr.Warning(i18n("参考音频在3~10秒范围外,请更换!"))
raise OSError(i18n("参考音频在3~10秒范围外,请更换!"))
wav16k = torch.from_numpy(wav16k)
if is_half == True:
wav16k = wav16k.half().to(device)
else:
wav16k = wav16k.to(device)
wav16k = torch.cat([wav16k, zero_wav_torch])
ssl_content = ssl_model.model(wav16k.unsqueeze(0))["last_hidden_state"].transpose(1, 2) # .float()
codes = vq_model.extract_latent(ssl_content)
prompt_semantic = codes[0, 0]
prompt = prompt_semantic.unsqueeze(0).to(device)
t1 = ttime()
t.append(t1 - t0)
if how_to_cut == i18n("凑四句一切"):
text = cut1(text)
elif how_to_cut == i18n("凑50字一切"):
text = cut2(text)
elif how_to_cut == i18n("按中文句号。切"):
text = cut3(text)
elif how_to_cut == i18n("按英文句号.切"):
text = cut4(text)
elif how_to_cut == i18n("按标点符号切"):
text = cut5(text)
while "\n\n" in text:
text = text.replace("\n\n", "\n")
print(i18n("实际输入的目标文本(切句后):"), text)
texts = text.split("\n")
texts = process_text(texts)
texts = merge_short_text_in_array(texts, 5)
audio_opt = []
# s2v3 does not support ref_free yet
if not ref_free:
phones1, bert1, norm_text1 = get_phones_and_bert(prompt_text, prompt_language, version)
for i_text, text in enumerate(texts):
# skip blank lines in the target text so they do not raise errors
if len(text.strip()) == 0:
continue
if text[-1] not in splits:
text += "。" if text_language != "en" else "."
print(i18n("实际输入的目标文本(每句):"), text)
phones2, bert2, norm_text2 = get_phones_and_bert(text, text_language, version)
print(i18n("前端处理后的文本(每句):"), norm_text2)
if not ref_free:
bert = torch.cat([bert1, bert2], 1)
all_phoneme_ids = torch.LongTensor(phones1 + phones2).to(device).unsqueeze(0)
else:
bert = bert2
all_phoneme_ids = torch.LongTensor(phones2).to(device).unsqueeze(0)
bert = bert.to(device).unsqueeze(0)
all_phoneme_len = torch.tensor([all_phoneme_ids.shape[-1]]).to(device)
t2 = ttime()
# cache_key="%s-%s-%s-%s-%s-%s-%s-%s"%(ref_wav_path,prompt_text,prompt_language,text,text_language,top_k,top_p,temperature)
# print(cache.keys(),if_freeze)
if i_text in cache and if_freeze == True:
pred_semantic = cache[i_text]
else:
with torch.no_grad():
pred_semantic, idx = t2s_model.model.infer_panel(
all_phoneme_ids,
all_phoneme_len,
None if ref_free else prompt,
bert,
# prompt_phone_len=ph_offset,
top_k=top_k,
top_p=top_p,
temperature=temperature,
early_stop_num=hz * max_sec,
)
pred_semantic = pred_semantic[:, -idx:].unsqueeze(0)
cache[i_text] = pred_semantic
t3 = ttime()
is_v2pro = model_version in {"v2Pro", "v2ProPlus"}
# print(23333,is_v2pro,model_version)
# v3 has no inp_refs and none of the logic below
if model_version not in v3v4set:
refers = []
if is_v2pro:
sv_emb = []
if sv_cn_model == None:
init_sv_cn()
if inp_refs:
for path in inp_refs:
try:  # also extract the speaker-verification embedding here: either many sv embeddings paired with many refers, or a single sv with a single refer
refer, audio_tensor = get_spepc(hps, path.name, dtype, device, is_v2pro)
refers.append(refer)
if is_v2pro:
sv_emb.append(sv_cn_model.compute_embedding3(audio_tensor))
except:
traceback.print_exc()
if len(refers) == 0:
refers, audio_tensor = get_spepc(hps, ref_wav_path, dtype, device, is_v2pro)
refers = [refers]
if is_v2pro:
sv_emb = [sv_cn_model.compute_embedding3(audio_tensor)]
if is_v2pro:
audio = vq_model.decode(
pred_semantic, torch.LongTensor(phones2).to(device).unsqueeze(0), refers, speed=speed, sv_emb=sv_emb
)[0][0]
else:
audio = vq_model.decode(
pred_semantic, torch.LongTensor(phones2).to(device).unsqueeze(0), refers, speed=speed
)[0][0]
else:
refer, audio_tensor = get_spepc(hps, ref_wav_path, dtype, device)
phoneme_ids0 = torch.LongTensor(phones1).to(device).unsqueeze(0)
phoneme_ids1 = torch.LongTensor(phones2).to(device).unsqueeze(0)
fea_ref, ge = vq_model.decode_encp(prompt.unsqueeze(0), phoneme_ids0, refer)
ref_audio, sr = torchaudio.load(ref_wav_path)
ref_audio = ref_audio.to(device).float()
if ref_audio.shape[0] == 2:
ref_audio = ref_audio.mean(0).unsqueeze(0)
tgt_sr = 24000 if model_version == "v3" else 32000
if sr != tgt_sr:
ref_audio = resample(ref_audio, sr, tgt_sr, device)
# print("ref_audio",ref_audio.abs().mean())
mel2 = mel_fn(ref_audio) if model_version == "v3" else mel_fn_v4(ref_audio)
mel2 = norm_spec(mel2)
T_min = min(mel2.shape[2], fea_ref.shape[2])
mel2 = mel2[:, :, :T_min]
fea_ref = fea_ref[:, :, :T_min]
Tref = 468 if model_version == "v3" else 500
Tchunk = 934 if model_version == "v3" else 1000
if T_min > Tref:
mel2 = mel2[:, :, -Tref:]
fea_ref = fea_ref[:, :, -Tref:]
T_min = Tref
chunk_len = Tchunk - T_min
mel2 = mel2.to(dtype)
fea_todo, ge = vq_model.decode_encp(pred_semantic, phoneme_ids1, refer, ge, speed)
cfm_resss = []
idx = 0
while 1:
fea_todo_chunk = fea_todo[:, :, idx : idx + chunk_len]
if fea_todo_chunk.shape[-1] == 0:
break
idx += chunk_len
fea = torch.cat([fea_ref, fea_todo_chunk], 2).transpose(2, 1)
cfm_res = vq_model.cfm.inference(
fea, torch.LongTensor([fea.size(1)]).to(fea.device), mel2, sample_steps, inference_cfg_rate=0
)
cfm_res = cfm_res[:, :, mel2.shape[2] :]
mel2 = cfm_res[:, :, -T_min:]
fea_ref = fea_todo_chunk[:, :, -T_min:]
cfm_resss.append(cfm_res)
cfm_res = torch.cat(cfm_resss, 2)
cfm_res = denorm_spec(cfm_res)
if model_version == "v3":
if bigvgan_model == None:
init_bigvgan()
else: # v4
if hifigan_model == None:
init_hifigan()
vocoder_model = bigvgan_model if model_version == "v3" else hifigan_model
with torch.inference_mode():
wav_gen = vocoder_model(cfm_res)
audio = wav_gen[0][0] # .cpu().detach().numpy()
max_audio = torch.abs(audio).max()  # simple guard against 16-bit clipping
if max_audio > 1:
audio = audio / max_audio
audio_opt.append(audio)
audio_opt.append(zero_wav_torch) # zero_wav
t4 = ttime()
t.extend([t2 - t1, t3 - t2, t4 - t3])
t1 = ttime()
print("%.3f\t%.3f\t%.3f\t%.3f" % (t[0], sum(t[1::3]), sum(t[2::3]), sum(t[3::3])))
audio_opt = torch.cat(audio_opt, 0) # np.concatenate
if model_version in {"v1", "v2", "v2Pro", "v2ProPlus"}:
opt_sr = 32000
elif model_version == "v3":
opt_sr = 24000
else:
opt_sr = 48000 # v4
if if_sr == True and opt_sr == 24000:
print(i18n("音频超分中"))
audio_opt, opt_sr = audio_sr(audio_opt.unsqueeze(0), opt_sr)
max_audio = np.abs(audio_opt).max()
if max_audio > 1:
audio_opt /= max_audio
else:
audio_opt = audio_opt.cpu().detach().numpy()
yield opt_sr, (audio_opt * 32767).astype(np.int16)
def split(todo_text):
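"""Split todo_text into segments at the punctuation marks in splits, keeping each mark attached to the segment it ends."""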
todo_text = todo_text.replace("……", "。").replace("——", ",")
if todo_text[-1] not in splits:
todo_text += "。"
i_split_head = i_split_tail = 0
len_text = len(todo_text)
todo_texts = []
while 1:
if i_split_head >= len_text:
break  # the text always ends with punctuation, so just exit; the final segment was appended in the previous iteration
if todo_text[i_split_head] in splits:
i_split_head += 1
todo_texts.append(todo_text[i_split_tail:i_split_head])
i_split_tail = i_split_head
else:
i_split_head += 1
return todo_texts
def cut1(inp):
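"""Cut method 1: join every four punctuation-delimited segments into one line."""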
inp = inp.strip("\n")
inps = split(inp)
split_idx = list(range(0, len(inps), 4))
split_idx[-1] = None
if len(split_idx) > 1:
opts = []
for idx in range(len(split_idx) - 1):
opts.append("".join(inps[split_idx[idx] : split_idx[idx + 1]]))
else:
opts = [inp]
opts = [item for item in opts if not set(item).issubset(punctuation)]
return "\n".join(opts)
def cut2(inp):
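"""Cut method 2: accumulate segments into chunks of roughly 50 characters, merging a short trailing chunk into the previous one."""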
inp = inp.strip("\n")
inps = split(inp)
if len(inps) < 2:
return inp
opts = []
summ = 0
tmp_str = ""
for i in range(len(inps)):
summ += len(inps[i])
tmp_str += inps[i]
if summ > 50:
summ = 0
opts.append(tmp_str)
tmp_str = ""
if tmp_str != "":
opts.append(tmp_str)
# print(opts)
if len(opts) > 1 and len(opts[-1]) < 50:  # if the last chunk is too short, merge it into the previous one
opts[-2] = opts[-2] + opts[-1]
opts = opts[:-1]
opts = [item for item in opts if not set(item).issubset(punctuation)]
return "\n".join(opts)
def cut3(inp):
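"""Cut method 3: split on the Chinese full stop (。)."""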
inp = inp.strip("\n")
opts = ["%s" % item for item in inp.strip("。").split("。")]
opts = [item for item in opts if not set(item).issubset(punctuation)]
return "\n".join(opts)
def cut4(inp):
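"""Cut method 4: split on the English full stop ".", ignoring dots between digits (decimals)."""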
inp = inp.strip("\n")
opts = re.split(r"(?<!\d)\.(?!\d)", inp.strip("."))
opts = [item for item in opts if not set(item).issubset(punctuation)]
return "\n".join(opts)
# contributed by https://github.com/AI-Hobbyist/GPT-SoVITS/blob/main/GPT_SoVITS/inference_webui.py
def cut5(inp):
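"""Cut method 5: split on any major punctuation mark while keeping dots inside numbers intact."""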
inp = inp.strip("\n")
punds = {",", ".", ";", "?", "!", "、", ",", "。", "?", "!", ";", ":", "…"}
mergeitems = []
items = []
for i, char in enumerate(inp):
if char in punds:
if char == "." and i > 0 and i < len(inp) - 1 and inp[i - 1].isdigit() and inp[i + 1].isdigit():
items.append(char)
else:
items.append(char)
mergeitems.append("".join(items))
items = []
else:
items.append(char)
if items:
mergeitems.append("".join(items))
opt = [item for item in mergeitems if not set(item).issubset(punds)]
return "\n".join(opt)
def custom_sort_key(s):
# use a regex to split the string into numeric and non-numeric parts
parts = re.split(r"(\d+)", s)
# convert the numeric parts to integers; leave the rest unchanged
parts = [int(part) if part.isdigit() else part for part in parts]
return parts
def process_text(texts):
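"""Drop empty entries from texts; raise ValueError when every entry is empty."""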
_text = []
if all(text in [None, " ", "\n", ""] for text in texts):
raise ValueError(i18n("请输入有效文本"))
for text in texts:
if text in [None, " ", ""]:
pass
else:
_text.append(text)
return _text
def html_center(text, label="p"):
return f"""<div style="text-align: center; margin: 100; padding: 50;">
<{label} style="margin: 0; padding: 0;">{text}</{label}>
</div>"""
def html_left(text, label="p"):
return f"""<div style="text-align: left; margin: 0; padding: 0;">
<{label} style="margin: 0; padding: 0;">{text}</{label}>
</div>"""
with gr.Blocks(title="GPT-SoVITS WebUI", analytics_enabled=False, js=js, css=css) as app:
gr.HTML(
top_html.format(
i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.")
+ i18n("如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.")
),
elem_classes="markdown",
)
with gr.Group():
gr.Markdown(html_center(i18n("模型切换"), "h3"))
with gr.Row():
GPT_dropdown = gr.Dropdown(
label=i18n("GPT模型列表"),
choices=sorted(GPT_names, key=custom_sort_key),
value=gpt_path,
interactive=True,
scale=14,
)
SoVITS_dropdown = gr.Dropdown(
label=i18n("SoVITS模型列表"),
choices=sorted(SoVITS_names, key=custom_sort_key),
value=sovits_path,
interactive=True,
scale=14,
)
refresh_button = gr.Button(i18n("刷新模型路径"), variant="primary", scale=14)
refresh_button.click(fn=change_choices, inputs=[], outputs=[SoVITS_dropdown, GPT_dropdown])
gr.Markdown(html_center(i18n("*请上传并填写参考信息"), "h3"))
with gr.Row():
inp_ref = gr.Audio(label=i18n("请上传3~10秒内参考音频,超过会报错!"), type="filepath", scale=13)
with gr.Column(scale=13):
ref_text_free = gr.Checkbox(
label=i18n("开启无参考文本模式。不填参考文本亦相当于开启。")
+ i18n("v3暂不支持该模式,使用了会报错。"),
value=False,
interactive=True if model_version not in v3v4set else False,
show_label=True,
scale=1,
)
gr.Markdown(
html_left(
i18n("使用无参考文本模式时建议使用微调的GPT")
+ "<br>"
+ i18n("听不清参考音频说的啥(不晓得写啥)可以开。开启后无视填写的参考文本。")
)
)
prompt_text = gr.Textbox(label=i18n("参考音频的文本"), value="", lines=5, max_lines=5, scale=1)
with gr.Column(scale=14):
prompt_language = gr.Dropdown(
label=i18n("参考音频的语种"),
choices=list(dict_language.keys()),
value=i18n("中文"),
)
inp_refs = (
gr.File(
label=i18n(
"可选项:通过拖拽多个文件上传多个参考音频(建议同性),平均融合他们的音色。如不填写此项,音色由左侧单个参考音频控制。如是微调模型,建议参考音频全部在微调训练集音色内,底模不用管。"
),
file_count="multiple",
)
if model_version not in v3v4set
else gr.File(
label=i18n(
"可选项:通过拖拽多个文件上传多个参考音频(建议同性),平均融合他们的音色。如不填写此项,音色由左侧单个参考音频控制。如是微调模型,建议参考音频全部在微调训练集音色内,底模不用管。"
),
file_count="multiple",
visible=False,
)
)
sample_steps = (
gr.Radio(
label=i18n("采样步数,如果觉得电,提高试试,如果觉得慢,降低试试"),
value=32 if model_version == "v3" else 8,
choices=[4, 8, 16, 32, 64, 128] if model_version == "v3" else [4, 8, 16, 32],
visible=True,
)
if model_version in v3v4set
else gr.Radio(
label=i18n("采样步数,如果觉得电,提高试试,如果觉得慢,降低试试"),
choices=[4, 8, 16, 32, 64, 128] if model_version == "v3" else [4, 8, 16, 32],
visible=False,
value=32 if model_version == "v3" else 8,
)
)
if_sr_Checkbox = gr.Checkbox(
label=i18n("v3输出如果觉得闷可以试试开超分"),
value=False,
interactive=True,
show_label=True,
visible=False if model_version != "v3" else True,
)
gr.Markdown(html_center(i18n("*请填写需要合成的目标文本和语种模式"), "h3"))
with gr.Row():
with gr.Column(scale=13):
text = gr.Textbox(label=i18n("需要合成的文本"), value="", lines=26, max_lines=26)
with gr.Column(scale=7):
text_language = gr.Dropdown(
label=i18n("需要合成的语种") + i18n(".限制范围越小判别效果越好。"),
choices=list(dict_language.keys()),
value=i18n("中文"),
scale=1,
)
how_to_cut = gr.Dropdown(
label=i18n("怎么切"),
choices=[
i18n("不切"),
i18n("凑四句一切"),
i18n("凑50字一切"),
i18n("按中文句号。切"),
i18n("按英文句号.切"),
i18n("按标点符号切"),
],
value=i18n("凑四句一切"),
interactive=True,
scale=1,
)
gr.Markdown(value=html_center(i18n("语速调整,高为更快")))
if_freeze = gr.Checkbox(
label=i18n("是否直接对上次合成结果调整语速和音色。防止随机性。"),
value=False,
interactive=True,
show_label=True,
scale=1,
)
with gr.Row():
speed = gr.Slider(
minimum=0.6, maximum=1.65, step=0.05, label=i18n("语速"), value=1, interactive=True, scale=1
)
pause_second_slider = gr.Slider(
minimum=0.1,
maximum=0.5,
step=0.01,
label=i18n("句间停顿秒数"),
value=0.3,
interactive=True,
scale=1,
)
gr.Markdown(html_center(i18n("GPT采样参数(无参考文本时不要太低。不懂就用默认):")))
top_k = gr.Slider(
minimum=1, maximum=100, step=1, label=i18n("top_k"), value=15, interactive=True, scale=1
)
top_p = gr.Slider(
minimum=0, maximum=1, step=0.05, label=i18n("top_p"), value=1, interactive=True, scale=1
)
temperature = gr.Slider(
minimum=0, maximum=1, step=0.05, label=i18n("temperature"), value=1, interactive=True, scale=1
)
# with gr.Column():
# gr.Markdown(value=i18n("手工调整音素。当音素框不为空时使用手工音素输入推理,无视目标文本框。"))
# phoneme=gr.Textbox(label=i18n("音素框"), value="")
# get_phoneme_button = gr.Button(i18n("目标文本转音素"), variant="primary")
with gr.Row():
inference_button = gr.Button(value=i18n("合成语音"), variant="primary", size="lg", scale=25)
output = gr.Audio(label=i18n("输出的语音"), scale=14)
inference_button.click(
get_tts_wav,
[
inp_ref,
prompt_text,
prompt_language,
text,
text_language,
how_to_cut,
top_k,
top_p,
temperature,
ref_text_free,
speed,
if_freeze,
inp_refs,
sample_steps,
if_sr_Checkbox,
pause_second_slider,
],
[output],
)
SoVITS_dropdown.change(
change_sovits_weights,
[SoVITS_dropdown, prompt_language, text_language],
[
prompt_language,
text_language,
prompt_text,
prompt_language,
text,
text_language,
sample_steps,
inp_refs,
ref_text_free,
if_sr_Checkbox,
inference_button,
],
)
GPT_dropdown.change(change_gpt_weights, [GPT_dropdown], [])
# gr.Markdown(value=i18n("文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。"))
# with gr.Row():
# text_inp = gr.Textbox(label=i18n("需要合成的切分前文本"), value="")
# button1 = gr.Button(i18n("凑四句一切"), variant="primary")
# button2 = gr.Button(i18n("凑50字一切"), variant="primary")
# button3 = gr.Button(i18n("按中文句号。切"), variant="primary")
# button4 = gr.Button(i18n("按英文句号.切"), variant="primary")
# button5 = gr.Button(i18n("按标点符号切"), variant="primary")
# text_opt = gr.Textbox(label=i18n("切分后文本"), value="")
# button1.click(cut1, [text_inp], [text_opt])
# button2.click(cut2, [text_inp], [text_opt])
# button3.click(cut3, [text_inp], [text_opt])
# button4.click(cut4, [text_inp], [text_opt])
# button5.click(cut5, [text_inp], [text_opt])
# gr.Markdown(html_center(i18n("后续将支持转音素、手工修改音素、语音合成分步执行。")))
if __name__ == "__main__":
app.queue().launch( # concurrency_count=511, max_size=1022
server_name="0.0.0.0",
inbrowser=True,
share=is_share,
server_port=infer_ttswebui,
# quiet=True,
) | ---
+++
@@ -1,7 +1,16 @@
+"""
+按中英混合识别
+按日英混合识别
+多语种启动切分识别语种
+全部按中文识别
+全部按英文识别
+全部按日文识别
+"""
import psutil
import os
def set_high_priority():
+ """把当前 Python 进程设为 HIGH_PRIORITY_CLASS"""
if os.name != "nt":
return # 仅 Windows 有效
p = psutil.Process(os.getpid())
@@ -1341,4 +1350,4 @@
share=is_share,
server_port=infer_ttswebui,
# quiet=True,
- )
+ )
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/inference_webui.py |
Add docstrings explaining edge cases | import psutil
import os
def set_high_priority():
if os.name != "nt":
return  # only effective on Windows
p = psutil.Process(os.getpid())
try:
p.nice(psutil.HIGH_PRIORITY_CLASS)
print("已将进程优先级设为 High")
except psutil.AccessDenied:
print("权限不足,无法修改优先级(请用管理员运行)")
set_high_priority()
import json
import logging
import os
import random
import re
import sys
import torch
now_dir = os.getcwd()
sys.path.append(now_dir)
sys.path.append("%s/GPT_SoVITS" % (now_dir))
logging.getLogger("markdown_it").setLevel(logging.ERROR)
logging.getLogger("urllib3").setLevel(logging.ERROR)
logging.getLogger("httpcore").setLevel(logging.ERROR)
logging.getLogger("httpx").setLevel(logging.ERROR)
logging.getLogger("asyncio").setLevel(logging.ERROR)
logging.getLogger("charset_normalizer").setLevel(logging.ERROR)
logging.getLogger("torchaudio._extension").setLevel(logging.ERROR)
infer_ttswebui = os.environ.get("infer_ttswebui", 9872)
infer_ttswebui = int(infer_ttswebui)
is_share = os.environ.get("is_share", "False")
is_share = eval(is_share)
if "_CUDA_VISIBLE_DEVICES" in os.environ:
os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"]
is_half = eval(os.environ.get("is_half", "True")) and torch.cuda.is_available()
gpt_path = os.environ.get("gpt_path", None)
sovits_path = os.environ.get("sovits_path", None)
cnhubert_base_path = os.environ.get("cnhubert_base_path", None)
bert_path = os.environ.get("bert_path", None)
version = model_version = os.environ.get("version", "v2")
import gradio as gr
from TTS_infer_pack.text_segmentation_method import get_method
from TTS_infer_pack.TTS import NO_PROMPT_ERROR, TTS, TTS_Config
from tools.assets import css, js, top_html
from tools.i18n.i18n import I18nAuto, scan_language_list
language = os.environ.get("language", "Auto")
language = sys.argv[-1] if sys.argv[-1] in scan_language_list() else language
i18n = I18nAuto(language=language)
# os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'  # make sure this can also be set when the inference UI is launched directly.
if torch.cuda.is_available():
device = "cuda"
# elif torch.backends.mps.is_available():
# device = "mps"
else:
device = "cpu"
# is_half = False
# device = "cpu"
dict_language_v1 = {
i18n("中文"): "all_zh", # 全部按中文识别
i18n("英文"): "en", # 全部按英文识别#######不变
i18n("日文"): "all_ja", # 全部按日文识别
i18n("中英混合"): "zh", # 按中英混合识别####不变
i18n("日英混合"): "ja", # 按日英混合识别####不变
i18n("多语种混合"): "auto", # 多语种启动切分识别语种
}
dict_language_v2 = {
i18n("中文"): "all_zh", # 全部按中文识别
i18n("英文"): "en", # 全部按英文识别#######不变
i18n("日文"): "all_ja", # 全部按日文识别
i18n("粤语"): "all_yue", # 全部按中文识别
i18n("韩文"): "all_ko", # 全部按韩文识别
i18n("中英混合"): "zh", # 按中英混合识别####不变
i18n("日英混合"): "ja", # 按日英混合识别####不变
i18n("粤英混合"): "yue", # 按粤英混合识别####不变
i18n("韩英混合"): "ko", # 按韩英混合识别####不变
i18n("多语种混合"): "auto", # 多语种启动切分识别语种
i18n("多语种混合(粤语)"): "auto_yue", # 多语种启动切分识别语种
}
dict_language = dict_language_v1 if version == "v1" else dict_language_v2
cut_method = {
i18n("不切"): "cut0",
i18n("凑四句一切"): "cut1",
i18n("凑50字一切"): "cut2",
i18n("按中文句号。切"): "cut3",
i18n("按英文句号.切"): "cut4",
i18n("按标点符号切"): "cut5",
}
from config import change_choices, get_weights_names, name2gpt_path, name2sovits_path
SoVITS_names, GPT_names = get_weights_names()
from config import pretrained_sovits_name
path_sovits_v3 = pretrained_sovits_name["v3"]
path_sovits_v4 = pretrained_sovits_name["v4"]
is_exist_s2gv3 = os.path.exists(path_sovits_v3)
is_exist_s2gv4 = os.path.exists(path_sovits_v4)
tts_config = TTS_Config("GPT_SoVITS/configs/tts_infer.yaml")
tts_config.device = device
tts_config.is_half = is_half
# tts_config.version = version
tts_config.update_version(version)
if gpt_path is not None:
if "!" in gpt_path or "!" in gpt_path:
gpt_path = name2gpt_path[gpt_path]
tts_config.t2s_weights_path = gpt_path
if sovits_path is not None:
if "!" in sovits_path or "!" in sovits_path:
sovits_path = name2sovits_path[sovits_path]
tts_config.vits_weights_path = sovits_path
if cnhubert_base_path is not None:
tts_config.cnhuhbert_base_path = cnhubert_base_path
if bert_path is not None:
tts_config.bert_base_path = bert_path
print(tts_config)
tts_pipeline = TTS(tts_config)
gpt_path = tts_config.t2s_weights_path
sovits_path = tts_config.vits_weights_path
version = tts_config.version
def inference(
text,
text_lang,
ref_audio_path,
aux_ref_audio_paths,
prompt_text,
prompt_lang,
top_k,
top_p,
temperature,
text_split_method,
batch_size,
speed_factor,
ref_text_free,
split_bucket,
fragment_interval,
seed,
keep_random,
parallel_infer,
repetition_penalty,
sample_steps,
super_sampling,
):
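"""Gradio wrapper around the TTS pipeline: builds the input dict, resolves the random seed (keep_random forces a fresh one), streams synthesized audio together with the seed actually used, and warns instead of failing when a v3 model runs without reference text."""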
seed = -1 if keep_random else seed
actual_seed = seed if seed not in [-1, "", None] else random.randint(0, 2**32 - 1)
inputs = {
"text": text,
"text_lang": dict_language[text_lang],
"ref_audio_path": ref_audio_path,
"aux_ref_audio_paths": [item.name for item in aux_ref_audio_paths] if aux_ref_audio_paths is not None else [],
"prompt_text": prompt_text if not ref_text_free else "",
"prompt_lang": dict_language[prompt_lang],
"top_k": top_k,
"top_p": top_p,
"temperature": temperature,
"text_split_method": cut_method[text_split_method],
"batch_size": int(batch_size),
"speed_factor": float(speed_factor),
"split_bucket": split_bucket,
"return_fragment": False,
"fragment_interval": fragment_interval,
"seed": actual_seed,
"parallel_infer": parallel_infer,
"repetition_penalty": repetition_penalty,
"sample_steps": int(sample_steps),
"super_sampling": super_sampling,
}
try:
for item in tts_pipeline.run(inputs):
yield item, actual_seed
except NO_PROMPT_ERROR:
gr.Warning(i18n("V3不支持无参考文本模式,请填写参考文本!"))
def custom_sort_key(s):
# use a regex to split the string into numeric and non-numeric parts
parts = re.split(r"(\d+)", s)
# convert the numeric parts to integers; leave the rest unchanged
parts = [int(part) if part.isdigit() else part for part in parts]
return parts
if os.path.exists("./weight.json"):
pass
else:
with open("./weight.json", "w", encoding="utf-8") as file:
json.dump({"GPT": {}, "SoVITS": {}}, file)
with open("./weight.json", "r", encoding="utf-8") as file:
weight_data = file.read()
weight_data = json.loads(weight_data)
gpt_path = os.environ.get("gpt_path", weight_data.get("GPT", {}).get(version, GPT_names[-1]))
sovits_path = os.environ.get("sovits_path", weight_data.get("SoVITS", {}).get(version, SoVITS_names[0]))
if isinstance(gpt_path, list):
gpt_path = gpt_path[0]
if isinstance(sovits_path, list):
sovits_path = sovits_path[0]
from process_ckpt import get_sovits_version_from_path_fast
v3v4set = {"v3", "v4"}
def change_sovits_weights(sovits_path, prompt_language=None, text_language=None):
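"""Load new SoVITS weights into the shared TTS pipeline; a generator yielding Gradio updates that disable the UI during loading and re-enable it afterwards, then records the path in weight.json."""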
if "!" in sovits_path or "!" in sovits_path:
sovits_path = name2sovits_path[sovits_path]
global version, model_version, dict_language, if_lora_v3
version, model_version, if_lora_v3 = get_sovits_version_from_path_fast(sovits_path)
# print(sovits_path,version, model_version, if_lora_v3)
is_exist = is_exist_s2gv3 if model_version == "v3" else is_exist_s2gv4
path_sovits = path_sovits_v3 if model_version == "v3" else path_sovits_v4
if if_lora_v3 == True and is_exist == False:
info = path_sovits + "SoVITS %s" % model_version + i18n("底模缺失,无法加载相应 LoRA 权重")
gr.Warning(info)
raise FileExistsError(info)
dict_language = dict_language_v1 if version == "v1" else dict_language_v2
if prompt_language is not None and text_language is not None:
if prompt_language in list(dict_language.keys()):
prompt_text_update, prompt_language_update = (
{"__type__": "update"},
{"__type__": "update", "value": prompt_language},
)
else:
prompt_text_update = {"__type__": "update", "value": ""}
prompt_language_update = {"__type__": "update", "value": i18n("中文")}
if text_language in list(dict_language.keys()):
text_update, text_language_update = {"__type__": "update"}, {"__type__": "update", "value": text_language}
else:
text_update = {"__type__": "update", "value": ""}
text_language_update = {"__type__": "update", "value": i18n("中文")}
if model_version in v3v4set:
visible_sample_steps = True
visible_inp_refs = False
else:
visible_sample_steps = False
visible_inp_refs = True
yield (
{"__type__": "update", "choices": list(dict_language.keys())},
{"__type__": "update", "choices": list(dict_language.keys())},
prompt_text_update,
prompt_language_update,
text_update,
text_language_update,
{"__type__": "update", "interactive": visible_sample_steps, "value": 32},
{"__type__": "update", "visible": visible_inp_refs},
{"__type__": "update", "interactive": True if model_version not in v3v4set else False},
{"__type__": "update", "value": i18n("模型加载中,请等待"), "interactive": False},
)
tts_pipeline.init_vits_weights(sovits_path)
yield (
{"__type__": "update", "choices": list(dict_language.keys())},
{"__type__": "update", "choices": list(dict_language.keys())},
prompt_text_update,
prompt_language_update,
text_update,
text_language_update,
{"__type__": "update", "interactive": visible_sample_steps, "value": 32},
{"__type__": "update", "visible": visible_inp_refs},
{"__type__": "update", "interactive": True if model_version not in v3v4set else False},
{"__type__": "update", "value": i18n("合成语音"), "interactive": True},
)
with open("./weight.json") as f:
data = f.read()
data = json.loads(data)
data["SoVITS"][version] = sovits_path
with open("./weight.json", "w") as f:
f.write(json.dumps(data))
def change_gpt_weights(gpt_path):
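"""Load new GPT (text-to-semantic) weights into the shared TTS pipeline."""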
if "!" in gpt_path or "!" in gpt_path:
gpt_path = name2gpt_path[gpt_path]
tts_pipeline.init_t2s_weights(gpt_path)
with gr.Blocks(title="GPT-SoVITS WebUI", analytics_enabled=False, js=js, css=css) as app:
gr.HTML(
top_html.format(
i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.")
+ i18n("如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.")
),
elem_classes="markdown",
)
with gr.Column():
# with gr.Group():
gr.Markdown(value=i18n("模型切换"))
with gr.Row():
GPT_dropdown = gr.Dropdown(
label=i18n("GPT模型列表"),
choices=sorted(GPT_names, key=custom_sort_key),
value=gpt_path,
interactive=True,
)
SoVITS_dropdown = gr.Dropdown(
label=i18n("SoVITS模型列表"),
choices=sorted(SoVITS_names, key=custom_sort_key),
value=sovits_path,
interactive=True,
)
refresh_button = gr.Button(i18n("刷新模型路径"), variant="primary")
refresh_button.click(fn=change_choices, inputs=[], outputs=[SoVITS_dropdown, GPT_dropdown])
with gr.Row():
with gr.Column():
gr.Markdown(value=i18n("*请上传并填写参考信息"))
with gr.Row():
inp_ref = gr.Audio(label=i18n("主参考音频(请上传3~10秒内参考音频,超过会报错!)"), type="filepath")
inp_refs = gr.File(
label=i18n("辅参考音频(可选多个,或不选)"),
file_count="multiple",
visible=True if model_version != "v3" else False,
)
prompt_text = gr.Textbox(label=i18n("主参考音频的文本"), value="", lines=2)
with gr.Row():
prompt_language = gr.Dropdown(
label=i18n("主参考音频的语种"), choices=list(dict_language.keys()), value=i18n("中文")
)
with gr.Column():
ref_text_free = gr.Checkbox(
label=i18n("开启无参考文本模式。不填参考文本亦相当于开启。"),
value=False,
interactive=True if model_version != "v3" else False,
show_label=True,
)
gr.Markdown(
i18n("使用无参考文本模式时建议使用微调的GPT")
+ "<br>"
+ i18n("听不清参考音频说的啥(不晓得写啥)可以开。开启后无视填写的参考文本。")
)
with gr.Column():
gr.Markdown(value=i18n("*请填写需要合成的目标文本和语种模式"))
text = gr.Textbox(label=i18n("需要合成的文本"), value="", lines=20, max_lines=20)
text_language = gr.Dropdown(
label=i18n("需要合成的文本的语种"), choices=list(dict_language.keys()), value=i18n("中文")
)
with gr.Group():
gr.Markdown(value=i18n("推理设置"))
with gr.Row():
with gr.Column():
with gr.Row():
batch_size = gr.Slider(
minimum=1, maximum=200, step=1, label=i18n("batch_size"), value=20, interactive=True
)
sample_steps = gr.Radio(
label=i18n("采样步数(仅对V3/4生效)"), value=32, choices=[4, 8, 16, 32, 64, 128], visible=True
)
with gr.Row():
fragment_interval = gr.Slider(
minimum=0.01, maximum=1, step=0.01, label=i18n("分段间隔(秒)"), value=0.3, interactive=True
)
speed_factor = gr.Slider(
minimum=0.6, maximum=1.65, step=0.05, label="语速", value=1.0, interactive=True
)
with gr.Row():
top_k = gr.Slider(minimum=1, maximum=100, step=1, label=i18n("top_k"), value=15, interactive=True)
top_p = gr.Slider(minimum=0, maximum=1, step=0.05, label=i18n("top_p"), value=1, interactive=True)
with gr.Row():
temperature = gr.Slider(
minimum=0, maximum=1, step=0.05, label=i18n("temperature"), value=1, interactive=True
)
repetition_penalty = gr.Slider(
minimum=0, maximum=2, step=0.05, label=i18n("重复惩罚"), value=1.35, interactive=True
)
with gr.Column():
with gr.Row():
how_to_cut = gr.Dropdown(
label=i18n("怎么切"),
choices=[
i18n("不切"),
i18n("凑四句一切"),
i18n("凑50字一切"),
i18n("按中文句号。切"),
i18n("按英文句号.切"),
i18n("按标点符号切"),
],
value=i18n("凑四句一切"),
interactive=True,
scale=1,
)
super_sampling = gr.Checkbox(
label=i18n("音频超采样(仅对V3生效))"), value=False, interactive=True, show_label=True
)
with gr.Row():
parallel_infer = gr.Checkbox(label=i18n("并行推理"), value=True, interactive=True, show_label=True)
split_bucket = gr.Checkbox(
label=i18n("数据分桶(并行推理时会降低一点计算量)"),
value=True,
interactive=True,
show_label=True,
)
with gr.Row():
seed = gr.Number(label=i18n("随机种子"), value=-1)
keep_random = gr.Checkbox(label=i18n("保持随机"), value=True, interactive=True, show_label=True)
output = gr.Audio(label=i18n("输出的语音"))
with gr.Row():
inference_button = gr.Button(i18n("合成语音"), variant="primary")
stop_infer = gr.Button(i18n("终止合成"), variant="primary")
inference_button.click(
inference,
[
text,
text_language,
inp_ref,
inp_refs,
prompt_text,
prompt_language,
top_k,
top_p,
temperature,
how_to_cut,
batch_size,
speed_factor,
ref_text_free,
split_bucket,
fragment_interval,
seed,
keep_random,
parallel_infer,
repetition_penalty,
sample_steps,
super_sampling,
],
[output, seed],
)
stop_infer.click(tts_pipeline.stop, [], [])
SoVITS_dropdown.change(
change_sovits_weights,
[SoVITS_dropdown, prompt_language, text_language],
[
prompt_language,
text_language,
prompt_text,
prompt_language,
text,
text_language,
sample_steps,
inp_refs,
ref_text_free,
inference_button,
],
) #
GPT_dropdown.change(change_gpt_weights, [GPT_dropdown], [])
with gr.Group():
gr.Markdown(
value=i18n(
"文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。"
)
)
with gr.Row():
text_inp = gr.Textbox(label=i18n("需要合成的切分前文本"), value="", lines=4)
with gr.Column():
_how_to_cut = gr.Radio(
label=i18n("怎么切"),
choices=[
i18n("不切"),
i18n("凑四句一切"),
i18n("凑50字一切"),
i18n("按中文句号。切"),
i18n("按英文句号.切"),
i18n("按标点符号切"),
],
value=i18n("凑四句一切"),
interactive=True,
)
cut_text = gr.Button(i18n("切分"), variant="primary")
def to_cut(text_inp, how_to_cut):
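"""Apply the selected splitting method to the raw text; returns an empty string for empty input."""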
if len(text_inp.strip()) == 0 or text_inp == []:
return ""
method = get_method(cut_method[how_to_cut])
return method(text_inp)
text_opt = gr.Textbox(label=i18n("切分后文本"), value="", lines=4)
cut_text.click(to_cut, [text_inp, _how_to_cut], [text_opt])
gr.Markdown(value=i18n("后续将支持转音素、手工修改音素、语音合成分步执行。"))
if __name__ == "__main__":
app.queue().launch( # concurrency_count=511, max_size=1022
server_name="0.0.0.0",
inbrowser=True,
share=is_share,
server_port=infer_ttswebui,
# quiet=True,
) | ---
+++
@@ -1,7 +1,16 @@
+"""
+按中英混合识别
+按日英混合识别
+多语种启动切分识别语种
+全部按中文识别
+全部按英文识别
+全部按日文识别
+"""
import psutil
import os
def set_high_priority():
+ """把当前 Python 进程设为 HIGH_PRIORITY_CLASS"""
if os.name != "nt":
return # 仅 Windows 有效
p = psutil.Process(os.getpid())
@@ -511,4 +520,4 @@
share=is_share,
server_port=infer_ttswebui,
# quiet=True,
- )
+ )
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/inference_webui_fast.py |
Fully document this Python code with docstrings | # reference: https://github.com/ORI-Muchim/MB-iSTFT-VITS-Korean/blob/main/text/korean.py
import re
from jamo import h2j, j2hcj
import ko_pron
from g2pk2 import G2p
import importlib
import os
# workaround so the model can still be loaded on Windows
if os.name == "nt":
class win_G2p(G2p):
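"""G2p subclass for Windows: when the eunjeon/MeCab install path contains characters MeCab
cannot handle, copy its dictionary into a local TEMP directory and point Mecab at the copy.
"""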
def check_mecab(self):
super().check_mecab()
spam_spec = importlib.util.find_spec("eunjeon")
non_found = spam_spec is None
if non_found:
print("you have to install eunjeon. install it...")
else:
installpath = spam_spec.submodule_search_locations[0]
if not (re.match(r"^[A-Za-z0-9_/\\:.\-]*$", installpath)):
import sys
from eunjeon import Mecab as _Mecab
class Mecab(_Mecab):
def get_dicpath(installpath):
if not (re.match(r"^[A-Za-z0-9_/\\:.\-]*$", installpath)):
import shutil
python_dir = os.getcwd()
if installpath[: len(python_dir)].upper() == python_dir.upper():
dicpath = os.path.join(os.path.relpath(installpath, python_dir), "data", "mecabrc")
else:
if not os.path.exists("TEMP"):
os.mkdir("TEMP")
if not os.path.exists(os.path.join("TEMP", "ko")):
os.mkdir(os.path.join("TEMP", "ko"))
if os.path.exists(os.path.join("TEMP", "ko", "ko_dict")):
shutil.rmtree(os.path.join("TEMP", "ko", "ko_dict"))
shutil.copytree(
os.path.join(installpath, "data"), os.path.join("TEMP", "ko", "ko_dict")
)
dicpath = os.path.join("TEMP", "ko", "ko_dict", "mecabrc")
else:
dicpath = os.path.abspath(os.path.join(installpath, "data/mecabrc"))
return dicpath
def __init__(self, dicpath=get_dicpath(installpath)):
super().__init__(dicpath=dicpath)
sys.modules["eunjeon"].Mecab = Mecab
G2p = win_G2p
from text.symbols2 import symbols
# This is a list of Korean classifiers preceded by pure Korean numerals.
_korean_classifiers = (
"군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통"
)
# List of (hangul, hangul divided) pairs:
_hangul_divided = [
(re.compile("%s" % x[0]), x[1])
for x in [
# ('ㄳ', 'ㄱㅅ'), # g2pk2, A Syllable-ending Rule
# ('ㄵ', 'ㄴㅈ'),
# ('ㄶ', 'ㄴㅎ'),
# ('ㄺ', 'ㄹㄱ'),
# ('ㄻ', 'ㄹㅁ'),
# ('ㄼ', 'ㄹㅂ'),
# ('ㄽ', 'ㄹㅅ'),
# ('ㄾ', 'ㄹㅌ'),
# ('ㄿ', 'ㄹㅍ'),
# ('ㅀ', 'ㄹㅎ'),
# ('ㅄ', 'ㅂㅅ'),
("ㅘ", "ㅗㅏ"),
("ㅙ", "ㅗㅐ"),
("ㅚ", "ㅗㅣ"),
("ㅝ", "ㅜㅓ"),
("ㅞ", "ㅜㅔ"),
("ㅟ", "ㅜㅣ"),
("ㅢ", "ㅡㅣ"),
("ㅑ", "ㅣㅏ"),
("ㅒ", "ㅣㅐ"),
("ㅕ", "ㅣㅓ"),
("ㅖ", "ㅣㅔ"),
("ㅛ", "ㅣㅗ"),
("ㅠ", "ㅣㅜ"),
]
]
# List of (Latin alphabet, hangul) pairs:
_latin_to_hangul = [
(re.compile("%s" % x[0], re.IGNORECASE), x[1])
for x in [
("a", "에이"),
("b", "비"),
("c", "시"),
("d", "디"),
("e", "이"),
("f", "에프"),
("g", "지"),
("h", "에이치"),
("i", "아이"),
("j", "제이"),
("k", "케이"),
("l", "엘"),
("m", "엠"),
("n", "엔"),
("o", "오"),
("p", "피"),
("q", "큐"),
("r", "아르"),
("s", "에스"),
("t", "티"),
("u", "유"),
("v", "브이"),
("w", "더블유"),
("x", "엑스"),
("y", "와이"),
("z", "제트"),
]
]
# List of (ipa, lazy ipa) pairs:
_ipa_to_lazy_ipa = [
(re.compile("%s" % x[0], re.IGNORECASE), x[1])
for x in [
("t͡ɕ", "ʧ"),
("d͡ʑ", "ʥ"),
("ɲ", "n^"),
("ɕ", "ʃ"),
("ʷ", "w"),
("ɭ", "l`"),
("ʎ", "ɾ"),
("ɣ", "ŋ"),
("ɰ", "ɯ"),
("ʝ", "j"),
("ʌ", "ə"),
("ɡ", "g"),
("\u031a", "#"),
("\u0348", "="),
("\u031e", ""),
("\u0320", ""),
("\u0339", ""),
]
]
def fix_g2pk2_error(text):
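"""Work around a g2pk2 liaison error: when "ㅇㅡㄹ" or "ㄹㅡㄹ" is followed by a space and "ㄹ", the second "ㄹ" is replaced with "ㄴ". Texts shorter than five characters are returned unchanged."""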
new_text = ""
i = 0
while i < len(text) - 4:
if (text[i : i + 3] == "ㅇㅡㄹ" or text[i : i + 3] == "ㄹㅡㄹ") and text[i + 3] == " " and text[i + 4] == "ㄹ":
new_text += text[i : i + 3] + " " + "ㄴ"
i += 5
else:
new_text += text[i]
i += 1
new_text += text[i:]
return new_text
def latin_to_hangul(text):
for regex, replacement in _latin_to_hangul:
text = re.sub(regex, replacement, text)
return text
def divide_hangul(text):
text = j2hcj(h2j(text))
for regex, replacement in _hangul_divided:
text = re.sub(regex, replacement, text)
return text
def hangul_number(num, sino=True):
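"""Spell out a numeric string in Hangul. sino=True uses Sino-Korean numerals, sino=False uses native Korean numerals for the ones and tens places. Edge cases: "0" -> "영", and native "20" -> "스무"."""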
num = re.sub(",", "", num)
if num == "0":
return "영"
if not sino and num == "20":
return "스무"
digits = "123456789"
names = "일이삼사오육칠팔구"
digit2name = {d: n for d, n in zip(digits, names)}
modifiers = "한 두 세 네 다섯 여섯 일곱 여덟 아홉"
decimals = "열 스물 서른 마흔 쉰 예순 일흔 여든 아흔"
digit2mod = {d: mod for d, mod in zip(digits, modifiers.split())}
digit2dec = {d: dec for d, dec in zip(digits, decimals.split())}
spelledout = []
for i, digit in enumerate(num):
i = len(num) - i - 1
if sino:
if i == 0:
name = digit2name.get(digit, "")
elif i == 1:
name = digit2name.get(digit, "") + "십"
name = name.replace("일십", "십")
else:
if i == 0:
name = digit2mod.get(digit, "")
elif i == 1:
name = digit2dec.get(digit, "")
if digit == "0":
if i % 4 == 0:
last_three = spelledout[-min(3, len(spelledout)) :]
if "".join(last_three) == "":
spelledout.append("")
continue
else:
spelledout.append("")
continue
if i == 2:
name = digit2name.get(digit, "") + "백"
name = name.replace("일백", "백")
elif i == 3:
name = digit2name.get(digit, "") + "천"
name = name.replace("일천", "천")
elif i == 4:
name = digit2name.get(digit, "") + "만"
name = name.replace("일만", "만")
elif i == 5:
name = digit2name.get(digit, "") + "십"
name = name.replace("일십", "십")
elif i == 6:
name = digit2name.get(digit, "") + "백"
name = name.replace("일백", "백")
elif i == 7:
name = digit2name.get(digit, "") + "천"
name = name.replace("일천", "천")
elif i == 8:
name = digit2name.get(digit, "") + "억"
elif i == 9:
name = digit2name.get(digit, "") + "십"
elif i == 10:
name = digit2name.get(digit, "") + "백"
elif i == 11:
name = digit2name.get(digit, "") + "천"
elif i == 12:
name = digit2name.get(digit, "") + "조"
elif i == 13:
name = digit2name.get(digit, "") + "십"
elif i == 14:
name = digit2name.get(digit, "") + "백"
elif i == 15:
name = digit2name.get(digit, "") + "천"
spelledout.append(name)
return "".join(elem for elem in spelledout)
def number_to_hangul(text):
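"""Convert digits in text to Hangul: numbers attached to a pure-Korean classifier are spelled with native numerals, other classifier-attached numbers with Sino-Korean numerals, and any remaining digits digit by digit."""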
tokens = set(re.findall(r"(\d[\d,]*)([\uac00-\ud71f]+)", text))
for token in tokens:
num, classifier = token
if classifier[:2] in _korean_classifiers or classifier[0] in _korean_classifiers:
spelledout = hangul_number(num, sino=False)
else:
spelledout = hangul_number(num, sino=True)
text = text.replace(f"{num}{classifier}", f"{spelledout}{classifier}")
# digit by digit for remaining digits
digits = "0123456789"
names = "영일이삼사오육칠팔구"
for d, n in zip(digits, names):
text = text.replace(d, n)
return text
def korean_to_lazy_ipa(text):
text = latin_to_hangul(text)
text = number_to_hangul(text)
text = re.sub("[\uac00-\ud7af]+", lambda x: ko_pron.romanise(x.group(0), "ipa").split("] ~ [")[0], text)
for regex, replacement in _ipa_to_lazy_ipa:
text = re.sub(regex, replacement, text)
return text
_g2p = G2p()
def korean_to_ipa(text):
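"""Full Korean-to-IPA pipeline: transliterate Latin letters, spell out numbers, run g2p with the g2pk2 fix, romanise to lazy IPA, then restore the tʃ/dʑ affricates."""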
text = latin_to_hangul(text)
text = number_to_hangul(text)
text = _g2p(text)
text = fix_g2pk2_error(text)
text = korean_to_lazy_ipa(text)
return text.replace("ʧ", "tʃ").replace("ʥ", "dʑ")
def post_replace_ph(ph):
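"""Normalize one phone: map punctuation variants, keep symbols known to the model, and replace anything unknown with "停" (space maps to "空")."""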
rep_map = {
":": ",",
";": ",",
",": ",",
"。": ".",
"!": "!",
"?": "?",
"\n": ".",
"·": ",",
"、": ",",
"...": "…",
" ": "空",
}
if ph in rep_map.keys():
ph = rep_map[ph]
if ph in symbols:
return ph
if ph not in symbols:
ph = "停"
return ph
def g2p(text):
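"""Convert Korean text to a list of model phones: transliterate Latin letters, run g2p, decompose Hangul into jamo, apply the g2pk2 fix, add "." after a trailing jamo, and normalize each phone."""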
text = latin_to_hangul(text)
text = _g2p(text)
text = divide_hangul(text)
text = fix_g2pk2_error(text)
text = re.sub(r"([\u3131-\u3163])$", r"\1.", text)
# text = "".join([post_replace_ph(i) for i in text])
text = [post_replace_ph(i) for i in text]
return text
if __name__ == "__main__":
text = "안녕하세요"
print(g2p(text)) | ---
+++
@@ -1,335 +1,337 @@
-# reference: https://github.com/ORI-Muchim/MB-iSTFT-VITS-Korean/blob/main/text/korean.py
-
-import re
-from jamo import h2j, j2hcj
-import ko_pron
-from g2pk2 import G2p
-
-import importlib
-import os
-
-# 防止win下无法读取模型
-if os.name == "nt":
-
- class win_G2p(G2p):
- def check_mecab(self):
- super().check_mecab()
- spam_spec = importlib.util.find_spec("eunjeon")
- non_found = spam_spec is None
- if non_found:
- print("you have to install eunjeon. install it...")
- else:
- installpath = spam_spec.submodule_search_locations[0]
- if not (re.match(r"^[A-Za-z0-9_/\\:.\-]*$", installpath)):
- import sys
- from eunjeon import Mecab as _Mecab
-
- class Mecab(_Mecab):
- def get_dicpath(installpath):
- if not (re.match(r"^[A-Za-z0-9_/\\:.\-]*$", installpath)):
- import shutil
-
- python_dir = os.getcwd()
- if installpath[: len(python_dir)].upper() == python_dir.upper():
- dicpath = os.path.join(os.path.relpath(installpath, python_dir), "data", "mecabrc")
- else:
- if not os.path.exists("TEMP"):
- os.mkdir("TEMP")
- if not os.path.exists(os.path.join("TEMP", "ko")):
- os.mkdir(os.path.join("TEMP", "ko"))
- if os.path.exists(os.path.join("TEMP", "ko", "ko_dict")):
- shutil.rmtree(os.path.join("TEMP", "ko", "ko_dict"))
-
- shutil.copytree(
- os.path.join(installpath, "data"), os.path.join("TEMP", "ko", "ko_dict")
- )
- dicpath = os.path.join("TEMP", "ko", "ko_dict", "mecabrc")
- else:
- dicpath = os.path.abspath(os.path.join(installpath, "data/mecabrc"))
- return dicpath
-
- def __init__(self, dicpath=get_dicpath(installpath)):
- super().__init__(dicpath=dicpath)
-
- sys.modules["eunjeon"].Mecab = Mecab
-
- G2p = win_G2p
-
-
-from text.symbols2 import symbols
-
-# This is a list of Korean classifiers preceded by pure Korean numerals.
-_korean_classifiers = (
- "군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통"
-)
-
-# List of (hangul, hangul divided) pairs:
-_hangul_divided = [
- (re.compile("%s" % x[0]), x[1])
- for x in [
- # ('ㄳ', 'ㄱㅅ'), # g2pk2, A Syllable-ending Rule
- # ('ㄵ', 'ㄴㅈ'),
- # ('ㄶ', 'ㄴㅎ'),
- # ('ㄺ', 'ㄹㄱ'),
- # ('ㄻ', 'ㄹㅁ'),
- # ('ㄼ', 'ㄹㅂ'),
- # ('ㄽ', 'ㄹㅅ'),
- # ('ㄾ', 'ㄹㅌ'),
- # ('ㄿ', 'ㄹㅍ'),
- # ('ㅀ', 'ㄹㅎ'),
- # ('ㅄ', 'ㅂㅅ'),
- ("ㅘ", "ㅗㅏ"),
- ("ㅙ", "ㅗㅐ"),
- ("ㅚ", "ㅗㅣ"),
- ("ㅝ", "ㅜㅓ"),
- ("ㅞ", "ㅜㅔ"),
- ("ㅟ", "ㅜㅣ"),
- ("ㅢ", "ㅡㅣ"),
- ("ㅑ", "ㅣㅏ"),
- ("ㅒ", "ㅣㅐ"),
- ("ㅕ", "ㅣㅓ"),
- ("ㅖ", "ㅣㅔ"),
- ("ㅛ", "ㅣㅗ"),
- ("ㅠ", "ㅣㅜ"),
- ]
-]
-
-# List of (Latin alphabet, hangul) pairs:
-_latin_to_hangul = [
- (re.compile("%s" % x[0], re.IGNORECASE), x[1])
- for x in [
- ("a", "에이"),
- ("b", "비"),
- ("c", "시"),
- ("d", "디"),
- ("e", "이"),
- ("f", "에프"),
- ("g", "지"),
- ("h", "에이치"),
- ("i", "아이"),
- ("j", "제이"),
- ("k", "케이"),
- ("l", "엘"),
- ("m", "엠"),
- ("n", "엔"),
- ("o", "오"),
- ("p", "피"),
- ("q", "큐"),
- ("r", "아르"),
- ("s", "에스"),
- ("t", "티"),
- ("u", "유"),
- ("v", "브이"),
- ("w", "더블유"),
- ("x", "엑스"),
- ("y", "와이"),
- ("z", "제트"),
- ]
-]
-
-# List of (ipa, lazy ipa) pairs:
-_ipa_to_lazy_ipa = [
- (re.compile("%s" % x[0], re.IGNORECASE), x[1])
- for x in [
- ("t͡ɕ", "ʧ"),
- ("d͡ʑ", "ʥ"),
- ("ɲ", "n^"),
- ("ɕ", "ʃ"),
- ("ʷ", "w"),
- ("ɭ", "l`"),
- ("ʎ", "ɾ"),
- ("ɣ", "ŋ"),
- ("ɰ", "ɯ"),
- ("ʝ", "j"),
- ("ʌ", "ə"),
- ("ɡ", "g"),
- ("\u031a", "#"),
- ("\u0348", "="),
- ("\u031e", ""),
- ("\u0320", ""),
- ("\u0339", ""),
- ]
-]
-
-
-def fix_g2pk2_error(text):
- new_text = ""
- i = 0
- while i < len(text) - 4:
- if (text[i : i + 3] == "ㅇㅡㄹ" or text[i : i + 3] == "ㄹㅡㄹ") and text[i + 3] == " " and text[i + 4] == "ㄹ":
- new_text += text[i : i + 3] + " " + "ㄴ"
- i += 5
- else:
- new_text += text[i]
- i += 1
-
- new_text += text[i:]
- return new_text
-
-
-def latin_to_hangul(text):
- for regex, replacement in _latin_to_hangul:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def divide_hangul(text):
- text = j2hcj(h2j(text))
- for regex, replacement in _hangul_divided:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def hangul_number(num, sino=True):
- num = re.sub(",", "", num)
-
- if num == "0":
- return "영"
- if not sino and num == "20":
- return "스무"
-
- digits = "123456789"
- names = "일이삼사오육칠팔구"
- digit2name = {d: n for d, n in zip(digits, names)}
-
- modifiers = "한 두 세 네 다섯 여섯 일곱 여덟 아홉"
- decimals = "열 스물 서른 마흔 쉰 예순 일흔 여든 아흔"
- digit2mod = {d: mod for d, mod in zip(digits, modifiers.split())}
- digit2dec = {d: dec for d, dec in zip(digits, decimals.split())}
-
- spelledout = []
- for i, digit in enumerate(num):
- i = len(num) - i - 1
- if sino:
- if i == 0:
- name = digit2name.get(digit, "")
- elif i == 1:
- name = digit2name.get(digit, "") + "십"
- name = name.replace("일십", "십")
- else:
- if i == 0:
- name = digit2mod.get(digit, "")
- elif i == 1:
- name = digit2dec.get(digit, "")
- if digit == "0":
- if i % 4 == 0:
- last_three = spelledout[-min(3, len(spelledout)) :]
- if "".join(last_three) == "":
- spelledout.append("")
- continue
- else:
- spelledout.append("")
- continue
- if i == 2:
- name = digit2name.get(digit, "") + "백"
- name = name.replace("일백", "백")
- elif i == 3:
- name = digit2name.get(digit, "") + "천"
- name = name.replace("일천", "천")
- elif i == 4:
- name = digit2name.get(digit, "") + "만"
- name = name.replace("일만", "만")
- elif i == 5:
- name = digit2name.get(digit, "") + "십"
- name = name.replace("일십", "십")
- elif i == 6:
- name = digit2name.get(digit, "") + "백"
- name = name.replace("일백", "백")
- elif i == 7:
- name = digit2name.get(digit, "") + "천"
- name = name.replace("일천", "천")
- elif i == 8:
- name = digit2name.get(digit, "") + "억"
- elif i == 9:
- name = digit2name.get(digit, "") + "십"
- elif i == 10:
- name = digit2name.get(digit, "") + "백"
- elif i == 11:
- name = digit2name.get(digit, "") + "천"
- elif i == 12:
- name = digit2name.get(digit, "") + "조"
- elif i == 13:
- name = digit2name.get(digit, "") + "십"
- elif i == 14:
- name = digit2name.get(digit, "") + "백"
- elif i == 15:
- name = digit2name.get(digit, "") + "천"
- spelledout.append(name)
- return "".join(elem for elem in spelledout)
-
-
-def number_to_hangul(text):
- tokens = set(re.findall(r"(\d[\d,]*)([\uac00-\ud71f]+)", text))
- for token in tokens:
- num, classifier = token
- if classifier[:2] in _korean_classifiers or classifier[0] in _korean_classifiers:
- spelledout = hangul_number(num, sino=False)
- else:
- spelledout = hangul_number(num, sino=True)
- text = text.replace(f"{num}{classifier}", f"{spelledout}{classifier}")
- # digit by digit for remaining digits
- digits = "0123456789"
- names = "영일이삼사오육칠팔구"
- for d, n in zip(digits, names):
- text = text.replace(d, n)
- return text
-
-
-def korean_to_lazy_ipa(text):
- text = latin_to_hangul(text)
- text = number_to_hangul(text)
- text = re.sub("[\uac00-\ud7af]+", lambda x: ko_pron.romanise(x.group(0), "ipa").split("] ~ [")[0], text)
- for regex, replacement in _ipa_to_lazy_ipa:
- text = re.sub(regex, replacement, text)
- return text
-
-
-_g2p = G2p()
-
-
-def korean_to_ipa(text):
- text = latin_to_hangul(text)
- text = number_to_hangul(text)
- text = _g2p(text)
- text = fix_g2pk2_error(text)
- text = korean_to_lazy_ipa(text)
- return text.replace("ʧ", "tʃ").replace("ʥ", "dʑ")
-
-
-def post_replace_ph(ph):
- rep_map = {
- ":": ",",
- ";": ",",
- ",": ",",
- "。": ".",
- "!": "!",
- "?": "?",
- "\n": ".",
- "·": ",",
- "、": ",",
- "...": "…",
- " ": "空",
- }
- if ph in rep_map.keys():
- ph = rep_map[ph]
- if ph in symbols:
- return ph
- if ph not in symbols:
- ph = "停"
- return ph
-
-
-def g2p(text):
- text = latin_to_hangul(text)
- text = _g2p(text)
- text = divide_hangul(text)
- text = fix_g2pk2_error(text)
- text = re.sub(r"([\u3131-\u3163])$", r"\1.", text)
- # text = "".join([post_replace_ph(i) for i in text])
- text = [post_replace_ph(i) for i in text]
- return text
-
-
-if __name__ == "__main__":
- text = "안녕하세요"
-    print(g2p(text))
+# reference: https://github.com/ORI-Muchim/MB-iSTFT-VITS-Korean/blob/main/text/korean.py
+
+import re
+from jamo import h2j, j2hcj
+import ko_pron
+from g2pk2 import G2p
+
+import importlib
+import os
+
+# Prevent the model from failing to load on Windows
+if os.name == "nt":
+
+ class win_G2p(G2p):
+ def check_mecab(self):
+ super().check_mecab()
+ spam_spec = importlib.util.find_spec("eunjeon")
+ non_found = spam_spec is None
+ if non_found:
+ print("you have to install eunjeon. install it...")
+ else:
+ installpath = spam_spec.submodule_search_locations[0]
+ if not (re.match(r"^[A-Za-z0-9_/\\:.\-]*$", installpath)):
+ import sys
+ from eunjeon import Mecab as _Mecab
+
+ class Mecab(_Mecab):
+ def get_dicpath(installpath):
+ if not (re.match(r"^[A-Za-z0-9_/\\:.\-]*$", installpath)):
+ import shutil
+
+ python_dir = os.getcwd()
+ if installpath[: len(python_dir)].upper() == python_dir.upper():
+ dicpath = os.path.join(os.path.relpath(installpath, python_dir), "data", "mecabrc")
+ else:
+ if not os.path.exists("TEMP"):
+ os.mkdir("TEMP")
+ if not os.path.exists(os.path.join("TEMP", "ko")):
+ os.mkdir(os.path.join("TEMP", "ko"))
+ if os.path.exists(os.path.join("TEMP", "ko", "ko_dict")):
+ shutil.rmtree(os.path.join("TEMP", "ko", "ko_dict"))
+
+ shutil.copytree(
+ os.path.join(installpath, "data"), os.path.join("TEMP", "ko", "ko_dict")
+ )
+ dicpath = os.path.join("TEMP", "ko", "ko_dict", "mecabrc")
+ else:
+ dicpath = os.path.abspath(os.path.join(installpath, "data/mecabrc"))
+ return dicpath
+
+ def __init__(self, dicpath=get_dicpath(installpath)):
+ super().__init__(dicpath=dicpath)
+
+ sys.modules["eunjeon"].Mecab = Mecab
+
+ G2p = win_G2p
+
+
+from text.symbols2 import symbols
+
+# This is a list of Korean classifiers preceded by pure Korean numerals.
+_korean_classifiers = (
+ "군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통"
+)
+
+# List of (hangul, hangul divided) pairs:
+_hangul_divided = [
+ (re.compile("%s" % x[0]), x[1])
+ for x in [
+ # ('ㄳ', 'ㄱㅅ'), # g2pk2, A Syllable-ending Rule
+ # ('ㄵ', 'ㄴㅈ'),
+ # ('ㄶ', 'ㄴㅎ'),
+ # ('ㄺ', 'ㄹㄱ'),
+ # ('ㄻ', 'ㄹㅁ'),
+ # ('ㄼ', 'ㄹㅂ'),
+ # ('ㄽ', 'ㄹㅅ'),
+ # ('ㄾ', 'ㄹㅌ'),
+ # ('ㄿ', 'ㄹㅍ'),
+ # ('ㅀ', 'ㄹㅎ'),
+ # ('ㅄ', 'ㅂㅅ'),
+ ("ㅘ", "ㅗㅏ"),
+ ("ㅙ", "ㅗㅐ"),
+ ("ㅚ", "ㅗㅣ"),
+ ("ㅝ", "ㅜㅓ"),
+ ("ㅞ", "ㅜㅔ"),
+ ("ㅟ", "ㅜㅣ"),
+ ("ㅢ", "ㅡㅣ"),
+ ("ㅑ", "ㅣㅏ"),
+ ("ㅒ", "ㅣㅐ"),
+ ("ㅕ", "ㅣㅓ"),
+ ("ㅖ", "ㅣㅔ"),
+ ("ㅛ", "ㅣㅗ"),
+ ("ㅠ", "ㅣㅜ"),
+ ]
+]
+
+# List of (Latin alphabet, hangul) pairs:
+_latin_to_hangul = [
+ (re.compile("%s" % x[0], re.IGNORECASE), x[1])
+ for x in [
+ ("a", "에이"),
+ ("b", "비"),
+ ("c", "시"),
+ ("d", "디"),
+ ("e", "이"),
+ ("f", "에프"),
+ ("g", "지"),
+ ("h", "에이치"),
+ ("i", "아이"),
+ ("j", "제이"),
+ ("k", "케이"),
+ ("l", "엘"),
+ ("m", "엠"),
+ ("n", "엔"),
+ ("o", "오"),
+ ("p", "피"),
+ ("q", "큐"),
+ ("r", "아르"),
+ ("s", "에스"),
+ ("t", "티"),
+ ("u", "유"),
+ ("v", "브이"),
+ ("w", "더블유"),
+ ("x", "엑스"),
+ ("y", "와이"),
+ ("z", "제트"),
+ ]
+]
+
+# List of (ipa, lazy ipa) pairs:
+_ipa_to_lazy_ipa = [
+ (re.compile("%s" % x[0], re.IGNORECASE), x[1])
+ for x in [
+ ("t͡ɕ", "ʧ"),
+ ("d͡ʑ", "ʥ"),
+ ("ɲ", "n^"),
+ ("ɕ", "ʃ"),
+ ("ʷ", "w"),
+ ("ɭ", "l`"),
+ ("ʎ", "ɾ"),
+ ("ɣ", "ŋ"),
+ ("ɰ", "ɯ"),
+ ("ʝ", "j"),
+ ("ʌ", "ə"),
+ ("ɡ", "g"),
+ ("\u031a", "#"),
+ ("\u0348", "="),
+ ("\u031e", ""),
+ ("\u0320", ""),
+ ("\u0339", ""),
+ ]
+]
+
+
+def fix_g2pk2_error(text):
+ new_text = ""
+ i = 0
+ while i < len(text) - 4:
+ if (text[i : i + 3] == "ㅇㅡㄹ" or text[i : i + 3] == "ㄹㅡㄹ") and text[i + 3] == " " and text[i + 4] == "ㄹ":
+ new_text += text[i : i + 3] + " " + "ㄴ"
+ i += 5
+ else:
+ new_text += text[i]
+ i += 1
+
+ new_text += text[i:]
+ return new_text
+
+
+def latin_to_hangul(text):
+ for regex, replacement in _latin_to_hangul:
+ text = re.sub(regex, replacement, text)
+ return text
+
+
+def divide_hangul(text):
+ text = j2hcj(h2j(text))
+ for regex, replacement in _hangul_divided:
+ text = re.sub(regex, replacement, text)
+ return text
+
+
+def hangul_number(num, sino=True):
+ """Reference https://github.com/Kyubyong/g2pK"""
+ num = re.sub(",", "", num)
+
+ if num == "0":
+ return "영"
+ if not sino and num == "20":
+ return "스무"
+
+ digits = "123456789"
+ names = "일이삼사오육칠팔구"
+ digit2name = {d: n for d, n in zip(digits, names)}
+
+ modifiers = "한 두 세 네 다섯 여섯 일곱 여덟 아홉"
+ decimals = "열 스물 서른 마흔 쉰 예순 일흔 여든 아흔"
+ digit2mod = {d: mod for d, mod in zip(digits, modifiers.split())}
+ digit2dec = {d: dec for d, dec in zip(digits, decimals.split())}
+
+ spelledout = []
+ for i, digit in enumerate(num):
+ i = len(num) - i - 1
+ if sino:
+ if i == 0:
+ name = digit2name.get(digit, "")
+ elif i == 1:
+ name = digit2name.get(digit, "") + "십"
+ name = name.replace("일십", "십")
+ else:
+ if i == 0:
+ name = digit2mod.get(digit, "")
+ elif i == 1:
+ name = digit2dec.get(digit, "")
+ if digit == "0":
+ if i % 4 == 0:
+ last_three = spelledout[-min(3, len(spelledout)) :]
+ if "".join(last_three) == "":
+ spelledout.append("")
+ continue
+ else:
+ spelledout.append("")
+ continue
+ if i == 2:
+ name = digit2name.get(digit, "") + "백"
+ name = name.replace("일백", "백")
+ elif i == 3:
+ name = digit2name.get(digit, "") + "천"
+ name = name.replace("일천", "천")
+ elif i == 4:
+ name = digit2name.get(digit, "") + "만"
+ name = name.replace("일만", "만")
+ elif i == 5:
+ name = digit2name.get(digit, "") + "십"
+ name = name.replace("일십", "십")
+ elif i == 6:
+ name = digit2name.get(digit, "") + "백"
+ name = name.replace("일백", "백")
+ elif i == 7:
+ name = digit2name.get(digit, "") + "천"
+ name = name.replace("일천", "천")
+ elif i == 8:
+ name = digit2name.get(digit, "") + "억"
+ elif i == 9:
+ name = digit2name.get(digit, "") + "십"
+ elif i == 10:
+ name = digit2name.get(digit, "") + "백"
+ elif i == 11:
+ name = digit2name.get(digit, "") + "천"
+ elif i == 12:
+ name = digit2name.get(digit, "") + "조"
+ elif i == 13:
+ name = digit2name.get(digit, "") + "십"
+ elif i == 14:
+ name = digit2name.get(digit, "") + "백"
+ elif i == 15:
+ name = digit2name.get(digit, "") + "천"
+ spelledout.append(name)
+ return "".join(elem for elem in spelledout)
+
+
+def number_to_hangul(text):
+ """Reference https://github.com/Kyubyong/g2pK"""
+ tokens = set(re.findall(r"(\d[\d,]*)([\uac00-\ud71f]+)", text))
+ for token in tokens:
+ num, classifier = token
+ if classifier[:2] in _korean_classifiers or classifier[0] in _korean_classifiers:
+ spelledout = hangul_number(num, sino=False)
+ else:
+ spelledout = hangul_number(num, sino=True)
+ text = text.replace(f"{num}{classifier}", f"{spelledout}{classifier}")
+ # digit by digit for remaining digits
+ digits = "0123456789"
+ names = "영일이삼사오육칠팔구"
+ for d, n in zip(digits, names):
+ text = text.replace(d, n)
+ return text
+
+
+def korean_to_lazy_ipa(text):
+ text = latin_to_hangul(text)
+ text = number_to_hangul(text)
+ text = re.sub("[\uac00-\ud7af]+", lambda x: ko_pron.romanise(x.group(0), "ipa").split("] ~ [")[0], text)
+ for regex, replacement in _ipa_to_lazy_ipa:
+ text = re.sub(regex, replacement, text)
+ return text
+
+
+_g2p = G2p()
+
+
+def korean_to_ipa(text):
+ text = latin_to_hangul(text)
+ text = number_to_hangul(text)
+ text = _g2p(text)
+ text = fix_g2pk2_error(text)
+ text = korean_to_lazy_ipa(text)
+ return text.replace("ʧ", "tʃ").replace("ʥ", "dʑ")
+
+
+def post_replace_ph(ph):
+ rep_map = {
+ ":": ",",
+ ";": ",",
+ ",": ",",
+ "。": ".",
+ "!": "!",
+ "?": "?",
+ "\n": ".",
+ "·": ",",
+ "、": ",",
+ "...": "…",
+ " ": "空",
+ }
+ if ph in rep_map.keys():
+ ph = rep_map[ph]
+ if ph in symbols:
+ return ph
+ if ph not in symbols:
+ ph = "停"
+ return ph
+
+
+def g2p(text):
+ text = latin_to_hangul(text)
+ text = _g2p(text)
+ text = divide_hangul(text)
+ text = fix_g2pk2_error(text)
+ text = re.sub(r"([\u3131-\u3163])$", r"\1.", text)
+ # text = "".join([post_replace_ph(i) for i in text])
+ text = [post_replace_ph(i) for i in text]
+ return text
+
+
+if __name__ == "__main__":
+ text = "안녕하세요"
+ print(g2p(text))
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/text/korean.py |
Add docstrings that explain logic | import math
import torch
from torch.nn import functional as F
def init_weights(m, mean=0.0, std=0.01):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
m.weight.data.normal_(mean, std)
def get_padding(kernel_size, dilation=1):
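# "same" padding for a dilated conv: dilation*(kernel_size-1)/2 on each side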
return int((kernel_size * dilation - dilation) / 2)
# def convert_pad_shape(pad_shape):
# l = pad_shape[::-1]
# pad_shape = [item for sublist in l for item in sublist]
# return pad_shape
def intersperse(lst, item):
result = [item] * (len(lst) * 2 + 1)
result[1::2] = lst
return result
def kl_divergence(m_p, logs_p, m_q, logs_q):
kl = (logs_q - logs_p) - 0.5
kl += 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
return kl
def rand_gumbel(shape):
uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
return -torch.log(-torch.log(uniform_samples))
def rand_gumbel_like(x):
g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
return g
def slice_segments(x, ids_str, segment_size=4):
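# Gather one fixed-length window per batch element, each starting at its own offset ids_str[i]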
ret = torch.zeros_like(x[:, :, :segment_size])
for i in range(x.size(0)):
idx_str = ids_str[i]
idx_end = idx_str + segment_size
ret[i] = x[i, :, idx_str:idx_end]
return ret
def rand_slice_segments(x, x_lengths=None, segment_size=4):
b, d, t = x.size()
if x_lengths is None:
x_lengths = t
ids_str_max = x_lengths - segment_size + 1
ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
ret = slice_segments(x, ids_str, segment_size)
return ret, ids_str
def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
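# Transformer-style sinusoidal timing signal: geometric progression of timescales between min and max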
position = torch.arange(length, dtype=torch.float)
num_timescales = channels // 2
log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (num_timescales - 1)
inv_timescales = min_timescale * torch.exp(
torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
)
scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
signal = F.pad(signal, [0, 0, 0, channels % 2])
signal = signal.view(1, channels, length)
return signal
def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
b, channels, length = x.size()
signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
return x + signal.to(dtype=x.dtype, device=x.device)
def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
b, channels, length = x.size()
signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
def subsequent_mask(length):
mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
return mask
@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
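# WaveNet-style gated activation: the first n_channels pass through tanh, the rest act as a sigmoid gate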
n_channels_int = n_channels[0]
in_act = input_a + input_b
t_act = torch.tanh(in_act[:, :n_channels_int, :])
s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
acts = t_act * s_act
return acts
def convert_pad_shape(pad_shape):
l = pad_shape[::-1]
pad_shape = [item for sublist in l for item in sublist]
return pad_shape
def shift_1d(x):
x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
return x
def sequence_mask(length, max_length=None):
if max_length is None:
max_length = length.max()
x = torch.arange(max_length, dtype=length.dtype, device=length.device)
return x.unsqueeze(0) < length.unsqueeze(1)
def generate_path(duration, mask):
device = duration.device
b, _, t_y, t_x = mask.shape
cum_duration = torch.cumsum(duration, -1)
cum_duration_flat = cum_duration.view(b * t_x)
path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
path = path.view(b, t_x, t_y)
path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
path = path.unsqueeze(1).transpose(2, 3) * mask
return path
def clip_grad_value_(parameters, clip_value, norm_type=2):
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = list(filter(lambda p: p.grad is not None, parameters))
norm_type = float(norm_type)
if clip_value is not None:
clip_value = float(clip_value)
total_norm = 0
for p in parameters:
param_norm = p.grad.data.norm(norm_type)
total_norm += param_norm.item() ** norm_type
if clip_value is not None:
p.grad.data.clamp_(min=-clip_value, max=clip_value)
total_norm = total_norm ** (1.0 / norm_type)
return total_norm
def squeeze(x, x_mask=None, n_sqz=2):
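# Fold n_sqz consecutive time steps into channels: (b, c, t) -> (b, c*n_sqz, t//n_sqz), trimming t to a multiple of n_sqz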
b, c, t = x.size()
t = (t // n_sqz) * n_sqz
x = x[:, :, :t]
x_sqz = x.view(b, c, t // n_sqz, n_sqz)
x_sqz = x_sqz.permute(0, 3, 1, 2).contiguous().view(b, c * n_sqz, t // n_sqz)
if x_mask is not None:
x_mask = x_mask[:, :, n_sqz - 1 :: n_sqz]
else:
x_mask = torch.ones(b, 1, t // n_sqz).to(device=x.device, dtype=x.dtype)
return x_sqz * x_mask, x_mask
def unsqueeze(x, x_mask=None, n_sqz=2):
b, c, t = x.size()
x_unsqz = x.view(b, n_sqz, c // n_sqz, t)
x_unsqz = x_unsqz.permute(0, 2, 3, 1).contiguous().view(b, c // n_sqz, t * n_sqz)
if x_mask is not None:
x_mask = x_mask.unsqueeze(-1).repeat(1, 1, 1, n_sqz).view(b, 1, t * n_sqz)
else:
x_mask = torch.ones(b, 1, t * n_sqz).to(device=x.device, dtype=x.dtype)
return x_unsqz * x_mask, x_mask | --- +++ @@ -26,12 +26,14 @@
def kl_divergence(m_p, logs_p, m_q, logs_q):
+ """KL(P||Q)"""
kl = (logs_q - logs_p) - 0.5
kl += 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
return kl
def rand_gumbel(shape):
+ """Sample from the Gumbel distribution, protect from overflows."""
uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
return -torch.log(-torch.log(uniform_samples))
@@ -120,6 +122,10 @@
def generate_path(duration, mask):
+ """
+ duration: [b, 1, t_x]
+ mask: [b, 1, t_y, t_x]
+ """
device = duration.device
b, _, t_y, t_x = mask.shape
@@ -176,4 +182,4 @@ x_mask = x_mask.unsqueeze(-1).repeat(1, 1, 1, n_sqz).view(b, 1, t * n_sqz)
else:
x_mask = torch.ones(b, 1, t * n_sqz).to(device=x.device, dtype=x.dtype)
-    return x_unsqz * x_mask, x_mask
+    return x_unsqz * x_mask, x_mask
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/module/commons.py |
Generate docstrings for script automation | # modified from https://github.com/CjangCjengh/vits/blob/main/text/japanese.py
import re
import os
import hashlib
try:
import pyopenjtalk
current_file_path = os.path.dirname(__file__)
# Prevent the model from failing to load on Windows
if os.name == "nt":
python_dir = os.getcwd()
OPEN_JTALK_DICT_DIR = pyopenjtalk.OPEN_JTALK_DICT_DIR.decode("utf-8")
if not (re.match(r"^[A-Za-z0-9_/\\:.\-]*$", OPEN_JTALK_DICT_DIR)):
if OPEN_JTALK_DICT_DIR[: len(python_dir)].upper() == python_dir.upper():
OPEN_JTALK_DICT_DIR = os.path.join(os.path.relpath(OPEN_JTALK_DICT_DIR, python_dir))
else:
import shutil
if not os.path.exists("TEMP"):
os.mkdir("TEMP")
if not os.path.exists(os.path.join("TEMP", "ja")):
os.mkdir(os.path.join("TEMP", "ja"))
if os.path.exists(os.path.join("TEMP", "ja", "open_jtalk_dic")):
shutil.rmtree(os.path.join("TEMP", "ja", "open_jtalk_dic"))
shutil.copytree(
pyopenjtalk.OPEN_JTALK_DICT_DIR.decode("utf-8"),
os.path.join("TEMP", "ja", "open_jtalk_dic"),
)
OPEN_JTALK_DICT_DIR = os.path.join("TEMP", "ja", "open_jtalk_dic")
pyopenjtalk.OPEN_JTALK_DICT_DIR = OPEN_JTALK_DICT_DIR.encode("utf-8")
if not (re.match(r"^[A-Za-z0-9_/\\:.\-]*$", current_file_path)):
if current_file_path[: len(python_dir)].upper() == python_dir.upper():
current_file_path = os.path.join(os.path.relpath(current_file_path, python_dir))
else:
if not os.path.exists("TEMP"):
os.mkdir("TEMP")
if not os.path.exists(os.path.join("TEMP", "ja")):
os.mkdir(os.path.join("TEMP", "ja"))
if not os.path.exists(os.path.join("TEMP", "ja", "ja_userdic")):
os.mkdir(os.path.join("TEMP", "ja", "ja_userdic"))
shutil.copyfile(
os.path.join(current_file_path, "ja_userdic", "userdict.csv"),
os.path.join("TEMP", "ja", "ja_userdic", "userdict.csv"),
)
current_file_path = os.path.join("TEMP", "ja")
def get_hash(fp: str) -> str:
hash_md5 = hashlib.md5()
with open(fp, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
USERDIC_CSV_PATH = os.path.join(current_file_path, "ja_userdic", "userdict.csv")
USERDIC_BIN_PATH = os.path.join(current_file_path, "ja_userdic", "user.dict")
USERDIC_HASH_PATH = os.path.join(current_file_path, "ja_userdic", "userdict.md5")
# If no compiled user dictionary exists, build one; if it does, compare the md5 and rebuild when the csv has changed
if os.path.exists(USERDIC_CSV_PATH):
if (
not os.path.exists(USERDIC_BIN_PATH)
or get_hash(USERDIC_CSV_PATH) != open(USERDIC_HASH_PATH, "r", encoding="utf-8").read()
):
pyopenjtalk.mecab_dict_index(USERDIC_CSV_PATH, USERDIC_BIN_PATH)
with open(USERDIC_HASH_PATH, "w", encoding="utf-8") as f:
f.write(get_hash(USERDIC_CSV_PATH))
if os.path.exists(USERDIC_BIN_PATH):
pyopenjtalk.update_global_jtalk_with_user_dict(USERDIC_BIN_PATH)
except Exception:
# print(e)
import pyopenjtalk
# failed to load user dictionary, ignore.
pass
from text.symbols import punctuation
# Regular expression matching Japanese without punctuation marks:
_japanese_characters = re.compile(
r"[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]"
)
# Regular expression matching non-Japanese characters or punctuation marks:
_japanese_marks = re.compile(
r"[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]"
)
# List of (symbol, Japanese) pairs for marks:
_symbols_to_japanese = [(re.compile("%s" % x[0]), x[1]) for x in [("%", "パーセント")]]
# List of (consonant, sokuon) pairs:
_real_sokuon = [
(re.compile("%s" % x[0]), x[1])
for x in [
(r"Q([↑↓]*[kg])", r"k#\1"),
(r"Q([↑↓]*[tdjʧ])", r"t#\1"),
(r"Q([↑↓]*[sʃ])", r"s\1"),
(r"Q([↑↓]*[pb])", r"p#\1"),
]
]
# List of (consonant, hatsuon) pairs:
_real_hatsuon = [
(re.compile("%s" % x[0]), x[1])
for x in [
(r"N([↑↓]*[pbm])", r"m\1"),
(r"N([↑↓]*[ʧʥj])", r"n^\1"),
(r"N([↑↓]*[tdn])", r"n\1"),
(r"N([↑↓]*[kg])", r"ŋ\1"),
]
]
def post_replace_ph(ph):
rep_map = {
":": ",",
";": ",",
",": ",",
"。": ".",
"!": "!",
"?": "?",
"\n": ".",
"·": ",",
"、": ",",
"...": "…",
}
if ph in rep_map.keys():
ph = rep_map[ph]
return ph
def replace_consecutive_punctuation(text):
punctuations = "".join(re.escape(p) for p in punctuation)
pattern = f"([{punctuations}])([{punctuations}])+"
result = re.sub(pattern, r"\1", text)
return result
def symbols_to_japanese(text):
for regex, replacement in _symbols_to_japanese:
text = re.sub(regex, replacement, text)
return text
def preprocess_jap(text, with_prosody=False):
text = symbols_to_japanese(text)
# English words to lower case, should have no influence on japanese words.
text = text.lower()
sentences = re.split(_japanese_marks, text)
marks = re.findall(_japanese_marks, text)
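# run g2p on each pure-Japanese chunk, then re-attach the non-Japanese marks between chunks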
text = []
for i, sentence in enumerate(sentences):
if re.match(_japanese_characters, sentence):
if with_prosody:
text += pyopenjtalk_g2p_prosody(sentence)[1:-1]
else:
p = pyopenjtalk.g2p(sentence)
text += p.split(" ")
if i < len(marks):
if marks[i] == " ":  # avoid unexpected UNK
continue
text += [marks[i].replace(" ", "")]
return text
def text_normalize(text):
# todo: jap text normalize
# avoid reference leakage caused by repeated punctuation
text = replace_consecutive_punctuation(text)
return text
# Copied from espnet https://github.com/espnet/espnet/blob/master/espnet2/text/phoneme_tokenizer.py
def pyopenjtalk_g2p_prosody(text, drop_unvoiced_vowels=True):
labels = pyopenjtalk.make_label(pyopenjtalk.run_frontend(text))
N = len(labels)
phones = []
for n in range(N):
lab_curr = labels[n]
# current phoneme
p3 = re.search(r"\-(.*?)\+", lab_curr).group(1)
# deal unvoiced vowels as normal vowels
if drop_unvoiced_vowels and p3 in "AEIOU":
p3 = p3.lower()
# deal with sil at the beginning and the end of text
if p3 == "sil":
assert n == 0 or n == N - 1
if n == 0:
phones.append("^")
elif n == N - 1:
# check question form or not
e3 = _numeric_feature_by_regex(r"!(\d+)_", lab_curr)
if e3 == 0:
phones.append("$")
elif e3 == 1:
phones.append("?")
continue
elif p3 == "pau":
phones.append("_")
continue
else:
phones.append(p3)
# accent type and position info (forward or backward)
a1 = _numeric_feature_by_regex(r"/A:([0-9\-]+)\+", lab_curr)
a2 = _numeric_feature_by_regex(r"\+(\d+)\+", lab_curr)
a3 = _numeric_feature_by_regex(r"\+(\d+)/", lab_curr)
# number of mora in accent phrase
f1 = _numeric_feature_by_regex(r"/F:(\d+)_", lab_curr)
a2_next = _numeric_feature_by_regex(r"\+(\d+)\+", labels[n + 1])
# accent phrase border
if a3 == 1 and a2_next == 1 and p3 in "aeiouAEIOUNcl":
phones.append("#")
# pitch falling
elif a1 == 0 and a2_next == a2 + 1 and a2 != f1:
phones.append("]")
# pitch rising
elif a2 == 1 and a2_next == 2:
phones.append("[")
return phones
# Copied from espnet https://github.com/espnet/espnet/blob/master/espnet2/text/phoneme_tokenizer.py
def _numeric_feature_by_regex(regex, s):
match = re.search(regex, s)
if match is None:
return -50
return int(match.group(1))
def g2p(norm_text, with_prosody=True):
phones = preprocess_jap(norm_text, with_prosody)
phones = [post_replace_ph(i) for i in phones]
# todo: implement tones and word2ph
return phones
if __name__ == "__main__":
phones = g2p("Hello.こんにちは!今日もNiCe天気ですね!tokyotowerに行きましょう!")
print(phones) | --- +++ @@ -149,6 +149,7 @@
def preprocess_jap(text, with_prosody=False):
+ """Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html"""
text = symbols_to_japanese(text)
# English words to lower case, should have no influence on japanese words.
text = text.lower()
@@ -180,6 +181,27 @@
# Copied from espnet https://github.com/espnet/espnet/blob/master/espnet2/text/phoneme_tokenizer.py
def pyopenjtalk_g2p_prosody(text, drop_unvoiced_vowels=True):
+ """Extract phoneme + prosoody symbol sequence from input full-context labels.
+
+ The algorithm is based on `Prosodic features control by symbols as input of
+ sequence-to-sequence acoustic modeling for neural TTS`_ with some r9y9's tweaks.
+
+ Args:
+ text (str): Input text.
+ drop_unvoiced_vowels (bool): whether to drop unvoiced vowels.
+
+ Returns:
+ List[str]: List of phoneme + prosody symbols.
+
+ Examples:
+ >>> from espnet2.text.phoneme_tokenizer import pyopenjtalk_g2p_prosody
+ >>> pyopenjtalk_g2p_prosody("こんにちは。")
+ ['^', 'k', 'o', '[', 'N', 'n', 'i', 'ch', 'i', 'w', 'a', '$']
+
+ .. _`Prosodic features control by symbols as input of sequence-to-sequence acoustic
+ modeling for neural TTS`: https://doi.org/10.1587/transinf.2020EDP7104
+
+ """
labels = pyopenjtalk.make_label(pyopenjtalk.run_frontend(text))
N = len(labels)
@@ -251,4 +273,4 @@
if __name__ == "__main__":
phones = g2p("Hello.こんにちは!今日もNiCe天気ですね!tokyotowerに行きましょう!")
-    print(phones)
+    print(phones)
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/text/japanese.py |
Turn comments into proper docstrings | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from .num import DIGITS
from .num import num2str
from .num import verbalize_cardinal
from .num import verbalize_digit
def _time_num2str(num_string: str) -> str:
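# keep one spoken leading zero, e.g. "05" minutes reads as 零五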
result = num2str(num_string.lstrip("0"))
if num_string.startswith("0"):
result = DIGITS["0"] + result
return result
# time-of-day expression
RE_TIME = re.compile(
r"([0-1]?[0-9]|2[0-3])"
r":([0-5][0-9])"
r"(:([0-5][0-9]))?"
)
# time range, e.g. 8:30-12:30
RE_TIME_RANGE = re.compile(
r"([0-1]?[0-9]|2[0-3])"
r":([0-5][0-9])"
r"(:([0-5][0-9]))?"
r"(~|-)"
r"([0-1]?[0-9]|2[0-3])"
r":([0-5][0-9])"
r"(:([0-5][0-9]))?"
)
def replace_time(match) -> str:
is_range = len(match.groups()) > 5
hour = match.group(1)
minute = match.group(2)
second = match.group(4)
if is_range:
hour_2 = match.group(6)
minute_2 = match.group(7)
second_2 = match.group(9)
result = f"{num2str(hour)}点"
if minute.lstrip("0"):
if int(minute) == 30:
result += "半"
else:
result += f"{_time_num2str(minute)}分"
if second and second.lstrip("0"):
result += f"{_time_num2str(second)}秒"
if is_range:
result += "至"
result += f"{num2str(hour_2)}点"
if minute_2.lstrip("0"):
if int(minute) == 30:
result += "半"
else:
result += f"{_time_num2str(minute_2)}分"
if second_2 and second_2.lstrip("0"):
result += f"{_time_num2str(second_2)}秒"
return result
RE_DATE = re.compile(
r"(\d{4}|\d{2})年"
r"((0?[1-9]|1[0-2])月)?"
r"(((0?[1-9])|((1|2)[0-9])|30|31)([日号]))?"
)
def replace_date(match) -> str:
year = match.group(1)
month = match.group(3)
day = match.group(5)
result = ""
if year:
result += f"{verbalize_digit(year)}年"
if month:
result += f"{verbalize_cardinal(month)}月"
if day:
result += f"{verbalize_cardinal(day)}{match.group(9)}"
return result
# dates like YYYY/MM/DD or YYYY-MM-DD (separator may be -, space, / or .)
RE_DATE2 = re.compile(r"(\d{4})([- /.])(0[1-9]|1[012])\2(0[1-9]|[12][0-9]|3[01])")
def replace_date2(match) -> str:
year = match.group(1)
month = match.group(3)
day = match.group(4)
result = ""
if year:
result += f"{verbalize_digit(year)}年"
if month:
result += f"{verbalize_cardinal(month)}月"
if day:
result += f"{verbalize_cardinal(day)}日"
return result | --- +++ @@ -20,6 +20,7 @@
def _time_num2str(num_string: str) -> str:
+ """A special case for verbalizing number in time."""
result = num2str(num_string.lstrip("0"))
if num_string.startswith("0"):
result = DIGITS["0"] + result
@@ -46,6 +47,12 @@
def replace_time(match) -> str:
+ """
+ Args:
+ match (re.Match)
+ Returns:
+ str
+ """
is_range = len(match.groups()) > 5
@@ -89,6 +96,12 @@
def replace_date(match) -> str:
+ """
+ Args:
+ match (re.Match)
+ Returns:
+ str
+ """
year = match.group(1)
month = match.group(3)
day = match.group(5)
@@ -107,6 +120,12 @@
def replace_date2(match) -> str:
+ """
+ Args:
+ match (re.Match)
+ Returns:
+ str
+ """
year = match.group(1)
month = match.group(3)
day = match.group(4)
@@ -117,4 +136,4 @@ result += f"{verbalize_cardinal(month)}月"
if day:
result += f"{verbalize_cardinal(day)}日"
-    return result
+    return result
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/text/zh_normalization/chronology.py |
Include argument descriptions in docstrings | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from .num import verbalize_digit
# Normalize landline/mobile phone numbers
# Mobile prefixes
# http://www.jihaoba.com/news/show/13680
# China Mobile: 139, 138, 137, 136, 135, 134, 159, 158, 157, 150, 151, 152, 188, 187, 182, 183, 184, 178, 198
# China Unicom: 130, 131, 132, 156, 155, 186, 185, 176
# China Telecom: 133, 153, 189, 180, 181, 177
RE_MOBILE_PHONE = re.compile(r"(?<!\d)((\+?86 ?)?1([38]\d|5[0-35-9]|7[678]|9[89])\d{8})(?!\d)")
RE_TELEPHONE = re.compile(r"(?<!\d)((0(10|2[1-3]|[3-9]\d{2})-?)?[1-9]\d{6,7})(?!\d)")
# nationwide unified service numbers starting with 400
RE_NATIONAL_UNIFORM_NUMBER = re.compile(r"(400)(-)?\d{3}(-)?\d{4}")
def phone2str(phone_string: str, mobile=True) -> str:
if mobile:
sp_parts = phone_string.strip("+").split()
result = ",".join([verbalize_digit(part, alt_one=True) for part in sp_parts])
return result
else:
sil_parts = phone_string.split("-")
result = ",".join([verbalize_digit(part, alt_one=True) for part in sil_parts])
return result
def replace_phone(match) -> str:
return phone2str(match.group(0), mobile=False)
def replace_mobile(match) -> str:
return phone2str(match.group(0)) | --- +++ @@ -40,8 +40,20 @@
def replace_phone(match) -> str:
+ """
+ Args:
+ match (re.Match)
+ Returns:
+ str
+ """
return phone2str(match.group(0), mobile=False)
def replace_mobile(match) -> str:
-    return phone2str(match.group(0))
+    """
+ Args:
+ match (re.Match)
+ Returns:
+ str
+ """
+ return phone2str(match.group(0))
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/text/zh_normalization/phonecode.py |
Create documentation for each function signature | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from collections import OrderedDict
from typing import List
DIGITS = {str(i): tran for i, tran in enumerate("零一二三四五六七八九")}
UNITS = OrderedDict(
{
1: "十",
2: "百",
3: "千",
4: "万",
8: "亿",
}
)
COM_QUANTIFIERS = "(处|台|架|枚|趟|幅|平|方|堵|间|床|株|批|项|例|列|篇|栋|注|亩|封|艘|把|目|套|段|人|所|朵|匹|张|座|回|场|尾|条|个|首|阙|阵|网|炮|顶|丘|棵|只|支|袭|辆|挑|担|颗|壳|窠|曲|墙|群|腔|砣|座|客|贯|扎|捆|刀|令|打|手|罗|坡|山|岭|江|溪|钟|队|单|双|对|出|口|头|脚|板|跳|枝|件|贴|针|线|管|名|位|身|堂|课|本|页|家|户|层|丝|毫|厘|分|钱|两|斤|担|铢|石|钧|锱|忽|(千|毫|微)克|毫|厘|(公)分|分|寸|尺|丈|里|寻|常|铺|程|(千|分|厘|毫|微)米|米|撮|勺|合|升|斗|石|盘|碗|碟|叠|桶|笼|盆|盒|杯|钟|斛|锅|簋|篮|盘|桶|罐|瓶|壶|卮|盏|箩|箱|煲|啖|袋|钵|年|月|日|季|刻|时|周|天|秒|分|小时|旬|纪|岁|世|更|夜|春|夏|秋|冬|代|伏|辈|丸|泡|粒|颗|幢|堆|条|根|支|道|面|片|张|颗|块|元|(亿|千万|百万|万|千|百)|(亿|千万|百万|万|千|百|美|)元|(亿|千万|百万|万|千|百|十|)吨|(亿|千万|百万|万|千|百|)块|角|毛|分)"
# fraction expression
RE_FRAC = re.compile(r"(-?)(\d+)/(\d+)")
def replace_frac(match) -> str:
sign = match.group(1)
nominator = match.group(2)
denominator = match.group(3)
sign: str = "负" if sign else ""
nominator: str = num2str(nominator)
denominator: str = num2str(denominator)
result = f"{sign}{denominator}分之{nominator}"
return result
# percentage expression
RE_PERCENTAGE = re.compile(r"(-?)(\d+(\.\d+)?)%")
def replace_percentage(match) -> str:
sign = match.group(1)
percent = match.group(2)
sign: str = "负" if sign else ""
percent: str = num2str(percent)
result = f"{sign}百分之{percent}"
return result
# integer expressions
# negative signed integer, e.g. -10
RE_INTEGER = re.compile(r"(-)" r"(\d+)")
def replace_negative_num(match) -> str:
sign = match.group(1)
number = match.group(2)
sign: str = "负" if sign else ""
number: str = num2str(number)
result = f"{sign}{number}"
return result
# serial numbers - unsigned integers
# 00078
RE_DEFAULT_NUM = re.compile(r"\d{3}\d*")
def replace_default_num(match):
number = match.group(0)
return verbalize_digit(number, alt_one=True)
# addition, subtraction, multiplication, division
# RE_ASMD = re.compile(
# r'((-?)((\d+)(\.\d+)?)|(\.(\d+)))([\+\-\×÷=])((-?)((\d+)(\.\d+)?)|(\.(\d+)))')
RE_ASMD = re.compile(
r"((-?)((\d+)(\.\d+)?[⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]*)|(\.\d+[⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]*)|([A-Za-z][⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]*))([\+\-\×÷=])((-?)((\d+)(\.\d+)?[⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]*)|(\.\d+[⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]*)|([A-Za-z][⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]*))"
)
asmd_map = {"+": "加", "-": "减", "×": "乘", "÷": "除", "=": "等于"}
def replace_asmd(match) -> str:
result = match.group(1) + asmd_map[match.group(8)] + match.group(9)
return result
# dedicated handling for powers (superscript exponents)
RE_POWER = re.compile(r"[⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]+")
power_map = {
"⁰": "0",
"¹": "1",
"²": "2",
"³": "3",
"⁴": "4",
"⁵": "5",
"⁶": "6",
"⁷": "7",
"⁸": "8",
"⁹": "9",
"ˣ": "x",
"ʸ": "y",
"ⁿ": "n",
}
def replace_power(match) -> str:
power_num = ""
for m in match.group(0):
power_num += power_map[m]
result = "的" + power_num + "次方"
return result
# number expressions
# pure decimal
RE_DECIMAL_NUM = re.compile(r"(-?)((\d+)(\.\d+))" r"|(\.(\d+))")
# positive integer + quantifier
RE_POSITIVE_QUANTIFIERS = re.compile(r"(\d+)([多余几\+])?" + COM_QUANTIFIERS)
RE_NUMBER = re.compile(r"(-?)((\d+)(\.\d+)?)" r"|(\.(\d+))")
def replace_positive_quantifier(match) -> str:
number = match.group(1)
match_2 = match.group(2)
if match_2 == "+":
match_2 = "多"
match_2: str = match_2 if match_2 else ""
quantifiers: str = match.group(3)
number: str = num2str(number)
number = "两" if number == "二" else number
result = f"{number}{match_2}{quantifiers}"
return result
def replace_number(match) -> str:
sign = match.group(1)
number = match.group(2)
pure_decimal = match.group(5)
if pure_decimal:
result = num2str(pure_decimal)
else:
sign: str = "负" if sign else ""
number: str = num2str(number)
result = f"{sign}{number}"
return result
# range expression
# match.group(1) and match.group(8) are copied from RE_NUMBER
RE_RANGE = re.compile(
r"""
(?<![\d\+\-\×÷=]) # negative lookbehind: no other digits or operators right before the range
((-?)((\d+)(\.\d+)?)) # start of range: signed integer or decimal
[-~] # range separator
((-?)((\d+)(\.\d+)?)) # end of range: signed integer or decimal
(?![\d\+\-\×÷=]) # negative lookahead: no other digits or operators right after the range
""",
re.VERBOSE,
)
def replace_range(match) -> str:
first, second = match.group(1), match.group(6)
first = RE_NUMBER.sub(replace_number, first)
second = RE_NUMBER.sub(replace_number, second)
result = f"{first}到{second}"
return result
# "~" range expression, verbalized with 至 ("to")
RE_TO_RANGE = re.compile(
r"((-?)((\d+)(\.\d+)?)|(\.(\d+)))(%|°C|℃|度|摄氏度|cm2|cm²|cm3|cm³|cm|db|ds|kg|km|m2|m²|m³|m3|ml|m|mm|s)[~]((-?)((\d+)(\.\d+)?)|(\.(\d+)))(%|°C|℃|度|摄氏度|cm2|cm²|cm3|cm³|cm|db|ds|kg|km|m2|m²|m³|m3|ml|m|mm|s)"
)
def replace_to_range(match) -> str:
result = match.group(0).replace("~", "至")
return result
RE_VERSION_NUM = re.compile(r"((\d+)(\.\d+)(\.\d+)?(\.\d+)+)")
def replace_vrsion_num(match) -> str:
result = ""
for c in match.group(1):
if c == ".":
result += "点"
else:
result += num2str(c)
return result
def _get_value(value_string: str, use_zero: bool = True) -> List[str]:
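# Recursively split the digit string at the largest unit (十/百/千/万/亿) that fits and verbalize each part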
stripped = value_string.lstrip("0")
if len(stripped) == 0:
return []
elif len(stripped) == 1:
if use_zero and len(stripped) < len(value_string):
return [DIGITS["0"], DIGITS[stripped]]
else:
return [DIGITS[stripped]]
else:
largest_unit = next(power for power in reversed(UNITS.keys()) if power < len(stripped))
first_part = value_string[:-largest_unit]
second_part = value_string[-largest_unit:]
return _get_value(first_part) + [UNITS[largest_unit]] + _get_value(second_part)
def verbalize_cardinal(value_string: str) -> str:
if not value_string:
return ""
# 000 -> '零' , 0 -> '零'
value_string = value_string.lstrip("0")
if len(value_string) == 0:
return DIGITS["0"]
result_symbols = _get_value(value_string)
# verbalized number starting with '一十*' is abbreviated as `十*`
if len(result_symbols) >= 2 and result_symbols[0] == DIGITS["1"] and result_symbols[1] == UNITS[1]:
result_symbols = result_symbols[1:]
return "".join(result_symbols)
def verbalize_digit(value_string: str, alt_one=False) -> str:
result_symbols = [DIGITS[digit] for digit in value_string]
result = "".join(result_symbols)
if alt_one:
result = result.replace("一", "幺")
return result
def num2str(value_string: str) -> str:
integer_decimal = value_string.split(".")
if len(integer_decimal) == 1:
integer = integer_decimal[0]
decimal = ""
elif len(integer_decimal) == 2:
integer, decimal = integer_decimal
else:
raise ValueError(f"The value string: '${value_string}' has more than one point in it.")
result = verbalize_cardinal(integer)
if decimal.endswith("0"):
decimal = decimal.rstrip("0") + "0"
else:
decimal = decimal.rstrip("0")
if decimal:
# '.22' is verbalized as '零点二二'
# '3.20' is verbalized as '三点二零'
result = result if result else "零"
result += "点" + verbalize_digit(decimal)
return result | --- +++ @@ -11,6 +11,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+"""
+Rules to verbalize numbers into Chinese characters.
+https://zh.wikipedia.org/wiki/中文数字#現代中文
+"""
import re
from collections import OrderedDict
@@ -34,6 +38,12 @@
def replace_frac(match) -> str:
+ """
+ Args:
+ match (re.Match)
+ Returns:
+ str
+ """
sign = match.group(1)
nominator = match.group(2)
denominator = match.group(3)
@@ -49,6 +59,12 @@
def replace_percentage(match) -> str:
+ """
+ Args:
+ match (re.Match)
+ Returns:
+ str
+ """
sign = match.group(1)
percent = match.group(2)
sign: str = "负" if sign else ""
@@ -63,6 +79,12 @@
def replace_negative_num(match) -> str:
+ """
+ Args:
+ match (re.Match)
+ Returns:
+ str
+ """
sign = match.group(1)
number = match.group(2)
sign: str = "负" if sign else ""
@@ -77,6 +99,12 @@
def replace_default_num(match):
+ """
+ Args:
+ match (re.Match)
+ Returns:
+ str
+ """
number = match.group(0)
return verbalize_digit(number, alt_one=True)
@@ -92,6 +120,12 @@
def replace_asmd(match) -> str:
+ """
+ Args:
+ match (re.Match)
+ Returns:
+ str
+ """
result = match.group(1) + asmd_map[match.group(8)] + match.group(9)
return result
@@ -117,6 +151,12 @@
def replace_power(match) -> str:
+ """
+ Args:
+ match (re.Match)
+ Returns:
+ str
+ """
power_num = ""
for m in match.group(0):
power_num += power_map[m]
@@ -133,6 +173,12 @@
def replace_positive_quantifier(match) -> str:
+ """
+ Args:
+ match (re.Match)
+ Returns:
+ str
+ """
number = match.group(1)
match_2 = match.group(2)
if match_2 == "+":
@@ -146,6 +192,12 @@
def replace_number(match) -> str:
+ """
+ Args:
+ match (re.Match)
+ Returns:
+ str
+ """
sign = match.group(1)
number = match.group(2)
pure_decimal = match.group(5)
@@ -174,6 +226,12 @@
def replace_range(match) -> str:
+ """
+ Args:
+ match (re.Match)
+ Returns:
+ str
+ """
first, second = match.group(1), match.group(6)
first = RE_NUMBER.sub(replace_number, first)
second = RE_NUMBER.sub(replace_number, second)
@@ -188,12 +246,24 @@
def replace_to_range(match) -> str:
+ """
+ Args:
+ match (re.Match)
+ Returns:
+ str
+ """
result = match.group(0).replace("~", "至")
return result
RE_VERSION_NUM = re.compile(r"((\d+)(\.\d+)(\.\d+)?(\.\d+)+)")
def replace_vrsion_num(match) -> str:
+ """
+ Args:
+ match (re.Match)
+ Returns:
+ str
+ """
result = ""
for c in match.group(1):
if c == ".":
@@ -266,4 +336,4 @@ # '3.20' is verbalized as '三点二零'
result = result if result else "零"
result += "点" + verbalize_digit(decimal)
-    return result
+    return result
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/text/zh_normalization/num.py |
Write docstrings for backend logic | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# This implementation is inspired from
# https://github.com/lucidrains/vector-quantize-pytorch
# which is released under MIT License. Hereafter, the original license:
# MIT License
#
# Copyright (c) 2020 Phil Wang
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import typing as tp
from einops import rearrange, repeat
import torch
from torch import nn
import torch.nn.functional as F
import torch.distributed as dist
from module.distrib import broadcast_tensors, is_distributed
from module.ddp_utils import SyncFunction
from tqdm import tqdm
def default(val: tp.Any, d: tp.Any) -> tp.Any:
return val if val is not None else d
def ema_inplace(moving_avg, new, decay: float):
moving_avg.data.mul_(decay).add_(new, alpha=(1 - decay))
def laplace_smoothing(x, n_categories: int, epsilon: float = 1e-5):
return (x + epsilon) / (x.sum() + n_categories * epsilon)
def uniform_init(*shape: int):
t = torch.empty(shape)
nn.init.kaiming_uniform_(t)
return t
def sample_vectors(samples, num: int):
num_samples, device = samples.shape[0], samples.device
if num_samples >= num:
indices = torch.randperm(num_samples, device=device)[:num]
else:
indices = torch.randint(0, num_samples, (num,), device=device)
return samples[indices]
def kmeans(samples, num_clusters: int, num_iters: int = 10, frames_to_use: int = 10_000, batch_size: int = 64):
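# Plain Lloyd's k-means on a random subset of frames; distances are computed in mini-batches to bound memory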
N, D = samples.shape
dtype, device = samples.dtype, samples.device
if frames_to_use < N:
indices = torch.randperm(N, device=device)[:frames_to_use]
samples = samples[indices]
means = sample_vectors(samples, num_clusters)
print("kmeans start ... ")
for _ in tqdm(range(num_iters)):
# Store cluster assignments
all_assignments = []
for i in range(0, samples.shape[0], batch_size):
batch = samples[i : i + batch_size] # [B, D]
dists = torch.cdist(batch, means, p=2) # [B, C]
assignments = dists.argmin(dim=1) # [B]
all_assignments.append(assignments)
buckets = torch.cat(all_assignments, dim=0) # [N]
bins = torch.bincount(buckets, minlength=num_clusters)
zero_mask = bins == 0
bins_min_clamped = bins.masked_fill(zero_mask, 1)
# Compute new means
new_means = torch.zeros_like(means)
for i in range(num_clusters):
mask = buckets == i
if mask.any():
new_means[i] = samples[mask].mean(dim=0)
means = torch.where(zero_mask[:, None], means, new_means)
return means, bins
class EuclideanCodebook(nn.Module):
def __init__(
self,
dim: int,
codebook_size: int,
kmeans_init: int = False,
kmeans_iters: int = 10,
decay: float = 0.99,
epsilon: float = 1e-5,
threshold_ema_dead_code: int = 2,
):
super().__init__()
self.decay = decay
init_fn: tp.Union[tp.Callable[..., torch.Tensor], tp.Any] = uniform_init if not kmeans_init else torch.zeros
embed = init_fn(codebook_size, dim)
self.codebook_size = codebook_size
self.kmeans_iters = kmeans_iters
self.epsilon = epsilon
self.threshold_ema_dead_code = threshold_ema_dead_code
self.register_buffer("inited", torch.Tensor([not kmeans_init]))
self.register_buffer("cluster_size", torch.zeros(codebook_size))
self.register_buffer("embed", embed)
self.register_buffer("embed_avg", embed.clone())
@torch.jit.ignore
def init_embed_(self, data):
if self.inited:
return
if dist.is_available() and dist.is_initialized():
# [B * T * world_size, D]
data = SyncFunction.apply(data)
if dist.get_rank() == 0:
embed, cluster_size = kmeans(data, self.codebook_size, self.kmeans_iters)
else:
embed = torch.empty_like(self.embed)
cluster_size = torch.empty_like(self.cluster_size)
dist.broadcast(embed, src=0)
dist.broadcast(cluster_size, src=0)
self.embed.data.copy_(embed)
self.embed_avg.data.copy_(embed.clone())
self.cluster_size.data.copy_(cluster_size)
self.inited.data.copy_(torch.Tensor([True]))
# Make sure all buffers across workers are in sync after initialization
broadcast_tensors(self.buffers())
def replace_(self, samples, mask):
modified_codebook = torch.where(mask[..., None], sample_vectors(samples, self.codebook_size), self.embed)
self.embed.data.copy_(modified_codebook)
def expire_codes_(self, batch_samples):
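# Replace codes whose EMA usage dropped below the threshold with fresh vectors sampled from the batch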
if self.threshold_ema_dead_code == 0:
return
expired_codes = self.cluster_size < self.threshold_ema_dead_code
if not torch.any(expired_codes):
return
if is_distributed():
# [B * T * world_size, D]
batch_samples = SyncFunction.apply(batch_samples)
if dist.get_rank() == 0:
new_embeds = sample_vectors(batch_samples, expired_codes.sum())
else:
new_embeds = torch.zeros(expired_codes.sum(), self.embed.size(1), device=self.embed.device)
dist.broadcast(new_embeds, src=0)
self.embed.data[expired_codes] = new_embeds
broadcast_tensors(self.buffers())
def preprocess(self, x):
x = rearrange(x, "... d -> (...) d")
return x
def quantize(self, x):
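# nearest code via the expansion ||x - e||^2 = ||x||^2 - 2*x.e + ||e||^2; argmax of the negated distance picks the closest embedding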
embed = self.embed.t()
dist = -(x.pow(2).sum(1, keepdim=True) - 2 * x @ embed + embed.pow(2).sum(0, keepdim=True))
embed_ind = dist.max(dim=-1).indices
return embed_ind
def postprocess_emb(self, embed_ind, shape):
return embed_ind.view(*shape[:-1])
def dequantize(self, embed_ind):
quantize = F.embedding(embed_ind, self.embed)
return quantize
def encode(self, x):
shape = x.shape
# pre-process
x = self.preprocess(x)
# quantize
embed_ind = self.quantize(x)
# post-process
embed_ind = self.postprocess_emb(embed_ind, shape)
return embed_ind
def decode(self, embed_ind):
quantize = self.dequantize(embed_ind)
return quantize
def forward(self, x):
shape, dtype = x.shape, x.dtype
x = self.preprocess(x)
self.init_embed_(x)
embed_ind = self.quantize(x)
embed_onehot = F.one_hot(embed_ind, self.codebook_size).type(dtype)
embed_ind = self.postprocess_emb(embed_ind, shape)
quantize = self.dequantize(embed_ind)
if self.training:
### Update codebook by EMA
embed_onehot_sum = embed_onehot.sum(0) # [cb-size,]
embed_sum = x.t() @ embed_onehot # [D, cb-size]
if is_distributed():
dist.all_reduce(embed_onehot_sum)
dist.all_reduce(embed_sum)
# Update ema cluster count N_i^t, eq. (6) in vqvae paper
self.cluster_size.data.mul_(self.decay).add_(embed_onehot_sum, alpha=1 - self.decay)
# Update ema embed: eq. (7) in vqvae paper
self.embed_avg.data.mul_(self.decay).add_(embed_sum.t(), alpha=1 - self.decay)
# apply laplace smoothing
n = self.cluster_size.sum()
cluster_size = (self.cluster_size + self.epsilon) / (n + self.codebook_size * self.epsilon) * n
# Update ema embed: eq. (8) in vqvae paper
embed_normalized = self.embed_avg / cluster_size.unsqueeze(1)
self.embed.data.copy_(embed_normalized)
# We do the expiry of code at that point as buffers are in sync
# and all the workers will take the same decision.
self.expire_codes_(x)
return quantize, embed_ind
class VectorQuantization(nn.Module):
def __init__(
self,
dim: int,
codebook_size: int,
codebook_dim: tp.Optional[int] = None,
decay: float = 0.99,
epsilon: float = 1e-5,
kmeans_init: bool = True,
kmeans_iters: int = 50,
threshold_ema_dead_code: int = 2,
commitment_weight: float = 1.0,
):
super().__init__()
_codebook_dim: int = default(codebook_dim, dim)
requires_projection = _codebook_dim != dim
self.project_in = nn.Linear(dim, _codebook_dim) if requires_projection else nn.Identity()
self.project_out = nn.Linear(_codebook_dim, dim) if requires_projection else nn.Identity()
self.epsilon = epsilon
self.commitment_weight = commitment_weight
self._codebook = EuclideanCodebook(
dim=_codebook_dim,
codebook_size=codebook_size,
kmeans_init=kmeans_init,
kmeans_iters=kmeans_iters,
decay=decay,
epsilon=epsilon,
threshold_ema_dead_code=threshold_ema_dead_code,
)
self.codebook_size = codebook_size
@property
def codebook(self):
return self._codebook.embed
def encode(self, x):
x = rearrange(x, "b d n -> b n d")
x = self.project_in(x)
embed_in = self._codebook.encode(x)
return embed_in
def decode(self, embed_ind):
quantize = self._codebook.decode(embed_ind)
quantize = self.project_out(quantize)
quantize = rearrange(quantize, "b n d -> b d n")
return quantize
def forward(self, x):
device = x.device
x = rearrange(x, "b d n -> b n d")
x = self.project_in(x)
quantize, embed_ind = self._codebook(x)
if self.training:
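# straight-through estimator: gradients flow back to x as if quantization were the identity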
quantize = x + (quantize - x).detach()
loss = torch.tensor([0.0], device=device, requires_grad=self.training)
if self.training:
if self.commitment_weight > 0:
commit_loss = F.mse_loss(quantize.detach(), x)
loss = loss + commit_loss * self.commitment_weight
quantize = self.project_out(quantize)
quantize = rearrange(quantize, "b n d -> b d n")
return quantize, embed_ind, loss
class ResidualVectorQuantization(nn.Module):
def __init__(self, *, num_quantizers, **kwargs):
super().__init__()
self.layers = nn.ModuleList([VectorQuantization(**kwargs) for _ in range(num_quantizers)])
def forward(self, x, n_q: tp.Optional[int] = None, layers: tp.Optional[list] = None):
quantized_out = 0.0
residual = x
all_losses = []
all_indices = []
out_quantized = []
n_q = n_q or len(self.layers)
for i, layer in enumerate(self.layers[:n_q]):
quantized, indices, loss = layer(residual)
residual = residual - quantized
quantized_out = quantized_out + quantized
all_indices.append(indices)
all_losses.append(loss)
if layers and i in layers:
out_quantized.append(quantized)
out_losses, out_indices = map(torch.stack, (all_losses, all_indices))
return quantized_out, out_indices, out_losses, out_quantized
def encode(self, x: torch.Tensor, n_q: tp.Optional[int] = None, st: tp.Optional[int] = None) -> torch.Tensor:
residual = x
all_indices = []
n_q = n_q or len(self.layers)
st = st or 0
for layer in self.layers[st:n_q]:
indices = layer.encode(residual)
quantized = layer.decode(indices)
residual = residual - quantized
all_indices.append(indices)
out_indices = torch.stack(all_indices)
return out_indices
def decode(self, q_indices: torch.Tensor, st: int = 0) -> torch.Tensor:
quantized_out = torch.tensor(0.0, device=q_indices.device)
for i, indices in enumerate(q_indices):
layer = self.layers[st + i]
quantized = layer.decode(indices)
quantized_out = quantized_out + quantized
return quantized_out | --- +++ @@ -29,6 +29,7 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
+"""Core vector quantization implementation."""
import typing as tp
@@ -111,6 +112,20 @@
class EuclideanCodebook(nn.Module):
+ """Codebook with Euclidean distance.
+ Args:
+ dim (int): Dimension.
+ codebook_size (int): Codebook size.
+ kmeans_init (bool): Whether to use k-means to initialize the codebooks.
+ If set to true, run the k-means algorithm on the first training batch and use
+ the learned centroids as initialization.
+ kmeans_iters (int): Number of iterations used for k-means algorithm at initialization.
+ decay (float): Decay for exponential moving average over the codebooks.
+ epsilon (float): Epsilon value for numerical stability.
+ threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes
+ that have an exponential moving average cluster size less than the specified threshold with
+ randomly selected vector from the current batch.
+ """
def __init__(
self,
@@ -254,6 +269,21 @@
class VectorQuantization(nn.Module):
+ """Vector quantization implementation.
+ Currently supports only euclidean distance.
+ Args:
+ dim (int): Dimension
+ codebook_size (int): Codebook size
+ codebook_dim (int): Codebook dimension. If not defined, uses the specified dimension in dim.
+ decay (float): Decay for exponential moving average over the codebooks.
+ epsilon (float): Epsilon value for numerical stability.
+ kmeans_init (bool): Whether to use kmeans to initialize the codebooks.
+ kmeans_iters (int): Number of iterations used for kmeans initialization.
+ threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes
+ that have an exponential moving average cluster size less than the specified threshold with
+ randomly selected vector from the current batch.
+ commitment_weight (float): Weight for commitment loss.
+ """
def __init__(
self,
@@ -327,6 +357,9 @@
class ResidualVectorQuantization(nn.Module):
+ """Residual vector quantization implementation.
+ Follows Algorithm 1. in https://arxiv.org/pdf/2107.03312.pdf
+ """
def __init__(self, *, num_quantizers, **kwargs):
super().__init__()
@@ -374,4 +407,4 @@ layer = self.layers[st + i]
quantized = layer.decode(indices)
quantized_out = quantized_out + quantized
-    return quantized_out
+    return quantized_out
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/module/core_vq.py |
Provide docstrings following PEP 257 | import torch
import torch.utils.data
from librosa.filters import mel as librosa_mel_fn
MAX_WAV_VALUE = 32768.0
def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
return torch.log(torch.clamp(x, min=clip_val) * C)
def dynamic_range_decompression_torch(x, C=1):
return torch.exp(x) / C
def spectral_normalize_torch(magnitudes):
output = dynamic_range_compression_torch(magnitudes)
return output
def spectral_de_normalize_torch(magnitudes):
output = dynamic_range_decompression_torch(magnitudes)
return output
mel_basis = {}
hann_window = {}
def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
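# Hann windows are cached per (dtype, device, fft/hop/win) key so repeated calls reuse the same tensor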
if torch.min(y) < -1.2:
print("min value is ", torch.min(y))
if torch.max(y) > 1.2:
print("max value is ", torch.max(y))
global hann_window
dtype_device = str(y.dtype) + "_" + str(y.device)
# wnsize_dtype_device = str(win_size) + '_' + dtype_device
key = "%s-%s-%s-%s-%s" % (dtype_device, n_fft, sampling_rate, hop_size, win_size)
# if wnsize_dtype_device not in hann_window:
if key not in hann_window:
# hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
hann_window[key] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
y = torch.nn.functional.pad(
y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode="reflect"
)
y = y.squeeze(1)
# spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
spec = torch.stft(
y,
n_fft,
hop_length=hop_size,
win_length=win_size,
window=hann_window[key],
center=center,
pad_mode="reflect",
normalized=False,
onesided=True,
return_complex=False,
)
spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-8)
return spec
def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
global mel_basis
dtype_device = str(spec.dtype) + "_" + str(spec.device)
# fmax_dtype_device = str(fmax) + '_' + dtype_device
key = "%s-%s-%s-%s-%s-%s" % (dtype_device, n_fft, num_mels, sampling_rate, fmin, fmax)
# if fmax_dtype_device not in mel_basis:
if key not in mel_basis:
mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
# mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
mel_basis[key] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
# spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
spec = torch.matmul(mel_basis[key], spec)
spec = spectral_normalize_torch(spec)
return spec
def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
if torch.min(y) < -1.2:
print("min value is ", torch.min(y))
if torch.max(y) > 1.2:
print("max value is ", torch.max(y))
global mel_basis, hann_window
dtype_device = str(y.dtype) + "_" + str(y.device)
# fmax_dtype_device = str(fmax) + '_' + dtype_device
fmax_dtype_device = "%s-%s-%s-%s-%s-%s-%s-%s" % (
dtype_device,
n_fft,
num_mels,
sampling_rate,
hop_size,
win_size,
fmin,
fmax,
)
# wnsize_dtype_device = str(win_size) + '_' + dtype_device
wnsize_dtype_device = fmax_dtype_device
if fmax_dtype_device not in mel_basis:
mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
if wnsize_dtype_device not in hann_window:
hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
y = torch.nn.functional.pad(
y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode="reflect"
)
y = y.squeeze(1)
spec = torch.stft(
y,
n_fft,
hop_length=hop_size,
win_length=win_size,
window=hann_window[wnsize_dtype_device],
center=center,
pad_mode="reflect",
normalized=False,
onesided=True,
return_complex=False,
)
spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-8)
spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
spec = spectral_normalize_torch(spec)
return spec | --- +++ @@ -6,10 +6,20 @@
def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
+ """
+ PARAMS
+ ------
+ C: compression factor
+ """
return torch.log(torch.clamp(x, min=clip_val) * C)
def dynamic_range_decompression_torch(x, C=1):
+ """
+ PARAMS
+ ------
+ C: compression factor used to compress
+ """
return torch.exp(x) / C
@@ -130,4 +140,4 @@ spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
spec = spectral_normalize_torch(spec)
- return spec
+ return spec
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/module/mel_processing.py |
Add docstrings to clarify complex logic | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
import torch
def rank():
if torch.distributed.is_initialized():
return torch.distributed.get_rank()
else:
return 0
def world_size():
if torch.distributed.is_initialized():
return torch.distributed.get_world_size()
else:
return 1
def is_distributed():
return world_size() > 1
def all_reduce(tensor: torch.Tensor, op=torch.distributed.ReduceOp.SUM):
if is_distributed():
return torch.distributed.all_reduce(tensor, op)
def _is_complex_or_float(tensor):
return torch.is_floating_point(tensor) or torch.is_complex(tensor)
def _check_number_of_params(params: tp.List[torch.Tensor]):
# utility function to check that the number of params in all workers is the same,
# and thus avoid a deadlock with distributed all reduce.
if not is_distributed() or not params:
return
# print('params[0].device ', params[0].device)
tensor = torch.tensor([len(params)], device=params[0].device, dtype=torch.long)
all_reduce(tensor)
if tensor.item() != len(params) * world_size():
# If the workers do not all have the same number of params,
# this inequality will hold for at least one of them.
raise RuntimeError(
f"Mismatch in number of params: ours is {len(params)}, at least one worker has a different one."
)
def broadcast_tensors(tensors: tp.Iterable[torch.Tensor], src: int = 0):
if not is_distributed():
return
tensors = [tensor for tensor in tensors if _is_complex_or_float(tensor)]
_check_number_of_params(tensors)
handles = []
for tensor in tensors:
handle = torch.distributed.broadcast(tensor.data, src=src, async_op=True)
handles.append(handle)
for handle in handles:
handle.wait()
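# Hedged usage sketch (editor-added): make every worker start from rank 0's
# weights; non-float buffers are filtered out by _is_complex_or_float above.
def _demo_broadcast_model(model: torch.nn.Module):
    broadcast_tensors(model.state_dict().values())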
def sync_buffer(buffers, average=True):
if not is_distributed():
return
handles = []
for buffer in buffers:
if torch.is_floating_point(buffer.data):
if average:
handle = torch.distributed.all_reduce(buffer.data, op=torch.distributed.ReduceOp.SUM, async_op=True)
else:
handle = torch.distributed.broadcast(buffer.data, src=0, async_op=True)
handles.append((buffer, handle))
for buffer, handle in handles:
handle.wait()
if average:
buffer.data /= world_size()
def sync_grad(params):
if not is_distributed():
return
handles = []
for p in params:
if p.grad is not None:
handle = torch.distributed.all_reduce(p.grad.data, op=torch.distributed.ReduceOp.SUM, async_op=True)
handles.append((p, handle))
for p, handle in handles:
handle.wait()
p.grad.data /= world_size()
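# Hedged usage sketch (editor-added; model/optimizer/loss_fn/batch/target are
# placeholders): manual gradient averaging in place of DistributedDataParallel,
# called right after backward().
def _demo_train_step(model, optimizer, loss_fn, batch, target):
    loss = loss_fn(model(batch), target)
    loss.backward()
    sync_grad(model.parameters())  # SUM all-reduce, then divide by world size
    optimizer.step()
    optimizer.zero_grad()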
def average_metrics(metrics: tp.Dict[str, float], count=1.0):
if not is_distributed():
return metrics
keys, values = zip(*metrics.items())
device = "cuda" if torch.cuda.is_available() else "cpu"
tensor = torch.tensor(list(values) + [1], device=device, dtype=torch.float32)
tensor *= count
all_reduce(tensor)
averaged = (tensor[:-1] / tensor[-1]).cpu().tolist()
return dict(zip(keys, averaged)) | --- +++ @@ -4,6 +4,7 @@ # This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
+"""Torch distributed utilities."""
import typing as tp
@@ -54,6 +55,9 @@
def broadcast_tensors(tensors: tp.Iterable[torch.Tensor], src: int = 0):
+ """Broadcast the tensors from the given parameters to all workers.
+ This can be used to ensure that all workers have the same model to start with.
+ """
if not is_distributed():
return
tensors = [tensor for tensor in tensors if _is_complex_or_float(tensor)]
@@ -67,6 +71,9 @@
def sync_buffer(buffers, average=True):
+ """
+ Sync grad for buffers. If average is False, broadcast instead of averaging.
+ """
if not is_distributed():
return
handles = []
@@ -84,6 +91,11 @@
def sync_grad(params):
+ """
+ Simpler alternative to DistributedDataParallel, that doesn't rely
+ on any black magic. For simple models it can also be as fast.
+ Just call this on your model parameters after the call to backward!
+ """
if not is_distributed():
return
handles = []
@@ -97,6 +109,9 @@
def average_metrics(metrics: tp.Dict[str, float], count=1.0):
+ """Average a dictionary of metrics across all workers, using the optional
+ `count` as unnormalized weight.
+ """
if not is_distributed():
return metrics
keys, values = zip(*metrics.items())
@@ -105,4 +120,4 @@ tensor *= count
all_reduce(tensor)
averaged = (tensor[:-1] / tensor[-1]).cpu().tolist()
- return dict(zip(keys, averaged))
+ return dict(zip(keys, averaged))
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/module/distrib.py |
Generate descriptive docstrings automatically | import os
import random
import traceback
import torch
import torch.utils.data
from tqdm import tqdm
from module.mel_processing import spectrogram_torch, spec_to_mel_torch
from text import cleaned_text_to_sequence
import torch.nn.functional as F
from tools.my_utils import load_audio
version = os.environ.get("version", None)
# ZeroDivisionError fixed by Tybost (https://github.com/RVC-Boss/GPT-SoVITS/issues/79)
class TextAudioSpeakerLoader(torch.utils.data.Dataset):
def __init__(self, hparams, version=None, val=False):
exp_dir = hparams.exp_dir
self.path2 = "%s/2-name2text.txt" % exp_dir
self.path4 = "%s/4-cnhubert" % exp_dir
self.path5 = "%s/5-wav32k" % exp_dir
assert os.path.exists(self.path2)
assert os.path.exists(self.path4)
assert os.path.exists(self.path5)
self.is_v2Pro = version in {"v2Pro", "v2ProPlus"}
if self.is_v2Pro:
self.path7 = "%s/7-sv_cn" % exp_dir
assert os.path.exists(self.path7)
names4 = set([name[:-3] for name in list(os.listdir(self.path4))]) # strip the .pt suffix
names5 = set(os.listdir(self.path5))
if self.is_v2Pro:
names6 = set([name[:-3] for name in list(os.listdir(self.path7))]) # strip the .pt suffix
self.phoneme_data = {}
with open(self.path2, "r", encoding="utf8") as f:
lines = f.read().strip("\n").split("\n")
for line in lines:
tmp = line.split("\t")
if len(tmp) != 4:
continue
self.phoneme_data[tmp[0]] = [tmp[1]]
if self.is_v2Pro:
self.audiopaths_sid_text = list(set(self.phoneme_data) & names4 & names5 & names6)
else:
self.audiopaths_sid_text = list(set(self.phoneme_data) & names4 & names5)
tmp = self.audiopaths_sid_text
leng = len(tmp)
min_num = 100
if leng < min_num:
self.audiopaths_sid_text = []
for _ in range(max(2, int(min_num / leng))):
self.audiopaths_sid_text += tmp
self.max_wav_value = hparams.max_wav_value
self.sampling_rate = hparams.sampling_rate
self.filter_length = hparams.filter_length
self.hop_length = hparams.hop_length
self.win_length = hparams.win_length
self.sampling_rate = hparams.sampling_rate
self.val = val
random.seed(1234)
random.shuffle(self.audiopaths_sid_text)
print("phoneme_data_len:", len(self.phoneme_data.keys()))
print("wav_data_len:", len(self.audiopaths_sid_text))
audiopaths_sid_text_new = []
lengths = []
skipped_phone = 0
skipped_dur = 0
for audiopath in tqdm(self.audiopaths_sid_text):
try:
phoneme = self.phoneme_data[audiopath][0]
phoneme = phoneme.split(" ")
phoneme_ids = cleaned_text_to_sequence(phoneme, version)
except Exception:
print(f"{audiopath} not in self.phoneme_data !")
skipped_phone += 1
continue
size = os.path.getsize("%s/%s" % (self.path5, audiopath))
duration = size / self.sampling_rate / 2
if duration == 0:
print(f"Zero duration for {audiopath}, skipping...")
skipped_dur += 1
continue
if 54 > duration > 0.6 or self.val:
audiopaths_sid_text_new.append([audiopath, phoneme_ids])
lengths.append(size // (2 * self.hop_length))
else:
skipped_dur += 1
continue
print("skipped_phone: ", skipped_phone, ", skipped_dur: ", skipped_dur)
print("total left: ", len(audiopaths_sid_text_new))
assert len(audiopaths_sid_text_new) > 1 # must have at least enough samples for one batch (TODO)
self.audiopaths_sid_text = audiopaths_sid_text_new
self.lengths = lengths
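# Hedged worked example (editor-added, illustrative numbers): the duration
# filter above treats each file as 16-bit (2-byte) mono PCM, so
#   size = 320000 bytes at 32000 Hz -> 320000 / 32000 / 2 = 5.0 s -> kept (0.6 < 5.0 < 54)
#   size = 32000 bytes              -> 0.5 s                      -> skipped (< 0.6 s)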
def get_audio_text_speaker_pair(self, audiopath_sid_text):
audiopath, phoneme_ids = audiopath_sid_text
text = torch.FloatTensor(phoneme_ids)
try:
spec, wav = self.get_audio("%s/%s" % (self.path5, audiopath))
with torch.no_grad():
ssl = torch.load("%s/%s.pt" % (self.path4, audiopath), map_location="cpu")
if ssl.shape[-1] != spec.shape[-1]:
typee = ssl.dtype
ssl = F.pad(ssl.float(), (0, 1), mode="replicate").to(typee)
ssl.requires_grad = False
if self.is_v2Pro:
sv_emb = torch.load("%s/%s.pt" % (self.path7, audiopath), map_location="cpu")
except Exception:
traceback.print_exc()
spec = torch.zeros(1025, 100)
wav = torch.zeros(1, 100 * self.hop_length)
ssl = torch.zeros(1, 768, 100)
text = text[-1:]
if self.is_v2Pro:
sv_emb = torch.zeros(1, 20480)
print("load audio or ssl error!!!!!!", audiopath)
if self.is_v2Pro:
return (ssl, spec, wav, text, sv_emb)
else:
return (ssl, spec, wav, text)
def get_audio(self, filename):
audio_array = load_audio(filename, self.sampling_rate) # load_audio already normalizes to [-1, 1]; no further /32768 needed
audio = torch.FloatTensor(audio_array) # /32768
audio_norm = audio
audio_norm = audio_norm.unsqueeze(0)
spec = spectrogram_torch(
audio_norm, self.filter_length, self.sampling_rate, self.hop_length, self.win_length, center=False
)
spec = torch.squeeze(spec, 0)
return spec, audio_norm
def get_sid(self, sid):
sid = torch.LongTensor([int(sid)])
return sid
def __getitem__(self, index):
# with torch.no_grad():
return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])
def __len__(self):
return len(self.audiopaths_sid_text)
def random_slice(self, ssl, wav, mel):
assert abs(ssl.shape[-1] - wav.shape[-1] // self.hop_length) < 3, ("first", ssl.shape, wav.shape)
len_mel = mel.shape[1]
if self.val:
reference_mel = mel[:, : len_mel // 3]
return reference_mel, ssl, wav, mel
dir = random.randint(0, 1)
sep_point = random.randint(int(len_mel // 3), int(len_mel // 3 * 2))
if dir == 0:
reference_mel = mel[:, :sep_point]
ssl = ssl[:, :, sep_point:]
wav2 = wav[:, sep_point * self.hop_length :]
mel = mel[:, sep_point:]
else:
reference_mel = mel[:, sep_point:]
ssl = ssl[:, :, :sep_point]
wav2 = wav[:, : sep_point * self.hop_length]
mel = mel[:, :sep_point]
assert abs(ssl.shape[-1] - wav2.shape[-1] // self.hop_length) < 3, (
ssl.shape,
wav.shape,
wav2.shape,
mel.shape,
sep_point,
self.hop_length,
sep_point * self.hop_length,
dir,
)
return reference_mel, ssl, wav2, mel
class TextAudioSpeakerCollate:
def __init__(self, return_ids=False, version=None):
self.return_ids = return_ids
self.is_v2Pro = version in {"v2Pro", "v2ProPlus"}
def __call__(self, batch):
# Right zero-pad all one-hot text sequences to max input length
_, ids_sorted_decreasing = torch.sort(torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True)
max_ssl_len = max([x[0].size(2) for x in batch])
max_ssl_len = int(2 * ((max_ssl_len // 2) + 1))
max_spec_len = max([x[1].size(1) for x in batch])
max_spec_len = int(2 * ((max_spec_len // 2) + 1))
max_wav_len = max([x[2].size(1) for x in batch])
max_text_len = max([x[3].size(0) for x in batch])
ssl_lengths = torch.LongTensor(len(batch))
spec_lengths = torch.LongTensor(len(batch))
wav_lengths = torch.LongTensor(len(batch))
text_lengths = torch.LongTensor(len(batch))
spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
ssl_padded = torch.FloatTensor(len(batch), batch[0][0].size(1), max_ssl_len)
text_padded = torch.LongTensor(len(batch), max_text_len)
spec_padded.zero_()
wav_padded.zero_()
ssl_padded.zero_()
text_padded.zero_()
if self.is_v2Pro:
sv_embs = torch.FloatTensor(len(batch), 20480)
for i in range(len(ids_sorted_decreasing)):
row = batch[ids_sorted_decreasing[i]]
ssl = row[0]
ssl_padded[i, :, : ssl.size(2)] = ssl[0, :, :]
ssl_lengths[i] = ssl.size(2)
spec = row[1]
spec_padded[i, :, : spec.size(1)] = spec
spec_lengths[i] = spec.size(1)
wav = row[2]
wav_padded[i, :, : wav.size(1)] = wav
wav_lengths[i] = wav.size(1)
text = row[3]
text_padded[i, : text.size(0)] = text
text_lengths[i] = text.size(0)
if self.is_v2Pro:
sv_embs[i] = row[4]
if self.is_v2Pro:
return (
ssl_padded,
ssl_lengths,
spec_padded,
spec_lengths,
wav_padded,
wav_lengths,
text_padded,
text_lengths,
sv_embs,
)
else:
return (
ssl_padded,
ssl_lengths,
spec_padded,
spec_lengths,
wav_padded,
wav_lengths,
text_padded,
text_lengths,
)
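# Hedged wiring sketch (editor-added; hparams and the boundaries values are
# placeholders): how the loader, the bucket sampler defined at the end of this
# file, and this collate typically fit together.
# dataset = TextAudioSpeakerLoader(hparams)
# sampler = DistributedBucketSampler(dataset, batch_size=8, boundaries=[32, 300, 400, 500])
# loader = torch.utils.data.DataLoader(dataset, batch_sampler=sampler,
#                                      collate_fn=TextAudioSpeakerCollate())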
class TextAudioSpeakerLoaderV3(torch.utils.data.Dataset):
def __init__(self, hparams, val=False):
exp_dir = hparams.exp_dir
self.path2 = "%s/2-name2text.txt" % exp_dir
self.path4 = "%s/4-cnhubert" % exp_dir
self.path5 = "%s/5-wav32k" % exp_dir
assert os.path.exists(self.path2)
assert os.path.exists(self.path4)
assert os.path.exists(self.path5)
names4 = set([name[:-3] for name in list(os.listdir(self.path4))]) # strip the .pt suffix
names5 = set(os.listdir(self.path5))
self.phoneme_data = {}
with open(self.path2, "r", encoding="utf8") as f:
lines = f.read().strip("\n").split("\n")
for line in lines:
tmp = line.split("\t")
if len(tmp) != 4:
continue
self.phoneme_data[tmp[0]] = [tmp[1]]
self.audiopaths_sid_text = list(set(self.phoneme_data) & names4 & names5)
tmp = self.audiopaths_sid_text
leng = len(tmp)
min_num = 100
if leng < min_num:
self.audiopaths_sid_text = []
for _ in range(max(2, int(min_num / leng))):
self.audiopaths_sid_text += tmp
self.max_wav_value = hparams.max_wav_value
self.sampling_rate = hparams.sampling_rate
self.filter_length = hparams.filter_length
self.hop_length = hparams.hop_length
self.win_length = hparams.win_length
self.sampling_rate = hparams.sampling_rate
self.val = val
random.seed(1234)
random.shuffle(self.audiopaths_sid_text)
print("phoneme_data_len:", len(self.phoneme_data.keys()))
print("wav_data_len:", len(self.audiopaths_sid_text))
audiopaths_sid_text_new = []
lengths = []
skipped_phone = 0
skipped_dur = 0
for audiopath in tqdm(self.audiopaths_sid_text):
try:
phoneme = self.phoneme_data[audiopath][0]
phoneme = phoneme.split(" ")
phoneme_ids = cleaned_text_to_sequence(phoneme, version)
except Exception:
print(f"{audiopath} not in self.phoneme_data !")
skipped_phone += 1
continue
size = os.path.getsize("%s/%s" % (self.path5, audiopath))
duration = size / self.sampling_rate / 2
if duration == 0:
print(f"Zero duration for {audiopath}, skipping...")
skipped_dur += 1
continue
if 54 > duration > 0.6 or self.val:
audiopaths_sid_text_new.append([audiopath, phoneme_ids])
lengths.append(size // (2 * self.hop_length))
else:
skipped_dur += 1
continue
print("skipped_phone: ", skipped_phone, ", skipped_dur: ", skipped_dur)
print("total left: ", len(audiopaths_sid_text_new))
assert len(audiopaths_sid_text_new) > 1 # must have at least enough samples for one batch (TODO)
self.audiopaths_sid_text = audiopaths_sid_text_new
self.lengths = lengths
self.spec_min = -12
self.spec_max = 2
self.filter_length_mel = self.win_length_mel = 1024
self.hop_length_mel = 256
self.n_mel_channels = 100
self.sampling_rate_mel = 24000
self.mel_fmin = 0
self.mel_fmax = None
def norm_spec(self, x):
return (x - self.spec_min) / (self.spec_max - self.spec_min) * 2 - 1
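# Hedged worked example (editor-added): with spec_min=-12 and spec_max=2 this
# maps -12 -> -1, 2 -> 1, and e.g. -5 -> (7 / 14) * 2 - 1 = 0.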
def get_audio_text_speaker_pair(self, audiopath_sid_text):
audiopath, phoneme_ids = audiopath_sid_text
text = torch.FloatTensor(phoneme_ids)
try:
spec, mel = self.get_audio("%s/%s" % (self.path5, audiopath))
with torch.no_grad():
ssl = torch.load("%s/%s.pt" % (self.path4, audiopath), map_location="cpu")
if ssl.shape[-1] != spec.shape[-1]:
typee = ssl.dtype
ssl = F.pad(ssl.float(), (0, 1), mode="replicate").to(typee)
ssl.requires_grad = False
except Exception:
traceback.print_exc()
mel = torch.zeros(100, 180)
# wav = torch.zeros(1, 96 * self.hop_length)
spec = torch.zeros(1025, 96)
ssl = torch.zeros(1, 768, 96)
text = text[-1:]
print("load audio or ssl error!!!!!!", audiopath)
return (ssl, spec, mel, text)
def get_audio(self, filename):
audio_array = load_audio(filename, self.sampling_rate) # load_audio already normalizes to [-1, 1]; no further /32768 needed
audio = torch.FloatTensor(audio_array) # /32768
audio_norm = audio
audio_norm = audio_norm.unsqueeze(0)
audio_array24 = load_audio(
filename, 24000
) # load_audio already normalizes to [-1, 1]; resampling here could be GPU-accelerated
audio24 = torch.FloatTensor(audio_array24) # /32768
audio_norm24 = audio24
audio_norm24 = audio_norm24.unsqueeze(0)
spec = spectrogram_torch(
audio_norm, self.filter_length, self.sampling_rate, self.hop_length, self.win_length, center=False
)
spec = torch.squeeze(spec, 0)
spec1 = spectrogram_torch(
audio_norm24,
self.filter_length_mel,
self.sampling_rate_mel,
self.hop_length_mel,
self.win_length_mel,
center=False,
)
mel = spec_to_mel_torch(
spec1, self.filter_length_mel, self.n_mel_channels, self.sampling_rate_mel, self.mel_fmin, self.mel_fmax
)
mel = torch.squeeze(mel, 0)
mel = self.norm_spec(mel)
# print(1111111,spec.shape,mel.shape)
return spec, mel
def get_sid(self, sid):
sid = torch.LongTensor([int(sid)])
return sid
def __getitem__(self, index):
# with torch.no_grad():
return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])
def __len__(self):
return len(self.audiopaths_sid_text)
class TextAudioSpeakerCollateV3:
def __init__(self, return_ids=False):
self.return_ids = return_ids
def __call__(self, batch):
# ssl, spec, wav,mel, text
# Right zero-pad all one-hot text sequences to max input length
_, ids_sorted_decreasing = torch.sort(torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True)
# (ssl, spec,mel, text)
max_ssl_len = max([x[0].size(2) for x in batch])
max_ssl_len1 = int(8 * ((max_ssl_len // 8) + 1))
max_ssl_len = int(2 * ((max_ssl_len // 2) + 1))
# max_ssl_len = int(8 * ((max_ssl_len // 8) + 1))
# max_ssl_len1=max_ssl_len
max_spec_len = max([x[1].size(1) for x in batch])
max_spec_len = int(2 * ((max_spec_len // 2) + 1))
# max_wav_len = max([x[2].size(1) for x in batch])
max_text_len = max([x[3].size(0) for x in batch])
max_mel_len = int(max_ssl_len1 * 1.25 * 1.5) # ssl frames run at 32000/640 = 16000/320 = 50 Hz; mel frames at 24000/256 = 93.75 Hz, i.e. 1.25 * 1.5 times more
ssl_lengths = torch.LongTensor(len(batch))
spec_lengths = torch.LongTensor(len(batch))
text_lengths = torch.LongTensor(len(batch))
# wav_lengths = torch.LongTensor(len(batch))
mel_lengths = torch.LongTensor(len(batch))
spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
mel_padded = torch.FloatTensor(len(batch), batch[0][2].size(0), max_mel_len)
ssl_padded = torch.FloatTensor(len(batch), batch[0][0].size(1), max_ssl_len)
text_padded = torch.LongTensor(len(batch), max_text_len)
# wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
spec_padded.zero_()
mel_padded.zero_()
ssl_padded.zero_()
text_padded.zero_()
# wav_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
row = batch[ids_sorted_decreasing[i]]
# ssl, spec, wav,mel, text
ssl = row[0]
ssl_padded[i, :, : ssl.size(2)] = ssl[0, :, :]
ssl_lengths[i] = ssl.size(2)
spec = row[1]
spec_padded[i, :, : spec.size(1)] = spec
spec_lengths[i] = spec.size(1)
# wav = row[2]
# wav_padded[i, :, :wav.size(1)] = wav
# wav_lengths[i] = wav.size(1)
mel = row[2]
mel_padded[i, :, : mel.size(1)] = mel
mel_lengths[i] = mel.size(1)
text = row[3]
text_padded[i, : text.size(0)] = text
text_lengths[i] = text.size(0)
# return ssl_padded, spec_padded,mel_padded, ssl_lengths, spec_lengths, text_padded, text_lengths, wav_padded, wav_lengths,mel_lengths
return ssl_padded, spec_padded, mel_padded, ssl_lengths, spec_lengths, text_padded, text_lengths, mel_lengths
class TextAudioSpeakerLoaderV4(torch.utils.data.Dataset):
def __init__(self, hparams, val=False):
exp_dir = hparams.exp_dir
self.path2 = "%s/2-name2text.txt" % exp_dir
self.path4 = "%s/4-cnhubert" % exp_dir
self.path5 = "%s/5-wav32k" % exp_dir
assert os.path.exists(self.path2)
assert os.path.exists(self.path4)
assert os.path.exists(self.path5)
names4 = set([name[:-3] for name in list(os.listdir(self.path4))]) # strip the .pt suffix
names5 = set(os.listdir(self.path5))
self.phoneme_data = {}
with open(self.path2, "r", encoding="utf8") as f:
lines = f.read().strip("\n").split("\n")
for line in lines:
tmp = line.split("\t")
if len(tmp) != 4:
continue
self.phoneme_data[tmp[0]] = [tmp[1]]
self.audiopaths_sid_text = list(set(self.phoneme_data) & names4 & names5)
tmp = self.audiopaths_sid_text
leng = len(tmp)
min_num = 100
if leng < min_num:
self.audiopaths_sid_text = []
for _ in range(max(2, int(min_num / leng))):
self.audiopaths_sid_text += tmp
self.max_wav_value = hparams.max_wav_value
self.sampling_rate = hparams.sampling_rate
self.filter_length = hparams.filter_length
self.hop_length = hparams.hop_length
self.win_length = hparams.win_length
self.sampling_rate = hparams.sampling_rate
self.val = val
random.seed(1234)
random.shuffle(self.audiopaths_sid_text)
print("phoneme_data_len:", len(self.phoneme_data.keys()))
print("wav_data_len:", len(self.audiopaths_sid_text))
audiopaths_sid_text_new = []
lengths = []
skipped_phone = 0
skipped_dur = 0
for audiopath in tqdm(self.audiopaths_sid_text):
try:
phoneme = self.phoneme_data[audiopath][0]
phoneme = phoneme.split(" ")
phoneme_ids = cleaned_text_to_sequence(phoneme, version)
except Exception:
print(f"{audiopath} not in self.phoneme_data !")
skipped_phone += 1
continue
size = os.path.getsize("%s/%s" % (self.path5, audiopath))
duration = size / self.sampling_rate / 2
if duration == 0:
print(f"Zero duration for {audiopath}, skipping...")
skipped_dur += 1
continue
if 54 > duration > 0.6 or self.val:
audiopaths_sid_text_new.append([audiopath, phoneme_ids])
lengths.append(size // (2 * self.hop_length))
else:
skipped_dur += 1
continue
print("skipped_phone: ", skipped_phone, ", skipped_dur: ", skipped_dur)
print("total left: ", len(audiopaths_sid_text_new))
assert len(audiopaths_sid_text_new) > 1 # must have at least enough samples for one batch (TODO)
self.audiopaths_sid_text = audiopaths_sid_text_new
self.lengths = lengths
self.spec_min = -12
self.spec_max = 2
self.filter_length_mel = self.win_length_mel = 1280
self.hop_length_mel = 320
self.n_mel_channels = 100
self.sampling_rate_mel = 32000
self.mel_fmin = 0
self.mel_fmax = None
def norm_spec(self, x):
return (x - self.spec_min) / (self.spec_max - self.spec_min) * 2 - 1
def get_audio_text_speaker_pair(self, audiopath_sid_text):
audiopath, phoneme_ids = audiopath_sid_text
text = torch.FloatTensor(phoneme_ids)
try:
spec, mel = self.get_audio("%s/%s" % (self.path5, audiopath))
with torch.no_grad():
ssl = torch.load("%s/%s.pt" % (self.path4, audiopath), map_location="cpu")
if ssl.shape[-1] != spec.shape[-1]:
typee = ssl.dtype
ssl = F.pad(ssl.float(), (0, 1), mode="replicate").to(typee)
ssl.requires_grad = False
except Exception:
traceback.print_exc()
mel = torch.zeros(100, 192)
# wav = torch.zeros(1, 96 * self.hop_length)
spec = torch.zeros(1025, 96)
ssl = torch.zeros(1, 768, 96)
text = text[-1:]
print("load audio or ssl error!!!!!!", audiopath)
return (ssl, spec, mel, text)
def get_audio(self, filename):
audio_array = load_audio(filename, self.sampling_rate) # load_audio already normalizes to [-1, 1]; no further /32768 needed
audio = torch.FloatTensor(audio_array) # /32768
audio_norm = audio
audio_norm = audio_norm.unsqueeze(0)
spec = spectrogram_torch(
audio_norm, self.filter_length, self.sampling_rate, self.hop_length, self.win_length, center=False
)
spec = torch.squeeze(spec, 0)
spec1 = spectrogram_torch(audio_norm, 1280, 32000, 320, 1280, center=False)
mel = spec_to_mel_torch(spec1, 1280, 100, 32000, 0, None)
mel = self.norm_spec(torch.squeeze(mel, 0))
return spec, mel
def get_sid(self, sid):
sid = torch.LongTensor([int(sid)])
return sid
def __getitem__(self, index):
# with torch.no_grad():
return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])
def __len__(self):
return len(self.audiopaths_sid_text)
class TextAudioSpeakerCollateV4:
def __init__(self, return_ids=False):
self.return_ids = return_ids
def __call__(self, batch):
# ssl, spec, wav,mel, text
# Right zero-pad all one-hot text sequences to max input length
_, ids_sorted_decreasing = torch.sort(torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True)
# (ssl, spec,mel, text)
max_ssl_len = max([x[0].size(2) for x in batch])
max_ssl_len = int(2 * ((max_ssl_len // 2) + 1))
max_spec_len = max([x[1].size(1) for x in batch])
max_spec_len = int(2 * ((max_spec_len // 2) + 1))
# max_wav_len = max([x[2].size(1) for x in batch])
max_text_len = max([x[3].size(0) for x in batch])
ssl_lengths = torch.LongTensor(len(batch))
spec_lengths = torch.LongTensor(len(batch))
text_lengths = torch.LongTensor(len(batch))
# wav_lengths = torch.LongTensor(len(batch))
mel_lengths = torch.LongTensor(len(batch))
spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
mel_padded = torch.FloatTensor(len(batch), batch[0][2].size(0), max_spec_len * 2)
ssl_padded = torch.FloatTensor(len(batch), batch[0][0].size(1), max_ssl_len)
text_padded = torch.LongTensor(len(batch), max_text_len)
# wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
spec_padded.zero_()
mel_padded.zero_()
ssl_padded.zero_()
text_padded.zero_()
# wav_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
row = batch[ids_sorted_decreasing[i]]
# ssl, spec, wav,mel, text
ssl = row[0]
ssl_padded[i, :, : ssl.size(2)] = ssl[0, :, :]
ssl_lengths[i] = ssl.size(2)
spec = row[1]
spec_padded[i, :, : spec.size(1)] = spec
spec_lengths[i] = spec.size(1)
# wav = row[2]
# wav_padded[i, :, :wav.size(1)] = wav
# wav_lengths[i] = wav.size(1)
mel = row[2]
mel_padded[i, :, : mel.size(1)] = mel
mel_lengths[i] = mel.size(1)
text = row[3]
text_padded[i, : text.size(0)] = text
text_lengths[i] = text.size(0)
# return ssl_padded, spec_padded,mel_padded, ssl_lengths, spec_lengths, text_padded, text_lengths, wav_padded, wav_lengths,mel_lengths
return ssl_padded, spec_padded, mel_padded, ssl_lengths, spec_lengths, text_padded, text_lengths, mel_lengths
class TextAudioSpeakerLoaderV3b(torch.utils.data.Dataset):
def __init__(self, hparams, val=False):
exp_dir = hparams.exp_dir
self.path2 = "%s/2-name2text.txt" % exp_dir
self.path4 = "%s/4-cnhubert" % exp_dir
self.path5 = "%s/5-wav32k" % exp_dir
assert os.path.exists(self.path2)
assert os.path.exists(self.path4)
assert os.path.exists(self.path5)
names4 = set([name[:-3] for name in list(os.listdir(self.path4))]) # strip the .pt suffix
names5 = set(os.listdir(self.path5))
self.phoneme_data = {}
with open(self.path2, "r", encoding="utf8") as f:
lines = f.read().strip("\n").split("\n")
for line in lines:
tmp = line.split("\t")
if len(tmp) != 4:
continue
self.phoneme_data[tmp[0]] = [tmp[1]]
self.audiopaths_sid_text = list(set(self.phoneme_data) & names4 & names5)
tmp = self.audiopaths_sid_text
leng = len(tmp)
min_num = 100
if leng < min_num:
self.audiopaths_sid_text = []
for _ in range(max(2, int(min_num / leng))):
self.audiopaths_sid_text += tmp
self.max_wav_value = hparams.max_wav_value
self.sampling_rate = hparams.sampling_rate
self.filter_length = hparams.filter_length
self.hop_length = hparams.hop_length
self.win_length = hparams.win_length
self.sampling_rate = hparams.sampling_rate
self.val = val
random.seed(1234)
random.shuffle(self.audiopaths_sid_text)
print("phoneme_data_len:", len(self.phoneme_data.keys()))
print("wav_data_len:", len(self.audiopaths_sid_text))
audiopaths_sid_text_new = []
lengths = []
skipped_phone = 0
skipped_dur = 0
for audiopath in tqdm(self.audiopaths_sid_text):
try:
phoneme = self.phoneme_data[audiopath][0]
phoneme = phoneme.split(" ")
phoneme_ids = cleaned_text_to_sequence(phoneme, version)
except Exception:
print(f"{audiopath} not in self.phoneme_data !")
skipped_phone += 1
continue
size = os.path.getsize("%s/%s" % (self.path5, audiopath))
duration = size / self.sampling_rate / 2
if duration == 0:
print(f"Zero duration for {audiopath}, skipping...")
skipped_dur += 1
continue
if 54 > duration > 0.6 or self.val:
audiopaths_sid_text_new.append([audiopath, phoneme_ids])
lengths.append(size // (2 * self.hop_length))
else:
skipped_dur += 1
continue
print("skipped_phone: ", skipped_phone, ", skipped_dur: ", skipped_dur)
print("total left: ", len(audiopaths_sid_text_new))
assert len(audiopaths_sid_text_new) > 1 # must have at least enough samples for one batch (TODO)
self.audiopaths_sid_text = audiopaths_sid_text_new
self.lengths = lengths
self.spec_min = -12
self.spec_max = 2
self.filter_length_mel = self.win_length_mel = 1024
self.hop_length_mel = 256
self.n_mel_channels = 100
self.sampling_rate_mel = 24000
self.mel_fmin = 0
self.mel_fmax = None
def norm_spec(self, x):
return (x - self.spec_min) / (self.spec_max - self.spec_min) * 2 - 1
def get_audio_text_speaker_pair(self, audiopath_sid_text):
audiopath, phoneme_ids = audiopath_sid_text
text = torch.FloatTensor(phoneme_ids)
try:
spec, mel, wav = self.get_audio("%s/%s" % (self.path5, audiopath))
with torch.no_grad():
ssl = torch.load("%s/%s.pt" % (self.path4, audiopath), map_location="cpu")
if ssl.shape[-1] != spec.shape[-1]:
typee = ssl.dtype
ssl = F.pad(ssl.float(), (0, 1), mode="replicate").to(typee)
ssl.requires_grad = False
except Exception:
traceback.print_exc()
mel = torch.zeros(100, 180)
wav = torch.zeros(1, 96 * self.hop_length)
spec = torch.zeros(1025, 96)
ssl = torch.zeros(1, 768, 96)
text = text[-1:]
print("load audio or ssl error!!!!!!", audiopath)
return (ssl, spec, wav, mel, text)
def get_audio(self, filename):
audio_array = load_audio(filename, self.sampling_rate) # load_audio already normalizes to [-1, 1]; no further /32768 needed
audio = torch.FloatTensor(audio_array) # /32768
audio_norm = audio
audio_norm = audio_norm.unsqueeze(0)
audio_array24 = load_audio(
filename, 24000
) # load_audio already normalizes to [-1, 1]; resampling here could be GPU-accelerated
audio24 = torch.FloatTensor(audio_array24) # /32768
audio_norm24 = audio24
audio_norm24 = audio_norm24.unsqueeze(0)
spec = spectrogram_torch(
audio_norm, self.filter_length, self.sampling_rate, self.hop_length, self.win_length, center=False
)
spec = torch.squeeze(spec, 0)
spec1 = spectrogram_torch(
audio_norm24,
self.filter_length_mel,
self.sampling_rate_mel,
self.hop_length_mel,
self.win_length_mel,
center=False,
)
mel = spec_to_mel_torch(
spec1, self.filter_length_mel, self.n_mel_channels, self.sampling_rate_mel, self.mel_fmin, self.mel_fmax
)
mel = torch.squeeze(mel, 0)
mel = self.norm_spec(mel)
# print(1111111,spec.shape,mel.shape)
return spec, mel, audio_norm
def get_sid(self, sid):
sid = torch.LongTensor([int(sid)])
return sid
def __getitem__(self, index):
# with torch.no_grad():
return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])
def __len__(self):
return len(self.audiopaths_sid_text)
class TextAudioSpeakerCollateV3b:
def __init__(self, return_ids=False):
self.return_ids = return_ids
def __call__(self, batch):
# ssl, spec, wav,mel, text
# Right zero-pad all one-hot text sequences to max input length
_, ids_sorted_decreasing = torch.sort(torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True)
# (ssl, spec,mel, text)
max_ssl_len = max([x[0].size(2) for x in batch])
max_ssl_len1 = int(8 * ((max_ssl_len // 8) + 1))
max_ssl_len = int(2 * ((max_ssl_len // 2) + 1))
# max_ssl_len = int(8 * ((max_ssl_len // 8) + 1))
# max_ssl_len1=max_ssl_len
max_spec_len = max([x[1].size(1) for x in batch])
max_spec_len = int(2 * ((max_spec_len // 2) + 1))
max_wav_len = max([x[2].size(1) for x in batch])
max_text_len = max([x[4].size(0) for x in batch])
max_mel_len = int(max_ssl_len1 * 1.25 * 1.5) # ssl frames run at 32000/640 = 16000/320 = 50 Hz; mel frames at 24000/256 = 93.75 Hz, i.e. 1.25 * 1.5 times more
ssl_lengths = torch.LongTensor(len(batch))
spec_lengths = torch.LongTensor(len(batch))
text_lengths = torch.LongTensor(len(batch))
wav_lengths = torch.LongTensor(len(batch))
mel_lengths = torch.LongTensor(len(batch))
spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
mel_padded = torch.FloatTensor(len(batch), batch[0][3].size(0), max_mel_len)
ssl_padded = torch.FloatTensor(len(batch), batch[0][0].size(1), max_ssl_len)
text_padded = torch.LongTensor(len(batch), max_text_len)
wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
spec_padded.zero_()
mel_padded.zero_()
ssl_padded.zero_()
text_padded.zero_()
wav_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
row = batch[ids_sorted_decreasing[i]]
# ssl, spec, wav,mel, text
ssl = row[0]
ssl_padded[i, :, : ssl.size(2)] = ssl[0, :, :]
ssl_lengths[i] = ssl.size(2)
spec = row[1]
spec_padded[i, :, : spec.size(1)] = spec
spec_lengths[i] = spec.size(1)
wav = row[2]
wav_padded[i, :, : wav.size(1)] = wav
wav_lengths[i] = wav.size(1)
mel = row[3]
mel_padded[i, :, : mel.size(1)] = mel
mel_lengths[i] = mel.size(1)
text = row[4]
text_padded[i, : text.size(0)] = text
text_lengths[i] = text.size(0)
return (
ssl_padded,
spec_padded,
mel_padded,
ssl_lengths,
spec_lengths,
text_padded,
text_lengths,
wav_padded,
wav_lengths,
mel_lengths,
)
# return ssl_padded, spec_padded,mel_padded, ssl_lengths, spec_lengths, text_padded, text_lengths,mel_lengths
class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True):
super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
self.lengths = dataset.lengths
self.batch_size = batch_size
self.boundaries = boundaries
self.buckets, self.num_samples_per_bucket = self._create_buckets()
self.total_size = sum(self.num_samples_per_bucket)
self.num_samples = self.total_size // self.num_replicas
def _create_buckets(self):
buckets = [[] for _ in range(len(self.boundaries) - 1)]
for i in range(len(self.lengths)):
length = self.lengths[i]
idx_bucket = self._bisect(length)
if idx_bucket != -1:
buckets[idx_bucket].append(i)
i = len(buckets) - 1
while i >= 0:
if len(buckets[i]) == 0:
buckets.pop(i)
self.boundaries.pop(i + 1)
i -= 1
num_samples_per_bucket = []
for i in range(len(buckets)):
len_bucket = len(buckets[i])
total_batch_size = self.num_replicas * self.batch_size
rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size
num_samples_per_bucket.append(len_bucket + rem)
return buckets, num_samples_per_bucket
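# Hedged worked example (editor-added, illustrative numbers): the padding above
# ensures every bucket splits evenly across replicas and batches, e.g. with
# len_bucket=10, num_replicas=2, batch_size=4 -> total_batch_size=8,
# rem = (8 - 10 % 8) % 8 = 6, so the bucket is padded to 16 samples.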
def __iter__(self):
g = torch.Generator()
g.manual_seed(self.epoch)
indices = []
if self.shuffle:
for bucket in self.buckets:
indices.append(torch.randperm(len(bucket), generator=g).tolist())
else:
for bucket in self.buckets:
indices.append(list(range(len(bucket))))
batches = []
for i in range(len(self.buckets)):
bucket = self.buckets[i]
len_bucket = len(bucket)
ids_bucket = indices[i]
num_samples_bucket = self.num_samples_per_bucket[i]
rem = num_samples_bucket - len_bucket
ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[: (rem % len_bucket)]
ids_bucket = ids_bucket[self.rank :: self.num_replicas]
for j in range(len(ids_bucket) // self.batch_size):
batch = [bucket[idx] for idx in ids_bucket[j * self.batch_size : (j + 1) * self.batch_size]]
batches.append(batch)
if self.shuffle:
batch_ids = torch.randperm(len(batches), generator=g).tolist()
batches = [batches[i] for i in batch_ids]
self.batches = batches
assert len(self.batches) * self.batch_size == self.num_samples
return iter(self.batches)
def _bisect(self, x, lo=0, hi=None):
if hi is None:
hi = len(self.boundaries) - 1
if hi > lo:
mid = (hi + lo) // 2
if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:
return mid
elif x <= self.boundaries[mid]:
return self._bisect(x, lo, mid)
else:
return self._bisect(x, mid + 1, hi)
else:
return -1
def __len__(self):
return self.num_samples // self.batch_size | --- +++ @@ -15,6 +15,11 @@
# ZeroDivisionError fixed by Tybost (https://github.com/RVC-Boss/GPT-SoVITS/issues/79)
class TextAudioSpeakerLoader(torch.utils.data.Dataset):
+ """
+ 1) loads audio, speaker_id, text pairs
+ 2) normalizes text and converts them to sequences of integers
+ 3) computes spectrograms from audio files.
+ """
def __init__(self, hparams, version=None, val=False):
exp_dir = hparams.exp_dir
@@ -185,12 +190,18 @@
class TextAudioSpeakerCollate:
+ """Zero-pads model inputs and targets"""
def __init__(self, return_ids=False, version=None):
self.return_ids = return_ids
self.is_v2Pro = version in {"v2Pro", "v2ProPlus"}
def __call__(self, batch):
+ """Collate's training batch from normalized text, audio and speaker identities
+ PARAMS
+ ------
+ batch: [text_normalized, spec_normalized, wav_normalized, sid]
+ """
# Right zero-pad all one-hot text sequences to max input length
_, ids_sorted_decreasing = torch.sort(torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True)
@@ -266,6 +277,11 @@
class TextAudioSpeakerLoaderV3(torch.utils.data.Dataset):
+ """
+ 1) loads audio, speaker_id, text pairs
+ 2) normalizes text and converts them to sequences of integers
+ 3) computes spectrograms from audio files.
+ """
def __init__(self, hparams, val=False):
exp_dir = hparams.exp_dir
@@ -423,11 +439,17 @@
class TextAudioSpeakerCollateV3:
+ """Zero-pads model inputs and targets"""
def __init__(self, return_ids=False):
self.return_ids = return_ids
def __call__(self, batch):
+ """Collate's training batch from normalized text, audio and speaker identities
+ PARAMS
+ ------
+ batch: [text_normalized, spec_normalized, wav_normalized, sid]
+ """
# ssl, spec, wav,mel, text
# Right zero-pad all one-hot text sequences to max input length
_, ids_sorted_decreasing = torch.sort(torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True)
@@ -493,6 +515,11 @@
class TextAudioSpeakerLoaderV4(torch.utils.data.Dataset):
+ """
+ 1) loads audio, speaker_id, text pairs
+ 2) normalizes text and converts them to sequences of integers
+ 3) computes spectrograms from audio files.
+ """
def __init__(self, hparams, val=False):
exp_dir = hparams.exp_dir
@@ -631,11 +658,17 @@
class TextAudioSpeakerCollateV4:
+ """Zero-pads model inputs and targets"""
def __init__(self, return_ids=False):
self.return_ids = return_ids
def __call__(self, batch):
+ """Collate's training batch from normalized text, audio and speaker identities
+ PARAMS
+ ------
+ batch: [text_normalized, spec_normalized, wav_normalized, sid]
+ """
# ssl, spec, wav,mel, text
# Right zero-pad all one-hot text sequences to max input length
_, ids_sorted_decreasing = torch.sort(torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True)
@@ -693,6 +726,11 @@
class TextAudioSpeakerLoaderV3b(torch.utils.data.Dataset):
+ """
+ 1) loads audio, speaker_id, text pairs
+ 2) normalizes text and converts them to sequences of integers
+ 3) computes spectrograms from audio files.
+ """
def __init__(self, hparams, val=False):
exp_dir = hparams.exp_dir
@@ -850,11 +888,17 @@
class TextAudioSpeakerCollateV3b:
+ """Zero-pads model inputs and targets"""
def __init__(self, return_ids=False):
self.return_ids = return_ids
def __call__(self, batch):
+ """Collate's training batch from normalized text, audio and speaker identities
+ PARAMS
+ ------
+ batch: [text_normalized, spec_normalized, wav_normalized, sid]
+ """
# ssl, spec, wav,mel, text
# Right zero-pad all one-hot text sequences to max input length
_, ids_sorted_decreasing = torch.sort(torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True)
@@ -930,6 +974,14 @@
class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
+ """
+ Maintain similar input lengths in a batch.
+ Length groups are specified by boundaries.
+ Ex) boundaries = [b1, b2, b3] -> any batch is included in either {x | b1 < length(x) <= b2} or {x | b2 < length(x) <= b3}.
+
+ It removes samples which are not included in the boundaries.
+ Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.
+ """
def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True):
super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
@@ -1016,4 +1068,4 @@ return -1
def __len__(self):
- return self.num_samples // self.batch_size
+ return self.num_samples // self.batch_size
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/module/data_utils.py |
Create docstrings for all classes and functions | import math
import logging
import torch
from torch import nn
from torch.nn import functional as F
from module import commons
from typing import Optional
class LayerNorm(nn.Module):
def __init__(self, channels, eps=1e-5):
super().__init__()
self.channels = channels
self.eps = eps
self.gamma = nn.Parameter(torch.ones(channels))
self.beta = nn.Parameter(torch.zeros(channels))
def forward(self, x):
x = x.transpose(1, -1)
x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
return x.transpose(1, -1)
@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
n_channels_int = n_channels[0]
in_act = input_a + input_b
t_act = torch.tanh(in_act[:, :n_channels_int, :])
s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
acts = t_act * s_act
return acts
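# Hedged shape sketch (editor-added): the first n_channels channels pass
# through tanh and the rest through sigmoid, halving the channel dim, e.g.
#   a = torch.randn(1, 8, 10); b = torch.randn(1, 8, 10)
#   out = fused_add_tanh_sigmoid_multiply(a, b, torch.IntTensor([4]))
#   out.shape == (1, 4, 10)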
class Encoder(nn.Module):
def __init__(
self,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size=1,
p_dropout=0.0,
window_size=4,
isflow=True,
**kwargs,
):
super().__init__()
self.hidden_channels = hidden_channels
self.filter_channels = filter_channels
self.n_heads = n_heads
self.n_layers = n_layers
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.window_size = window_size
# if isflow:
# cond_layer = torch.nn.Conv1d(256, 2*hidden_channels*n_layers, 1)
# self.cond_pre = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, 1)
# self.cond_layer = weight_norm(cond_layer, name='weight')
# self.gin_channels = 256
self.cond_layer_idx = self.n_layers
self.spk_emb_linear = nn.Linear(256, self.hidden_channels)
if "gin_channels" in kwargs:
self.gin_channels = kwargs["gin_channels"]
if self.gin_channels != 0:
self.spk_emb_linear = nn.Linear(self.gin_channels, self.hidden_channels)
# vits2 says 3rd block, so idx is 2 by default
self.cond_layer_idx = kwargs["cond_layer_idx"] if "cond_layer_idx" in kwargs else 2
logging.debug("gin_channels: %s, cond_layer_idx: %s", self.gin_channels, self.cond_layer_idx)
assert self.cond_layer_idx < self.n_layers, "cond_layer_idx should be less than n_layers"
self.drop = nn.Dropout(p_dropout)
self.attn_layers = nn.ModuleList()
self.norm_layers_1 = nn.ModuleList()
self.ffn_layers = nn.ModuleList()
self.norm_layers_2 = nn.ModuleList()
for i in range(self.n_layers):
self.attn_layers.append(
MultiHeadAttention(
hidden_channels,
hidden_channels,
n_heads,
p_dropout=p_dropout,
window_size=window_size,
)
)
self.norm_layers_1.append(LayerNorm(hidden_channels))
self.ffn_layers.append(
FFN(
hidden_channels,
hidden_channels,
filter_channels,
kernel_size,
p_dropout=p_dropout,
)
)
self.norm_layers_2.append(LayerNorm(hidden_channels))
# def forward(self, x, x_mask, g=None):
# attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
# x = x * x_mask
# for i in range(self.n_layers):
# if i == self.cond_layer_idx and g is not None:
# g = self.spk_emb_linear(g.transpose(1, 2))
# g = g.transpose(1, 2)
# x = x + g
# x = x * x_mask
# y = self.attn_layers[i](x, x, attn_mask)
# y = self.drop(y)
# x = self.norm_layers_1[i](x + y)
# y = self.ffn_layers[i](x, x_mask)
# y = self.drop(y)
# x = self.norm_layers_2[i](x + y)
# x = x * x_mask
# return x
def forward(self, x, x_mask):
attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
x = x * x_mask
for attn_layers, norm_layers_1, ffn_layers, norm_layers_2 in zip(
self.attn_layers, self.norm_layers_1, self.ffn_layers, self.norm_layers_2
):
y = attn_layers(x, x, attn_mask)
y = self.drop(y)
x = norm_layers_1(x + y)
y = ffn_layers(x, x_mask)
y = self.drop(y)
x = norm_layers_2(x + y)
x = x * x_mask
return x
class MultiHeadAttention(nn.Module):
def __init__(
self,
channels,
out_channels,
n_heads,
p_dropout=0.0,
window_size=None,
heads_share=True,
block_length=None,
proximal_bias=False,
proximal_init=False,
):
super().__init__()
assert channels % n_heads == 0
self.channels = channels
self.out_channels = out_channels
self.n_heads = n_heads
self.p_dropout = p_dropout
self.window_size = window_size
self.heads_share = heads_share
self.block_length = block_length
self.proximal_bias = proximal_bias
self.proximal_init = proximal_init
self.attn = None
self.k_channels = channels // n_heads
self.conv_q = nn.Conv1d(channels, channels, 1)
self.conv_k = nn.Conv1d(channels, channels, 1)
self.conv_v = nn.Conv1d(channels, channels, 1)
self.conv_o = nn.Conv1d(channels, out_channels, 1)
self.drop = nn.Dropout(p_dropout)
if window_size is not None:
n_heads_rel = 1 if heads_share else n_heads
rel_stddev = self.k_channels**-0.5
self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
nn.init.xavier_uniform_(self.conv_q.weight)
nn.init.xavier_uniform_(self.conv_k.weight)
nn.init.xavier_uniform_(self.conv_v.weight)
if proximal_init:
with torch.no_grad():
self.conv_k.weight.copy_(self.conv_q.weight)
self.conv_k.bias.copy_(self.conv_q.bias)
def forward(self, x, c, attn_mask: Optional[torch.Tensor] = None):
q = self.conv_q(x)
k = self.conv_k(c)
v = self.conv_v(c)
# x, self.attn = self.attention(q, k, v, mask=attn_mask)
x, _ = self.attention(q, k, v, mask=attn_mask)
x = self.conv_o(x)
return x
def attention(self, query, key, value, mask: Optional[torch.Tensor] = None):
# reshape [b, d, t] -> [b, n_h, t, d_k]
b, d, t_s, _ = (*key.size(), query.size(2))
query = query.view(b, self.n_heads, self.k_channels, -1).transpose(2, 3)
key = key.view(b, self.n_heads, self.k_channels, -1).transpose(2, 3)
value = value.view(b, self.n_heads, self.k_channels, -1).transpose(2, 3)
scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
if self.window_size is not None:
key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings)
scores_local = self._relative_position_to_absolute_position(rel_logits)
scores = scores + scores_local
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e4)
p_attn = F.softmax(scores, dim=-1)
p_attn = self.drop(p_attn)
output = torch.matmul(p_attn, value)
if self.window_size is not None:
relative_weights = self._absolute_position_to_relative_position(p_attn)
value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
output = output.transpose(2, 3).contiguous().view(b, d, -1)
return output, p_attn
def _matmul_with_relative_values(self, x, y):
ret = torch.matmul(x, y.unsqueeze(0))
return ret
def _matmul_with_relative_keys(self, x, y):
ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
return ret
def _get_relative_embeddings(self, relative_embeddings, length):
max_relative_position = 2 * self.window_size + 1
# Pad first before slice to avoid using cond ops.
pad_l = torch.zeros((1), dtype=torch.int64) + length - (self.window_size + 1)
pad_s = torch.zeros((1), dtype=torch.int64) + (self.window_size + 1) - length
pad_length = torch.max(pad_l, other=torch.zeros((1), dtype=torch.int64))
slice_start_position = torch.max(pad_s, other=torch.zeros((1), dtype=torch.int64))
slice_end_position = slice_start_position + 2 * length - 1
padded_relative_embeddings = F.pad(
relative_embeddings,
commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
)
used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position]
return used_relative_embeddings
def _relative_position_to_absolute_position(self, x):
batch, heads, length, _ = x.size()
# Concat columns of pad to shift from relative to absolute indexing.
x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
# Concat extra elements so to add up to shape (len+1, 2*len-1).
x_flat = x.view([batch, heads, length * 2 * length])
x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))
# Reshape and slice out the padded elements.
x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1 :]
return x_final
def _absolute_position_to_relative_position(self, x):
batch, heads, length, _ = x.size()
# pad along column
x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
# add 0's in the beginning that will skew the elements after reshape
x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
return x_final
def _attention_bias_proximal(self, length):
r = torch.arange(length, dtype=torch.float32)
diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
class FFN(nn.Module):
def __init__(
self,
in_channels,
out_channels,
filter_channels,
kernel_size,
p_dropout=0.0,
activation="",
causal=False,
):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.filter_channels = filter_channels
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.activation = activation
self.causal = causal
# from the surrounding context this is always False
# if causal:
# self.padding = self._causal_padding
# else:
# self.padding = self._same_padding
self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
self.drop = nn.Dropout(p_dropout)
def forward(self, x, x_mask):
x = self.conv_1(self.padding(x * x_mask))
if self.activation == "gelu":
x = x * torch.sigmoid(1.702 * x)
else:
x = torch.relu(x)
x = self.drop(x)
x = self.conv_2(self.padding(x * x_mask))
return x * x_mask
def padding(self, x):
return self._same_padding(x)
def _causal_padding(self, x):
if self.kernel_size == 1:
return x
pad_l = self.kernel_size - 1
pad_r = 0
padding = [[0, 0], [0, 0], [pad_l, pad_r]]
x = F.pad(x, commons.convert_pad_shape(padding))
return x
def _same_padding(self, x):
if self.kernel_size == 1:
return x
pad_l = (self.kernel_size - 1) // 2
pad_r = self.kernel_size // 2
padding = [[0, 0], [0, 0], [pad_l, pad_r]]
x = F.pad(x, commons.convert_pad_shape(padding))
return x
class MRTE(nn.Module):
def __init__(
self,
content_enc_channels=192,
hidden_size=512,
out_channels=192,
kernel_size=5,
n_heads=4,
ge_layer=2,
):
super(MRTE, self).__init__()
self.cross_attention = MultiHeadAttention(hidden_size, hidden_size, n_heads)
self.c_pre = nn.Conv1d(content_enc_channels, hidden_size, 1)
self.text_pre = nn.Conv1d(content_enc_channels, hidden_size, 1)
self.c_post = nn.Conv1d(hidden_size, out_channels, 1)
def forward(self, ssl_enc, ssl_mask, text, text_mask, ge):
attn_mask = text_mask.unsqueeze(2) * ssl_mask.unsqueeze(-1)
ssl_enc = self.c_pre(ssl_enc * ssl_mask)
text_enc = self.text_pre(text * text_mask)
x = self.cross_attention(ssl_enc * ssl_mask, text_enc * text_mask, attn_mask) + ssl_enc + ge
x = self.c_post(x * ssl_mask)
return x | --- +++ @@ -221,10 +221,20 @@ return output, p_attn
def _matmul_with_relative_values(self, x, y):
+ """
+ x: [b, h, l, m]
+ y: [h or 1, m, d]
+ ret: [b, h, l, d]
+ """
ret = torch.matmul(x, y.unsqueeze(0))
return ret
def _matmul_with_relative_keys(self, x, y):
+ """
+ x: [b, h, l, d]
+ y: [h or 1, m, d]
+ ret: [b, h, l, m]
+ """
ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
return ret
@@ -245,6 +255,10 @@ return used_relative_embeddings
def _relative_position_to_absolute_position(self, x):
+ """
+ x: [b, h, l, 2*l-1]
+ ret: [b, h, l, l]
+ """
batch, heads, length, _ = x.size()
# Concat columns of pad to shift from relative to absolute indexing.
x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
@@ -258,6 +272,10 @@ return x_final
def _absolute_position_to_relative_position(self, x):
+ """
+ x: [b, h, l, l]
+ ret: [b, h, l, 2*l-1]
+ """
batch, heads, length, _ = x.size()
# padd along column
x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
@@ -268,6 +286,12 @@ return x_final
def _attention_bias_proximal(self, length):
+ """Bias for self-attention to encourage attention to close positions.
+ Args:
+ length: an integer scalar.
+ Returns:
+ a Tensor with shape [1, 1, length, length]
+ """
r = torch.arange(length, dtype=torch.float32)
diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
@@ -358,4 +382,4 @@ text_enc = self.text_pre(text * text_mask)
x = self.cross_attention(ssl_enc * ssl_mask, text_enc * text_mask, attn_mask) + ssl_enc + ge
x = self.c_post(x * ssl_mask)
- return x
+ return x
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/module/attentions_onnx.py |
Help me document legacy Python code |
import os
import sys
import traceback
from typing import Generator, Union
now_dir = os.getcwd()
sys.path.append(now_dir)
sys.path.append("%s/GPT_SoVITS" % (now_dir))
import argparse
import subprocess
import wave
import signal
import numpy as np
import soundfile as sf
from fastapi import FastAPI, Response
from fastapi.responses import StreamingResponse, JSONResponse
import uvicorn
from io import BytesIO
from tools.i18n.i18n import I18nAuto
from GPT_SoVITS.TTS_infer_pack.TTS import TTS, TTS_Config
from GPT_SoVITS.TTS_infer_pack.text_segmentation_method import get_method_names as get_cut_method_names
from pydantic import BaseModel
import threading
# print(sys.path)
i18n = I18nAuto()
cut_method_names = get_cut_method_names()
parser = argparse.ArgumentParser(description="GPT-SoVITS api")
parser.add_argument("-c", "--tts_config", type=str, default="GPT_SoVITS/configs/tts_infer.yaml", help="tts_infer路径")
parser.add_argument("-a", "--bind_addr", type=str, default="127.0.0.1", help="default: 127.0.0.1")
parser.add_argument("-p", "--port", type=int, default="9880", help="default: 9880")
args = parser.parse_args()
config_path = args.tts_config
# device = args.device
port = args.port
host = args.bind_addr
argv = sys.argv
if config_path in [None, ""]:
config_path = "GPT-SoVITS/configs/tts_infer.yaml"
tts_config = TTS_Config(config_path)
print(tts_config)
tts_pipeline = TTS(tts_config)
APP = FastAPI()
class TTS_Request(BaseModel):
text: str = None
text_lang: str = None
ref_audio_path: str = None
aux_ref_audio_paths: list = None
prompt_lang: str = None
prompt_text: str = ""
top_k: int = 15
top_p: float = 1
temperature: float = 1
text_split_method: str = "cut5"
batch_size: int = 1
batch_threshold: float = 0.75
split_bucket: bool = True
speed_factor: float = 1.0
fragment_interval: float = 0.3
seed: int = -1
media_type: str = "wav"
streaming_mode: Union[bool, int] = False
parallel_infer: bool = True
repetition_penalty: float = 1.35
sample_steps: int = 32
super_sampling: bool = False
overlap_length: int = 2
min_chunk_length: int = 16
def pack_ogg(io_buffer: BytesIO, data: np.ndarray, rate: int):
# Author: AkagawaTsurunaki
# Issue:
# Stack overflow probabilistically occurs
# when the function `sf_writef_short` of `libsndfile_64bit.dll` is called
# using the Python library `soundfile`
# Note:
# This is an issue related to `libsndfile`, not this project itself.
# It happens when you generate a large audio tensor (about 499804 frames in my PC)
# and try to convert it to an ogg file.
# Related:
# https://github.com/RVC-Boss/GPT-SoVITS/issues/1199
# https://github.com/libsndfile/libsndfile/issues/1023
# https://github.com/bastibe/python-soundfile/issues/396
# Suggestion:
# Alternatively, split the whole audio into smaller segments to avoid the stack overflow?
def handle_pack_ogg():
with sf.SoundFile(io_buffer, mode="w", samplerate=rate, channels=1, format="ogg") as audio_file:
audio_file.write(data)
# See: https://docs.python.org/3/library/threading.html
# The stack size of this thread is at least 32768
# If stack overflow error still occurs, just modify the `stack_size`.
# stack_size = n * 4096, where n should be a positive integer.
# Here we chose n = 4096.
stack_size = 4096 * 4096
try:
threading.stack_size(stack_size)
pack_ogg_thread = threading.Thread(target=handle_pack_ogg)
pack_ogg_thread.start()
pack_ogg_thread.join()
except RuntimeError as e:
# If changing the thread stack size is unsupported, a RuntimeError is raised.
print("RuntimeError: {}".format(e))
print("Changing the thread stack size is unsupported.")
except ValueError as e:
# If the specified stack size is invalid, a ValueError is raised and the stack size is unmodified.
print("ValueError: {}".format(e))
print("The specified stack size is invalid.")
return io_buffer
def pack_raw(io_buffer: BytesIO, data: np.ndarray, rate: int):
io_buffer.write(data.tobytes())
return io_buffer
def pack_wav(io_buffer: BytesIO, data: np.ndarray, rate: int):
io_buffer = BytesIO()
sf.write(io_buffer, data, rate, format="wav")
return io_buffer
def pack_aac(io_buffer: BytesIO, data: np.ndarray, rate: int):
process = subprocess.Popen(
[
"ffmpeg",
"-f",
"s16le",  # input: 16-bit signed little-endian PCM
"-ar",
str(rate),  # sample rate
"-ac",
"1",  # mono
"-i",
"pipe:0",  # read input from stdin
"-c:a",
"aac",  # encode audio as AAC
"-b:a",
"192k",  # bitrate
"-vn",  # no video
"-f",
"adts",  # output an ADTS AAC stream
"pipe:1",  # write output to stdout
],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, _ = process.communicate(input=data.tobytes())
io_buffer.write(out)
return io_buffer
def pack_audio(io_buffer: BytesIO, data: np.ndarray, rate: int, media_type: str):
if media_type == "ogg":
io_buffer = pack_ogg(io_buffer, data, rate)
elif media_type == "aac":
io_buffer = pack_aac(io_buffer, data, rate)
elif media_type == "wav":
io_buffer = pack_wav(io_buffer, data, rate)
else:
io_buffer = pack_raw(io_buffer, data, rate)
io_buffer.seek(0)
return io_buffer
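# Hedged usage sketch (editor's example; the helper below is hypothetical and
# not part of the API): pack one second of a 440 Hz int16 sine into a WAV container.
def _example_pack_sine(sr: int = 32000) -> bytes:
    tone = (np.sin(2 * np.pi * 440 * np.arange(sr) / sr) * 32767).astype(np.int16)
    return pack_audio(BytesIO(), tone, sr, "wav").getvalue()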
# from https://huggingface.co/spaces/coqui/voice-chat-with-mistral/blob/main/app.py
def wave_header_chunk(frame_input=b"", channels=1, sample_width=2, sample_rate=32000):
# Create a WAV header and append the given frame input.
# This must be the first chunk of a streaming WAV file;
# later chunks must not include it, or you will hear artifacts at each chunk start.
wav_buf = BytesIO()
with wave.open(wav_buf, "wb") as vfout:
vfout.setnchannels(channels)
vfout.setsampwidth(sample_width)
vfout.setframerate(sample_rate)
vfout.writeframes(frame_input)
wav_buf.seek(0)
return wav_buf.read()
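# Hedged sketch (editor's example; helper name is hypothetical): a streaming
# WAV is the header chunk followed by raw PCM chunks; later chunks must not
# repeat the header, or players click at each chunk boundary.
def _example_streaming_wav(pcm_chunks, sample_rate: int = 32000) -> bytes:
    out = wave_header_chunk(sample_rate=sample_rate)
    for chunk in pcm_chunks:  # each chunk: int16 np.ndarray
        out += chunk.tobytes()
    return out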
def handle_control(command: str):
if command == "restart":
os.execl(sys.executable, sys.executable, *argv)
elif command == "exit":
os.kill(os.getpid(), signal.SIGTERM)
exit(0)
def check_params(req: dict):
text: str = req.get("text", "")
text_lang: str = req.get("text_lang", "")
ref_audio_path: str = req.get("ref_audio_path", "")
streaming_mode: bool = req.get("streaming_mode", False)
media_type: str = req.get("media_type", "wav")
prompt_lang: str = req.get("prompt_lang", "")
text_split_method: str = req.get("text_split_method", "cut5")
if ref_audio_path in [None, ""]:
return JSONResponse(status_code=400, content={"message": "ref_audio_path is required"})
if text in [None, ""]:
return JSONResponse(status_code=400, content={"message": "text is required"})
if text_lang in [None, ""]:
return JSONResponse(status_code=400, content={"message": "text_lang is required"})
elif text_lang.lower() not in tts_config.languages:
return JSONResponse(
status_code=400,
content={"message": f"text_lang: {text_lang} is not supported in version {tts_config.version}"},
)
if prompt_lang in [None, ""]:
return JSONResponse(status_code=400, content={"message": "prompt_lang is required"})
elif prompt_lang.lower() not in tts_config.languages:
return JSONResponse(
status_code=400,
content={"message": f"prompt_lang: {prompt_lang} is not supported in version {tts_config.version}"},
)
if media_type not in ["wav", "raw", "ogg", "aac"]:
return JSONResponse(status_code=400, content={"message": f"media_type: {media_type} is not supported"})
# elif media_type == "ogg" and not streaming_mode:
# return JSONResponse(status_code=400, content={"message": "ogg format is not supported in non-streaming mode"})
if text_split_method not in cut_method_names:
return JSONResponse(
status_code=400, content={"message": f"text_split_method:{text_split_method} is not supported"}
)
return None
async def tts_handle(req: dict):
streaming_mode = req.get("streaming_mode", False)
return_fragment = req.get("return_fragment", False)
media_type = req.get("media_type", "wav")
check_res = check_params(req)
if check_res is not None:
return check_res
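# The streaming_mode value selects one of four delivery strategies
# (an editor's summary of the branch below, not new behavior):
#   0 / False -> non-streaming: synthesize everything, return a single response
#   1 / True  -> fragment mode: legacy behavior, audio returned fragment by fragment
#   2         -> streaming with variable-length chunks
#   3         -> streaming with fixed-length chunks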
if streaming_mode == 0:
streaming_mode = False
return_fragment = False
fixed_length_chunk = False
elif streaming_mode == 1:
streaming_mode = False
return_fragment = True
fixed_length_chunk = False
elif streaming_mode == 2:
streaming_mode = True
return_fragment = False
fixed_length_chunk = False
elif streaming_mode == 3:
streaming_mode = True
return_fragment = False
fixed_length_chunk = True
else:
return JSONResponse(status_code=400, content={"message": f"the value of streaming_mode must be 0, 1, 2, 3(int) or true/false(bool)"})
req["streaming_mode"] = streaming_mode
req["return_fragment"] = return_fragment
req["fixed_length_chunk"] = fixed_length_chunk
print(f"{streaming_mode} {return_fragment} {fixed_length_chunk}")
streaming_mode = streaming_mode or return_fragment
try:
tts_generator = tts_pipeline.run(req)
if streaming_mode:
def streaming_generator(tts_generator: Generator, media_type: str):
is_first_chunk = True
for sr, chunk in tts_generator:
if is_first_chunk and media_type == "wav":
yield wave_header_chunk(sample_rate=sr)
media_type = "raw"
is_first_chunk = False
yield pack_audio(BytesIO(), chunk, sr, media_type).getvalue()
# _media_type = f"audio/{media_type}" if not (streaming_mode and media_type in ["wav", "raw"]) else f"audio/x-{media_type}"
return StreamingResponse(
streaming_generator(
tts_generator,
media_type,
),
media_type=f"audio/{media_type}",
)
else:
sr, audio_data = next(tts_generator)
audio_data = pack_audio(BytesIO(), audio_data, sr, media_type).getvalue()
return Response(audio_data, media_type=f"audio/{media_type}")
except Exception as e:
return JSONResponse(status_code=400, content={"message": "tts failed", "Exception": str(e)})
@APP.get("/control")
async def control(command: str = None):
if command is None:
return JSONResponse(status_code=400, content={"message": "command is required"})
handle_control(command)
@APP.get("/tts")
async def tts_get_endpoint(
text: str = None,
text_lang: str = None,
ref_audio_path: str = None,
aux_ref_audio_paths: list = None,
prompt_lang: str = None,
prompt_text: str = "",
top_k: int = 15,
top_p: float = 1,
temperature: float = 1,
text_split_method: str = "cut5",
batch_size: int = 1,
batch_threshold: float = 0.75,
split_bucket: bool = True,
speed_factor: float = 1.0,
fragment_interval: float = 0.3,
seed: int = -1,
media_type: str = "wav",
parallel_infer: bool = True,
repetition_penalty: float = 1.35,
sample_steps: int = 32,
super_sampling: bool = False,
streaming_mode: Union[bool, int] = False,
overlap_length: int = 2,
min_chunk_length: int = 16,
):
req = {
"text": text,
"text_lang": text_lang.lower(),
"ref_audio_path": ref_audio_path,
"aux_ref_audio_paths": aux_ref_audio_paths,
"prompt_text": prompt_text,
"prompt_lang": prompt_lang.lower(),
"top_k": top_k,
"top_p": top_p,
"temperature": temperature,
"text_split_method": text_split_method,
"batch_size": int(batch_size),
"batch_threshold": float(batch_threshold),
"speed_factor": float(speed_factor),
"split_bucket": split_bucket,
"fragment_interval": fragment_interval,
"seed": seed,
"media_type": media_type,
"streaming_mode": streaming_mode,
"parallel_infer": parallel_infer,
"repetition_penalty": float(repetition_penalty),
"sample_steps": int(sample_steps),
"super_sampling": super_sampling,
"overlap_length": int(overlap_length),
"min_chunk_length": int(min_chunk_length),
}
return await tts_handle(req)
@APP.post("/tts")
async def tts_post_endpoint(request: TTS_Request):
req = request.dict()
return await tts_handle(req)
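# Hedged client sketch (editor's example; assumes the server is reachable at
# 127.0.0.1:9880 and that `requests` is installed — it is not a dependency of
# this file):
#
#   import requests
#   payload = {
#       "text": "hello world",
#       "text_lang": "en",
#       "ref_audio_path": "ref.wav",
#       "prompt_lang": "en",
#       "streaming_mode": 2,
#   }
#   with requests.post("http://127.0.0.1:9880/tts", json=payload, stream=True) as r:
#       with open("out.wav", "wb") as f:
#           for chunk in r.iter_content(chunk_size=4096):
#               f.write(chunk)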
@APP.get("/set_refer_audio")
async def set_refer_audio(refer_audio_path: str = None):
try:
tts_pipeline.set_ref_audio(refer_audio_path)
except Exception as e:
return JSONResponse(status_code=400, content={"message": "set refer audio failed", "Exception": str(e)})
return JSONResponse(status_code=200, content={"message": "success"})
# @APP.post("/set_refer_audio")
# async def set_refer_audio_post(audio_file: UploadFile = File(...)):
# try:
# # Check the content type to make sure the upload is an audio file
# if not audio_file.content_type.startswith("audio/"):
# return JSONResponse(status_code=400, content={"message": "file type is not supported"})
# os.makedirs("uploaded_audio", exist_ok=True)
# save_path = os.path.join("uploaded_audio", audio_file.filename)
# # Save the uploaded audio to a directory on the server
# with open(save_path, "wb") as buffer:
# buffer.write(await audio_file.read())
# tts_pipeline.set_ref_audio(save_path)
# except Exception as e:
# return JSONResponse(status_code=400, content={"message": "set refer audio failed", "Exception": str(e)})
# return JSONResponse(status_code=200, content={"message": "success"})
@APP.get("/set_gpt_weights")
async def set_gpt_weights(weights_path: str = None):
try:
if weights_path in ["", None]:
return JSONResponse(status_code=400, content={"message": "gpt weight path is required"})
tts_pipeline.init_t2s_weights(weights_path)
except Exception as e:
return JSONResponse(status_code=400, content={"message": "change gpt weight failed", "Exception": str(e)})
return JSONResponse(status_code=200, content={"message": "success"})
@APP.get("/set_sovits_weights")
async def set_sovits_weights(weights_path: str = None):
try:
if weights_path in ["", None]:
return JSONResponse(status_code=400, content={"message": "sovits weight path is required"})
tts_pipeline.init_vits_weights(weights_path)
except Exception as e:
return JSONResponse(status_code=400, content={"message": "change sovits weight failed", "Exception": str(e)})
return JSONResponse(status_code=200, content={"message": "success"})
if __name__ == "__main__":
try:
if host == "None": # 在调用时使用 -a None 参数,可以让api监听双栈
host = None
uvicorn.run(app=APP, host=host, port=port, workers=1)
except Exception:
traceback.print_exc()
os.kill(os.getpid(), signal.SIGTERM)
exit(0) | --- +++ @@ -1,3 +1,105 @@+"""
+# WebAPI文档
+
+` python api_v2.py -a 127.0.0.1 -p 9880 -c GPT_SoVITS/configs/tts_infer.yaml `
+
+## Command-line arguments:
+ `-a` - `bind address, default "127.0.0.1"`
+ `-p` - `bind port, default 9880`
+ `-c` - `path to the TTS config file, default "GPT_SoVITS/configs/tts_infer.yaml"`
+
+## Endpoints:
+
+### Inference
+
+endpoint: `/tts`
+GET:
+```
+http://127.0.0.1:9880/tts?text=先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。&text_lang=zh&ref_audio_path=archive_jingyuan_1.wav&prompt_lang=zh&prompt_text=我是「罗浮」云骑将军景元。不必拘谨,「将军」只是一时的身份,你称呼我景元便可&text_split_method=cut5&batch_size=1&media_type=wav&streaming_mode=true
+```
+
+POST:
+```json
+{
+ "text": "", # str.(required) text to be synthesized
+ "text_lang: "", # str.(required) language of the text to be synthesized
+ "ref_audio_path": "", # str.(required) reference audio path
+ "aux_ref_audio_paths": [], # list.(optional) auxiliary reference audio paths for multi-speaker tone fusion
+ "prompt_text": "", # str.(optional) prompt text for the reference audio
+ "prompt_lang": "", # str.(required) language of the prompt text for the reference audio
+ "top_k": 15, # int. top k sampling
+ "top_p": 1, # float. top p sampling
+ "temperature": 1, # float. temperature for sampling
+ "text_split_method": "cut5", # str. text split method, see text_segmentation_method.py for details.
+ "batch_size": 1, # int. batch size for inference
+ "batch_threshold": 0.75, # float. threshold for batch splitting.
+ "split_bucket": True, # bool. whether to split the batch into multiple buckets.
+ "speed_factor":1.0, # float. control the speed of the synthesized audio.
+ "fragment_interval":0.3, # float. to control the interval of the audio fragment.
+ "seed": -1, # int. random seed for reproducibility.
+ "parallel_infer": True, # bool. whether to use parallel inference.
+ "repetition_penalty": 1.35, # float. repetition penalty for T2S model.
+ "sample_steps": 32, # int. number of sampling steps for VITS model V3.
+ "super_sampling": False, # bool. whether to use super-sampling for audio when using VITS model V3.
+ "streaming_mode": False, # bool or int. return audio chunk by chunk.T he available options are: 0,1,2,3 or True/False (0/False: Disabled | 1/True: Best Quality, Slowest response speed (old version streaming_mode) | 2: Medium Quality, Slow response speed | 3: Lower Quality, Faster response speed )
+ "overlap_length": 2, # int. overlap length of semantic tokens for streaming mode.
+ "min_chunk_length": 16, # int. The minimum chunk length of semantic tokens for streaming mode. (affects audio chunk size)
+}
+```
+
+RESP:
+success: returns the wav audio stream directly, HTTP code 200
+failure: returns a json containing the error message, HTTP code 400
+
+### Command control
+
+endpoint: `/control`
+
+command:
+"restart": 重新运行
+"exit": 结束运行
+
+GET:
+```
+http://127.0.0.1:9880/control?command=restart
+```
+POST:
+```json
+{
+ "command": "restart"
+}
+```
+
+RESP: none
+
+
+### Switch GPT model
+
+endpoint: `/set_gpt_weights`
+
+GET:
+```
+http://127.0.0.1:9880/set_gpt_weights?weights_path=GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt
+```
+RESP:
+成功: 返回"success", http code 200
+失败: 返回包含错误信息的 json, http code 400
+
+
+### Switch SoVITS model
+
+endpoint: `/set_sovits_weights`
+
+GET:
+```
+http://127.0.0.1:9880/set_sovits_weights?weights_path=GPT_SoVITS/pretrained_models/s2G488k.pth
+```
+
+RESP:
+成功: 返回"success", http code 200
+失败: 返回包含错误信息的 json, http code 400
+
+"""
import os
import sys
@@ -241,6 +343,39 @@
async def tts_handle(req: dict):
+ """
+ Text to speech handler.
+
+ Args:
+ req (dict):
+ {
+ "text": "", # str.(required) text to be synthesized
+ "text_lang: "", # str.(required) language of the text to be synthesized
+ "ref_audio_path": "", # str.(required) reference audio path
+ "aux_ref_audio_paths": [], # list.(optional) auxiliary reference audio paths for multi-speaker tone fusion
+ "prompt_text": "", # str.(optional) prompt text for the reference audio
+ "prompt_lang": "", # str.(required) language of the prompt text for the reference audio
+ "top_k": 15, # int. top k sampling
+ "top_p": 1, # float. top p sampling
+ "temperature": 1, # float. temperature for sampling
+ "text_split_method": "cut5", # str. text split method, see text_segmentation_method.py for details.
+ "batch_size": 1, # int. batch size for inference
+ "batch_threshold": 0.75, # float. threshold for batch splitting.
+ "split_bucket": True, # bool. whether to split the batch into multiple buckets.
+ "speed_factor":1.0, # float. control the speed of the synthesized audio.
+ "fragment_interval":0.3, # float. to control the interval of the audio fragment.
+ "seed": -1, # int. random seed for reproducibility.
+ "parallel_infer": True, # bool. whether to use parallel inference.
+ "repetition_penalty": 1.35, # float. repetition penalty for T2S model.
+ "sample_steps": 32, # int. number of sampling steps for VITS model V3.
+ "super_sampling": False, # bool. whether to use super-sampling for audio when using VITS model V3.
+ "streaming_mode": False, # bool or int. return audio chunk by chunk.T he available options are: 0,1,2,3 or True/False (0/False: Disabled | 1/True: Best Quality, Slowest response speed (old version streaming_mode) | 2: Medium Quality, Slow response speed | 3: Lower Quality, Faster response speed )
+ "overlap_length": 2, # int. overlap length of semantic tokens for streaming mode.
+ "min_chunk_length": 16, # int. The minimum chunk length of semantic tokens for streaming mode. (affects audio chunk size)
+ }
+ Returns:
+ StreamingResponse: audio stream response.
+ """
streaming_mode = req.get("streaming_mode", False)
return_fragment = req.get("return_fragment", False)
@@ -438,4 +573,4 @@ except Exception:
traceback.print_exc()
os.kill(os.getpid(), signal.SIGTERM)
- exit(0)+ exit(0)
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/api_v2.py |
Generate NumPy-style docstrings | import math
import torch
from torch import nn
from torch.nn import functional as F
from module import commons
from module.modules import LayerNorm
class Encoder(nn.Module):
def __init__(
self,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size=1,
p_dropout=0.0,
window_size=4,
isflow=False,
**kwargs,
):
super().__init__()
self.hidden_channels = hidden_channels
self.filter_channels = filter_channels
self.n_heads = n_heads
self.n_layers = n_layers
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.window_size = window_size
self.drop = nn.Dropout(p_dropout)
self.attn_layers = nn.ModuleList()
self.norm_layers_1 = nn.ModuleList()
self.ffn_layers = nn.ModuleList()
self.norm_layers_2 = nn.ModuleList()
for i in range(self.n_layers):
self.attn_layers.append(
MultiHeadAttention(
hidden_channels,
hidden_channels,
n_heads,
p_dropout=p_dropout,
window_size=window_size,
)
)
self.norm_layers_1.append(LayerNorm(hidden_channels))
self.ffn_layers.append(
FFN(
hidden_channels,
hidden_channels,
filter_channels,
kernel_size,
p_dropout=p_dropout,
)
)
self.norm_layers_2.append(LayerNorm(hidden_channels))
if isflow:
cond_layer = torch.nn.Conv1d(kwargs["gin_channels"], 2 * hidden_channels * n_layers, 1)
self.cond_pre = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, 1)
self.cond_layer = weight_norm_modules(cond_layer, name="weight")
self.gin_channels = kwargs["gin_channels"]
def forward(self, x, x_mask, g=None):
attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
x = x * x_mask
if g is not None:
g = self.cond_layer(g)
for i in range(self.n_layers):
if g is not None:
x = self.cond_pre(x)
cond_offset = i * 2 * self.hidden_channels
g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
x = commons.fused_add_tanh_sigmoid_multiply(x, g_l, torch.IntTensor([self.hidden_channels]))
y = self.attn_layers[i](x, x, attn_mask)
y = self.drop(y)
x = self.norm_layers_1[i](x + y)
y = self.ffn_layers[i](x, x_mask)
y = self.drop(y)
x = self.norm_layers_2[i](x + y)
x = x * x_mask
return x
class Decoder(nn.Module):
def __init__(
self,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size=1,
p_dropout=0.0,
proximal_bias=False,
proximal_init=True,
**kwargs,
):
super().__init__()
self.hidden_channels = hidden_channels
self.filter_channels = filter_channels
self.n_heads = n_heads
self.n_layers = n_layers
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.proximal_bias = proximal_bias
self.proximal_init = proximal_init
self.drop = nn.Dropout(p_dropout)
self.self_attn_layers = nn.ModuleList()
self.norm_layers_0 = nn.ModuleList()
self.encdec_attn_layers = nn.ModuleList()
self.norm_layers_1 = nn.ModuleList()
self.ffn_layers = nn.ModuleList()
self.norm_layers_2 = nn.ModuleList()
for i in range(self.n_layers):
self.self_attn_layers.append(
MultiHeadAttention(
hidden_channels,
hidden_channels,
n_heads,
p_dropout=p_dropout,
proximal_bias=proximal_bias,
proximal_init=proximal_init,
)
)
self.norm_layers_0.append(LayerNorm(hidden_channels))
self.encdec_attn_layers.append(
MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)
)
self.norm_layers_1.append(LayerNorm(hidden_channels))
self.ffn_layers.append(
FFN(
hidden_channels,
hidden_channels,
filter_channels,
kernel_size,
p_dropout=p_dropout,
causal=True,
)
)
self.norm_layers_2.append(LayerNorm(hidden_channels))
def forward(self, x, x_mask, h, h_mask):
self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
x = x * x_mask
for i in range(self.n_layers):
y = self.self_attn_layers[i](x, x, self_attn_mask)
y = self.drop(y)
x = self.norm_layers_0[i](x + y)
y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
y = self.drop(y)
x = self.norm_layers_1[i](x + y)
y = self.ffn_layers[i](x, x_mask)
y = self.drop(y)
x = self.norm_layers_2[i](x + y)
x = x * x_mask
return x
class MultiHeadAttention(nn.Module):
def __init__(
self,
channels,
out_channels,
n_heads,
p_dropout=0.0,
window_size=None,
heads_share=True,
block_length=None,
proximal_bias=False,
proximal_init=False,
):
super().__init__()
assert channels % n_heads == 0
self.channels = channels
self.out_channels = out_channels
self.n_heads = n_heads
self.p_dropout = p_dropout
self.window_size = window_size
self.heads_share = heads_share
self.block_length = block_length
self.proximal_bias = proximal_bias
self.proximal_init = proximal_init
self.attn = None
self.k_channels = channels // n_heads
self.conv_q = nn.Conv1d(channels, channels, 1)
self.conv_k = nn.Conv1d(channels, channels, 1)
self.conv_v = nn.Conv1d(channels, channels, 1)
self.conv_o = nn.Conv1d(channels, out_channels, 1)
self.drop = nn.Dropout(p_dropout)
if window_size is not None:
n_heads_rel = 1 if heads_share else n_heads
rel_stddev = self.k_channels**-0.5
self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
nn.init.xavier_uniform_(self.conv_q.weight)
nn.init.xavier_uniform_(self.conv_k.weight)
nn.init.xavier_uniform_(self.conv_v.weight)
if proximal_init:
with torch.no_grad():
self.conv_k.weight.copy_(self.conv_q.weight)
self.conv_k.bias.copy_(self.conv_q.bias)
def forward(self, x, c, attn_mask=None):
q = self.conv_q(x)
k = self.conv_k(c)
v = self.conv_v(c)
x, self.attn = self.attention(q, k, v, mask=attn_mask)
x = self.conv_o(x)
return x
def attention(self, query, key, value, mask=None):
# reshape [b, d, t] -> [b, n_h, t, d_k]
b, d, t_s, t_t = (*key.size(), query.size(2))
query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
if self.window_size is not None:
assert t_s == t_t, "Relative attention is only available for self-attention."
key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings)
scores_local = self._relative_position_to_absolute_position(rel_logits)
scores = scores + scores_local
if self.proximal_bias:
assert t_s == t_t, "Proximal bias is only available for self-attention."
scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e4)
if self.block_length is not None:
assert t_s == t_t, "Local attention is only available for self-attention."
block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
scores = scores.masked_fill(block_mask == 0, -1e4)
p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
p_attn = self.drop(p_attn)
output = torch.matmul(p_attn, value)
if self.window_size is not None:
relative_weights = self._absolute_position_to_relative_position(p_attn)
value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
return output, p_attn
def _matmul_with_relative_values(self, x, y):
ret = torch.matmul(x, y.unsqueeze(0))
return ret
def _matmul_with_relative_keys(self, x, y):
ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
return ret
def _get_relative_embeddings(self, relative_embeddings, length):
max_relative_position = 2 * self.window_size + 1
# Pad first before slice to avoid using cond ops.
pad_length = max(length - (self.window_size + 1), 0)
slice_start_position = max((self.window_size + 1) - length, 0)
slice_end_position = slice_start_position + 2 * length - 1
if pad_length > 0:
padded_relative_embeddings = F.pad(
relative_embeddings,
commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
)
else:
padded_relative_embeddings = relative_embeddings
used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position]
return used_relative_embeddings
def _relative_position_to_absolute_position(self, x):
batch, heads, length, _ = x.size()
# Concat columns of pad to shift from relative to absolute indexing.
x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
# Concat extra elements so to add up to shape (len+1, 2*len-1).
x_flat = x.view([batch, heads, length * 2 * length])
x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))
# Reshape and slice out the padded elements.
x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1 :]
return x_final
def _absolute_position_to_relative_position(self, x):
batch, heads, length, _ = x.size()
# pad along the column dimension
x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
# add 0's at the beginning that will skew the elements after reshape
x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
return x_final
def _attention_bias_proximal(self, length):
r = torch.arange(length, dtype=torch.float32)
diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
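# Hedged illustration (editor's example; the helper below is hypothetical and
# simply mirrors the method above): the bias decays like -log1p(|i - j|), so
# self-attention is nudged toward nearby positions.
def _example_proximal_bias(length: int = 4) -> torch.Tensor:
    r = torch.arange(length, dtype=torch.float32)
    # row 0 for length 4 is roughly [0.00, -0.69, -1.10, -1.39]
    return -torch.log1p((r.unsqueeze(0) - r.unsqueeze(1)).abs())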
class FFN(nn.Module):
def __init__(
self,
in_channels,
out_channels,
filter_channels,
kernel_size,
p_dropout=0.0,
activation=None,
causal=False,
):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.filter_channels = filter_channels
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.activation = activation
self.causal = causal
if causal:
self.padding = self._causal_padding
else:
self.padding = self._same_padding
self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
self.drop = nn.Dropout(p_dropout)
def forward(self, x, x_mask):
x = self.conv_1(self.padding(x * x_mask))
if self.activation == "gelu":
x = x * torch.sigmoid(1.702 * x)
else:
x = torch.relu(x)
x = self.drop(x)
x = self.conv_2(self.padding(x * x_mask))
return x * x_mask
def _causal_padding(self, x):
if self.kernel_size == 1:
return x
pad_l = self.kernel_size - 1
pad_r = 0
padding = [[0, 0], [0, 0], [pad_l, pad_r]]
x = F.pad(x, commons.convert_pad_shape(padding))
return x
def _same_padding(self, x):
if self.kernel_size == 1:
return x
pad_l = (self.kernel_size - 1) // 2
pad_r = self.kernel_size // 2
padding = [[0, 0], [0, 0], [pad_l, pad_r]]
x = F.pad(x, commons.convert_pad_shape(padding))
return x
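# Hedged usage sketch (editor's example; helper name is hypothetical): with
# kernel_size=1 the padding helpers are no-ops, so this runs with torch alone;
# shapes are illustrative.
def _example_ffn() -> torch.Tensor:
    ffn = FFN(in_channels=192, out_channels=192, filter_channels=768, kernel_size=1)
    x = torch.randn(2, 192, 50)    # [batch, channels, time]
    x_mask = torch.ones(2, 1, 50)  # 1 = valid frame
    return ffn(x, x_mask)          # -> [2, 192, 50]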
import torch.nn as nn
from torch.nn.utils import remove_weight_norm, weight_norm
class Depthwise_Separable_Conv1D(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
bias=True,
padding_mode="zeros", # TODO: refine this type
device=None,
dtype=None,
):
super().__init__()
self.depth_conv = nn.Conv1d(
in_channels=in_channels,
out_channels=in_channels,
kernel_size=kernel_size,
groups=in_channels,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias,
padding_mode=padding_mode,
device=device,
dtype=dtype,
)
self.point_conv = nn.Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
bias=bias,
device=device,
dtype=dtype,
)
def forward(self, input):
return self.point_conv(self.depth_conv(input))
def weight_norm(self):
self.depth_conv = weight_norm(self.depth_conv, name="weight")
self.point_conv = weight_norm(self.point_conv, name="weight")
def remove_weight_norm(self):
self.depth_conv = remove_weight_norm(self.depth_conv, name="weight")
self.point_conv = remove_weight_norm(self.point_conv, name="weight")
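# Hedged sanity check (editor's example; helper name is hypothetical): the
# depthwise + pointwise split uses far fewer weights than a dense Conv1d of
# the same shape, e.g. 8704 vs 41088 parameters for the sizes below.
def _example_param_savings(cin: int = 64, cout: int = 128, k: int = 5):
    sep = Depthwise_Separable_Conv1D(cin, cout, k)
    dense = nn.Conv1d(cin, cout, k)
    count = lambda m: sum(p.numel() for p in m.parameters())
    return count(sep), count(dense)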
class Depthwise_Separable_TransposeConv1D(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
output_padding=0,
bias=True,
dilation=1,
padding_mode="zeros", # TODO: refine this type
device=None,
dtype=None,
):
super().__init__()
self.depth_conv = nn.ConvTranspose1d(
in_channels=in_channels,
out_channels=in_channels,
kernel_size=kernel_size,
groups=in_channels,
stride=stride,
output_padding=output_padding,
padding=padding,
dilation=dilation,
bias=bias,
padding_mode=padding_mode,
device=device,
dtype=dtype,
)
self.point_conv = nn.Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
bias=bias,
device=device,
dtype=dtype,
)
def forward(self, input):
return self.point_conv(self.depth_conv(input))
def weight_norm(self):
self.depth_conv = weight_norm(self.depth_conv, name="weight")
self.point_conv = weight_norm(self.point_conv, name="weight")
def remove_weight_norm(self):
remove_weight_norm(self.depth_conv, name="weight")
remove_weight_norm(self.point_conv, name="weight")
def weight_norm_modules(module, name="weight", dim=0):
if isinstance(module, Depthwise_Separable_Conv1D) or isinstance(module, Depthwise_Separable_TransposeConv1D):
module.weight_norm()
return module
else:
return weight_norm(module, name, dim)
def remove_weight_norm_modules(module, name="weight"):
if isinstance(module, Depthwise_Separable_Conv1D) or isinstance(module, Depthwise_Separable_TransposeConv1D):
module.remove_weight_norm()
else:
remove_weight_norm(module, name)
class FFT(nn.Module):
def __init__(
self,
hidden_channels,
filter_channels,
n_heads,
n_layers=1,
kernel_size=1,
p_dropout=0.0,
proximal_bias=False,
proximal_init=True,
isflow=False,
**kwargs,
):
super().__init__()
self.hidden_channels = hidden_channels
self.filter_channels = filter_channels
self.n_heads = n_heads
self.n_layers = n_layers
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.proximal_bias = proximal_bias
self.proximal_init = proximal_init
if isflow:
cond_layer = torch.nn.Conv1d(kwargs["gin_channels"], 2 * hidden_channels * n_layers, 1)
self.cond_pre = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, 1)
self.cond_layer = weight_norm_modules(cond_layer, name="weight")
self.gin_channels = kwargs["gin_channels"]
self.drop = nn.Dropout(p_dropout)
self.self_attn_layers = nn.ModuleList()
self.norm_layers_0 = nn.ModuleList()
self.ffn_layers = nn.ModuleList()
self.norm_layers_1 = nn.ModuleList()
for i in range(self.n_layers):
self.self_attn_layers.append(
MultiHeadAttention(
hidden_channels,
hidden_channels,
n_heads,
p_dropout=p_dropout,
proximal_bias=proximal_bias,
proximal_init=proximal_init,
)
)
self.norm_layers_0.append(LayerNorm(hidden_channels))
self.ffn_layers.append(
FFN(
hidden_channels,
hidden_channels,
filter_channels,
kernel_size,
p_dropout=p_dropout,
causal=True,
)
)
self.norm_layers_1.append(LayerNorm(hidden_channels))
def forward(self, x, x_mask, g=None):
if g is not None:
g = self.cond_layer(g)
self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
x = x * x_mask
for i in range(self.n_layers):
if g is not None:
x = self.cond_pre(x)
cond_offset = i * 2 * self.hidden_channels
g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
x = commons.fused_add_tanh_sigmoid_multiply(x, g_l, torch.IntTensor([self.hidden_channels]))
y = self.self_attn_layers[i](x, x, self_attn_mask)
y = self.drop(y)
x = self.norm_layers_0[i](x + y)
y = self.ffn_layers[i](x, x_mask)
y = self.drop(y)
x = self.norm_layers_1[i](x + y)
x = x * x_mask
return x
class TransformerCouplingLayer(nn.Module):
def __init__(
self,
channels,
hidden_channels,
kernel_size,
n_layers,
n_heads,
p_dropout=0,
filter_channels=0,
mean_only=False,
wn_sharing_parameter=None,
gin_channels=0,
):
assert channels % 2 == 0, "channels should be divisible by 2"
super().__init__()
self.channels = channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.half_channels = channels // 2
self.mean_only = mean_only
self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
self.enc = (
Encoder(
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size,
p_dropout,
isflow=True,
gin_channels=gin_channels,
)
if wn_sharing_parameter is None
else wn_sharing_parameter
)
self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
self.post.weight.data.zero_()
self.post.bias.data.zero_()
def forward(self, x, x_mask, g=None, reverse=False):
x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
h = self.pre(x0) * x_mask
h = self.enc(h, x_mask, g=g)
stats = self.post(h) * x_mask
if not self.mean_only:
m, logs = torch.split(stats, [self.half_channels] * 2, 1)
else:
m = stats
logs = torch.zeros_like(m)
if not reverse:
x1 = m + x1 * torch.exp(logs) * x_mask
x = torch.cat([x0, x1], 1)
logdet = torch.sum(logs, [1, 2])
return x, logdet
else:
x1 = (x1 - m) * torch.exp(-logs) * x_mask
x = torch.cat([x0, x1], 1)
return x | --- +++ @@ -143,6 +143,10 @@ self.norm_layers_2.append(LayerNorm(hidden_channels))
def forward(self, x, x_mask, h, h_mask):
+ """
+ x: decoder input
+ h: encoder output
+ """
self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
x = x * x_mask
@@ -254,10 +258,20 @@ return output, p_attn
def _matmul_with_relative_values(self, x, y):
+ """
+ x: [b, h, l, m]
+ y: [h or 1, m, d]
+ ret: [b, h, l, d]
+ """
ret = torch.matmul(x, y.unsqueeze(0))
return ret
def _matmul_with_relative_keys(self, x, y):
+ """
+ x: [b, h, l, d]
+ y: [h or 1, m, d]
+ ret: [b, h, l, m]
+ """
ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
return ret
@@ -278,6 +292,10 @@ return used_relative_embeddings
def _relative_position_to_absolute_position(self, x):
+ """
+ x: [b, h, l, 2*l-1]
+ ret: [b, h, l, l]
+ """
batch, heads, length, _ = x.size()
# Concat columns of pad to shift from relative to absolute indexing.
x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
@@ -291,6 +309,10 @@ return x_final
def _absolute_position_to_relative_position(self, x):
+ """
+ x: [b, h, l, l]
+ ret: [b, h, l, 2*l-1]
+ """
batch, heads, length, _ = x.size()
# padd along column
x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
@@ -301,6 +323,12 @@ return x_final
def _attention_bias_proximal(self, length):
+ """Bias for self-attention to encourage attention to close positions.
+ Args:
+ length: an integer scalar.
+ Returns:
+ a Tensor with shape [1, 1, length, length]
+ """
r = torch.arange(length, dtype=torch.float32)
diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
@@ -541,6 +569,10 @@ self.norm_layers_1.append(LayerNorm(hidden_channels))
def forward(self, x, x_mask, g=None):
+ """
+ x: input sequence
+ g: optional conditioning tensor
+ """
if g is not None:
g = self.cond_layer(g)
@@ -624,4 +656,4 @@ else:
x1 = (x1 - m) * torch.exp(-logs) * x_mask
x = torch.cat([x0, x1], 1)
- return x+ return x
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/module/attentions.py |
Write Python docstrings for this snippet | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
import typing as tp
import torch
from torch import nn
from module.core_vq import ResidualVectorQuantization
@dataclass
class QuantizedResult:
quantized: torch.Tensor
codes: torch.Tensor
bandwidth: torch.Tensor # bandwidth in kb/s used, per batch item.
penalty: tp.Optional[torch.Tensor] = None
metrics: dict = field(default_factory=dict)
class ResidualVectorQuantizer(nn.Module):
def __init__(
self,
dimension: int = 256,
n_q: int = 8,
bins: int = 1024,
decay: float = 0.99,
kmeans_init: bool = True,
kmeans_iters: int = 50,
threshold_ema_dead_code: int = 2,
):
super().__init__()
self.n_q = n_q
self.dimension = dimension
self.bins = bins
self.decay = decay
self.kmeans_init = kmeans_init
self.kmeans_iters = kmeans_iters
self.threshold_ema_dead_code = threshold_ema_dead_code
self.vq = ResidualVectorQuantization(
dim=self.dimension,
codebook_size=self.bins,
num_quantizers=self.n_q,
decay=self.decay,
kmeans_init=self.kmeans_init,
kmeans_iters=self.kmeans_iters,
threshold_ema_dead_code=self.threshold_ema_dead_code,
)
def forward(
self,
x: torch.Tensor,
n_q: tp.Optional[int] = None,
layers: tp.Optional[list] = None,
) -> QuantizedResult:
n_q = n_q if n_q else self.n_q
if layers and max(layers) >= n_q:
raise ValueError(
f"Last layer index in layers: A {max(layers)}. Number of quantizers in RVQ: B {self.n_q}. A must less than B."
)
quantized, codes, commit_loss, quantized_list = self.vq(x, n_q=n_q, layers=layers)
return quantized, codes, torch.mean(commit_loss), quantized_list
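# Hedged usage sketch (editor's note; assumes module.core_vq is importable and
# shapes are illustrative):
#   rvq = ResidualVectorQuantizer(dimension=256, n_q=8, bins=1024)
#   x = torch.randn(4, 256, 100)                      # [batch, dim, frames]
#   quantized, codes, commit_loss, _ = rvq(x, n_q=4)  # use first 4 quantizers
#   recon = rvq.decode(rvq.encode(x, n_q=4))          # approximate reconstruction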
def encode(self, x: torch.Tensor, n_q: tp.Optional[int] = None, st: tp.Optional[int] = None) -> torch.Tensor:
n_q = n_q if n_q else self.n_q
st = st or 0
codes = self.vq.encode(x, n_q=n_q, st=st)
return codes
def decode(self, codes: torch.Tensor, st: int = 0) -> torch.Tensor:
quantized = self.vq.decode(codes, st=st)
return quantized | --- +++ @@ -4,6 +4,7 @@ # This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
+"""Residual vector quantizer implementation."""
from dataclasses import dataclass, field
import typing as tp
@@ -24,6 +25,18 @@
class ResidualVectorQuantizer(nn.Module):
+ """Residual Vector Quantizer.
+ Args:
+ dimension (int): Dimension of the codebooks.
+ n_q (int): Number of residual vector quantizers used.
+ bins (int): Codebook size.
+ decay (float): Decay for exponential moving average over the codebooks.
+ kmeans_init (bool): Whether to use kmeans to initialize the codebooks.
+ kmeans_iters (int): Number of iterations used for kmeans initialization.
+ threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes
+ that have an exponential moving average cluster size less than the specified threshold with
+ randomly selected vector from the current batch.
+ """
def __init__(
self,
@@ -59,6 +72,16 @@ n_q: tp.Optional[int] = None,
layers: tp.Optional[list] = None,
) -> QuantizedResult:
+ """Residual vector quantization on the given input tensor.
+ Args:
+ x (torch.Tensor): Input tensor.
+ n_q (int): Number of quantizer used to quantize. Default: All quantizers.
+ layers (list): Layers whose quantized outputs should be returned. Default: None.
+ Returns:
+ QuantizedResult:
+ The quantized (or approximately quantized) representation, along with
+ the codes, the mean commitment loss, and the quantized outputs of the requested layers.
+ """
n_q = n_q if n_q else self.n_q
if layers and max(layers) >= n_q:
raise ValueError(
@@ -68,11 +91,24 @@ return quantized, codes, torch.mean(commit_loss), quantized_list
def encode(self, x: torch.Tensor, n_q: tp.Optional[int] = None, st: tp.Optional[int] = None) -> torch.Tensor:
+ """Encode a given input tensor with the specified sample rate at the given bandwidth.
+ The RVQ encode method sets the appropriate number of quantizer to use
+ and returns indices for each quantizer.
+ Args:
+ x (torch.Tensor): Input tensor.
+ n_q (int): Number of quantizer used to quantize. Default: All quantizers.
+ st (int): Start to encode input from which layers. Default: 0.
+ """
n_q = n_q if n_q else self.n_q
st = st or 0
codes = self.vq.encode(x, n_q=n_q, st=st)
return codes
def decode(self, codes: torch.Tensor, st: int = 0) -> torch.Tensor:
+ """Decode the given codes to the quantized representation.
+ Args:
+ codes (torch.Tensor): Input indices for each quantizer.
+ st (int): Start to decode input codes from which layers. Default: 0.
+ """
quantized = self.vq.decode(codes, st=st)
- return quantized+ return quantized
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/module/quantize.py |
Create structured documentation for my script | from functools import partial
import torch
from torch import nn
from torch.nn import Module, ModuleList
import torch.nn.functional as F
from bs_roformer.attend import Attend
from torch.utils.checkpoint import checkpoint
from typing import Tuple, Optional, Callable
# from beartype.typing import Tuple, Optional, List, Callable
# from beartype import beartype
from rotary_embedding_torch import RotaryEmbedding
from einops import rearrange, pack, unpack, reduce, repeat
from einops.layers.torch import Rearrange
from librosa import filters
# helper functions
def exists(val):
return val is not None
def default(v, d):
return v if exists(v) else d
def pack_one(t, pattern):
return pack([t], pattern)
def unpack_one(t, ps, pattern):
return unpack(t, ps, pattern)[0]
def pad_at_dim(t, pad, dim=-1, value=0.0):
dims_from_right = (-dim - 1) if dim < 0 else (t.ndim - dim - 1)
zeros = (0, 0) * dims_from_right
return F.pad(t, (*zeros, *pad), value=value)
def l2norm(t):
return F.normalize(t, dim=-1, p=2)
# norm
class RMSNorm(Module):
def __init__(self, dim):
super().__init__()
self.scale = dim**0.5
self.gamma = nn.Parameter(torch.ones(dim))
def forward(self, x):
return F.normalize(x, dim=-1) * self.scale * self.gamma
# attention
class FeedForward(Module):
def __init__(self, dim, mult=4, dropout=0.0):
super().__init__()
dim_inner = int(dim * mult)
self.net = nn.Sequential(
RMSNorm(dim),
nn.Linear(dim, dim_inner),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(dim_inner, dim),
nn.Dropout(dropout),
)
def forward(self, x):
return self.net(x)
class Attention(Module):
def __init__(self, dim, heads=8, dim_head=64, dropout=0.0, rotary_embed=None, flash=True):
super().__init__()
self.heads = heads
self.scale = dim_head**-0.5
dim_inner = heads * dim_head
self.rotary_embed = rotary_embed
self.attend = Attend(flash=flash, dropout=dropout)
self.norm = RMSNorm(dim)
self.to_qkv = nn.Linear(dim, dim_inner * 3, bias=False)
self.to_gates = nn.Linear(dim, heads)
self.to_out = nn.Sequential(nn.Linear(dim_inner, dim, bias=False), nn.Dropout(dropout))
def forward(self, x):
x = self.norm(x)
q, k, v = rearrange(self.to_qkv(x), "b n (qkv h d) -> qkv b h n d", qkv=3, h=self.heads)
if exists(self.rotary_embed):
q = self.rotary_embed.rotate_queries_or_keys(q)
k = self.rotary_embed.rotate_queries_or_keys(k)
out = self.attend(q, k, v)
gates = self.to_gates(x)
out = out * rearrange(gates, "b n h -> b h n 1").sigmoid()
out = rearrange(out, "b h n d -> b n (h d)")
return self.to_out(out)
class LinearAttention(Module):
# @beartype
def __init__(self, *, dim, dim_head=32, heads=8, scale=8, flash=False, dropout=0.0):
super().__init__()
dim_inner = dim_head * heads
self.norm = RMSNorm(dim)
self.to_qkv = nn.Sequential(
nn.Linear(dim, dim_inner * 3, bias=False), Rearrange("b n (qkv h d) -> qkv b h d n", qkv=3, h=heads)
)
self.temperature = nn.Parameter(torch.ones(heads, 1, 1))
self.attend = Attend(scale=scale, dropout=dropout, flash=flash)
self.to_out = nn.Sequential(Rearrange("b h d n -> b n (h d)"), nn.Linear(dim_inner, dim, bias=False))
def forward(self, x):
x = self.norm(x)
q, k, v = self.to_qkv(x)
q, k = map(l2norm, (q, k))
q = q * self.temperature.exp()
out = self.attend(q, k, v)
return self.to_out(out)
class Transformer(Module):
def __init__(
self,
*,
dim,
depth,
dim_head=64,
heads=8,
attn_dropout=0.0,
ff_dropout=0.0,
ff_mult=4,
norm_output=True,
rotary_embed=None,
flash_attn=True,
linear_attn=False,
):
super().__init__()
self.layers = ModuleList([])
for _ in range(depth):
if linear_attn:
attn = LinearAttention(dim=dim, dim_head=dim_head, heads=heads, dropout=attn_dropout, flash=flash_attn)
else:
attn = Attention(
dim=dim,
dim_head=dim_head,
heads=heads,
dropout=attn_dropout,
rotary_embed=rotary_embed,
flash=flash_attn,
)
self.layers.append(ModuleList([attn, FeedForward(dim=dim, mult=ff_mult, dropout=ff_dropout)]))
self.norm = RMSNorm(dim) if norm_output else nn.Identity()
def forward(self, x):
for attn, ff in self.layers:
x = attn(x) + x
x = ff(x) + x
return self.norm(x)
# bandsplit module
class BandSplit(Module):
# @beartype
def __init__(self, dim, dim_inputs: Tuple[int, ...]):
super().__init__()
self.dim_inputs = dim_inputs
self.to_features = ModuleList([])
for dim_in in dim_inputs:
net = nn.Sequential(RMSNorm(dim_in), nn.Linear(dim_in, dim))
self.to_features.append(net)
def forward(self, x):
x = x.split(self.dim_inputs, dim=-1)
outs = []
for split_input, to_feature in zip(x, self.to_features):
split_output = to_feature(split_input)
outs.append(split_output)
return torch.stack(outs, dim=-2)
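# Hedged shape sketch (editor's example; band sizes are illustrative):
# BandSplit projects each per-band slice of the flattened frequency axis
# to a shared feature dimension.
def _example_band_split() -> torch.Tensor:
    bs = BandSplit(dim=64, dim_inputs=(10, 20, 30))
    x = torch.randn(2, 100, 60)  # [batch, time, sum(dim_inputs)]
    return bs(x)                 # -> [2, 100, 3, 64], i.e. (b, t, bands, d)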
def MLP(dim_in, dim_out, dim_hidden=None, depth=1, activation=nn.Tanh):
dim_hidden = default(dim_hidden, dim_in)
net = []
dims = (dim_in, *((dim_hidden,) * depth), dim_out)
for ind, (layer_dim_in, layer_dim_out) in enumerate(zip(dims[:-1], dims[1:])):
is_last = ind == (len(dims) - 2)
net.append(nn.Linear(layer_dim_in, layer_dim_out))
if is_last:
continue
net.append(activation())
return nn.Sequential(*net)
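# Hedged structural note (editor's example): MLP(4, 2, depth=2) builds
# Linear(4, 4) -> Tanh -> Linear(4, 4) -> Tanh -> Linear(4, 2); the
# activation is skipped after the final layer.
def _example_mlp() -> nn.Sequential:
    return MLP(4, 2, depth=2)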
class MaskEstimator(Module):
# @beartype
def __init__(self, dim, dim_inputs: Tuple[int, ...], depth, mlp_expansion_factor=4):
super().__init__()
self.dim_inputs = dim_inputs
self.to_freqs = ModuleList([])
dim_hidden = dim * mlp_expansion_factor
for dim_in in dim_inputs:
mlp = nn.Sequential(MLP(dim, dim_in * 2, dim_hidden=dim_hidden, depth=depth), nn.GLU(dim=-1))
self.to_freqs.append(mlp)
def forward(self, x):
x = x.unbind(dim=-2)
outs = []
for band_features, mlp in zip(x, self.to_freqs):
freq_out = mlp(band_features)
outs.append(freq_out)
return torch.cat(outs, dim=-1)
# main class
class MelBandRoformer(Module):
# @beartype
def __init__(
self,
dim,
*,
depth,
stereo=False,
num_stems=1,
time_transformer_depth=2,
freq_transformer_depth=2,
linear_transformer_depth=0,
num_bands=60,
dim_head=64,
heads=8,
attn_dropout=0.1,
ff_dropout=0.1,
flash_attn=True,
dim_freqs_in=1025,
sample_rate=44100, # needed for mel filter bank from librosa
stft_n_fft=2048,
stft_hop_length=512,
# 10ms at 44100Hz, from sections 4.1, 4.4 in the paper - @faroit recommends // 2 or // 4 for better reconstruction
stft_win_length=2048,
stft_normalized=False,
stft_window_fn: Optional[Callable] = None,
mask_estimator_depth=1,
multi_stft_resolution_loss_weight=1.0,
multi_stft_resolutions_window_sizes: Tuple[int, ...] = (4096, 2048, 1024, 512, 256),
multi_stft_hop_size=147,
multi_stft_normalized=False,
multi_stft_window_fn: Callable = torch.hann_window,
match_input_audio_length=False, # if True, pad output tensor to match length of input tensor
mlp_expansion_factor=4,
use_torch_checkpoint=False,
skip_connection=False,
):
super().__init__()
self.stereo = stereo
self.audio_channels = 2 if stereo else 1
self.num_stems = num_stems
self.use_torch_checkpoint = use_torch_checkpoint
self.skip_connection = skip_connection
self.layers = ModuleList([])
transformer_kwargs = dict(
dim=dim,
heads=heads,
dim_head=dim_head,
attn_dropout=attn_dropout,
ff_dropout=ff_dropout,
flash_attn=flash_attn,
)
time_rotary_embed = RotaryEmbedding(dim=dim_head)
freq_rotary_embed = RotaryEmbedding(dim=dim_head)
for _ in range(depth):
tran_modules = []
if linear_transformer_depth > 0:
tran_modules.append(Transformer(depth=linear_transformer_depth, linear_attn=True, **transformer_kwargs))
tran_modules.append(
Transformer(depth=time_transformer_depth, rotary_embed=time_rotary_embed, **transformer_kwargs)
)
tran_modules.append(
Transformer(depth=freq_transformer_depth, rotary_embed=freq_rotary_embed, **transformer_kwargs)
)
self.layers.append(nn.ModuleList(tran_modules))
self.stft_window_fn = partial(default(stft_window_fn, torch.hann_window), stft_win_length)
self.stft_kwargs = dict(
n_fft=stft_n_fft, hop_length=stft_hop_length, win_length=stft_win_length, normalized=stft_normalized
)
freqs = torch.stft(
torch.randn(1, 4096), **self.stft_kwargs, window=torch.ones(stft_n_fft), return_complex=True
).shape[1]
# create mel filter bank
# with librosa.filters.mel as in section 2 of paper
mel_filter_bank_numpy = filters.mel(sr=sample_rate, n_fft=stft_n_fft, n_mels=num_bands)
mel_filter_bank = torch.from_numpy(mel_filter_bank_numpy)
# for some reason, it doesn't include the first freq? just force a value for now
mel_filter_bank[0][0] = 1.0
# In some systems/envs we get 0.0 instead of ~1.9e-18 in the last position,
# so let's force a positive value
mel_filter_bank[-1, -1] = 1.0
# binary as in paper (then estimated masks are averaged for overlapping regions)
freqs_per_band = mel_filter_bank > 0
assert freqs_per_band.any(dim=0).all(), "all frequencies need to be covered by all bands for now"
repeated_freq_indices = repeat(torch.arange(freqs), "f -> b f", b=num_bands)
freq_indices = repeated_freq_indices[freqs_per_band]
if stereo:
freq_indices = repeat(freq_indices, "f -> f s", s=2)
freq_indices = freq_indices * 2 + torch.arange(2)
freq_indices = rearrange(freq_indices, "f s -> (f s)")
self.register_buffer("freq_indices", freq_indices, persistent=False)
self.register_buffer("freqs_per_band", freqs_per_band, persistent=False)
num_freqs_per_band = reduce(freqs_per_band, "b f -> b", "sum")
num_bands_per_freq = reduce(freqs_per_band, "b f -> f", "sum")
self.register_buffer("num_freqs_per_band", num_freqs_per_band, persistent=False)
self.register_buffer("num_bands_per_freq", num_bands_per_freq, persistent=False)
# band split and mask estimator
freqs_per_bands_with_complex = tuple(2 * f * self.audio_channels for f in num_freqs_per_band.tolist())
self.band_split = BandSplit(dim=dim, dim_inputs=freqs_per_bands_with_complex)
self.mask_estimators = nn.ModuleList([])
for _ in range(num_stems):
mask_estimator = MaskEstimator(
dim=dim,
dim_inputs=freqs_per_bands_with_complex,
depth=mask_estimator_depth,
mlp_expansion_factor=mlp_expansion_factor,
)
self.mask_estimators.append(mask_estimator)
# for the multi-resolution stft loss
self.multi_stft_resolution_loss_weight = multi_stft_resolution_loss_weight
self.multi_stft_resolutions_window_sizes = multi_stft_resolutions_window_sizes
self.multi_stft_n_fft = stft_n_fft
self.multi_stft_window_fn = multi_stft_window_fn
self.multi_stft_kwargs = dict(hop_length=multi_stft_hop_size, normalized=multi_stft_normalized)
self.match_input_audio_length = match_input_audio_length
def forward(self, raw_audio, target=None, return_loss_breakdown=False):
device = raw_audio.device
if raw_audio.ndim == 2:
raw_audio = rearrange(raw_audio, "b t -> b 1 t")
batch, channels, raw_audio_length = raw_audio.shape
istft_length = raw_audio_length if self.match_input_audio_length else None
assert (not self.stereo and channels == 1) or (self.stereo and channels == 2), (
"stereo needs to be set to True if passing in audio signal that is stereo (channel dimension of 2). also need to be False if mono (channel dimension of 1)"
)
# to stft
raw_audio, batch_audio_channel_packed_shape = pack_one(raw_audio, "* t")
stft_window = self.stft_window_fn(device=device)
stft_repr = torch.stft(raw_audio, **self.stft_kwargs, window=stft_window, return_complex=True)
stft_repr = torch.view_as_real(stft_repr)
stft_repr = unpack_one(stft_repr, batch_audio_channel_packed_shape, "* f t c")
# merge stereo / mono into the frequency, with frequency leading dimension, for band splitting
stft_repr = rearrange(stft_repr, "b s f t c -> b (f s) t c")
# index out all frequencies for all frequency ranges across bands ascending in one go
batch_arange = torch.arange(batch, device=device)[..., None]
# account for stereo
x = stft_repr[batch_arange, self.freq_indices]
# fold the complex (real and imag) into the frequencies dimension
x = rearrange(x, "b f t c -> b t (f c)")
if self.use_torch_checkpoint:
x = checkpoint(self.band_split, x, use_reentrant=False)
else:
x = self.band_split(x)
# axial / hierarchical attention
store = [None] * len(self.layers)
for i, transformer_block in enumerate(self.layers):
if len(transformer_block) == 3:
linear_transformer, time_transformer, freq_transformer = transformer_block
x, ft_ps = pack([x], "b * d")
if self.use_torch_checkpoint:
x = checkpoint(linear_transformer, x, use_reentrant=False)
else:
x = linear_transformer(x)
(x,) = unpack(x, ft_ps, "b * d")
else:
time_transformer, freq_transformer = transformer_block
if self.skip_connection:
# Sum all previous
for j in range(i):
x = x + store[j]
x = rearrange(x, "b t f d -> b f t d")
x, ps = pack([x], "* t d")
if self.use_torch_checkpoint:
x = checkpoint(time_transformer, x, use_reentrant=False)
else:
x = time_transformer(x)
(x,) = unpack(x, ps, "* t d")
x = rearrange(x, "b f t d -> b t f d")
x, ps = pack([x], "* f d")
if self.use_torch_checkpoint:
x = checkpoint(freq_transformer, x, use_reentrant=False)
else:
x = freq_transformer(x)
(x,) = unpack(x, ps, "* f d")
if self.skip_connection:
store[i] = x
num_stems = len(self.mask_estimators)
if self.use_torch_checkpoint:
masks = torch.stack([checkpoint(fn, x, use_reentrant=False) for fn in self.mask_estimators], dim=1)
else:
masks = torch.stack([fn(x) for fn in self.mask_estimators], dim=1)
masks = rearrange(masks, "b n t (f c) -> b n f t c", c=2)
# modulate frequency representation
stft_repr = rearrange(stft_repr, "b f t c -> b 1 f t c")
# complex number multiplication
stft_repr = torch.view_as_complex(stft_repr)
masks = torch.view_as_complex(masks)
masks = masks.type(stft_repr.dtype)
# need to average the estimated mask for the overlapped frequencies
scatter_indices = repeat(self.freq_indices, "f -> b n f t", b=batch, n=num_stems, t=stft_repr.shape[-1])
stft_repr_expanded_stems = repeat(stft_repr, "b 1 ... -> b n ...", n=num_stems)
masks_summed = torch.zeros_like(stft_repr_expanded_stems).scatter_add_(2, scatter_indices, masks)
denom = repeat(self.num_bands_per_freq, "f -> (f r) 1", r=channels)
masks_averaged = masks_summed / denom.clamp(min=1e-8)
# modulate stft repr with estimated mask
stft_repr = stft_repr * masks_averaged
# istft
stft_repr = rearrange(stft_repr, "b n (f s) t -> (b n s) f t", s=self.audio_channels)
recon_audio = torch.istft(
stft_repr, **self.stft_kwargs, window=stft_window, return_complex=False, length=istft_length
)
recon_audio = rearrange(recon_audio, "(b n s) t -> b n s t", b=batch, s=self.audio_channels, n=num_stems)
if num_stems == 1:
recon_audio = rearrange(recon_audio, "b 1 s t -> b s t")
# if a target is passed in, calculate loss for learning
if not exists(target):
return recon_audio
if self.num_stems > 1:
assert target.ndim == 4 and target.shape[1] == self.num_stems
if target.ndim == 2:
target = rearrange(target, "... t -> ... 1 t")
target = target[..., : recon_audio.shape[-1]] # protect against lost length on istft
loss = F.l1_loss(recon_audio, target)
multi_stft_resolution_loss = 0.0
for window_size in self.multi_stft_resolutions_window_sizes:
res_stft_kwargs = dict(
n_fft=max(window_size, self.multi_stft_n_fft), # not sure what n_fft is across multi resolution stft
win_length=window_size,
return_complex=True,
window=self.multi_stft_window_fn(window_size, device=device),
**self.multi_stft_kwargs,
)
recon_Y = torch.stft(rearrange(recon_audio, "... s t -> (... s) t"), **res_stft_kwargs)
target_Y = torch.stft(rearrange(target, "... s t -> (... s) t"), **res_stft_kwargs)
multi_stft_resolution_loss = multi_stft_resolution_loss + F.l1_loss(recon_Y, target_Y)
weighted_multi_resolution_loss = multi_stft_resolution_loss * self.multi_stft_resolution_loss_weight
total_loss = loss + weighted_multi_resolution_loss
if not return_loss_breakdown:
return total_loss
return total_loss, (loss, multi_stft_resolution_loss) | --- +++ @@ -119,6 +119,9 @@
class LinearAttention(Module):
+ """
+ this flavor of linear attention proposed in https://arxiv.org/abs/2106.09681 by El-Nouby et al.
+ """
# @beartype
def __init__(self, *, dim, dim_head=32, heads=8, scale=8, flash=False, dropout=0.0):
@@ -417,6 +420,17 @@ self.match_input_audio_length = match_input_audio_length
def forward(self, raw_audio, target=None, return_loss_breakdown=False):
+ """
+ einops
+
+ b - batch
+ f - freq
+ t - time
+ s - audio channel (1 for mono, 2 for stereo)
+ n - number of 'stems'
+ c - complex (2)
+ d - feature dimension
+ """
device = raw_audio.device
@@ -589,4 +603,4 @@ if not return_loss_breakdown:
return total_loss
- return total_loss, (loss, multi_stft_resolution_loss)+ return total_loss, (loss, multi_stft_resolution_loss)
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/tools/uvr5/bs_roformer/mel_band_roformer.py |
Add docstrings following best practices | import math
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn import Conv1d
from torch.nn.utils import weight_norm, remove_weight_norm
from module import commons
from module.commons import init_weights, get_padding
from module.transforms import piecewise_rational_quadratic_transform
import torch.distributions as D
LRELU_SLOPE = 0.1
class LayerNorm(nn.Module):
def __init__(self, channels, eps=1e-5):
super().__init__()
self.channels = channels
self.eps = eps
self.gamma = nn.Parameter(torch.ones(channels))
self.beta = nn.Parameter(torch.zeros(channels))
def forward(self, x):
x = x.transpose(1, -1)
x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
return x.transpose(1, -1)
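# Hedged sanity check (editor's example; helper name is hypothetical): this
# LayerNorm normalizes over the channel axis of channel-first tensors by
# transposing around F.layer_norm.
def _example_layer_norm() -> torch.Size:
    ln = LayerNorm(channels=8)
    x = torch.randn(3, 8, 20)  # [batch, channels, time]
    return ln(x).shape         # torch.Size([3, 8, 20])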
class ConvReluNorm(nn.Module):
def __init__(
self,
in_channels,
hidden_channels,
out_channels,
kernel_size,
n_layers,
p_dropout,
):
super().__init__()
self.in_channels = in_channels
self.hidden_channels = hidden_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.p_dropout = p_dropout
assert n_layers > 1, "Number of layers should be larger than 1."
self.conv_layers = nn.ModuleList()
self.norm_layers = nn.ModuleList()
self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
self.norm_layers.append(LayerNorm(hidden_channels))
self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
for _ in range(n_layers - 1):
self.conv_layers.append(
nn.Conv1d(
hidden_channels,
hidden_channels,
kernel_size,
padding=kernel_size // 2,
)
)
self.norm_layers.append(LayerNorm(hidden_channels))
self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
self.proj.weight.data.zero_()
self.proj.bias.data.zero_()
def forward(self, x, x_mask):
x_org = x
for i in range(self.n_layers):
x = self.conv_layers[i](x * x_mask)
x = self.norm_layers[i](x)
x = self.relu_drop(x)
x = x_org + self.proj(x)
return x * x_mask
class DDSConv(nn.Module):
def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
super().__init__()
self.channels = channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.p_dropout = p_dropout
self.drop = nn.Dropout(p_dropout)
self.convs_sep = nn.ModuleList()
self.convs_1x1 = nn.ModuleList()
self.norms_1 = nn.ModuleList()
self.norms_2 = nn.ModuleList()
for i in range(n_layers):
dilation = kernel_size**i
padding = (kernel_size * dilation - dilation) // 2
self.convs_sep.append(
nn.Conv1d(
channels,
channels,
kernel_size,
groups=channels,
dilation=dilation,
padding=padding,
)
)
self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
self.norms_1.append(LayerNorm(channels))
self.norms_2.append(LayerNorm(channels))
def forward(self, x, x_mask, g=None):
if g is not None:
x = x + g
for i in range(self.n_layers):
y = self.convs_sep[i](x * x_mask)
y = self.norms_1[i](y)
y = F.gelu(y)
y = self.convs_1x1[i](y)
y = self.norms_2[i](y)
y = F.gelu(y)
y = self.drop(y)
x = x + y
return x * x_mask
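# Editor's note: an illustrative helper (hypothetical, not in the original
# file). DDSConv dilates layer i by kernel_size**i, so the receptive field
# grows exponentially with depth:
def _ddsconv_receptive_field(kernel_size: int, n_layers: int) -> int:
    rf = 1
    for i in range(n_layers):
        rf += (kernel_size - 1) * kernel_size**i  # each layer widens the window
    return rf  # kernel_size=3, n_layers=3 -> 1 + 2*(1 + 3 + 9) = 27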
class WN(torch.nn.Module):
def __init__(
self,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=0,
p_dropout=0,
):
super(WN, self).__init__()
assert kernel_size % 2 == 1
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.p_dropout = p_dropout
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
self.drop = nn.Dropout(p_dropout)
if gin_channels != 0:
cond_layer = torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
for i in range(n_layers):
dilation = dilation_rate**i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(
hidden_channels,
2 * hidden_channels,
kernel_size,
dilation=dilation,
padding=padding,
)
in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
self.in_layers.append(in_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2 * hidden_channels
else:
res_skip_channels = hidden_channels
res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
self.res_skip_layers.append(res_skip_layer)
def forward(self, x, x_mask, g=None, **kwargs):
output = torch.zeros_like(x)
n_channels_tensor = torch.IntTensor([self.hidden_channels])
if g is not None:
g = self.cond_layer(g)
for i in range(self.n_layers):
x_in = self.in_layers[i](x)
if g is not None:
cond_offset = i * 2 * self.hidden_channels
g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
else:
g_l = torch.zeros_like(x_in)
acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
acts = self.drop(acts)
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
res_acts = res_skip_acts[:, : self.hidden_channels, :]
x = (x + res_acts) * x_mask
output = output + res_skip_acts[:, self.hidden_channels :, :]
else:
output = output + res_skip_acts
return output * x_mask
def remove_weight_norm(self):
if self.gin_channels != 0:
torch.nn.utils.remove_weight_norm(self.cond_layer)
for l in self.in_layers:
torch.nn.utils.remove_weight_norm(l)
for l in self.res_skip_layers:
torch.nn.utils.remove_weight_norm(l)
class ResBlock1(torch.nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock1, self).__init__()
self.convs1 = nn.ModuleList(
[
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2]),
)
),
]
)
self.convs1.apply(init_weights)
self.convs2 = nn.ModuleList(
[
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=1,
padding=get_padding(kernel_size, 1),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=1,
padding=get_padding(kernel_size, 1),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=1,
padding=get_padding(kernel_size, 1),
)
),
]
)
self.convs2.apply(init_weights)
def forward(self, x, x_mask=None):
for c1, c2 in zip(self.convs1, self.convs2):
xt = F.leaky_relu(x, LRELU_SLOPE)
if x_mask is not None:
xt = xt * x_mask
xt = c1(xt)
xt = F.leaky_relu(xt, LRELU_SLOPE)
if x_mask is not None:
xt = xt * x_mask
xt = c2(xt)
x = xt + x
if x_mask is not None:
x = x * x_mask
return x
def remove_weight_norm(self):
for l in self.convs1:
remove_weight_norm(l)
for l in self.convs2:
remove_weight_norm(l)
class ResBlock2(torch.nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
super(ResBlock2, self).__init__()
self.convs = nn.ModuleList(
[
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]),
)
),
]
)
self.convs.apply(init_weights)
def forward(self, x, x_mask=None):
for c in self.convs:
xt = F.leaky_relu(x, LRELU_SLOPE)
if x_mask is not None:
xt = xt * x_mask
xt = c(xt)
x = xt + x
if x_mask is not None:
x = x * x_mask
return x
def remove_weight_norm(self):
for l in self.convs:
remove_weight_norm(l)
class Log(nn.Module):
def forward(self, x, x_mask, reverse=False, **kwargs):
if not reverse:
y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
logdet = torch.sum(-y, [1, 2])
return y, logdet
else:
x = torch.exp(x) * x_mask
return x
class Flip(nn.Module):
def forward(self, x, *args, reverse=False, **kwargs):
x = torch.flip(x, [1])
if not reverse:
logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
return x, logdet
else:
return x
class ElementwiseAffine(nn.Module):
def __init__(self, channels):
super().__init__()
self.channels = channels
self.m = nn.Parameter(torch.zeros(channels, 1))
self.logs = nn.Parameter(torch.zeros(channels, 1))
def forward(self, x, x_mask, reverse=False, **kwargs):
if not reverse:
y = self.m + torch.exp(self.logs) * x
y = y * x_mask
logdet = torch.sum(self.logs * x_mask, [1, 2])
return y, logdet
else:
x = (x - self.m) * torch.exp(-self.logs) * x_mask
return x
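# Editor's note: a round-trip sketch (not part of the original file).
# ElementwiseAffine is an invertible flow step, y = m + exp(logs) * x, whose
# log-determinant is sum(logs * x_mask):
def _elementwise_affine_demo() -> None:
    flow = ElementwiseAffine(channels=4)
    x, mask = torch.randn(1, 4, 16), torch.ones(1, 1, 16)
    y, logdet = flow(x, mask)
    x_rec = flow(y, mask, reverse=True)
    assert torch.allclose(x, x_rec, atol=1e-6)  # forward then reverse recovers x
    assert logdet.shape == (1,)  # one log-determinant per batch item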
class ResidualCouplingLayer(nn.Module):
def __init__(
self,
channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
p_dropout=0,
gin_channels=0,
mean_only=False,
):
assert channels % 2 == 0, "channels should be divisible by 2"
super().__init__()
self.channels = channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.half_channels = channels // 2
self.mean_only = mean_only
self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
self.enc = WN(
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
p_dropout=p_dropout,
gin_channels=gin_channels,
)
self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
self.post.weight.data.zero_()
self.post.bias.data.zero_()
def forward(self, x, x_mask, g=None, reverse=False):
x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
h = self.pre(x0) * x_mask
h = self.enc(h, x_mask, g=g)
stats = self.post(h) * x_mask
if not self.mean_only:
m, logs = torch.split(stats, [self.half_channels] * 2, 1)
else:
m = stats
logs = torch.zeros_like(m)
if not reverse:
x1 = m + x1 * torch.exp(logs) * x_mask
x = torch.cat([x0, x1], 1)
logdet = torch.sum(logs, [1, 2])
return x, logdet
else:
x1 = (x1 - m) * torch.exp(-logs) * x_mask
x = torch.cat([x0, x1], 1)
return x
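# Editor's note: an invertibility sketch (not part of the original file). The
# coupling layer transforms only half the channels, conditioned on the other
# half, so the flow can be inverted exactly:
def _coupling_demo() -> None:
    layer = ResidualCouplingLayer(
        channels=4, hidden_channels=8, kernel_size=3, dilation_rate=1, n_layers=2
    )
    x, mask = torch.randn(1, 4, 16), torch.ones(1, 1, 16)
    y, _logdet = layer(x, mask)
    x_rec = layer(y, mask, reverse=True)
    assert torch.allclose(x, x_rec, atol=1e-5)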
class ConvFlow(nn.Module):
def __init__(
self,
in_channels,
filter_channels,
kernel_size,
n_layers,
num_bins=10,
tail_bound=5.0,
):
super().__init__()
self.in_channels = in_channels
self.filter_channels = filter_channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.num_bins = num_bins
self.tail_bound = tail_bound
self.half_channels = in_channels // 2
self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0)
self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
self.proj.weight.data.zero_()
self.proj.bias.data.zero_()
def forward(self, x, x_mask, g=None, reverse=False):
x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
h = self.pre(x0)
h = self.convs(h, x_mask, g=g)
h = self.proj(h) * x_mask
b, c, t = x0.shape
h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels)
unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(self.filter_channels)
unnormalized_derivatives = h[..., 2 * self.num_bins :]
x1, logabsdet = piecewise_rational_quadratic_transform(
x1,
unnormalized_widths,
unnormalized_heights,
unnormalized_derivatives,
inverse=reverse,
tails="linear",
tail_bound=self.tail_bound,
)
x = torch.cat([x0, x1], 1) * x_mask
logdet = torch.sum(logabsdet * x_mask, [1, 2])
if not reverse:
return x, logdet
else:
return x
class LinearNorm(nn.Module):
def __init__(
self,
in_channels,
out_channels,
bias=True,
spectral_norm=False,
):
super(LinearNorm, self).__init__()
self.fc = nn.Linear(in_channels, out_channels, bias)
if spectral_norm:
self.fc = nn.utils.spectral_norm(self.fc)
def forward(self, input):
out = self.fc(input)
return out
class Mish(nn.Module):
def __init__(self):
super(Mish, self).__init__()
def forward(self, x):
return x * torch.tanh(F.softplus(x))
class Conv1dGLU(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, dropout):
super(Conv1dGLU, self).__init__()
self.out_channels = out_channels
self.conv1 = ConvNorm(in_channels, 2 * out_channels, kernel_size=kernel_size)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
residual = x
x = self.conv1(x)
x1, x2 = torch.split(x, split_size_or_sections=self.out_channels, dim=1)
x = x1 * torch.sigmoid(x2)
x = residual + self.dropout(x)
return x
class ConvNorm(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size=1,
stride=1,
padding=None,
dilation=1,
bias=True,
spectral_norm=False,
):
super(ConvNorm, self).__init__()
if padding is None:
assert kernel_size % 2 == 1
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = torch.nn.Conv1d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias,
)
if spectral_norm:
self.conv = nn.utils.spectral_norm(self.conv)
def forward(self, input):
out = self.conv(input)
return out
class MultiHeadAttention(nn.Module):
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.0, spectral_norm=False):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k)
self.w_ks = nn.Linear(d_model, n_head * d_k)
self.w_vs = nn.Linear(d_model, n_head * d_v)
self.attention = ScaledDotProductAttention(temperature=np.power(d_model, 0.5), dropout=dropout)
self.fc = nn.Linear(n_head * d_v, d_model)
self.dropout = nn.Dropout(dropout)
if spectral_norm:
self.w_qs = nn.utils.spectral_norm(self.w_qs)
self.w_ks = nn.utils.spectral_norm(self.w_ks)
self.w_vs = nn.utils.spectral_norm(self.w_vs)
self.fc = nn.utils.spectral_norm(self.fc)
def forward(self, x, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_x, _ = x.size()
residual = x
q = self.w_qs(x).view(sz_b, len_x, n_head, d_k)
k = self.w_ks(x).view(sz_b, len_x, n_head, d_k)
v = self.w_vs(x).view(sz_b, len_x, n_head, d_v)
q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_x, d_k) # (n*b) x lq x dk
k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_x, d_k) # (n*b) x lk x dk
v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_x, d_v) # (n*b) x lv x dv
if mask is not None:
slf_mask = mask.repeat(n_head, 1, 1) # (n*b) x .. x ..
else:
slf_mask = None
output, attn = self.attention(q, k, v, mask=slf_mask)
output = output.view(n_head, sz_b, len_x, d_v)
output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_x, -1) # b x lq x (n*dv)
output = self.fc(output)
output = self.dropout(output) + residual
return output, attn
class ScaledDotProductAttention(nn.Module):
def __init__(self, temperature, dropout):
super().__init__()
self.temperature = temperature
self.softmax = nn.Softmax(dim=2)
self.dropout = nn.Dropout(dropout)
def forward(self, q, k, v, mask=None):
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn / self.temperature
if mask is not None:
attn = attn.masked_fill(mask, -np.inf)
attn = self.softmax(attn)
p_attn = self.dropout(attn)
output = torch.bmm(p_attn, v)
return output, attn
class MelStyleEncoder(nn.Module):
def __init__(
self,
n_mel_channels=80,
style_hidden=128,
style_vector_dim=256,
style_kernel_size=5,
style_head=2,
dropout=0.1,
):
super(MelStyleEncoder, self).__init__()
self.in_dim = n_mel_channels
self.hidden_dim = style_hidden
self.out_dim = style_vector_dim
self.kernel_size = style_kernel_size
self.n_head = style_head
self.dropout = dropout
self.spectral = nn.Sequential(
LinearNorm(self.in_dim, self.hidden_dim),
Mish(),
nn.Dropout(self.dropout),
LinearNorm(self.hidden_dim, self.hidden_dim),
Mish(),
nn.Dropout(self.dropout),
)
self.temporal = nn.Sequential(
Conv1dGLU(self.hidden_dim, self.hidden_dim, self.kernel_size, self.dropout),
Conv1dGLU(self.hidden_dim, self.hidden_dim, self.kernel_size, self.dropout),
)
self.slf_attn = MultiHeadAttention(
self.n_head,
self.hidden_dim,
self.hidden_dim // self.n_head,
self.hidden_dim // self.n_head,
self.dropout,
)
self.fc = LinearNorm(self.hidden_dim, self.out_dim)
def temporal_avg_pool(self, x, mask=None):
if mask is None:
out = torch.mean(x, dim=1)
else:
len_ = (~mask).sum(dim=1).unsqueeze(1)
x = x.masked_fill(mask.unsqueeze(-1), 0)
dtype = x.dtype
x = x.float()
x = torch.div(x, len_.unsqueeze(1))
out = x.sum(dim=1).to(dtype)
return out
def forward(self, x, mask=None):
x = x.transpose(1, 2)
if mask is not None:
mask = (mask.int() == 0).squeeze(1)
max_len = x.shape[1]
slf_attn_mask = mask.unsqueeze(1).expand(-1, max_len, -1) if mask is not None else None
# spectral
x = self.spectral(x)
# temporal
x = x.transpose(1, 2)
x = self.temporal(x)
x = x.transpose(1, 2)
# self-attention
if mask is not None:
x = x.masked_fill(mask.unsqueeze(-1), 0)
x, _ = self.slf_attn(x, mask=slf_attn_mask)
# fc
x = self.fc(x)
# temporal average pooling
w = self.temporal_avg_pool(x, mask=mask)
return w.unsqueeze(-1)
class MelStyleEncoderVAE(nn.Module):
def __init__(self, spec_channels, z_latent_dim, emb_dim):
super().__init__()
self.ref_encoder = MelStyleEncoder(spec_channels, style_vector_dim=emb_dim)
self.fc1 = nn.Linear(emb_dim, z_latent_dim)
self.fc2 = nn.Linear(emb_dim, z_latent_dim)
self.fc3 = nn.Linear(z_latent_dim, emb_dim)
self.z_latent_dim = z_latent_dim
def reparameterize(self, mu, logvar):
if self.training:
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu)
else:
return mu
def forward(self, inputs, mask=None):
enc_out = self.ref_encoder(inputs.squeeze(-1), mask).squeeze(-1)
mu = self.fc1(enc_out)
logvar = self.fc2(enc_out)
posterior = D.Normal(mu, torch.exp(logvar))
kl_divergence = D.kl_divergence(posterior, D.Normal(torch.zeros_like(mu), torch.ones_like(logvar)))
loss_kl = kl_divergence.mean()
z = posterior.rsample()
style_embed = self.fc3(z)
return style_embed.unsqueeze(-1), loss_kl
def infer(self, inputs=None, random_sample=False, manual_latent=None):
if manual_latent is None:
if random_sample:
dev = next(self.parameters()).device
posterior = D.Normal(
torch.zeros(1, self.z_latent_dim, device=dev),
torch.ones(1, self.z_latent_dim, device=dev),
)
z = posterior.rsample()
else:
enc_out = self.ref_encoder(inputs.transpose(1, 2))
mu = self.fc1(enc_out)
z = mu
else:
z = manual_latent
style_embed = self.fc3(z)
return style_embed.unsqueeze(-1), z
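# Editor's note: a minimal sketch of the reparameterization trick used above
# (not part of the original file). Sampling z = mu + sigma * eps with
# eps ~ N(0, 1) keeps the draw differentiable w.r.t. mu and logvar:
def _reparameterize_demo() -> None:
    mu, logvar = torch.zeros(3), torch.zeros(3)  # a unit Gaussian posterior
    std = torch.exp(0.5 * logvar)
    z = mu + std * torch.randn_like(std)  # differentiable sample
    assert z.shape == mu.shape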
class ActNorm(nn.Module):
def __init__(self, channels, ddi=False, **kwargs):
super().__init__()
self.channels = channels
self.initialized = not ddi
self.logs = nn.Parameter(torch.zeros(1, channels, 1))
self.bias = nn.Parameter(torch.zeros(1, channels, 1))
def forward(self, x, x_mask=None, g=None, reverse=False, **kwargs):
if x_mask is None:
x_mask = torch.ones(x.size(0), 1, x.size(2)).to(device=x.device, dtype=x.dtype)
x_len = torch.sum(x_mask, [1, 2])
if not self.initialized:
self.initialize(x, x_mask)
self.initialized = True
if reverse:
z = (x - self.bias) * torch.exp(-self.logs) * x_mask
logdet = None
return z
else:
z = (self.bias + torch.exp(self.logs) * x) * x_mask
logdet = torch.sum(self.logs) * x_len # [b]
return z, logdet
def store_inverse(self):
pass
def set_ddi(self, ddi):
self.initialized = not ddi
def initialize(self, x, x_mask):
with torch.no_grad():
denom = torch.sum(x_mask, [0, 2])
m = torch.sum(x * x_mask, [0, 2]) / denom
m_sq = torch.sum(x * x * x_mask, [0, 2]) / denom
v = m_sq - (m**2)
logs = 0.5 * torch.log(torch.clamp_min(v, 1e-6))
bias_init = (-m * torch.exp(-logs)).view(*self.bias.shape).to(dtype=self.bias.dtype)
logs_init = (-logs).view(*self.logs.shape).to(dtype=self.logs.dtype)
self.bias.data.copy_(bias_init)
self.logs.data.copy_(logs_init)
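# Editor's note: a data-dependent initialization sketch (not part of the
# original file). With ddi=True the first forward pass picks bias/logs so the
# output is approximately standardized per channel:
def _actnorm_ddi_demo() -> None:
    norm = ActNorm(channels=4, ddi=True)
    x = torch.randn(8, 4, 32) * 3.0 + 1.0  # shifted, scaled input
    z, _logdet = norm(x)
    assert abs(z.mean().item()) < 1e-4  # roughly zero mean after init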
class InvConvNear(nn.Module):
def __init__(self, channels, n_split=4, no_jacobian=False, **kwargs):
super().__init__()
assert n_split % 2 == 0
self.channels = channels
self.n_split = n_split
self.no_jacobian = no_jacobian
w_init = torch.linalg.qr(torch.FloatTensor(self.n_split, self.n_split).normal_())[0]
if torch.det(w_init) < 0:
w_init[:, 0] = -1 * w_init[:, 0]
self.weight = nn.Parameter(w_init)
def forward(self, x, x_mask=None, g=None, reverse=False, **kwargs):
b, c, t = x.size()
assert c % self.n_split == 0
if x_mask is None:
x_mask = 1
x_len = torch.ones((b,), dtype=x.dtype, device=x.device) * t
else:
x_len = torch.sum(x_mask, [1, 2])
x = x.view(b, 2, c // self.n_split, self.n_split // 2, t)
x = x.permute(0, 1, 3, 2, 4).contiguous().view(b, self.n_split, c // self.n_split, t)
if reverse:
if hasattr(self, "weight_inv"):
weight = self.weight_inv
else:
weight = torch.inverse(self.weight.float()).to(dtype=self.weight.dtype)
logdet = None
else:
weight = self.weight
if self.no_jacobian:
logdet = 0
else:
logdet = torch.logdet(self.weight) * (c / self.n_split) * x_len # [b]
weight = weight.view(self.n_split, self.n_split, 1, 1)
z = F.conv2d(x, weight)
z = z.view(b, 2, self.n_split // 2, c // self.n_split, t)
z = z.permute(0, 1, 3, 2, 4).contiguous().view(b, c, t) * x_mask
if reverse:
return z
else:
return z, logdet
def store_inverse(self):
self.weight_inv = torch.inverse(self.weight.float()).to(dtype=self.weight.dtype) | --- +++ @@ -81,6 +81,9 @@
class DDSConv(nn.Module):
+ """
+ Dilated and Depth-Separable Convolution
+ """
def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
super().__init__()
@@ -534,6 +537,10 @@
class Conv1dGLU(nn.Module):
+ """
+ Conv1d + GLU (Gated Linear Unit) with residual connection.
+ For GLU, refer to the paper https://arxiv.org/abs/1612.08083.
+ """
def __init__(self, in_channels, out_channels, kernel_size, dropout):
super(Conv1dGLU, self).__init__()
@@ -587,6 +594,7 @@
class MultiHeadAttention(nn.Module):
+ """Multi-Head Attention module"""
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.0, spectral_norm=False):
super().__init__()
@@ -639,6 +647,7 @@
class ScaledDotProductAttention(nn.Module):
+ """Scaled Dot-Product Attention"""
def __init__(self, temperature, dropout):
super().__init__()
@@ -661,6 +670,7 @@
class MelStyleEncoder(nn.Module):
+ """MelStyleEncoder"""
def __init__(
self,
@@ -884,4 +894,4 @@ return z, logdet
def store_inverse(self):
- self.weight_inv = torch.inverse(self.weight.float()).to(dtype=self.weight.dtype)
+ self.weight_inv = torch.inverse(self.weight.float()).to(dtype=self.weight.dtype)
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/GPT_SoVITS/module/modules.py |
Add docstrings that explain logic | from functools import partial
import torch
from torch import nn
from torch.nn import Module, ModuleList
import torch.nn.functional as F
from bs_roformer.attend import Attend
from torch.utils.checkpoint import checkpoint
from typing import Tuple, Optional, Callable
# from beartype.typing import Tuple, Optional, List, Callable
# from beartype import beartype
from rotary_embedding_torch import RotaryEmbedding
from einops import rearrange, pack, unpack
from einops.layers.torch import Rearrange
# helper functions
def exists(val):
return val is not None
def default(v, d):
return v if exists(v) else d
def pack_one(t, pattern):
return pack([t], pattern)
def unpack_one(t, ps, pattern):
return unpack(t, ps, pattern)[0]
# norm
def l2norm(t):
return F.normalize(t, dim=-1, p=2)
class RMSNorm(Module):
def __init__(self, dim):
super().__init__()
self.scale = dim**0.5
self.gamma = nn.Parameter(torch.ones(dim))
def forward(self, x):
return F.normalize(x, dim=-1) * self.scale * self.gamma
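# Editor's note: a quick property check (not part of the original file).
# RMSNorm rescales each vector to unit RMS (F.normalize times sqrt(dim)) and
# applies a learned gain, with no mean subtraction or bias:
def _rmsnorm_demo() -> None:
    norm = RMSNorm(dim=16)
    y = norm(torch.randn(2, 10, 16))
    rms = y.pow(2).mean(dim=-1).sqrt()
    assert torch.allclose(rms, torch.ones_like(rms), atol=1e-5)  # gamma starts at 1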
# attention
class FeedForward(Module):
def __init__(self, dim, mult=4, dropout=0.0):
super().__init__()
dim_inner = int(dim * mult)
self.net = nn.Sequential(
RMSNorm(dim),
nn.Linear(dim, dim_inner),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(dim_inner, dim),
nn.Dropout(dropout),
)
def forward(self, x):
return self.net(x)
class Attention(Module):
def __init__(self, dim, heads=8, dim_head=64, dropout=0.0, rotary_embed=None, flash=True):
super().__init__()
self.heads = heads
self.scale = dim_head**-0.5
dim_inner = heads * dim_head
self.rotary_embed = rotary_embed
self.attend = Attend(flash=flash, dropout=dropout)
self.norm = RMSNorm(dim)
self.to_qkv = nn.Linear(dim, dim_inner * 3, bias=False)
self.to_gates = nn.Linear(dim, heads)
self.to_out = nn.Sequential(nn.Linear(dim_inner, dim, bias=False), nn.Dropout(dropout))
def forward(self, x):
x = self.norm(x)
q, k, v = rearrange(self.to_qkv(x), "b n (qkv h d) -> qkv b h n d", qkv=3, h=self.heads)
if exists(self.rotary_embed):
q = self.rotary_embed.rotate_queries_or_keys(q)
k = self.rotary_embed.rotate_queries_or_keys(k)
out = self.attend(q, k, v)
gates = self.to_gates(x)
out = out * rearrange(gates, "b n h -> b h n 1").sigmoid()
out = rearrange(out, "b h n d -> b n (h d)")
return self.to_out(out)
class LinearAttention(Module):
# @beartype
def __init__(self, *, dim, dim_head=32, heads=8, scale=8, flash=False, dropout=0.0):
super().__init__()
dim_inner = dim_head * heads
self.norm = RMSNorm(dim)
self.to_qkv = nn.Sequential(
nn.Linear(dim, dim_inner * 3, bias=False), Rearrange("b n (qkv h d) -> qkv b h d n", qkv=3, h=heads)
)
self.temperature = nn.Parameter(torch.ones(heads, 1, 1))
self.attend = Attend(scale=scale, dropout=dropout, flash=flash)
self.to_out = nn.Sequential(Rearrange("b h d n -> b n (h d)"), nn.Linear(dim_inner, dim, bias=False))
def forward(self, x):
x = self.norm(x)
q, k, v = self.to_qkv(x)
q, k = map(l2norm, (q, k))
q = q * self.temperature.exp()
out = self.attend(q, k, v)
return self.to_out(out)
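# Editor's note: an illustrative check (not part of the original file). With
# q and k l2-normalized, their dot products are cosine similarities bounded
# in [-1, 1]; the learned per-head temperature restores a usable range:
def _cosine_sim_demo() -> None:
    q, k = l2norm(torch.randn(2, 8, 32)), l2norm(torch.randn(2, 8, 32))
    sims = q @ k.transpose(-2, -1)
    assert sims.abs().max() <= 1.0 + 1e-5  # bounded before temperature scaling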
class Transformer(Module):
def __init__(
self,
*,
dim,
depth,
dim_head=64,
heads=8,
attn_dropout=0.0,
ff_dropout=0.0,
ff_mult=4,
norm_output=True,
rotary_embed=None,
flash_attn=True,
linear_attn=False,
):
super().__init__()
self.layers = ModuleList([])
for _ in range(depth):
if linear_attn:
attn = LinearAttention(dim=dim, dim_head=dim_head, heads=heads, dropout=attn_dropout, flash=flash_attn)
else:
attn = Attention(
dim=dim,
dim_head=dim_head,
heads=heads,
dropout=attn_dropout,
rotary_embed=rotary_embed,
flash=flash_attn,
)
self.layers.append(ModuleList([attn, FeedForward(dim=dim, mult=ff_mult, dropout=ff_dropout)]))
self.norm = RMSNorm(dim) if norm_output else nn.Identity()
def forward(self, x):
for attn, ff in self.layers:
x = attn(x) + x
x = ff(x) + x
return self.norm(x)
# bandsplit module
class BandSplit(Module):
# @beartype
def __init__(self, dim, dim_inputs: Tuple[int, ...]):
super().__init__()
self.dim_inputs = dim_inputs
self.to_features = ModuleList([])
for dim_in in dim_inputs:
net = nn.Sequential(RMSNorm(dim_in), nn.Linear(dim_in, dim))
self.to_features.append(net)
def forward(self, x):
x = x.split(self.dim_inputs, dim=-1)
outs = []
for split_input, to_feature in zip(x, self.to_features):
split_output = to_feature(split_input)
outs.append(split_output)
return torch.stack(outs, dim=-2)
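# Editor's note: a shape sketch (not part of the original file). BandSplit
# slices the flattened feature axis into per-band chunks and projects each to
# a common width, yielding (batch, time, bands, dim):
def _band_split_demo() -> None:
    split = BandSplit(dim=8, dim_inputs=(4, 6, 10))  # three bands, 20 features
    out = split(torch.randn(2, 50, 20))
    assert out.shape == (2, 50, 3, 8)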
def MLP(dim_in, dim_out, dim_hidden=None, depth=1, activation=nn.Tanh):
dim_hidden = default(dim_hidden, dim_in)
net = []
dims = (dim_in, *((dim_hidden,) * (depth - 1)), dim_out)
for ind, (layer_dim_in, layer_dim_out) in enumerate(zip(dims[:-1], dims[1:])):
is_last = ind == (len(dims) - 2)
net.append(nn.Linear(layer_dim_in, layer_dim_out))
if is_last:
continue
net.append(activation())
return nn.Sequential(*net)
class MaskEstimator(Module):
# @beartype
def __init__(self, dim, dim_inputs: Tuple[int, ...], depth, mlp_expansion_factor=4):
super().__init__()
self.dim_inputs = dim_inputs
self.to_freqs = ModuleList([])
dim_hidden = dim * mlp_expansion_factor
for dim_in in dim_inputs:
mlp = nn.Sequential(MLP(dim, dim_in * 2, dim_hidden=dim_hidden, depth=depth), nn.GLU(dim=-1))
self.to_freqs.append(mlp)
def forward(self, x):
x = x.unbind(dim=-2)
outs = []
for band_features, mlp in zip(x, self.to_freqs):
freq_out = mlp(band_features)
outs.append(freq_out)
return torch.cat(outs, dim=-1)
# main class
DEFAULT_FREQS_PER_BANDS = (
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
12, 12, 12, 12, 12, 12, 12, 12,
24, 24, 24, 24, 24, 24, 24, 24,
48, 48, 48, 48, 48, 48, 48, 48,
128, 129,
)
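# Editor's note (not part of the original file): the band widths above must
# tile the full STFT frequency axis; with the default stft_n_fft=2048 there
# are 1025 bins, which BSRoformer also asserts in its constructor.
assert sum(DEFAULT_FREQS_PER_BANDS) == 1025  # 24*2 + 12*4 + 8*12 + 8*24 + 8*48 + 128 + 129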
class BSRoformer(Module):
# @beartype
def __init__(
self,
dim,
*,
depth,
stereo=False,
num_stems=1,
time_transformer_depth=2,
freq_transformer_depth=2,
linear_transformer_depth=0,
freqs_per_bands: Tuple[int, ...] = DEFAULT_FREQS_PER_BANDS,
# in the paper, they divide into ~60 bands, test with 1 for starters
dim_head=64,
heads=8,
attn_dropout=0.0,
ff_dropout=0.0,
flash_attn=True,
dim_freqs_in=1025,
stft_n_fft=2048,
stft_hop_length=512,
# 10ms at 44100Hz, from sections 4.1, 4.4 in the paper - @faroit recommends // 2 or // 4 for better reconstruction
stft_win_length=2048,
stft_normalized=False,
stft_window_fn: Optional[Callable] = None,
mask_estimator_depth=2,
multi_stft_resolution_loss_weight=1.0,
multi_stft_resolutions_window_sizes: Tuple[int, ...] = (4096, 2048, 1024, 512, 256),
multi_stft_hop_size=147,
multi_stft_normalized=False,
multi_stft_window_fn: Callable = torch.hann_window,
mlp_expansion_factor=4,
use_torch_checkpoint=False,
skip_connection=False,
):
super().__init__()
self.stereo = stereo
self.audio_channels = 2 if stereo else 1
self.num_stems = num_stems
self.use_torch_checkpoint = use_torch_checkpoint
self.skip_connection = skip_connection
self.layers = ModuleList([])
transformer_kwargs = dict(
dim=dim,
heads=heads,
dim_head=dim_head,
attn_dropout=attn_dropout,
ff_dropout=ff_dropout,
flash_attn=flash_attn,
norm_output=False,
)
time_rotary_embed = RotaryEmbedding(dim=dim_head)
freq_rotary_embed = RotaryEmbedding(dim=dim_head)
for _ in range(depth):
tran_modules = []
if linear_transformer_depth > 0:
tran_modules.append(Transformer(depth=linear_transformer_depth, linear_attn=True, **transformer_kwargs))
tran_modules.append(
Transformer(depth=time_transformer_depth, rotary_embed=time_rotary_embed, **transformer_kwargs)
)
tran_modules.append(
Transformer(depth=freq_transformer_depth, rotary_embed=freq_rotary_embed, **transformer_kwargs)
)
self.layers.append(nn.ModuleList(tran_modules))
self.final_norm = RMSNorm(dim)
self.stft_kwargs = dict(
n_fft=stft_n_fft, hop_length=stft_hop_length, win_length=stft_win_length, normalized=stft_normalized
)
self.stft_window_fn = partial(default(stft_window_fn, torch.hann_window), stft_win_length)
freqs = torch.stft(
torch.randn(1, 4096), **self.stft_kwargs, window=torch.ones(stft_win_length), return_complex=True
).shape[1]
assert len(freqs_per_bands) > 1
assert sum(freqs_per_bands) == freqs, (
f"the number of freqs in the bands must equal {freqs} based on the STFT settings, but got {sum(freqs_per_bands)}"
)
freqs_per_bands_with_complex = tuple(2 * f * self.audio_channels for f in freqs_per_bands)
self.band_split = BandSplit(dim=dim, dim_inputs=freqs_per_bands_with_complex)
self.mask_estimators = nn.ModuleList([])
for _ in range(num_stems):
mask_estimator = MaskEstimator(
dim=dim,
dim_inputs=freqs_per_bands_with_complex,
depth=mask_estimator_depth,
mlp_expansion_factor=mlp_expansion_factor,
)
self.mask_estimators.append(mask_estimator)
# for the multi-resolution stft loss
self.multi_stft_resolution_loss_weight = multi_stft_resolution_loss_weight
self.multi_stft_resolutions_window_sizes = multi_stft_resolutions_window_sizes
self.multi_stft_n_fft = stft_n_fft
self.multi_stft_window_fn = multi_stft_window_fn
self.multi_stft_kwargs = dict(hop_length=multi_stft_hop_size, normalized=multi_stft_normalized)
def forward(self, raw_audio, target=None, return_loss_breakdown=False):
device = raw_audio.device
# determine whether the model is running on MPS (the macOS GPU accelerator)
x_is_mps = device.type == "mps"
if raw_audio.ndim == 2:
raw_audio = rearrange(raw_audio, "b t -> b 1 t")
channels = raw_audio.shape[1]
assert (not self.stereo and channels == 1) or (self.stereo and channels == 2), (
"stereo needs to be set to True if passing in audio signal that is stereo (channel dimension of 2). also need to be False if mono (channel dimension of 1)"
)
# to stft
raw_audio, batch_audio_channel_packed_shape = pack_one(raw_audio, "* t")
stft_window = self.stft_window_fn(device=device)
# RuntimeError: FFT operations are only supported on MacOS 14+
# Detecting the exact MacOS version is tedious, so a simple try/except is used instead
try:
stft_repr = torch.stft(raw_audio, **self.stft_kwargs, window=stft_window, return_complex=True)
except Exception:
stft_repr = torch.stft(
raw_audio.cpu() if x_is_mps else raw_audio,
**self.stft_kwargs,
window=stft_window.cpu() if x_is_mps else stft_window,
return_complex=True,
).to(device)
stft_repr = torch.view_as_real(stft_repr)
stft_repr = unpack_one(stft_repr, batch_audio_channel_packed_shape, "* f t c")
# merge stereo / mono into the frequency, with frequency leading dimension, for band splitting
stft_repr = rearrange(stft_repr, "b s f t c -> b (f s) t c")
x = rearrange(stft_repr, "b f t c -> b t (f c)")
if self.use_torch_checkpoint:
x = checkpoint(self.band_split, x, use_reentrant=False)
else:
x = self.band_split(x)
# axial / hierarchical attention
store = [None] * len(self.layers)
for i, transformer_block in enumerate(self.layers):
if len(transformer_block) == 3:
linear_transformer, time_transformer, freq_transformer = transformer_block
x, ft_ps = pack([x], "b * d")
if self.use_torch_checkpoint:
x = checkpoint(linear_transformer, x, use_reentrant=False)
else:
x = linear_transformer(x)
(x,) = unpack(x, ft_ps, "b * d")
else:
time_transformer, freq_transformer = transformer_block
if self.skip_connection:
# Sum all previous
for j in range(i):
x = x + store[j]
x = rearrange(x, "b t f d -> b f t d")
x, ps = pack([x], "* t d")
if self.use_torch_checkpoint:
x = checkpoint(time_transformer, x, use_reentrant=False)
else:
x = time_transformer(x)
(x,) = unpack(x, ps, "* t d")
x = rearrange(x, "b f t d -> b t f d")
x, ps = pack([x], "* f d")
if self.use_torch_checkpoint:
x = checkpoint(freq_transformer, x, use_reentrant=False)
else:
x = freq_transformer(x)
(x,) = unpack(x, ps, "* f d")
if self.skip_connection:
store[i] = x
x = self.final_norm(x)
num_stems = len(self.mask_estimators)
if self.use_torch_checkpoint:
mask = torch.stack([checkpoint(fn, x, use_reentrant=False) for fn in self.mask_estimators], dim=1)
else:
mask = torch.stack([fn(x) for fn in self.mask_estimators], dim=1)
mask = rearrange(mask, "b n t (f c) -> b n f t c", c=2)
# modulate frequency representation
stft_repr = rearrange(stft_repr, "b f t c -> b 1 f t c")
# complex number multiplication
stft_repr = torch.view_as_complex(stft_repr)
mask = torch.view_as_complex(mask)
stft_repr = stft_repr * mask
# istft
stft_repr = rearrange(stft_repr, "b n (f s) t -> (b n s) f t", s=self.audio_channels)
# same as torch.stft() fix for MacOS MPS above
try:
recon_audio = torch.istft(
stft_repr, **self.stft_kwargs, window=stft_window, return_complex=False, length=raw_audio.shape[-1]
)
except Exception:
recon_audio = torch.istft(
stft_repr.cpu() if x_is_mps else stft_repr,
**self.stft_kwargs,
window=stft_window.cpu() if x_is_mps else stft_window,
return_complex=False,
length=raw_audio.shape[-1],
).to(device)
recon_audio = rearrange(recon_audio, "(b n s) t -> b n s t", s=self.audio_channels, n=num_stems)
if num_stems == 1:
recon_audio = rearrange(recon_audio, "b 1 s t -> b s t")
# if a target is passed in, calculate loss for learning
if not exists(target):
return recon_audio
if self.num_stems > 1:
assert target.ndim == 4 and target.shape[1] == self.num_stems
if target.ndim == 2:
target = rearrange(target, "... t -> ... 1 t")
target = target[..., : recon_audio.shape[-1]] # protect against lost length on istft
loss = F.l1_loss(recon_audio, target)
multi_stft_resolution_loss = 0.0
for window_size in self.multi_stft_resolutions_window_sizes:
res_stft_kwargs = dict(
n_fft=max(window_size, self.multi_stft_n_fft), # not sure what n_fft is across multi resolution stft
win_length=window_size,
return_complex=True,
window=self.multi_stft_window_fn(window_size, device=device),
**self.multi_stft_kwargs,
)
recon_Y = torch.stft(rearrange(recon_audio, "... s t -> (... s) t"), **res_stft_kwargs)
target_Y = torch.stft(rearrange(target, "... s t -> (... s) t"), **res_stft_kwargs)
multi_stft_resolution_loss = multi_stft_resolution_loss + F.l1_loss(recon_Y, target_Y)
weighted_multi_resolution_loss = multi_stft_resolution_loss * self.multi_stft_resolution_loss_weight
total_loss = loss + weighted_multi_resolution_loss
if not return_loss_breakdown:
return total_loss
return total_loss, (loss, multi_stft_resolution_loss) | --- +++ @@ -110,6 +110,9 @@
class LinearAttention(Module):
+ """
+ This flavor of linear attention was proposed in https://arxiv.org/abs/2106.09681 by El-Nouby et al.
+ """
# @beartype
def __init__(self, *, dim, dim_head=32, heads=8, scale=8, flash=False, dropout=0.0):
@@ -438,6 +441,17 @@ self.multi_stft_kwargs = dict(hop_length=multi_stft_hop_size, normalized=multi_stft_normalized)
def forward(self, raw_audio, target=None, return_loss_breakdown=False):
+ """
+ einops
+
+ b - batch
+ f - freq
+ t - time
+ s - audio channel (1 for mono, 2 for stereo)
+ n - number of 'stems'
+ c - complex (2)
+ d - feature dimension
+ """
device = raw_audio.device
@@ -609,4 +623,4 @@ if not return_loss_breakdown:
return total_loss
- return total_loss, (loss, multi_stft_resolution_loss)
+ return total_loss, (loss, multi_stft_resolution_loss)
| https://raw.githubusercontent.com/RVC-Boss/GPT-SoVITS/HEAD/tools/uvr5/bs_roformer/bs_roformer.py |
Create docstrings for each class method | import inspect
from inspect import cleandoc, getdoc, getfile, isclass, ismodule, signature
from typing import Any, Collection, Iterable, Optional, Tuple, Type, Union
from .console import Group, RenderableType
from .control import escape_control_codes
from .highlighter import ReprHighlighter
from .jupyter import JupyterMixin
from .panel import Panel
from .pretty import Pretty
from .table import Table
from .text import Text, TextType
def _first_paragraph(doc: str) -> str:
paragraph, _, _ = doc.partition("\n\n")
return paragraph
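# Editor's note: a tiny usage sketch (not part of the original file).
# Paragraphs are separated by a blank line, so partition("\n\n") keeps only
# the text before the first one:
def _first_paragraph_demo() -> None:
    assert _first_paragraph("Summary.\n\nDetails follow.") == "Summary."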
class Inspect(JupyterMixin):
def __init__(
self,
obj: Any,
*,
title: Optional[TextType] = None,
help: bool = False,
methods: bool = False,
docs: bool = True,
private: bool = False,
dunder: bool = False,
sort: bool = True,
all: bool = True,
value: bool = True,
) -> None:
self.highlighter = ReprHighlighter()
self.obj = obj
self.title = title or self._make_title(obj)
if all:
methods = private = dunder = True
self.help = help
self.methods = methods
self.docs = docs or help
self.private = private or dunder
self.dunder = dunder
self.sort = sort
self.value = value
def _make_title(self, obj: Any) -> Text:
title_str = (
str(obj)
if (isclass(obj) or callable(obj) or ismodule(obj))
else str(type(obj))
)
title_text = self.highlighter(title_str)
return title_text
def __rich__(self) -> Panel:
return Panel.fit(
Group(*self._render()),
title=self.title,
border_style="scope.border",
padding=(0, 1),
)
def _get_signature(self, name: str, obj: Any) -> Optional[Text]:
try:
_signature = str(signature(obj)) + ":"
except ValueError:
_signature = "(...)"
except TypeError:
return None
source_filename: Optional[str] = None
try:
source_filename = getfile(obj)
except (OSError, TypeError):
# OSError is raised if obj has no source file, e.g. when defined in REPL.
pass
callable_name = Text(name, style="inspect.callable")
if source_filename:
callable_name.stylize(f"link file://{source_filename}")
signature_text = self.highlighter(_signature)
qualname = name or getattr(obj, "__qualname__", name)
if not isinstance(qualname, str):
qualname = getattr(obj, "__name__", name)
if not isinstance(qualname, str):
qualname = name
# If obj is a module, there may be classes (which are callable) to display
if inspect.isclass(obj):
prefix = "class"
elif inspect.iscoroutinefunction(obj):
prefix = "async def"
else:
prefix = "def"
qual_signature = Text.assemble(
(f"{prefix} ", f"inspect.{prefix.replace(' ', '_')}"),
(qualname, "inspect.callable"),
signature_text,
)
return qual_signature
def _render(self) -> Iterable[RenderableType]:
def sort_items(item: Tuple[str, Any]) -> Tuple[bool, str]:
key, (_error, value) = item
return (callable(value), key.strip("_").lower())
def safe_getattr(attr_name: str) -> Tuple[Any, Any]:
try:
return (None, getattr(obj, attr_name))
except Exception as error:
return (error, None)
obj = self.obj
keys = dir(obj)
total_items = len(keys)
if not self.dunder:
keys = [key for key in keys if not key.startswith("__")]
if not self.private:
keys = [key for key in keys if not key.startswith("_")]
not_shown_count = total_items - len(keys)
items = [(key, safe_getattr(key)) for key in keys]
if self.sort:
items.sort(key=sort_items)
items_table = Table.grid(padding=(0, 1), expand=False)
items_table.add_column(justify="right")
add_row = items_table.add_row
highlighter = self.highlighter
if callable(obj):
signature = self._get_signature("", obj)
if signature is not None:
yield signature
yield ""
if self.docs:
_doc = self._get_formatted_doc(obj)
if _doc is not None:
doc_text = Text(_doc, style="inspect.help")
doc_text = highlighter(doc_text)
yield doc_text
yield ""
if self.value and not (isclass(obj) or callable(obj) or ismodule(obj)):
yield Panel(
Pretty(obj, indent_guides=True, max_length=10, max_string=60),
border_style="inspect.value.border",
)
yield ""
for key, (error, value) in items:
key_text = Text.assemble(
(
key,
"inspect.attr.dunder" if key.startswith("__") else "inspect.attr",
),
(" =", "inspect.equals"),
)
if error is not None:
warning = key_text.copy()
warning.stylize("inspect.error")
add_row(warning, highlighter(repr(error)))
continue
if callable(value):
if not self.methods:
continue
_signature_text = self._get_signature(key, value)
if _signature_text is None:
add_row(key_text, Pretty(value, highlighter=highlighter))
else:
if self.docs:
docs = self._get_formatted_doc(value)
if docs is not None:
_signature_text.append("\n" if "\n" in docs else " ")
doc = highlighter(docs)
doc.stylize("inspect.doc")
_signature_text.append(doc)
add_row(key_text, _signature_text)
else:
add_row(key_text, Pretty(value, highlighter=highlighter))
if items_table.row_count:
yield items_table
elif not_shown_count:
yield Text.from_markup(
f"[b cyan]{not_shown_count}[/][i] attribute(s) not shown.[/i] "
f"Run [b][magenta]inspect[/]([not b]inspect[/])[/b] for options."
)
def _get_formatted_doc(self, object_: Any) -> Optional[str]:
docs = getdoc(object_)
if docs is None:
return None
docs = cleandoc(docs).strip()
if not self.help:
docs = _first_paragraph(docs)
return escape_control_codes(docs)
def get_object_types_mro(obj: Union[object, Type[Any]]) -> Tuple[type, ...]:
if not hasattr(obj, "__mro__"):
# N.B. we cannot use `if type(obj) is type` here because it doesn't work with
# some types of classes, such as the ones that use abc.ABCMeta.
obj = type(obj)
return getattr(obj, "__mro__", ())
def get_object_types_mro_as_strings(obj: object) -> Collection[str]:
return [
f'{getattr(type_, "__module__", "")}.{getattr(type_, "__qualname__", "")}'
for type_ in get_object_types_mro(obj)
]
def is_object_one_of_types(
obj: object, fully_qualified_types_names: Collection[str]
) -> bool:
for type_name in get_object_types_mro_as_strings(obj):
if type_name in fully_qualified_types_names:
return True
return False | --- +++ @@ -13,11 +13,26 @@
def _first_paragraph(doc: str) -> str:
+ """Get the first paragraph from a docstring."""
paragraph, _, _ = doc.partition("\n\n")
return paragraph
class Inspect(JupyterMixin):
+ """A renderable to inspect any Python Object.
+
+ Args:
+ obj (Any): An object to inspect.
+ title (str, optional): Title to display over inspect result, or None use type. Defaults to None.
+ help (bool, optional): Show full help text rather than just first paragraph. Defaults to False.
+ methods (bool, optional): Enable inspection of callables. Defaults to False.
+ docs (bool, optional): Also render doc strings. Defaults to True.
+ private (bool, optional): Show private attributes (beginning with underscore). Defaults to False.
+ dunder (bool, optional): Show attributes starting with double underscore. Defaults to False.
+ sort (bool, optional): Sort attributes alphabetically, callables at the top, leading and trailing underscores ignored. Defaults to True.
+ all (bool, optional): Show all attributes. Defaults to True.
+ value (bool, optional): Pretty print value of object. Defaults to True.
+ """
def __init__(
self,
@@ -47,6 +62,7 @@ self.value = value
def _make_title(self, obj: Any) -> Text:
+ """Make a default title."""
title_str = (
str(obj)
if (isclass(obj) or callable(obj) or ismodule(obj))
@@ -64,6 +80,7 @@ )
def _get_signature(self, name: str, obj: Any) -> Optional[Text]:
+ """Get a signature for a callable."""
try:
_signature = str(signature(obj)) + ":"
except ValueError:
@@ -106,12 +123,14 @@ return qual_signature
def _render(self) -> Iterable[RenderableType]:
+ """Render object."""
def sort_items(item: Tuple[str, Any]) -> Tuple[bool, str]:
key, (_error, value) = item
return (callable(value), key.strip("_").lower())
def safe_getattr(attr_name: str) -> Tuple[Any, Any]:
+ """Get attribute or any exception."""
try:
return (None, getattr(obj, attr_name))
except Exception as error:
@@ -197,6 +216,18 @@ )
def _get_formatted_doc(self, object_: Any) -> Optional[str]:
+ """
+ Extract the docstring of an object, process it and returns it.
+ The processing consists in cleaning up the docstring's indentation,
+ taking only its 1st paragraph if `self.help` is not True,
+ and escape its control codes.
+
+ Args:
+ object_ (Any): the object to get the docstring from.
+
+ Returns:
+ Optional[str]: the processed docstring, or None if no docstring was found.
+ """
docs = getdoc(object_)
if docs is None:
return None
@@ -207,6 +238,7 @@
def get_object_types_mro(obj: Union[object, Type[Any]]) -> Tuple[type, ...]:
+ """Returns the MRO of an object's class, or of the object itself if it's a class."""
if not hasattr(obj, "__mro__"):
# N.B. we cannot use `if type(obj) is type` here because it doesn't work with
# some types of classes, such as the ones that use abc.ABCMeta.
@@ -215,6 +247,12 @@
def get_object_types_mro_as_strings(obj: object) -> Collection[str]:
+ """
+ Returns the MRO of an object's class as fully qualified names, or of the object itself if it's a class.
+
+ Examples:
+ `get_object_types_mro_as_strings(JSONDecoder)` will return `['json.decoder.JSONDecoder', 'builtins.object']`
+ """
return [
f'{getattr(type_, "__module__", "")}.{getattr(type_, "__qualname__", "")}'
for type_ in get_object_types_mro(obj)
@@ -224,7 +262,11 @@ def is_object_one_of_types(
obj: object, fully_qualified_types_names: Collection[str]
) -> bool:
+ """
+ Returns `True` if the given object's class (or the object itself, if it's a class) has one of the
+ fully qualified names in its MRO.
+ """
for type_name in get_object_types_mro_as_strings(obj):
if type_name in fully_qualified_types_names:
return True
- return False
+ return False
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/_inspect.py |
Add detailed documentation for each class |
import os
from typing import IO, TYPE_CHECKING, Any, Callable, Optional, Union
from ._extension import load_ipython_extension # noqa: F401
__all__ = ["get_console", "reconfigure", "print", "inspect", "print_json"]
if TYPE_CHECKING:
from .console import Console
# Global console used by alternative print
_console: Optional["Console"] = None
try:
_IMPORT_CWD = os.path.abspath(os.getcwd())
except FileNotFoundError:
# Can happen if the cwd has been deleted
_IMPORT_CWD = ""
def get_console() -> "Console":
global _console
if _console is None:
from .console import Console
_console = Console()
return _console
def reconfigure(*args: Any, **kwargs: Any) -> None:
from rich.console import Console
new_console = Console(*args, **kwargs)
_console = get_console()
_console.__dict__ = new_console.__dict__
def print(
*objects: Any,
sep: str = " ",
end: str = "\n",
file: Optional[IO[str]] = None,
flush: bool = False,
) -> None:
from .console import Console
write_console = get_console() if file is None else Console(file=file)
return write_console.print(*objects, sep=sep, end=end)
def print_json(
json: Optional[str] = None,
*,
data: Any = None,
indent: Union[None, int, str] = 2,
highlight: bool = True,
skip_keys: bool = False,
ensure_ascii: bool = False,
check_circular: bool = True,
allow_nan: bool = True,
default: Optional[Callable[[Any], Any]] = None,
sort_keys: bool = False,
) -> None:
get_console().print_json(
json,
data=data,
indent=indent,
highlight=highlight,
skip_keys=skip_keys,
ensure_ascii=ensure_ascii,
check_circular=check_circular,
allow_nan=allow_nan,
default=default,
sort_keys=sort_keys,
)
def inspect(
obj: Any,
*,
console: Optional["Console"] = None,
title: Optional[str] = None,
help: bool = False,
methods: bool = False,
docs: bool = True,
private: bool = False,
dunder: bool = False,
sort: bool = True,
all: bool = False,
value: bool = True,
) -> None:
_console = console or get_console()
from rich._inspect import Inspect
# Special case for inspect(inspect)
is_inspect = obj is inspect
_inspect = Inspect(
obj,
title=title,
help=is_inspect or help,
methods=is_inspect or methods,
docs=is_inspect or docs,
private=private,
dunder=dunder,
sort=sort,
all=all,
value=value,
)
_console.print(_inspect)
if __name__ == "__main__": # pragma: no cover
print("Hello, **World**") | --- +++ @@ -1,3 +1,4 @@+"""Rich text and beautiful formatting in the terminal."""
import os
from typing import IO, TYPE_CHECKING, Any, Callable, Optional, Union
@@ -20,6 +21,12 @@
def get_console() -> "Console":
+ """Get a global :class:`~rich.console.Console` instance. This function is used when Rich requires a Console,
+ and hasn't been explicitly given one.
+
+ Returns:
+ Console: A console instance.
+ """
global _console
if _console is None:
from .console import Console
@@ -30,6 +37,12 @@
def reconfigure(*args: Any, **kwargs: Any) -> None:
+ """Reconfigures the global console by replacing it with another.
+
+ Args:
+ *args (Any): Positional arguments for the replacement :class:`~rich.console.Console`.
+ **kwargs (Any): Keyword arguments for the replacement :class:`~rich.console.Console`.
+ """
from rich.console import Console
new_console = Console(*args, **kwargs)
@@ -44,6 +57,17 @@ file: Optional[IO[str]] = None,
flush: bool = False,
) -> None:
+ r"""Print object(s) supplied via positional arguments.
+ This function has an identical signature to the built-in print.
+ For more advanced features, see the :class:`~rich.console.Console` class.
+
+ Args:
+ sep (str, optional): Separator between printed objects. Defaults to " ".
+ end (str, optional): Character to write at end of output. Defaults to "\\n".
+ file (IO[str], optional): File to write to, or None for stdout. Defaults to None.
+ flush (bool, optional): Has no effect as Rich always flushes output. Defaults to False.
+
+ """
from .console import Console
write_console = get_console() if file is None else Console(file=file)
@@ -63,6 +87,21 @@ default: Optional[Callable[[Any], Any]] = None,
sort_keys: bool = False,
) -> None:
+ """Pretty prints JSON. Output will be valid JSON.
+
+ Args:
+ json (str): A string containing JSON.
+ data (Any): If json is not supplied, then encode this data.
+ indent (int, optional): Number of spaces to indent. Defaults to 2.
+ highlight (bool, optional): Enable highlighting of output: Defaults to True.
+ skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False.
+ ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to False.
+ check_circular (bool, optional): Check for circular references. Defaults to True.
+ allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True.
+ default (Callable, optional): A callable that converts values that cannot be encoded
+ into something that can be JSON encoded. Defaults to None.
+ sort_keys (bool, optional): Sort dictionary keys. Defaults to False.
+ """
get_console().print_json(
json,
@@ -92,6 +131,27 @@ all: bool = False,
value: bool = True,
) -> None:
+ """Inspect any Python object.
+
+ * inspect(<OBJECT>) to see summarized info.
+ * inspect(<OBJECT>, methods=True) to see methods.
+ * inspect(<OBJECT>, help=True) to see full (non-abbreviated) help.
+ * inspect(<OBJECT>, private=True) to see private attributes (single underscore).
+ * inspect(<OBJECT>, dunder=True) to see attributes beginning with double underscore.
+ * inspect(<OBJECT>, all=True) to see all attributes.
+
+ Args:
+ obj (Any): An object to inspect.
+ title (str, optional): Title to display over inspect result, or None to use the type. Defaults to None.
+ help (bool, optional): Show full help text rather than just first paragraph. Defaults to False.
+ methods (bool, optional): Enable inspection of callables. Defaults to False.
+ docs (bool, optional): Also render doc strings. Defaults to True.
+ private (bool, optional): Show private attributes (beginning with underscore). Defaults to False.
+ dunder (bool, optional): Show attributes starting with double underscore. Defaults to False.
+ sort (bool, optional): Sort attributes alphabetically, callables at the top, leading and trailing underscores ignored. Defaults to True.
+ all (bool, optional): Show all attributes. Defaults to False.
+ value (bool, optional): Pretty print value. Defaults to True.
+ """
_console = console or get_console()
from rich._inspect import Inspect
@@ -114,4 +174,4 @@
if __name__ == "__main__": # pragma: no cover
- print("Hello, **World**")+ print("Hello, **World**")
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/__init__.py |
Add professional docstrings to my codebase | from fractions import Fraction
from math import ceil
from typing import cast, List, Optional, Sequence, Protocol
class Edge(Protocol):
size: Optional[int] = None
ratio: int = 1
minimum_size: int = 1
def ratio_resolve(total: int, edges: Sequence[Edge]) -> List[int]:
# Size of edge or None for yet to be determined
sizes = [(edge.size or None) for edge in edges]
_Fraction = Fraction
# While any edges haven't been calculated
while None in sizes:
# Get flexible edges and index to map these back on to sizes list
flexible_edges = [
(index, edge)
for index, (size, edge) in enumerate(zip(sizes, edges))
if size is None
]
# Remaining space in total
remaining = total - sum(size or 0 for size in sizes)
if remaining <= 0:
# No room for flexible edges
return [
((edge.minimum_size or 1) if size is None else size)
for size, edge in zip(sizes, edges)
]
# Calculate number of characters in a ratio portion
portion = _Fraction(
remaining, sum((edge.ratio or 1) for _, edge in flexible_edges)
)
# If any edges will be less than their minimum, replace size with the minimum
for index, edge in flexible_edges:
if portion * edge.ratio <= edge.minimum_size:
sizes[index] = edge.minimum_size
# New fixed size will invalidate calculations, so we need to repeat the process
break
else:
# Distribute flexible space and compensate for rounding error
# Since edge sizes can only be integers we need to add the remainder
# to the following line
remainder = _Fraction(0)
for index, edge in flexible_edges:
size, remainder = divmod(portion * edge.ratio + remainder, 1)
sizes[index] = size
break
# Sizes now contains integers only
return cast(List[int], sizes)
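# Editor's note: a worked example (not part of the original file; _Edge is a
# hypothetical stand-in for the Edge protocol). Fraction-based remainder
# carrying spreads the rounding error so the parts sum exactly to the total:
def _ratio_resolve_demo() -> None:
    from dataclasses import dataclass

    @dataclass
    class _Edge:
        size: Optional[int] = None
        ratio: int = 1
        minimum_size: int = 1

    parts = ratio_resolve(110, [_Edge(), _Edge(), _Edge()])
    assert parts == [36, 37, 37] and sum(parts) == 110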
def ratio_reduce(
total: int, ratios: List[int], maximums: List[int], values: List[int]
) -> List[int]:
ratios = [ratio if _max else 0 for ratio, _max in zip(ratios, maximums)]
total_ratio = sum(ratios)
if not total_ratio:
return values[:]
total_remaining = total
result: List[int] = []
append = result.append
for ratio, maximum, value in zip(ratios, maximums, values):
if ratio and total_ratio > 0:
distributed = min(maximum, round(ratio * total_remaining / total_ratio))
append(value - distributed)
total_remaining -= distributed
total_ratio -= ratio
else:
append(value)
return result
def ratio_distribute(
total: int, ratios: List[int], minimums: Optional[List[int]] = None
) -> List[int]:
if minimums:
ratios = [ratio if _min else 0 for ratio, _min in zip(ratios, minimums)]
total_ratio = sum(ratios)
assert total_ratio > 0, "Sum of ratios must be > 0"
total_remaining = total
distributed_total: List[int] = []
append = distributed_total.append
if minimums is None:
_minimums = [0] * len(ratios)
else:
_minimums = minimums
for ratio, minimum in zip(ratios, _minimums):
if total_ratio > 0:
distributed = max(minimum, ceil(ratio * total_remaining / total_ratio))
else:
distributed = total_remaining
append(distributed)
total_ratio -= ratio
total_remaining -= distributed
return distributed_total
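# Editor's note: a worked example (not part of the original file).
# ratio_distribute rounds up with ceil against the remaining space, so
# earlier slots can receive the extra unit:
def _ratio_distribute_demo() -> None:
    assert ratio_distribute(10, [1, 2]) == [4, 6]  # ceil(10/3) = 4, then the rest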
if __name__ == "__main__":
from dataclasses import dataclass
@dataclass
class E:
size: Optional[int] = None
ratio: int = 1
minimum_size: int = 1
resolved = ratio_resolve(110, [E(None, 1, 1), E(None, 1, 1), E(None, 1, 1)])
print(sum(resolved)) | --- +++ @@ -4,6 +4,7 @@
class Edge(Protocol):
+ """Any object that defines an edge (such as Layout)."""
size: Optional[int] = None
ratio: int = 1
@@ -11,6 +12,21 @@
def ratio_resolve(total: int, edges: Sequence[Edge]) -> List[int]:
+ """Divide total space to satisfy size, ratio, and minimum_size, constraints.
+
+ The returned list of integers should add up to total in most cases, unless it is
+ impossible to satisfy all the constraints. For instance, if there are two edges
+ with a minimum size of 20 each and `total` is 30 then the returned list will be
+ greater than total. In practice, this would mean that a Layout object would
+ clip the rows that would overflow the screen height.
+
+ Args:
+ total (int): Total number of characters.
+ edges (List[Edge]): Edges within total space.
+
+ Returns:
+ List[int]: Number of characters for each edge.
+ """
# Size of edge or None for yet to be determined
sizes = [(edge.size or None) for edge in edges]
@@ -59,6 +75,17 @@ def ratio_reduce(
total: int, ratios: List[int], maximums: List[int], values: List[int]
) -> List[int]:
+ """Divide an integer total in to parts based on ratios.
+
+ Args:
+ total (int): The total to divide.
+ ratios (List[int]): A list of integer ratios.
+ maximums (List[int]): List of maximums values for each slot.
+ values (List[int]): List of values
+
+ Returns:
+ List[int]: A list of integers guaranteed to sum to total.
+ """
ratios = [ratio if _max else 0 for ratio, _max in zip(ratios, maximums)]
total_ratio = sum(ratios)
if not total_ratio:
@@ -80,6 +107,16 @@ def ratio_distribute(
total: int, ratios: List[int], minimums: Optional[List[int]] = None
) -> List[int]:
+ """Distribute an integer total in to parts based on ratios.
+
+ Args:
+ total (int): The total to divide.
+ ratios (List[int]): A list of integer ratios.
+ minimums (List[int]): List of minimum values for each slot.
+
+ Returns:
+ List[int]: A list of integers guaranteed to sum to total.
+ """
if minimums:
ratios = [ratio if _min else 0 for ratio, _min in zip(ratios, minimums)]
total_ratio = sum(ratios)
@@ -113,4 +150,4 @@ minimum_size: int = 1
resolved = ratio_resolve(110, [E(None, 1, 1), E(None, 1, 1), E(None, 1, 1)])
- print(sum(resolved))
+ print(sum(resolved))
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/_ratio.py |
Replace inline comments with docstrings | from typing import Iterable, Tuple, TypeVar
T = TypeVar("T")
def loop_first(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
iter_values = iter(values)
try:
value = next(iter_values)
except StopIteration:
return
yield True, value
for value in iter_values:
yield False, value
def loop_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
iter_values = iter(values)
try:
previous_value = next(iter_values)
except StopIteration:
return
for value in iter_values:
yield False, previous_value
previous_value = value
yield True, previous_value
def loop_first_last(values: Iterable[T]) -> Iterable[Tuple[bool, bool, T]]:
iter_values = iter(values)
try:
previous_value = next(iter_values)
except StopIteration:
return
first = True
for value in iter_values:
yield first, False, previous_value
first = False
previous_value = value
yield first, True, previous_value | --- +++ @@ -4,6 +4,7 @@
def loop_first(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
+ """Iterate and generate a tuple with a flag for first value."""
iter_values = iter(values)
try:
value = next(iter_values)
@@ -15,6 +16,7 @@
def loop_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
+ """Iterate and generate a tuple with a flag for last value."""
iter_values = iter(values)
try:
previous_value = next(iter_values)
@@ -27,6 +29,7 @@
def loop_first_last(values: Iterable[T]) -> Iterable[Tuple[bool, bool, T]]:
+ """Iterate and generate a tuple with a flag for first and last value."""
iter_values = iter(values)
try:
previous_value = next(iter_values)
@@ -37,4 +40,4 @@ yield first, False, previous_value
first = False
previous_value = value
- yield first, True, previous_value
+ yield first, True, previous_value
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/_loop.py |
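A usage sketch for loop_last (again a private module, so the import path is an implementation detail); the boolean flag makes separator handling trivial:

from rich._loop import loop_last

# Join words with commas, skipping the separator after the last item.
for is_last, word in loop_last(["red", "green", "blue"]):
    print(word, end="" if is_last else ", ")
print()  # -> red, green, blue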
Add docstrings to make code maintainable | from __future__ import annotations
from functools import lru_cache
from operator import itemgetter
from typing import Callable, NamedTuple, Sequence, Tuple
from rich._unicode_data import load as load_cell_table
CellSpan = Tuple[int, int, int]
_span_get_cell_len = itemgetter(2)
# Ranges of unicode ordinals that produce a 1-cell wide character
# This is non-exhaustive, but covers most common Western characters
_SINGLE_CELL_UNICODE_RANGES: list[tuple[int, int]] = [
(0x20, 0x7E), # Latin (excluding non-printable)
(0xA0, 0xAC),
(0xAE, 0x002FF),
(0x00370, 0x00482), # Greek / Cyrillic
(0x02500, 0x025FC), # Box drawing, box elements, geometric shapes
(0x02800, 0x028FF), # Braille
]
# A frozen set of characters that are a single cell wide
_SINGLE_CELLS = frozenset(
[
character
for _start, _end in _SINGLE_CELL_UNICODE_RANGES
for character in map(chr, range(_start, _end + 1))
]
)
# When called with a string this will return True if all
# characters are single-cell, otherwise False
_is_single_cell_widths: Callable[[str], bool] = _SINGLE_CELLS.issuperset
class CellTable(NamedTuple):
unicode_version: str
widths: Sequence[tuple[int, int, int]]
narrow_to_wide: frozenset[str]
@lru_cache(maxsize=4096)
def get_character_cell_size(character: str, unicode_version: str = "auto") -> int:
codepoint = ord(character)
if codepoint and codepoint < 32 or 0x07F <= codepoint < 0x0A0:
return 0
table = load_cell_table(unicode_version).widths
last_entry = table[-1]
if codepoint > last_entry[1]:
return 1
lower_bound = 0
upper_bound = len(table) - 1
while lower_bound <= upper_bound:
index = (lower_bound + upper_bound) >> 1
start, end, width = table[index]
if codepoint < start:
upper_bound = index - 1
elif codepoint > end:
lower_bound = index + 1
else:
return width
return 1
@lru_cache(4096)
def cached_cell_len(text: str, unicode_version: str = "auto") -> int:
return _cell_len(text, unicode_version)
def cell_len(text: str, unicode_version: str = "auto") -> int:
if len(text) < 512:
return cached_cell_len(text, unicode_version)
return _cell_len(text, unicode_version)
def _cell_len(text: str, unicode_version: str) -> int:
if _is_single_cell_widths(text):
return len(text)
# "\u200d" is zero width joiner
# "\ufe0f" is variation selector 16
if "\u200d" not in text and "\ufe0f" not in text:
# Simplest case with no unicode stuff that changes the size
return sum(
get_character_cell_size(character, unicode_version) for character in text
)
cell_table = load_cell_table(unicode_version)
total_width = 0
last_measured_character: str | None = None
SPECIAL = {"\u200d", "\ufe0f"}
index = 0
character_count = len(text)
while index < character_count:
character = text[index]
if character in SPECIAL:
if character == "\u200d":
index += 1
elif last_measured_character:
total_width += last_measured_character in cell_table.narrow_to_wide
last_measured_character = None
else:
if character_width := get_character_cell_size(character, unicode_version):
last_measured_character = character
total_width += character_width
index += 1
return total_width
def split_graphemes(
text: str, unicode_version: str = "auto"
) -> "tuple[list[CellSpan], int]":
cell_table = load_cell_table(unicode_version)
codepoint_count = len(text)
index = 0
last_measured_character: str | None = None
total_width = 0
spans: list[tuple[int, int, int]] = []
SPECIAL = {"\u200d", "\ufe0f"}
while index < codepoint_count:
if (character := text[index]) in SPECIAL:
if not spans:
# ZWJ or variation selector at the beginning of the string doesn't really make sense.
# But handle it, we must.
spans.append((index, index := index + 1, 0))
continue
if character == "\u200d":
# zero width joiner
# The condition handles the case where a ZWJ is at the end of the string, and has nothing to join
index += 2 if index < (codepoint_count - 1) else 1
start, _end, cell_length = spans[-1]
spans[-1] = (start, index, cell_length)
else:
# variation selector 16
index += 1
if last_measured_character:
start, _end, cell_length = spans[-1]
if last_measured_character in cell_table.narrow_to_wide:
last_measured_character = None
cell_length += 1
total_width += 1
spans[-1] = (start, index, cell_length)
else:
# No previous character to change the size of.
# Shouldn't occur in practice.
# But handle it, we must.
start, _end, cell_length = spans[-1]
spans[-1] = (start, index, cell_length)
continue
if character_width := get_character_cell_size(character, unicode_version):
last_measured_character = character
spans.append((index, index := index + 1, character_width))
total_width += character_width
else:
# Character has zero width
if spans:
# zero width characters are associated with the previous character
start, _end, cell_length = spans[-1]
spans[-1] = (start, index := index + 1, cell_length)
else:
# A zero width character with no prior spans
spans.append((index, index := index + 1, 0))
return (spans, total_width)
def _split_text(
text: str, cell_position: int, unicode_version: str = "auto"
) -> tuple[str, str]:
if cell_position <= 0:
return "", text
spans, cell_length = split_graphemes(text, unicode_version)
# Guess initial offset
offset = int((cell_position / cell_length) * len(spans))
left_size = sum(map(_span_get_cell_len, spans[:offset]))
while True:
if left_size == cell_position:
if offset >= len(spans):
return text, ""
split_index = spans[offset][0]
return text[:split_index], text[split_index:]
if left_size < cell_position:
start, end, cell_size = spans[offset]
if left_size + cell_size > cell_position:
return text[:start] + " ", " " + text[end:]
offset += 1
left_size += cell_size
else: # left_size > cell_position
start, end, cell_size = spans[offset - 1]
if left_size - cell_size < cell_position:
return text[:start] + " ", " " + text[end:]
offset -= 1
left_size -= cell_size
def split_text(
text: str, cell_position: int, unicode_version: str = "auto"
) -> tuple[str, str]:
if _is_single_cell_widths(text):
return text[:cell_position], text[cell_position:]
return _split_text(text, cell_position, unicode_version)
def set_cell_size(text: str, total: int, unicode_version: str = "auto") -> str:
if _is_single_cell_widths(text):
size = len(text)
if size < total:
return text + " " * (total - size)
return text[:total]
if total <= 0:
return ""
cell_size = cell_len(text)
if cell_size == total:
return text
if cell_size < total:
return text + " " * (total - cell_size)
text, _ = _split_text(text, total, unicode_version)
return text
def chop_cells(text: str, width: int, unicode_version: str = "auto") -> list[str]:
if _is_single_cell_widths(text):
return [text[index : index + width] for index in range(0, len(text), width)]
spans, _ = split_graphemes(text, unicode_version)
line_size = 0 # Size of line in cells
lines: list[str] = []
line_offset = 0 # Offset (in codepoints) of start of line
for start, end, cell_size in spans:
if line_size + cell_size > width:
lines.append(text[line_offset:start])
line_offset = start
line_size = 0
line_size += cell_size
if line_size:
lines.append(text[line_offset:])
return lines | --- +++ @@ -36,6 +36,7 @@
class CellTable(NamedTuple):
+ """Contains unicode data required to measure the cell widths of glyphs."""
unicode_version: str
widths: Sequence[tuple[int, int, int]]
@@ -44,6 +45,15 @@
@lru_cache(maxsize=4096)
def get_character_cell_size(character: str, unicode_version: str = "auto") -> int:
+ """Get the cell size of a character.
+
+ Args:
+ character (str): A single character.
+ unicode_version: Unicode version, `"auto"` to auto detect, `"latest"` for the latest unicode version.
+
+ Returns:
+ int: Number of cells (0, 1 or 2) occupied by that character.
+ """
codepoint = ord(character)
if codepoint and codepoint < 32 or 0x07F <= codepoint < 0x0A0:
return 0
@@ -70,16 +80,46 @@
@lru_cache(4096)
def cached_cell_len(text: str, unicode_version: str = "auto") -> int:
+ """Get the number of cells required to display text.
+
+ This method always caches, which may use up a lot of memory. It is recommended to use
+ `cell_len` over this method.
+
+ Args:
+ text (str): Text to display.
+ unicode_version: Unicode version, `"auto"` to auto detect, `"latest"` for the latest unicode version.
+
+ Returns:
+ int: The number of cells required to display text.
+ """
return _cell_len(text, unicode_version)
def cell_len(text: str, unicode_version: str = "auto") -> int:
+ """Get the cell length of a string (length as it appears in the terminal).
+
+ Args:
+ text: String to measure.
+ unicode_version: Unicode version, `"auto"` to auto detect, `"latest"` for the latest unicode version.
+
+ Returns:
+ Length of string in terminal cells.
+ """
if len(text) < 512:
return cached_cell_len(text, unicode_version)
return _cell_len(text, unicode_version)
def _cell_len(text: str, unicode_version: str) -> int:
+ """Get the cell length of a string (length as it appears in the terminal).
+
+ Args:
+ text: String to measure.
+ unicode_version: Unicode version, `"auto"` to auto detect, `"latest"` for the latest unicode version.
+
+ Returns:
+ Length of string in terminal cells.
+ """
if _is_single_cell_widths(text):
return len(text)
@@ -121,6 +161,20 @@ def split_graphemes(
text: str, unicode_version: str = "auto"
) -> "tuple[list[CellSpan], int]":
+ """Divide text into spans that define a single grapheme, and additionally return the cell length of the whole string.
+
+ The returned spans will cover every index in the string, with no gaps. It is possible for some graphemes to have a cell length of zero.
+ This can occur for nonsense strings like two zero width joiners, or for control codes that don't contribute to the grapheme size.
+
+ Args:
+ text: String to split.
+ unicode_version: Unicode version, `"auto"` to auto detect, `"latest"` for the latest unicode version.
+
+ Returns:
+ A tuple of a list of *spans* and the cell length of the entire string. A span is a tuple
+ of three values consisting of (<START>, <END>, <CELL LENGTH>), where START and END are string indices,
+ and CELL LENGTH is the cell length of the single grapheme.
+ """
cell_table = load_cell_table(unicode_version)
codepoint_count = len(text)
@@ -181,6 +235,18 @@ def _split_text(
text: str, cell_position: int, unicode_version: str = "auto"
) -> tuple[str, str]:
+ """Split text by cell position.
+
+ If the cell position falls within a double width character, it is converted to two spaces.
+
+ Args:
+ text: Text to split.
+ cell_position: Offset in cells.
+ unicode_version: Unicode version, `"auto"` to auto detect, `"latest"` for the latest unicode version.
+
+ Returns:
+ Tuple of two split strings.
+ """
if cell_position <= 0:
return "", text
@@ -213,12 +279,34 @@ def split_text(
text: str, cell_position: int, unicode_version: str = "auto"
) -> tuple[str, str]:
+ """Split text by cell position.
+
+ If the cell position falls within a double width character, it is converted to two spaces.
+
+ Args:
+ text: Text to split.
+ cell_position: Offset in cells.
+ unicode_version: Unicode version, `"auto"` to auto detect, `"latest"` for the latest unicode version.
+
+ Returns:
+ Tuple of two split strings.
+ """
if _is_single_cell_widths(text):
return text[:cell_position], text[cell_position:]
return _split_text(text, cell_position, unicode_version)
def set_cell_size(text: str, total: int, unicode_version: str = "auto") -> str:
+ """Adjust a string by cropping or padding with spaces such that it fits within the given number of cells.
+
+ Args:
+ text: String to adjust.
+ total: Desired size in cells.
+ unicode_version: Unicode version.
+
+ Returns:
+ A string with cell size equal to total.
+ """
if _is_single_cell_widths(text):
size = len(text)
if size < total:
@@ -236,6 +324,16 @@
def chop_cells(text: str, width: int, unicode_version: str = "auto") -> list[str]:
+ """Split text into lines such that each line fits within the available (cell) width.
+
+ Args:
+ text: The text to fold such that it fits in the given width.
+ width: The width available (number of cells).
+ unicode_version: Unicode version, `"auto"` to auto detect, `"latest"` for the latest unicode version.
+
+ Returns:
+ A list of strings such that each string in the list has cell width
+ less than or equal to the available width.
+ """
if _is_single_cell_widths(text):
return [text[index : index + width] for index in range(0, len(text), width)]
spans, _ = split_graphemes(text, unicode_version)
@@ -251,4 +349,4 @@ if line_size:
lines.append(text[line_offset:])
- return lines
+ return lines
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/cells.py |
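A short sketch of the public cell-measuring helpers documented above; the CJK examples rely only on those glyphs being double-width:

from rich.cells import cell_len, chop_cells, set_cell_size

print(cell_len("hello"))          # 5 - ASCII characters are one cell each
print(cell_len("你好"))            # 4 - each CJK glyph occupies two cells
print(set_cell_size("你好", 3))    # "你 " - the crop lands inside a glyph, so it pads with a space
print(chop_cells("你好世界", 4))    # ["你好", "世界"] - folds at cell width, not character count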
Add concise docstrings to each method | from typing import NamedTuple, Tuple
class ColorTriplet(NamedTuple):
red: int
"""Red component in 0 to 255 range."""
green: int
"""Green component in 0 to 255 range."""
blue: int
"""Blue component in 0 to 255 range."""
@property
def hex(self) -> str:
red, green, blue = self
return f"#{red:02x}{green:02x}{blue:02x}"
@property
def rgb(self) -> str:
red, green, blue = self
return f"rgb({red},{green},{blue})"
@property
def normalized(self) -> Tuple[float, float, float]:
red, green, blue = self
return red / 255.0, green / 255.0, blue / 255.0 | --- +++ @@ -2,6 +2,7 @@
class ColorTriplet(NamedTuple):
+ """The red, green, and blue components of a color."""
red: int
"""Red component in 0 to 255 range."""
@@ -12,15 +13,26 @@
@property
def hex(self) -> str:
+ """get the color triplet in CSS style."""
red, green, blue = self
return f"#{red:02x}{green:02x}{blue:02x}"
@property
def rgb(self) -> str:
+ """The color in RGB format.
+
+ Returns:
+ str: An rgb color, e.g. ``"rgb(100,23,255)"``.
+ """
red, green, blue = self
return f"rgb({red},{green},{blue})"
@property
def normalized(self) -> Tuple[float, float, float]:
+ """Convert components into floats between 0 and 1.
+
+ Returns:
+ Tuple[float, float, float]: A tuple of three normalized color components.
+ """
red, green, blue = self
- return red / 255.0, green / 255.0, blue / 255.0
+ return red / 255.0, green / 255.0, blue / 255.0
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/color_triplet.py |
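The three properties in action (the triplet values are arbitrary, chosen to match the rgb docstring example):

from rich.color_triplet import ColorTriplet

triplet = ColorTriplet(100, 23, 255)
print(triplet.hex)         # #6417ff - CSS-style hex string
print(triplet.rgb)         # rgb(100,23,255)
print(triplet.normalized)  # (0.392..., 0.090..., 1.0) - components scaled to 0..1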
Add detailed documentation for each class | import re
import sys
from contextlib import suppress
from typing import Iterable, NamedTuple, Optional
from .color import Color
from .style import Style
from .text import Text
re_ansi = re.compile(
r"""
(?:\x1b[0-?])|
(?:\x1b\](.*?)\x1b\\)|
(?:\x1b([(@-Z\\-_]|\[[0-?]*[ -/]*[@-~]))
""",
re.VERBOSE,
)
class _AnsiToken(NamedTuple):
plain: str = ""
sgr: Optional[str] = ""
osc: Optional[str] = ""
def _ansi_tokenize(ansi_text: str) -> Iterable[_AnsiToken]:
position = 0
sgr: Optional[str]
osc: Optional[str]
for match in re_ansi.finditer(ansi_text):
start, end = match.span(0)
osc, sgr = match.groups()
if start > position:
yield _AnsiToken(ansi_text[position:start])
if sgr:
if sgr == "(":
position = end + 1
continue
if sgr.endswith("m"):
yield _AnsiToken("", sgr[1:-1], osc)
else:
yield _AnsiToken("", sgr, osc)
position = end
if position < len(ansi_text):
yield _AnsiToken(ansi_text[position:])
SGR_STYLE_MAP = {
1: "bold",
2: "dim",
3: "italic",
4: "underline",
5: "blink",
6: "blink2",
7: "reverse",
8: "conceal",
9: "strike",
21: "underline2",
22: "not dim not bold",
23: "not italic",
24: "not underline",
25: "not blink",
26: "not blink2",
27: "not reverse",
28: "not conceal",
29: "not strike",
30: "color(0)",
31: "color(1)",
32: "color(2)",
33: "color(3)",
34: "color(4)",
35: "color(5)",
36: "color(6)",
37: "color(7)",
39: "default",
40: "on color(0)",
41: "on color(1)",
42: "on color(2)",
43: "on color(3)",
44: "on color(4)",
45: "on color(5)",
46: "on color(6)",
47: "on color(7)",
49: "on default",
51: "frame",
52: "encircle",
53: "overline",
54: "not frame not encircle",
55: "not overline",
90: "color(8)",
91: "color(9)",
92: "color(10)",
93: "color(11)",
94: "color(12)",
95: "color(13)",
96: "color(14)",
97: "color(15)",
100: "on color(8)",
101: "on color(9)",
102: "on color(10)",
103: "on color(11)",
104: "on color(12)",
105: "on color(13)",
106: "on color(14)",
107: "on color(15)",
}
class AnsiDecoder:
def __init__(self) -> None:
self.style = Style.null()
def decode(self, terminal_text: str) -> Iterable[Text]:
for line in terminal_text.splitlines():
yield self.decode_line(line)
def decode_line(self, line: str) -> Text:
from_ansi = Color.from_ansi
from_rgb = Color.from_rgb
_Style = Style
text = Text()
append = text.append
line = line.rsplit("\r", 1)[-1]
for plain_text, sgr, osc in _ansi_tokenize(line):
if plain_text:
append(plain_text, self.style or None)
elif osc is not None:
if osc.startswith("8;"):
_params, semicolon, link = osc[2:].partition(";")
if semicolon:
self.style = self.style.update_link(link or None)
elif sgr is not None:
# Translate in to semi-colon separated codes
# Ignore invalid codes, because we want to be lenient
codes = [
min(255, int(_code) if _code else 0)
for _code in sgr.split(";")
if _code.isdigit() or _code == ""
]
iter_codes = iter(codes)
for code in iter_codes:
if code == 0:
# reset
self.style = _Style.null()
elif code in SGR_STYLE_MAP:
# styles
self.style += _Style.parse(SGR_STYLE_MAP[code])
elif code == 38:
# Foreground
with suppress(StopIteration):
color_type = next(iter_codes)
if color_type == 5:
self.style += _Style.from_color(
from_ansi(next(iter_codes))
)
elif color_type == 2:
self.style += _Style.from_color(
from_rgb(
next(iter_codes),
next(iter_codes),
next(iter_codes),
)
)
elif code == 48:
# Background
with suppress(StopIteration):
color_type = next(iter_codes)
if color_type == 5:
self.style += _Style.from_color(
None, from_ansi(next(iter_codes))
)
elif color_type == 2:
self.style += _Style.from_color(
None,
from_rgb(
next(iter_codes),
next(iter_codes),
next(iter_codes),
),
)
return text
if sys.platform != "win32" and __name__ == "__main__": # pragma: no cover
import io
import os
import pty
import sys
decoder = AnsiDecoder()
stdout = io.BytesIO()
def read(fd: int) -> bytes:
data = os.read(fd, 1024)
stdout.write(data)
return data
pty.spawn(sys.argv[1:], read)
from .console import Console
console = Console(record=True)
stdout_result = stdout.getvalue().decode("utf-8")
print(stdout_result)
for line in decoder.decode(stdout_result):
console.print(line)
console.save_html("stdout.html") | --- +++ @@ -18,6 +18,7 @@
class _AnsiToken(NamedTuple):
+ """Result of ansi tokenized string."""
plain: str = ""
sgr: Optional[str] = ""
@@ -25,6 +26,14 @@
def _ansi_tokenize(ansi_text: str) -> Iterable[_AnsiToken]:
+ """Tokenize a string in to plain text and ANSI codes.
+
+ Args:
+ ansi_text (str): A String containing ANSI codes.
+
+ Yields:
+ AnsiToken: A named tuple of (plain, sgr, osc)
+ """
position = 0
sgr: Optional[str]
@@ -109,15 +118,32 @@
class AnsiDecoder:
+ """Translate ANSI code in to styled Text."""
def __init__(self) -> None:
self.style = Style.null()
def decode(self, terminal_text: str) -> Iterable[Text]:
+ """Decode ANSI codes in an iterable of lines.
+
+ Args:
+ lines (Iterable[str]): An iterable of lines of terminal output.
+
+ Yields:
+ Text: Marked up Text.
+ """
for line in terminal_text.splitlines():
yield self.decode_line(line)
def decode_line(self, line: str) -> Text:
+ """Decode a line containing ansi codes.
+
+ Args:
+ line (str): A line of terminal output.
+
+ Returns:
+ Text: A Text instance marked up according to ansi codes.
+ """
from_ansi = Color.from_ansi
from_rgb = Color.from_rgb
_Style = Style
@@ -212,4 +238,4 @@ for line in decoder.decode(stdout_result):
console.print(line)
- console.save_html("stdout.html")+ console.save_html("stdout.html")
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/ansi.py |
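A minimal sketch of AnsiDecoder in use; the escape sequences are standard SGR codes rather than anything rich-specific:

from rich.ansi import AnsiDecoder
from rich.console import Console

console = Console()
decoder = AnsiDecoder()
# "\x1b[1;31m" switches to bold red; "\x1b[0m" resets all attributes.
for text_line in decoder.decode("\x1b[1;31mhello\x1b[0m world"):
    console.print(text_line)  # a Text instance with "hello" styled bold red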
Write docstrings for algorithm functions | from itertools import chain
from typing import TYPE_CHECKING, Iterable, Optional, Literal
from .constrain import Constrain
from .jupyter import JupyterMixin
from .measure import Measurement
from .segment import Segment
from .style import StyleType
if TYPE_CHECKING:
from .console import Console, ConsoleOptions, RenderableType, RenderResult
AlignMethod = Literal["left", "center", "right"]
VerticalAlignMethod = Literal["top", "middle", "bottom"]
class Align(JupyterMixin):
def __init__(
self,
renderable: "RenderableType",
align: AlignMethod = "left",
style: Optional[StyleType] = None,
*,
vertical: Optional[VerticalAlignMethod] = None,
pad: bool = True,
width: Optional[int] = None,
height: Optional[int] = None,
) -> None:
if align not in ("left", "center", "right"):
raise ValueError(
f'invalid value for align, expected "left", "center", or "right" (not {align!r})'
)
if vertical is not None and vertical not in ("top", "middle", "bottom"):
raise ValueError(
f'invalid value for vertical, expected "top", "middle", or "bottom" (not {vertical!r})'
)
self.renderable = renderable
self.align = align
self.style = style
self.vertical = vertical
self.pad = pad
self.width = width
self.height = height
def __repr__(self) -> str:
return f"Align({self.renderable!r}, {self.align!r})"
@classmethod
def left(
cls,
renderable: "RenderableType",
style: Optional[StyleType] = None,
*,
vertical: Optional[VerticalAlignMethod] = None,
pad: bool = True,
width: Optional[int] = None,
height: Optional[int] = None,
) -> "Align":
return cls(
renderable,
"left",
style=style,
vertical=vertical,
pad=pad,
width=width,
height=height,
)
@classmethod
def center(
cls,
renderable: "RenderableType",
style: Optional[StyleType] = None,
*,
vertical: Optional[VerticalAlignMethod] = None,
pad: bool = True,
width: Optional[int] = None,
height: Optional[int] = None,
) -> "Align":
return cls(
renderable,
"center",
style=style,
vertical=vertical,
pad=pad,
width=width,
height=height,
)
@classmethod
def right(
cls,
renderable: "RenderableType",
style: Optional[StyleType] = None,
*,
vertical: Optional[VerticalAlignMethod] = None,
pad: bool = True,
width: Optional[int] = None,
height: Optional[int] = None,
) -> "Align":
return cls(
renderable,
"right",
style=style,
vertical=vertical,
pad=pad,
width=width,
height=height,
)
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
align = self.align
width = console.measure(self.renderable, options=options).maximum
rendered = console.render(
Constrain(
self.renderable, width if self.width is None else min(width, self.width)
),
options.update(height=None),
)
lines = list(Segment.split_lines(rendered))
width, height = Segment.get_shape(lines)
lines = Segment.set_shape(lines, width, height)
new_line = Segment.line()
excess_space = options.max_width - width
style = console.get_style(self.style) if self.style is not None else None
def generate_segments() -> Iterable[Segment]:
if excess_space <= 0:
# Exact fit
for line in lines:
yield from line
yield new_line
elif align == "left":
# Pad on the right
pad = Segment(" " * excess_space, style) if self.pad else None
for line in lines:
yield from line
if pad:
yield pad
yield new_line
elif align == "center":
# Pad left and right
left = excess_space // 2
pad = Segment(" " * left, style)
pad_right = (
Segment(" " * (excess_space - left), style) if self.pad else None
)
for line in lines:
if left:
yield pad
yield from line
if pad_right:
yield pad_right
yield new_line
elif align == "right":
# Padding on left
pad = Segment(" " * excess_space, style)
for line in lines:
yield pad
yield from line
yield new_line
blank_line = (
Segment(f"{' ' * (self.width or options.max_width)}\n", style)
if self.pad
else Segment("\n")
)
def blank_lines(count: int) -> Iterable[Segment]:
if count > 0:
for _ in range(count):
yield blank_line
vertical_height = self.height or options.height
iter_segments: Iterable[Segment]
if self.vertical and vertical_height is not None:
if self.vertical == "top":
bottom_space = vertical_height - height
iter_segments = chain(generate_segments(), blank_lines(bottom_space))
elif self.vertical == "middle":
top_space = (vertical_height - height) // 2
bottom_space = vertical_height - top_space - height
iter_segments = chain(
blank_lines(top_space),
generate_segments(),
blank_lines(bottom_space),
)
else: # self.vertical == "bottom":
top_space = vertical_height - height
iter_segments = chain(blank_lines(top_space), generate_segments())
else:
iter_segments = generate_segments()
if self.style:
style = console.get_style(self.style)
iter_segments = Segment.apply_style(iter_segments, style)
yield from iter_segments
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> Measurement:
measurement = Measurement.get(console, options, self.renderable)
return measurement
class VerticalCenter(JupyterMixin):
def __init__(
self,
renderable: "RenderableType",
style: Optional[StyleType] = None,
) -> None:
self.renderable = renderable
self.style = style
def __repr__(self) -> str:
return f"VerticalCenter({self.renderable!r})"
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
style = console.get_style(self.style) if self.style is not None else None
lines = console.render_lines(
self.renderable, options.update(height=None), pad=False
)
width, _height = Segment.get_shape(lines)
new_line = Segment.line()
height = options.height or options.size.height
top_space = (height - len(lines)) // 2
bottom_space = height - top_space - len(lines)
blank_line = Segment(f"{' ' * width}", style)
def blank_lines(count: int) -> Iterable[Segment]:
for _ in range(count):
yield blank_line
yield new_line
if top_space > 0:
yield from blank_lines(top_space)
for line in lines:
yield from line
yield new_line
if bottom_space > 0:
yield from blank_lines(bottom_space)
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> Measurement:
measurement = Measurement.get(console, options, self.renderable)
return measurement
if __name__ == "__main__": # pragma: no cover
from rich.console import Console, Group
from rich.highlighter import ReprHighlighter
from rich.panel import Panel
highlighter = ReprHighlighter()
console = Console()
panel = Panel(
Group(
Align.left(highlighter("align='left'")),
Align.center(highlighter("align='center'")),
Align.right(highlighter("align='right'")),
),
width=60,
style="on dark_blue",
title="Align",
)
console.print(
Align.center(panel, vertical="middle", style="on red", height=console.height)
) | --- +++ @@ -15,6 +15,34 @@
class Align(JupyterMixin):
+ """Align a renderable by adding spaces if necessary.
+
+ Args:
+ renderable (RenderableType): A console renderable.
+ align (AlignMethod): One of "left", "center", or "right".
+ style (StyleType, optional): An optional style to apply to the background.
+ vertical (Optional[VerticalAlignMethod], optional): Optional vertical align, one of "top", "middle", or "bottom". Defaults to None.
+ pad (bool, optional): Pad the right with spaces. Defaults to True.
+ width (int, optional): Restrict contents to given width, or None to use default width. Defaults to None.
+ height (int, optional): Set height of align renderable, or None to fit to contents. Defaults to None.
+
+ Raises:
+ ValueError: if ``align`` is not one of the expected values.
+
+ Example:
+ .. code-block:: python
+
+ from rich.console import Console
+ from rich.align import Align
+ from rich.panel import Panel
+
+ console = Console()
+ # Create a panel 20 characters wide
+ p = Panel("Hello, [b]World[/b]!", style="on green", width=20)
+
+ # Renders the panel centered in the terminal
+ console.print(Align(p, align="center"))
+ """
def __init__(
self,
@@ -57,6 +85,7 @@ width: Optional[int] = None,
height: Optional[int] = None,
) -> "Align":
+ """Align a renderable to the left."""
return cls(
renderable,
"left",
@@ -78,6 +107,7 @@ width: Optional[int] = None,
height: Optional[int] = None,
) -> "Align":
+ """Align a renderable to the center."""
return cls(
renderable,
"center",
@@ -99,6 +129,7 @@ width: Optional[int] = None,
height: Optional[int] = None,
) -> "Align":
+ """Align a renderable to the right."""
return cls(
renderable,
"right",
@@ -209,6 +240,16 @@
class VerticalCenter(JupyterMixin):
+ """Vertically aligns a renderable.
+
+ Warn:
+ This class is deprecated and may be removed in a future version. Use Align class with
+ `vertical="middle"`.
+
+ Args:
+ renderable (RenderableType): A renderable object.
+ style (StyleType, optional): An optional style to apply to the background. Defaults to None.
+ """
def __init__(
self,
@@ -276,4 +317,4 @@
console.print(
Align.center(panel, vertical="middle", style="on red", height=console.height)
- )
+ )
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/align.py |
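Beyond the Panel example in the docstring, a bare sketch of horizontal and vertical alignment:

from rich.align import Align
from rich.console import Console

console = Console()
console.print(Align("right-aligned", align="right"))
# Vertical alignment needs an explicit height to align within:
console.print(Align("dead center", align="center", vertical="middle", height=5))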
Document my Python code with docstrings | from typing import List, TypeVar
T = TypeVar("T")
class Stack(List[T]):
@property
def top(self) -> T:
return self[-1]
def push(self, item: T) -> None:
self.append(item) | --- +++ @@ -4,10 +4,13 @@
class Stack(List[T]):
+ """A small shim over builtin list."""
@property
def top(self) -> T:
+ """Get top of stack."""
return self[-1]
def push(self, item: T) -> None:
- self.append(item)
+ """Push an item onto the stack (append in stack nomenclature)."""
+ self.append(item)
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/_stack.py |
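Since Stack is a thin private shim over list, usage is what you would expect:

from rich._stack import Stack

stack: Stack[int] = Stack([1, 2])
stack.push(3)     # same as list.append
print(stack.top)  # 3 - peek at the last item without removing it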
Write docstrings for utility functions | from typing import TYPE_CHECKING, Iterable, List, Literal
from ._loop import loop_last
if TYPE_CHECKING:
from rich.console import ConsoleOptions
class Box:
def __init__(self, box: str, *, ascii: bool = False) -> None:
self._box = box
self.ascii = ascii
line1, line2, line3, line4, line5, line6, line7, line8 = box.splitlines()
# top
self.top_left, self.top, self.top_divider, self.top_right = iter(line1)
# head
self.head_left, _, self.head_vertical, self.head_right = iter(line2)
# head_row
(
self.head_row_left,
self.head_row_horizontal,
self.head_row_cross,
self.head_row_right,
) = iter(line3)
# mid
self.mid_left, _, self.mid_vertical, self.mid_right = iter(line4)
# row
self.row_left, self.row_horizontal, self.row_cross, self.row_right = iter(line5)
# foot_row
(
self.foot_row_left,
self.foot_row_horizontal,
self.foot_row_cross,
self.foot_row_right,
) = iter(line6)
# foot
self.foot_left, _, self.foot_vertical, self.foot_right = iter(line7)
# bottom
self.bottom_left, self.bottom, self.bottom_divider, self.bottom_right = iter(
line8
)
def __repr__(self) -> str:
return "Box(...)"
def __str__(self) -> str:
return self._box
def substitute(self, options: "ConsoleOptions", safe: bool = True) -> "Box":
box = self
if options.legacy_windows and safe:
box = LEGACY_WINDOWS_SUBSTITUTIONS.get(box, box)
if options.ascii_only and not box.ascii:
box = ASCII
return box
def get_plain_headed_box(self) -> "Box":
return PLAIN_HEADED_SUBSTITUTIONS.get(self, self)
def get_top(self, widths: Iterable[int]) -> str:
parts: List[str] = []
append = parts.append
append(self.top_left)
for last, width in loop_last(widths):
append(self.top * width)
if not last:
append(self.top_divider)
append(self.top_right)
return "".join(parts)
def get_row(
self,
widths: Iterable[int],
level: Literal["head", "row", "foot", "mid"] = "row",
edge: bool = True,
) -> str:
if level == "head":
left = self.head_row_left
horizontal = self.head_row_horizontal
cross = self.head_row_cross
right = self.head_row_right
elif level == "row":
left = self.row_left
horizontal = self.row_horizontal
cross = self.row_cross
right = self.row_right
elif level == "mid":
left = self.mid_left
horizontal = " "
cross = self.mid_vertical
right = self.mid_right
elif level == "foot":
left = self.foot_row_left
horizontal = self.foot_row_horizontal
cross = self.foot_row_cross
right = self.foot_row_right
else:
raise ValueError("level must be 'head', 'row' or 'foot'")
parts: List[str] = []
append = parts.append
if edge:
append(left)
for last, width in loop_last(widths):
append(horizontal * width)
if not last:
append(cross)
if edge:
append(right)
return "".join(parts)
def get_bottom(self, widths: Iterable[int]) -> str:
parts: List[str] = []
append = parts.append
append(self.bottom_left)
for last, width in loop_last(widths):
append(self.bottom * width)
if not last:
append(self.bottom_divider)
append(self.bottom_right)
return "".join(parts)
# fmt: off
ASCII: Box = Box(
"+--+\n"
"| ||\n"
"|-+|\n"
"| ||\n"
"|-+|\n"
"|-+|\n"
"| ||\n"
"+--+\n",
ascii=True,
)
ASCII2: Box = Box(
"+-++\n"
"| ||\n"
"+-++\n"
"| ||\n"
"+-++\n"
"+-++\n"
"| ||\n"
"+-++\n",
ascii=True,
)
ASCII_DOUBLE_HEAD: Box = Box(
"+-++\n"
"| ||\n"
"+=++\n"
"| ||\n"
"+-++\n"
"+-++\n"
"| ||\n"
"+-++\n",
ascii=True,
)
SQUARE: Box = Box(
"┌─┬┐\n"
"│ ││\n"
"├─┼┤\n"
"│ ││\n"
"├─┼┤\n"
"├─┼┤\n"
"│ ││\n"
"└─┴┘\n"
)
SQUARE_DOUBLE_HEAD: Box = Box(
"┌─┬┐\n"
"│ ││\n"
"╞═╪╡\n"
"│ ││\n"
"├─┼┤\n"
"├─┼┤\n"
"│ ││\n"
"└─┴┘\n"
)
MINIMAL: Box = Box(
" ╷ \n"
" │ \n"
"╶─┼╴\n"
" │ \n"
"╶─┼╴\n"
"╶─┼╴\n"
" │ \n"
" ╵ \n"
)
MINIMAL_HEAVY_HEAD: Box = Box(
" ╷ \n"
" │ \n"
"╺━┿╸\n"
" │ \n"
"╶─┼╴\n"
"╶─┼╴\n"
" │ \n"
" ╵ \n"
)
MINIMAL_DOUBLE_HEAD: Box = Box(
" ╷ \n"
" │ \n"
" ═╪ \n"
" │ \n"
" ─┼ \n"
" ─┼ \n"
" │ \n"
" ╵ \n"
)
SIMPLE: Box = Box(
" \n"
" \n"
" ── \n"
" \n"
" \n"
" ── \n"
" \n"
" \n"
)
SIMPLE_HEAD: Box = Box(
" \n"
" \n"
" ── \n"
" \n"
" \n"
" \n"
" \n"
" \n"
)
SIMPLE_HEAVY: Box = Box(
" \n"
" \n"
" ━━ \n"
" \n"
" \n"
" ━━ \n"
" \n"
" \n"
)
HORIZONTALS: Box = Box(
" ── \n"
" \n"
" ── \n"
" \n"
" ── \n"
" ── \n"
" \n"
" ── \n"
)
ROUNDED: Box = Box(
"╭─┬╮\n"
"│ ││\n"
"├─┼┤\n"
"│ ││\n"
"├─┼┤\n"
"├─┼┤\n"
"│ ││\n"
"╰─┴╯\n"
)
HEAVY: Box = Box(
"┏━┳┓\n"
"┃ ┃┃\n"
"┣━╋┫\n"
"┃ ┃┃\n"
"┣━╋┫\n"
"┣━╋┫\n"
"┃ ┃┃\n"
"┗━┻┛\n"
)
HEAVY_EDGE: Box = Box(
"┏━┯┓\n"
"┃ │┃\n"
"┠─┼┨\n"
"┃ │┃\n"
"┠─┼┨\n"
"┠─┼┨\n"
"┃ │┃\n"
"┗━┷┛\n"
)
HEAVY_HEAD: Box = Box(
"┏━┳┓\n"
"┃ ┃┃\n"
"┡━╇┩\n"
"│ ││\n"
"├─┼┤\n"
"├─┼┤\n"
"│ ││\n"
"└─┴┘\n"
)
DOUBLE: Box = Box(
"╔═╦╗\n"
"║ ║║\n"
"╠═╬╣\n"
"║ ║║\n"
"╠═╬╣\n"
"╠═╬╣\n"
"║ ║║\n"
"╚═╩╝\n"
)
DOUBLE_EDGE: Box = Box(
"╔═╤╗\n"
"║ │║\n"
"╟─┼╢\n"
"║ │║\n"
"╟─┼╢\n"
"╟─┼╢\n"
"║ │║\n"
"╚═╧╝\n"
)
MARKDOWN: Box = Box(
" \n"
"| ||\n"
"|-||\n"
"| ||\n"
"|-||\n"
"|-||\n"
"| ||\n"
" \n",
ascii=True,
)
# fmt: on
# Map Boxes that don't render with raster fonts on to equivalent that do
LEGACY_WINDOWS_SUBSTITUTIONS = {
ROUNDED: SQUARE,
MINIMAL_HEAVY_HEAD: MINIMAL,
SIMPLE_HEAVY: SIMPLE,
HEAVY: SQUARE,
HEAVY_EDGE: SQUARE,
HEAVY_HEAD: SQUARE,
}
# Map headed boxes to their headerless equivalents
PLAIN_HEADED_SUBSTITUTIONS = {
HEAVY_HEAD: SQUARE,
SQUARE_DOUBLE_HEAD: SQUARE,
MINIMAL_DOUBLE_HEAD: MINIMAL,
MINIMAL_HEAVY_HEAD: MINIMAL,
ASCII_DOUBLE_HEAD: ASCII2,
}
if __name__ == "__main__": # pragma: no cover
from rich.columns import Columns
from rich.panel import Panel
from . import box as box
from .console import Console
from .table import Table
from .text import Text
console = Console(record=True)
BOXES = [
"ASCII",
"ASCII2",
"ASCII_DOUBLE_HEAD",
"SQUARE",
"SQUARE_DOUBLE_HEAD",
"MINIMAL",
"MINIMAL_HEAVY_HEAD",
"MINIMAL_DOUBLE_HEAD",
"SIMPLE",
"SIMPLE_HEAD",
"SIMPLE_HEAVY",
"HORIZONTALS",
"ROUNDED",
"HEAVY",
"HEAVY_EDGE",
"HEAVY_HEAD",
"DOUBLE",
"DOUBLE_EDGE",
"MARKDOWN",
]
console.print(Panel("[bold green]Box Constants", style="green"), justify="center")
console.print()
columns = Columns(expand=True, padding=2)
for box_name in sorted(BOXES):
table = Table(
show_footer=True, style="dim", border_style="not dim", expand=True
)
table.add_column("Header 1", "Footer 1")
table.add_column("Header 2", "Footer 2")
table.add_row("Cell", "Cell")
table.add_row("Cell", "Cell")
table.box = getattr(box, box_name)
table.title = Text(f"box.{box_name}", style="magenta")
columns.add_renderable(table)
console.print(columns)
# console.save_svg("box.svg") | --- +++ @@ -8,6 +8,21 @@
class Box:
+ """Defines characters to render boxes.
+
+ ┌─┬┐ top
+ │ ││ head
+ ├─┼┤ head_row
+ │ ││ mid
+ ├─┼┤ row
+ ├─┼┤ foot_row
+ │ ││ foot
+ └─┴┘ bottom
+
+ Args:
+ box (str): Characters making up box.
+ ascii (bool, optional): True if this box uses ascii characters only. Default is False.
+ """
def __init__(self, box: str, *, ascii: bool = False) -> None:
self._box = box
@@ -50,6 +65,16 @@ return self._box
def substitute(self, options: "ConsoleOptions", safe: bool = True) -> "Box":
+ """Substitute this box for another if it won't render due to platform issues.
+
+ Args:
+ options (ConsoleOptions): Console options used in rendering.
+ safe (bool, optional): Substitute this for another Box if there are known problems
+ displaying on the platform (currently only relevant on Windows). Default is True.
+
+ Returns:
+ Box: A different Box or the same Box.
+ """
box = self
if options.legacy_windows and safe:
box = LEGACY_WINDOWS_SUBSTITUTIONS.get(box, box)
@@ -58,9 +83,24 @@ return box
def get_plain_headed_box(self) -> "Box":
+ """If this box uses special characters for the borders of the header, then
+ return the equivalent box that does not.
+
+ Returns:
+ Box: The most similar Box that doesn't use header-specific box characters.
+ If the current Box already satisfies this criterion, then it's returned.
+ """
return PLAIN_HEADED_SUBSTITUTIONS.get(self, self)
def get_top(self, widths: Iterable[int]) -> str:
+ """Get the top of a simple box.
+
+ Args:
+ widths (List[int]): Widths of columns.
+
+ Returns:
+ str: A string of box characters.
+ """
parts: List[str] = []
append = parts.append
@@ -78,6 +118,14 @@ level: Literal["head", "row", "foot", "mid"] = "row",
edge: bool = True,
) -> str:
+ """Get the top of a simple box.
+
+ Args:
+ width (List[int]): Widths of columns.
+
+ Returns:
+ str: A string of box characters.
+ """
if level == "head":
left = self.head_row_left
horizontal = self.head_row_horizontal
@@ -114,6 +162,14 @@ return "".join(parts)
def get_bottom(self, widths: Iterable[int]) -> str:
+ """Get the bottom of a simple box.
+
+ Args:
+ widths (List[int]): Widths of columns.
+
+ Returns:
+ str: A string of box characters.
+ """
parts: List[str] = []
append = parts.append
@@ -415,4 +471,4 @@ columns.add_renderable(table)
console.print(columns)
- # console.save_svg("box.svg")+ # console.save_svg("box.svg")
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/box.py |
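A small sketch of selecting one of the Box constants for a table (the same pattern the __main__ demo above loops over):

from rich import box
from rich.console import Console
from rich.table import Table

table = Table(title="Demo", box=box.ROUNDED)  # any of the constants above works here
table.add_column("Name")
table.add_column("Stars")
table.add_row("rich", "lots")
Console().print(table)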
Write docstrings for algorithm functions | from abc import ABC
class RichRenderable(ABC):
@classmethod
def __subclasshook__(cls, other: type) -> bool:
return hasattr(other, "__rich_console__") or hasattr(other, "__rich__")
if __name__ == "__main__": # pragma: no cover
from rich.text import Text
t = Text()
print(isinstance(Text, RichRenderable))
print(isinstance(t, RichRenderable))
class Foo:
pass
f = Foo()
print(isinstance(f, RichRenderable))
print(isinstance("", RichRenderable)) | --- +++ @@ -2,9 +2,19 @@
class RichRenderable(ABC):
+ """An abstract base class for Rich renderables.
+
+ Note that there is no need to extend this class; the intended use is to check if an
+ object supports the Rich renderable protocol. For example::
+
+ if isinstance(my_object, RichRenderable):
+ console.print(my_object)
+
+ """
@classmethod
def __subclasshook__(cls, other: type) -> bool:
+ """Check if this class supports the rich render protocol."""
return hasattr(other, "__rich_console__") or hasattr(other, "__rich__")
@@ -20,4 +30,4 @@
f = Foo()
print(isinstance(f, RichRenderable))
- print(isinstance("", RichRenderable))+ print(isinstance("", RichRenderable))
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/abc.py |
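Condensing the __main__ block above into the intended isinstance check:

from rich.abc import RichRenderable
from rich.text import Text

print(isinstance(Text("hi"), RichRenderable))      # True - Text defines __rich_console__
print(isinstance("plain string", RichRenderable))  # False - str has no rich protocol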
Create docstrings for each class method |
from time import time
import contextlib
from typing import Generator
@contextlib.contextmanager
def timer(subject: str = "time") -> Generator[None, None, None]:
start = time()
yield
elapsed = time() - start
elapsed_ms = elapsed * 1000
print(f"{subject} elapsed {elapsed_ms:.1f}ms") | --- +++ @@ -1,3 +1,7 @@+"""
+Timer context manager, only used in debugging.
+
+"""
from time import time
@@ -7,8 +11,9 @@
@contextlib.contextmanager
def timer(subject: str = "time") -> Generator[None, None, None]:
+ """print the elapsed time. (only used in debugging)"""
start = time()
yield
elapsed = time() - start
elapsed_ms = elapsed * 1000
- print(f"{subject} elapsed {elapsed_ms:.1f}ms")+ print(f"{subject} elapsed {elapsed_ms:.1f}ms")
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/_timer.py |
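Usage sketch (the module is a private debugging aid, so the import path is an implementation detail):

from rich._timer import timer

with timer("sorting"):
    sorted(range(1_000_000), reverse=True)
# prints something like: sorting elapsed 52.3ms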
Generate consistent documentation across files | from itertools import zip_longest
from typing import (
TYPE_CHECKING,
Iterable,
Iterator,
List,
Optional,
TypeVar,
Union,
overload,
)
if TYPE_CHECKING:
from .console import (
Console,
ConsoleOptions,
JustifyMethod,
OverflowMethod,
RenderResult,
RenderableType,
)
from .text import Text
from .cells import cell_len
from .measure import Measurement
T = TypeVar("T")
class Renderables:
def __init__(
self, renderables: Optional[Iterable["RenderableType"]] = None
) -> None:
self._renderables: List["RenderableType"] = (
list(renderables) if renderables is not None else []
)
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
yield from self._renderables
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> "Measurement":
dimensions = [
Measurement.get(console, options, renderable)
for renderable in self._renderables
]
if not dimensions:
return Measurement(1, 1)
_min = max(dimension.minimum for dimension in dimensions)
_max = max(dimension.maximum for dimension in dimensions)
return Measurement(_min, _max)
def append(self, renderable: "RenderableType") -> None:
self._renderables.append(renderable)
def __iter__(self) -> Iterable["RenderableType"]:
return iter(self._renderables)
class Lines:
def __init__(self, lines: Iterable["Text"] = ()) -> None:
self._lines: List["Text"] = list(lines)
def __repr__(self) -> str:
return f"Lines({self._lines!r})"
def __iter__(self) -> Iterator["Text"]:
return iter(self._lines)
@overload
def __getitem__(self, index: int) -> "Text":
...
@overload
def __getitem__(self, index: slice) -> List["Text"]:
...
def __getitem__(self, index: Union[slice, int]) -> Union["Text", List["Text"]]:
return self._lines[index]
def __setitem__(self, index: int, value: "Text") -> "Lines":
self._lines[index] = value
return self
def __len__(self) -> int:
return self._lines.__len__()
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
yield from self._lines
def append(self, line: "Text") -> None:
self._lines.append(line)
def extend(self, lines: Iterable["Text"]) -> None:
self._lines.extend(lines)
def pop(self, index: int = -1) -> "Text":
return self._lines.pop(index)
def justify(
self,
console: "Console",
width: int,
justify: "JustifyMethod" = "left",
overflow: "OverflowMethod" = "fold",
) -> None:
from .text import Text
if justify == "left":
for line in self._lines:
line.truncate(width, overflow=overflow, pad=True)
elif justify == "center":
for line in self._lines:
line.rstrip()
line.truncate(width, overflow=overflow)
line.pad_left((width - cell_len(line.plain)) // 2)
line.pad_right(width - cell_len(line.plain))
elif justify == "right":
for line in self._lines:
line.rstrip()
line.truncate(width, overflow=overflow)
line.pad_left(width - cell_len(line.plain))
elif justify == "full":
for line_index, line in enumerate(self._lines):
if line_index == len(self._lines) - 1:
break
words = line.split(" ")
words_size = sum(cell_len(word.plain) for word in words)
num_spaces = len(words) - 1
spaces = [1 for _ in range(num_spaces)]
index = 0
if spaces:
while words_size + num_spaces < width:
spaces[len(spaces) - index - 1] += 1
num_spaces += 1
index = (index + 1) % len(spaces)
tokens: List[Text] = []
for index, (word, next_word) in enumerate(
zip_longest(words, words[1:])
):
tokens.append(word)
if index < len(spaces):
style = word.get_style_at_offset(console, -1)
next_style = next_word.get_style_at_offset(console, 0)
space_style = style if style == next_style else line.style
tokens.append(Text(" " * spaces[index], style=space_style))
self[line_index] = Text("").join(tokens) | --- +++ @@ -28,6 +28,7 @@
class Renderables:
+ """A list subclass which renders its contents to the console."""
def __init__(
self, renderables: Optional[Iterable["RenderableType"]] = None
@@ -39,6 +40,7 @@ def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
+ """Console render method to insert line-breaks."""
yield from self._renderables
def __rich_measure__(
@@ -62,6 +64,7 @@
class Lines:
+ """A list subclass which can render to the console."""
def __init__(self, lines: Iterable["Text"] = ()) -> None:
self._lines: List["Text"] = list(lines)
@@ -93,6 +96,7 @@ def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
+ """Console render method to insert line-breaks."""
yield from self._lines
def append(self, line: "Text") -> None:
@@ -111,6 +115,15 @@ justify: "JustifyMethod" = "left",
overflow: "OverflowMethod" = "fold",
) -> None:
+ """Justify and overflow text to a given width.
+
+ Args:
+ console (Console): Console instance.
+ width (int): Number of cells available per line.
+ justify (str, optional): Default justify method for text: "left", "center", "full" or "right". Defaults to "left".
+ overflow (str, optional): Default overflow for text: "crop", "fold", or "ellipsis". Defaults to "fold".
+
+ """
from .text import Text
if justify == "left":
@@ -151,4 +164,4 @@ next_style = next_word.get_style_at_offset(console, 0)
space_style = style if style == next_style else line.style
tokens.append(Text(" " * spaces[index], style=space_style))
- self[line_index] = Text("").join(tokens)+ self[line_index] = Text("").join(tokens)
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/containers.py |
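A sketch of Lines.justify as documented in the diff above; Lines is an internal container, constructed directly here purely for illustration:

from rich.console import Console
from rich.containers import Lines
from rich.text import Text

console = Console()
lines = Lines([Text("short"), Text("a longer line")])
lines.justify(console, width=20, justify="center")
for line in lines:
    console.print(line)  # each line padded to 20 cells with the text centered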
Turn comments into proper docstrings | from collections import defaultdict
from itertools import chain
from operator import itemgetter
from typing import Dict, Iterable, List, Optional, Tuple
from .align import Align, AlignMethod
from .console import Console, ConsoleOptions, RenderableType, RenderResult
from .constrain import Constrain
from .measure import Measurement
from .padding import Padding, PaddingDimensions
from .table import Table
from .text import TextType
from .jupyter import JupyterMixin
class Columns(JupyterMixin):
def __init__(
self,
renderables: Optional[Iterable[RenderableType]] = None,
padding: PaddingDimensions = (0, 1),
*,
width: Optional[int] = None,
expand: bool = False,
equal: bool = False,
column_first: bool = False,
right_to_left: bool = False,
align: Optional[AlignMethod] = None,
title: Optional[TextType] = None,
) -> None:
self.renderables = list(renderables or [])
self.width = width
self.padding = padding
self.expand = expand
self.equal = equal
self.column_first = column_first
self.right_to_left = right_to_left
self.align: Optional[AlignMethod] = align
self.title = title
def add_renderable(self, renderable: RenderableType) -> None:
self.renderables.append(renderable)
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
render_str = console.render_str
renderables = [
render_str(renderable) if isinstance(renderable, str) else renderable
for renderable in self.renderables
]
if not renderables:
return
_top, right, _bottom, left = Padding.unpack(self.padding)
width_padding = max(left, right)
max_width = options.max_width
widths: Dict[int, int] = defaultdict(int)
column_count = len(renderables)
get_measurement = Measurement.get
renderable_widths = [
get_measurement(console, options, renderable).maximum
for renderable in renderables
]
if self.equal:
renderable_widths = [max(renderable_widths)] * len(renderable_widths)
def iter_renderables(
column_count: int,
) -> Iterable[Tuple[int, Optional[RenderableType]]]:
item_count = len(renderables)
if self.column_first:
width_renderables = list(zip(renderable_widths, renderables))
column_lengths: List[int] = [item_count // column_count] * column_count
for col_no in range(item_count % column_count):
column_lengths[col_no] += 1
row_count = (item_count + column_count - 1) // column_count
cells = [[-1] * column_count for _ in range(row_count)]
row = col = 0
for index in range(item_count):
cells[row][col] = index
column_lengths[col] -= 1
if column_lengths[col]:
row += 1
else:
col += 1
row = 0
for index in chain.from_iterable(cells):
if index == -1:
break
yield width_renderables[index]
else:
yield from zip(renderable_widths, renderables)
# Pad odd elements with spaces
if item_count % column_count:
for _ in range(column_count - (item_count % column_count)):
yield 0, None
table = Table.grid(padding=self.padding, collapse_padding=True, pad_edge=False)
table.expand = self.expand
table.title = self.title
if self.width is not None:
column_count = (max_width) // (self.width + width_padding)
for _ in range(column_count):
table.add_column(width=self.width)
else:
while column_count > 1:
widths.clear()
column_no = 0
for renderable_width, _ in iter_renderables(column_count):
widths[column_no] = max(widths[column_no], renderable_width)
total_width = sum(widths.values()) + width_padding * (
len(widths) - 1
)
if total_width > max_width:
column_count = len(widths) - 1
break
else:
column_no = (column_no + 1) % column_count
else:
break
get_renderable = itemgetter(1)
_renderables = [
get_renderable(_renderable)
for _renderable in iter_renderables(column_count)
]
if self.equal:
_renderables = [
None
if renderable is None
else Constrain(renderable, renderable_widths[0])
for renderable in _renderables
]
if self.align:
align = self.align
_Align = Align
_renderables = [
None if renderable is None else _Align(renderable, align)
for renderable in _renderables
]
right_to_left = self.right_to_left
add_row = table.add_row
for start in range(0, len(_renderables), column_count):
row = _renderables[start : start + column_count]
if right_to_left:
row = row[::-1]
add_row(*row)
yield table
if __name__ == "__main__": # pragma: no cover
import os
console = Console()
files = [f"{i} {s}" for i, s in enumerate(sorted(os.listdir()))]
columns = Columns(files, padding=(0, 1), expand=False, equal=False)
console.print(columns)
console.rule()
columns.column_first = True
console.print(columns)
columns.right_to_left = True
console.rule()
console.print(columns) | --- +++ @@ -14,6 +14,19 @@
class Columns(JupyterMixin):
+ """Display renderables in neat columns.
+
+ Args:
+ renderables (Iterable[RenderableType]): Any number of Rich renderables (including str).
+ width (int, optional): The desired width of the columns, or None to auto detect. Defaults to None.
+ padding (PaddingDimensions, optional): Optional padding around cells. Defaults to (0, 1).
+ expand (bool, optional): Expand columns to full width. Defaults to False.
+ equal (bool, optional): Arrange into equal-sized columns. Defaults to False.
+ column_first (bool, optional): Align items from top to bottom (rather than left to right). Defaults to False.
+ right_to_left (bool, optional): Start column from right hand side. Defaults to False.
+ align (str, optional): Align value ("left", "right", or "center") or None for default. Defaults to None.
+ title (TextType, optional): Optional title for Columns.
+ """
def __init__(
self,
@@ -39,6 +52,11 @@ self.title = title
def add_renderable(self, renderable: RenderableType) -> None:
+ """Add a renderable to the columns.
+
+ Args:
+ renderable (RenderableType): Any renderable object.
+ """
self.renderables.append(renderable)
def __rich_console__(
@@ -166,4 +184,4 @@ console.print(columns)
columns.right_to_left = True
console.rule()
- console.print(columns)
+ console.print(columns)
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/columns.py |
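A usage sketch of Columns, mirroring the directory-listing demo in __main__:

from rich.columns import Columns
from rich.console import Console

names = [f"item {n:02}" for n in range(12)]
# equal=True forces equal-width columns; column_first fills top to bottom.
Console().print(Columns(names, equal=True, column_first=True))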
Write docstrings that follow conventions | import re
import sys
from colorsys import rgb_to_hls
from enum import IntEnum
from functools import lru_cache
from typing import TYPE_CHECKING, NamedTuple, Optional, Tuple
from ._palettes import EIGHT_BIT_PALETTE, STANDARD_PALETTE, WINDOWS_PALETTE
from .color_triplet import ColorTriplet
from .repr import Result, rich_repr
from .terminal_theme import DEFAULT_TERMINAL_THEME
if TYPE_CHECKING: # pragma: no cover
from .terminal_theme import TerminalTheme
from .text import Text
WINDOWS = sys.platform == "win32"
class ColorSystem(IntEnum):
STANDARD = 1
EIGHT_BIT = 2
TRUECOLOR = 3
WINDOWS = 4
def __repr__(self) -> str:
return f"ColorSystem.{self.name}"
def __str__(self) -> str:
return repr(self)
class ColorType(IntEnum):
DEFAULT = 0
STANDARD = 1
EIGHT_BIT = 2
TRUECOLOR = 3
WINDOWS = 4
def __repr__(self) -> str:
return f"ColorType.{self.name}"
ANSI_COLOR_NAMES = {
"black": 0,
"red": 1,
"green": 2,
"yellow": 3,
"blue": 4,
"magenta": 5,
"cyan": 6,
"white": 7,
"bright_black": 8,
"bright_red": 9,
"bright_green": 10,
"bright_yellow": 11,
"bright_blue": 12,
"bright_magenta": 13,
"bright_cyan": 14,
"bright_white": 15,
"grey0": 16,
"gray0": 16,
"navy_blue": 17,
"dark_blue": 18,
"blue3": 20,
"blue1": 21,
"dark_green": 22,
"deep_sky_blue4": 25,
"dodger_blue3": 26,
"dodger_blue2": 27,
"green4": 28,
"spring_green4": 29,
"turquoise4": 30,
"deep_sky_blue3": 32,
"dodger_blue1": 33,
"green3": 40,
"spring_green3": 41,
"dark_cyan": 36,
"light_sea_green": 37,
"deep_sky_blue2": 38,
"deep_sky_blue1": 39,
"spring_green2": 47,
"cyan3": 43,
"dark_turquoise": 44,
"turquoise2": 45,
"green1": 46,
"spring_green1": 48,
"medium_spring_green": 49,
"cyan2": 50,
"cyan1": 51,
"dark_red": 88,
"deep_pink4": 125,
"purple4": 55,
"purple3": 56,
"blue_violet": 57,
"orange4": 94,
"grey37": 59,
"gray37": 59,
"medium_purple4": 60,
"slate_blue3": 62,
"royal_blue1": 63,
"chartreuse4": 64,
"dark_sea_green4": 71,
"pale_turquoise4": 66,
"steel_blue": 67,
"steel_blue3": 68,
"cornflower_blue": 69,
"chartreuse3": 76,
"cadet_blue": 73,
"sky_blue3": 74,
"steel_blue1": 81,
"pale_green3": 114,
"sea_green3": 78,
"aquamarine3": 79,
"medium_turquoise": 80,
"chartreuse2": 112,
"sea_green2": 83,
"sea_green1": 85,
"aquamarine1": 122,
"dark_slate_gray2": 87,
"dark_magenta": 91,
"dark_violet": 128,
"purple": 129,
"light_pink4": 95,
"plum4": 96,
"medium_purple3": 98,
"slate_blue1": 99,
"yellow4": 106,
"wheat4": 101,
"grey53": 102,
"gray53": 102,
"light_slate_grey": 103,
"light_slate_gray": 103,
"medium_purple": 104,
"light_slate_blue": 105,
"dark_olive_green3": 149,
"dark_sea_green": 108,
"light_sky_blue3": 110,
"sky_blue2": 111,
"dark_sea_green3": 150,
"dark_slate_gray3": 116,
"sky_blue1": 117,
"chartreuse1": 118,
"light_green": 120,
"pale_green1": 156,
"dark_slate_gray1": 123,
"red3": 160,
"medium_violet_red": 126,
"magenta3": 164,
"dark_orange3": 166,
"indian_red": 167,
"hot_pink3": 168,
"medium_orchid3": 133,
"medium_orchid": 134,
"medium_purple2": 140,
"dark_goldenrod": 136,
"light_salmon3": 173,
"rosy_brown": 138,
"grey63": 139,
"gray63": 139,
"medium_purple1": 141,
"gold3": 178,
"dark_khaki": 143,
"navajo_white3": 144,
"grey69": 145,
"gray69": 145,
"light_steel_blue3": 146,
"light_steel_blue": 147,
"yellow3": 184,
"dark_sea_green2": 157,
"light_cyan3": 152,
"light_sky_blue1": 153,
"green_yellow": 154,
"dark_olive_green2": 155,
"dark_sea_green1": 193,
"pale_turquoise1": 159,
"deep_pink3": 162,
"magenta2": 200,
"hot_pink2": 169,
"orchid": 170,
"medium_orchid1": 207,
"orange3": 172,
"light_pink3": 174,
"pink3": 175,
"plum3": 176,
"violet": 177,
"light_goldenrod3": 179,
"tan": 180,
"misty_rose3": 181,
"thistle3": 182,
"plum2": 183,
"khaki3": 185,
"light_goldenrod2": 222,
"light_yellow3": 187,
"grey84": 188,
"gray84": 188,
"light_steel_blue1": 189,
"yellow2": 190,
"dark_olive_green1": 192,
"honeydew2": 194,
"light_cyan1": 195,
"red1": 196,
"deep_pink2": 197,
"deep_pink1": 199,
"magenta1": 201,
"orange_red1": 202,
"indian_red1": 204,
"hot_pink": 206,
"dark_orange": 208,
"salmon1": 209,
"light_coral": 210,
"pale_violet_red1": 211,
"orchid2": 212,
"orchid1": 213,
"orange1": 214,
"sandy_brown": 215,
"light_salmon1": 216,
"light_pink1": 217,
"pink1": 218,
"plum1": 219,
"gold1": 220,
"navajo_white1": 223,
"misty_rose1": 224,
"thistle1": 225,
"yellow1": 226,
"light_goldenrod1": 227,
"khaki1": 228,
"wheat1": 229,
"cornsilk1": 230,
"grey100": 231,
"gray100": 231,
"grey3": 232,
"gray3": 232,
"grey7": 233,
"gray7": 233,
"grey11": 234,
"gray11": 234,
"grey15": 235,
"gray15": 235,
"grey19": 236,
"gray19": 236,
"grey23": 237,
"gray23": 237,
"grey27": 238,
"gray27": 238,
"grey30": 239,
"gray30": 239,
"grey35": 240,
"gray35": 240,
"grey39": 241,
"gray39": 241,
"grey42": 242,
"gray42": 242,
"grey46": 243,
"gray46": 243,
"grey50": 244,
"gray50": 244,
"grey54": 245,
"gray54": 245,
"grey58": 246,
"gray58": 246,
"grey62": 247,
"gray62": 247,
"grey66": 248,
"gray66": 248,
"grey70": 249,
"gray70": 249,
"grey74": 250,
"gray74": 250,
"grey78": 251,
"gray78": 251,
"grey82": 252,
"gray82": 252,
"grey85": 253,
"gray85": 253,
"grey89": 254,
"gray89": 254,
"grey93": 255,
"gray93": 255,
}
class ColorParseError(Exception):
RE_COLOR = re.compile(
r"""^
\#([0-9a-f]{6})$|
color\(([0-9]{1,3})\)$|
rgb\(([\d\s,]+)\)$
""",
re.VERBOSE,
)
@rich_repr
class Color(NamedTuple):
name: str
"""The name of the color (typically the input to Color.parse)."""
type: ColorType
"""The type of the color."""
number: Optional[int] = None
"""The color number, if a standard color, or None."""
triplet: Optional[ColorTriplet] = None
"""A triplet of color components, if an RGB color."""
def __rich__(self) -> "Text":
from .style import Style
from .text import Text
return Text.assemble(
f"<color {self.name!r} ({self.type.name.lower()})",
("⬤", Style(color=self)),
" >",
)
def __rich_repr__(self) -> Result:
yield self.name
yield self.type
yield "number", self.number, None
yield "triplet", self.triplet, None
@property
def system(self) -> ColorSystem:
if self.type == ColorType.DEFAULT:
return ColorSystem.STANDARD
return ColorSystem(int(self.type))
@property
def is_system_defined(self) -> bool:
return self.system not in (ColorSystem.EIGHT_BIT, ColorSystem.TRUECOLOR)
@property
def is_default(self) -> bool:
return self.type == ColorType.DEFAULT
def get_truecolor(
self, theme: Optional["TerminalTheme"] = None, foreground: bool = True
) -> ColorTriplet:
if theme is None:
theme = DEFAULT_TERMINAL_THEME
if self.type == ColorType.TRUECOLOR:
assert self.triplet is not None
return self.triplet
elif self.type == ColorType.EIGHT_BIT:
assert self.number is not None
return EIGHT_BIT_PALETTE[self.number]
elif self.type == ColorType.STANDARD:
assert self.number is not None
return theme.ansi_colors[self.number]
elif self.type == ColorType.WINDOWS:
assert self.number is not None
return WINDOWS_PALETTE[self.number]
else: # self.type == ColorType.DEFAULT:
assert self.number is None
return theme.foreground_color if foreground else theme.background_color
@classmethod
def from_ansi(cls, number: int) -> "Color":
return cls(
name=f"color({number})",
type=(ColorType.STANDARD if number < 16 else ColorType.EIGHT_BIT),
number=number,
)
@classmethod
def from_triplet(cls, triplet: "ColorTriplet") -> "Color":
return cls(name=triplet.hex, type=ColorType.TRUECOLOR, triplet=triplet)
@classmethod
def from_rgb(cls, red: float, green: float, blue: float) -> "Color":
return cls.from_triplet(ColorTriplet(int(red), int(green), int(blue)))
@classmethod
def default(cls) -> "Color":
return cls(name="default", type=ColorType.DEFAULT)
@classmethod
@lru_cache(maxsize=1024)
def parse(cls, color: str) -> "Color":
original_color = color
color = color.lower().strip()
if color == "default":
return cls(color, type=ColorType.DEFAULT)
color_number = ANSI_COLOR_NAMES.get(color)
if color_number is not None:
return cls(
color,
type=(ColorType.STANDARD if color_number < 16 else ColorType.EIGHT_BIT),
number=color_number,
)
color_match = RE_COLOR.match(color)
if color_match is None:
raise ColorParseError(f"{original_color!r} is not a valid color")
color_24, color_8, color_rgb = color_match.groups()
if color_24:
triplet = ColorTriplet(
int(color_24[0:2], 16), int(color_24[2:4], 16), int(color_24[4:6], 16)
)
return cls(color, ColorType.TRUECOLOR, triplet=triplet)
elif color_8:
number = int(color_8)
if number > 255:
raise ColorParseError(f"color number must be <= 255 in {color!r}")
return cls(
color,
type=(ColorType.STANDARD if number < 16 else ColorType.EIGHT_BIT),
number=number,
)
else: # color_rgb:
components = color_rgb.split(",")
if len(components) != 3:
raise ColorParseError(
f"expected three components in {original_color!r}"
)
red, green, blue = components
triplet = ColorTriplet(int(red), int(green), int(blue))
if not all(component <= 255 for component in triplet):
raise ColorParseError(
f"color components must be <= 255 in {original_color!r}"
)
return cls(color, ColorType.TRUECOLOR, triplet=triplet)
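# Illustrative sketch (not in the original module): each syntax accepted by
# Color.parse maps to a different ColorType.
#
#     Color.parse("red")           # named ANSI color -> STANDARD, number=1
#     Color.parse("color(120)")    # numbered color   -> EIGHT_BIT, number=120
#     Color.parse("#ff8800")       # hex              -> TRUECOLOR, triplet=(255, 136, 0)
#     Color.parse("rgb(10,20,30)") # rgb() syntax     -> TRUECOLOR, triplet=(10, 20, 30)
#     Color.parse("nonsense")      # raises ColorParseError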
@lru_cache(maxsize=1024)
def get_ansi_codes(self, foreground: bool = True) -> Tuple[str, ...]:
_type = self.type
if _type == ColorType.DEFAULT:
return ("39" if foreground else "49",)
elif _type == ColorType.WINDOWS:
number = self.number
assert number is not None
fore, back = (30, 40) if number < 8 else (82, 92)
return (str(fore + number if foreground else back + number),)
elif _type == ColorType.STANDARD:
number = self.number
assert number is not None
fore, back = (30, 40) if number < 8 else (82, 92)
return (str(fore + number if foreground else back + number),)
elif _type == ColorType.EIGHT_BIT:
assert self.number is not None
return ("38" if foreground else "48", "5", str(self.number))
else: # self.type == ColorType.TRUECOLOR:
assert self.triplet is not None
red, green, blue = self.triplet
return ("38" if foreground else "48", "2", str(red), str(green), str(blue))
@lru_cache(maxsize=1024)
def downgrade(self, system: ColorSystem) -> "Color":
if self.type in (ColorType.DEFAULT, system):
return self
# Convert from truecolor to 8-bit color
if system == ColorSystem.EIGHT_BIT and self.system == ColorSystem.TRUECOLOR:
assert self.triplet is not None
_h, l, s = rgb_to_hls(*self.triplet.normalized)
# If saturation is under 15% assume it is grayscale
if s < 0.15:
gray = round(l * 25.0)
if gray == 0:
color_number = 16
elif gray == 25:
color_number = 231
else:
color_number = 231 + gray
return Color(self.name, ColorType.EIGHT_BIT, number=color_number)
red, green, blue = self.triplet
six_red = red / 95 if red < 95 else 1 + (red - 95) / 40
six_green = green / 95 if green < 95 else 1 + (green - 95) / 40
six_blue = blue / 95 if blue < 95 else 1 + (blue - 95) / 40
color_number = (
16 + 36 * round(six_red) + 6 * round(six_green) + round(six_blue)
)
return Color(self.name, ColorType.EIGHT_BIT, number=color_number)
# Convert to standard from truecolor or 8-bit
elif system == ColorSystem.STANDARD:
if self.system == ColorSystem.TRUECOLOR:
assert self.triplet is not None
triplet = self.triplet
else: # self.system == ColorSystem.EIGHT_BIT
assert self.number is not None
triplet = ColorTriplet(*EIGHT_BIT_PALETTE[self.number])
color_number = STANDARD_PALETTE.match(triplet)
return Color(self.name, ColorType.STANDARD, number=color_number)
elif system == ColorSystem.WINDOWS:
if self.system == ColorSystem.TRUECOLOR:
assert self.triplet is not None
triplet = self.triplet
else: # self.system == ColorSystem.EIGHT_BIT
assert self.number is not None
if self.number < 16:
return Color(self.name, ColorType.WINDOWS, number=self.number)
triplet = ColorTriplet(*EIGHT_BIT_PALETTE[self.number])
color_number = WINDOWS_PALETTE.match(triplet)
return Color(self.name, ColorType.WINDOWS, number=color_number)
return self
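# Worked example of the truecolor -> 8-bit arithmetic above (illustrative,
# not in the original module). Pure red (255, 0, 0) is saturated, so the
# 6x6x6 cube is used: six_red = 1 + (255 - 95) / 40 = 5, six_green =
# six_blue = 0, giving 16 + 36*5 + 6*0 + 0 = 196 ("red1"). A mid grey
# (128, 128, 128) has zero saturation, so the greyscale ramp applies:
# gray = round(0.502 * 25) = 13, giving 231 + 13 = 244 ("grey50").
#
#     Color.parse("#ff0000").downgrade(ColorSystem.EIGHT_BIT).number  # 196
#     Color.parse("#808080").downgrade(ColorSystem.EIGHT_BIT).number  # 244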
def parse_rgb_hex(hex_color: str) -> ColorTriplet:
assert len(hex_color) == 6, "must be 6 characters"
color = ColorTriplet(
int(hex_color[0:2], 16), int(hex_color[2:4], 16), int(hex_color[4:6], 16)
)
return color
def blend_rgb(
color1: ColorTriplet, color2: ColorTriplet, cross_fade: float = 0.5
) -> ColorTriplet:
r1, g1, b1 = color1
r2, g2, b2 = color2
new_color = ColorTriplet(
int(r1 + (r2 - r1) * cross_fade),
int(g1 + (g2 - g1) * cross_fade),
int(b1 + (b2 - b1) * cross_fade),
)
return new_color
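# Quick check of the two helpers above (illustrative). The default
# cross_fade of 0.5 gives the midpoint; int() truncates, hence 127 not 128.
#
#     parse_rgb_hex("ff8800")  # ColorTriplet(red=255, green=136, blue=0)
#     blend_rgb(ColorTriplet(0, 0, 0), ColorTriplet(255, 255, 255))
#     # ColorTriplet(red=127, green=127, blue=127)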
if __name__ == "__main__": # pragma: no cover
from .console import Console
from .table import Table
from .text import Text
console = Console()
table = Table(show_footer=False, show_edge=True)
table.add_column("Color", width=10, overflow="ellipsis")
table.add_column("Number", justify="right", style="yellow")
table.add_column("Name", style="green")
table.add_column("Hex", style="blue")
table.add_column("RGB", style="magenta")
colors = sorted((v, k) for k, v in ANSI_COLOR_NAMES.items())
for color_number, name in colors:
if "grey" in name:
continue
color_cell = Text(" " * 10, style=f"on {name}")
if color_number < 16:
table.add_row(color_cell, f"{color_number}", Text(f'"{name}"'))
else:
color = EIGHT_BIT_PALETTE[color_number] # type: ignore[has-type]
table.add_row(
color_cell, str(color_number), Text(f'"{name}"'), color.hex, color.rgb
)
console.print(table) | ---
+++
@@ -19,6 +19,7 @@
class ColorSystem(IntEnum):
+ """One of the 3 color system supported by terminals."""
STANDARD = 1
EIGHT_BIT = 2
@@ -33,6 +34,7 @@
class ColorType(IntEnum):
+ """Type of color stored in Color class."""
DEFAULT = 0
STANDARD = 1
@@ -284,6 +286,7 @@
class ColorParseError(Exception):
+ """The color could not be parsed."""
RE_COLOR = re.compile(
@@ -298,6 +301,7 @@
@rich_repr
class Color(NamedTuple):
+ """Terminal color definition."""
name: str
"""The name of the color (typically the input to Color.parse)."""
@@ -309,6 +313,7 @@ """A triplet of color components, if an RGB color."""
def __rich__(self) -> "Text":
+ """Displays the actual color if Rich printed."""
from .style import Style
from .text import Text
@@ -326,21 +331,33 @@
@property
def system(self) -> ColorSystem:
+ """Get the native color system for this color."""
if self.type == ColorType.DEFAULT:
return ColorSystem.STANDARD
return ColorSystem(int(self.type))
@property
def is_system_defined(self) -> bool:
+ """Check if the color is ultimately defined by the system."""
return self.system not in (ColorSystem.EIGHT_BIT, ColorSystem.TRUECOLOR)
@property
def is_default(self) -> bool:
+ """Check if the color is a default color."""
return self.type == ColorType.DEFAULT
def get_truecolor(
self, theme: Optional["TerminalTheme"] = None, foreground: bool = True
) -> ColorTriplet:
+ """Get an equivalent color triplet for this color.
+
+ Args:
+ theme (TerminalTheme, optional): Optional terminal theme, or None to use default. Defaults to None.
+ foreground (bool, optional): True for a foreground color, or False for background. Defaults to True.
+
+ Returns:
+ ColorTriplet: A color triplet containing RGB components.
+ """
if theme is None:
theme = DEFAULT_TERMINAL_THEME
@@ -362,6 +379,14 @@
@classmethod
def from_ansi(cls, number: int) -> "Color":
+ """Create a Color number from it's 8-bit ansi number.
+
+ Args:
+ number (int): A number between 0-255 inclusive.
+
+ Returns:
+ Color: A new Color instance.
+ """
return cls(
name=f"color({number})",
type=(ColorType.STANDARD if number < 16 else ColorType.EIGHT_BIT),
@@ -370,19 +395,43 @@
@classmethod
def from_triplet(cls, triplet: "ColorTriplet") -> "Color":
+ """Create a truecolor RGB color from a triplet of values.
+
+ Args:
+ triplet (ColorTriplet): A color triplet containing red, green and blue components.
+
+ Returns:
+ Color: A new color object.
+ """
return cls(name=triplet.hex, type=ColorType.TRUECOLOR, triplet=triplet)
@classmethod
def from_rgb(cls, red: float, green: float, blue: float) -> "Color":
+ """Create a truecolor from three color components in the range(0->255).
+
+ Args:
+ red (float): Red component in range 0-255.
+ green (float): Green component in range 0-255.
+ blue (float): Blue component in range 0-255.
+
+ Returns:
+ Color: A new color object.
+ """
return cls.from_triplet(ColorTriplet(int(red), int(green), int(blue)))
@classmethod
def default(cls) -> "Color":
+ """Get a Color instance representing the default color.
+
+ Returns:
+ Color: Default color.
+ """
return cls(name="default", type=ColorType.DEFAULT)
@classmethod
@lru_cache(maxsize=1024)
def parse(cls, color: str) -> "Color":
+ """Parse a color definition."""
original_color = color
color = color.lower().strip()
@@ -434,6 +483,7 @@
@lru_cache(maxsize=1024)
def get_ansi_codes(self, foreground: bool = True) -> Tuple[str, ...]:
+ """Get the ANSI escape codes for this color."""
_type = self.type
if _type == ColorType.DEFAULT:
return ("39" if foreground else "49",)
@@ -461,6 +511,7 @@
@lru_cache(maxsize=1024)
def downgrade(self, system: ColorSystem) -> "Color":
+ """Downgrade a color system to a system with fewer colors."""
if self.type in (ColorType.DEFAULT, system):
return self
@@ -518,6 +569,7 @@
def parse_rgb_hex(hex_color: str) -> ColorTriplet:
+ """Parse six hex characters in to RGB triplet."""
assert len(hex_color) == 6, "must be 6 characters"
color = ColorTriplet(
int(hex_color[0:2], 16), int(hex_color[2:4], 16), int(hex_color[4:6], 16)
@@ -528,6 +580,7 @@ def blend_rgb(
color1: ColorTriplet, color2: ColorTriplet, cross_fade: float = 0.5
) -> ColorTriplet:
+ """Blend one RGB color in to another."""
r1, g1, b1 = color1
r2, g2, b2 = color2
new_color = ColorTriplet(
@@ -565,4 +618,4 @@ color_cell, str(color_number), Text(f'"{name}"'), color.hex, color.rgb
)
- console.print(table)
+ console.print(table)
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/color.py |
Improve documentation using docstrings | from __future__ import annotations
import bisect
import os
import sys
if sys.version_info[:2] >= (3, 9):
from functools import cache
else:
from functools import lru_cache as cache # pragma: no cover
from importlib import import_module
from typing import TYPE_CHECKING, cast
from rich._unicode_data._versions import VERSIONS
if TYPE_CHECKING:
from rich.cells import CellTable
VERSION_ORDER = sorted(
[
tuple(
map(int, version.split(".")),
)
for version in VERSIONS
]
)
VERSION_SET = frozenset(VERSIONS)
def _parse_version(version: str) -> tuple[int, int, int]:
version_integers: tuple[int, ...]
try:
version_integers = tuple(
map(int, version.split(".")),
)
except ValueError:
raise ValueError(
f"unicode version string {version!r} is badly formatted"
) from None
while len(version_integers) < 3:
version_integers = version_integers + (0,)
triple = cast("tuple[int, int, int]", version_integers[:3])
return triple
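# Behaviour sketch for _parse_version (illustrative): short versions are
# right-padded with zeros; malformed ones raise ValueError.
#
#     _parse_version("16")      # (16, 0, 0)
#     _parse_version("15.1")    # (15, 1, 0)
#     _parse_version("potato")  # raises ValueError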
@cache
def load(unicode_version: str = "auto") -> CellTable:
if unicode_version == "auto":
unicode_version = os.environ.get("UNICODE_VERSION", "latest")
try:
_parse_version(unicode_version)
except ValueError:
# The environment variable is invalid;
# falling back to the latest version seems reasonable
unicode_version = "latest"
if unicode_version == "latest":
version = VERSIONS[-1]
else:
try:
version_numbers = _parse_version(unicode_version)
except ValueError:
version_numbers = _parse_version(VERSIONS[-1])
major, minor, patch = version_numbers
version = f"{major}.{minor}.{patch}"
if version not in VERSION_SET:
insert_position = bisect.bisect_left(VERSION_ORDER, version_numbers)
version = VERSIONS[max(0, insert_position - 1)]
version_path_component = version.replace(".", "-")
module_name = f".unicode{version_path_component}"
module = import_module(module_name, "rich._unicode_data")
if TYPE_CHECKING:
assert isinstance(module.cell_table, CellTable)
return module.cell_table | ---
+++
@@ -29,6 +29,17 @@
def _parse_version(version: str) -> tuple[int, int, int]:
+ """Parse a version string into a tuple of 3 integers.
+
+ Args:
+ version: A version string.
+
+ Raises:
+ ValueError: If the version string is invalid.
+
+ Returns:
+ A tuple of 3 integers.
+ """
version_integers: tuple[int, ...]
try:
version_integers = tuple(
@@ -46,6 +57,12 @@
@cache
def load(unicode_version: str = "auto") -> CellTable:
+ """Load a cell table for the given unicode version.
+
+ Args:
+ unicode_version: Unicode version, or `None` to auto-detect.
+
+ """
if unicode_version == "auto":
unicode_version = os.environ.get("UNICODE_VERSION", "latest")
try:
@@ -73,4 +90,4 @@ module = import_module(module_name, "rich._unicode_data")
if TYPE_CHECKING:
assert isinstance(module.cell_table, CellTable)
- return module.cell_table
+ return module.cell_table
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/_unicode_data/__init__.py |
Document this script properly | import inspect
import os
import sys
import threading
import zlib
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from datetime import datetime
from functools import wraps
from getpass import getpass
from html import escape
from inspect import isclass
from itertools import islice
from math import ceil
from time import monotonic
from types import FrameType, ModuleType, TracebackType
from typing import (
IO,
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Literal,
Mapping,
NamedTuple,
Optional,
Protocol,
TextIO,
Tuple,
Type,
Union,
cast,
runtime_checkable,
)
from rich._null_file import NULL_FILE
from . import errors, themes
from ._emoji_replace import _emoji_replace
from ._export_format import CONSOLE_HTML_FORMAT, CONSOLE_SVG_FORMAT
from ._fileno import get_fileno
from ._log_render import FormatTimeCallable, LogRender
from .align import Align, AlignMethod
from .color import ColorSystem, blend_rgb
from .control import Control
from .emoji import EmojiVariant
from .highlighter import NullHighlighter, ReprHighlighter
from .markup import render as render_markup
from .measure import Measurement, measure_renderables
from .pager import Pager, SystemPager
from .pretty import Pretty, is_expandable
from .protocol import rich_cast
from .region import Region
from .scope import render_scope
from .screen import Screen
from .segment import Segment
from .style import Style, StyleType
from .styled import Styled
from .terminal_theme import DEFAULT_TERMINAL_THEME, SVG_EXPORT_THEME, TerminalTheme
from .text import Text, TextType
from .theme import Theme, ThemeStack
if TYPE_CHECKING:
from ._windows import WindowsConsoleFeatures
from .live import Live
from .status import Status
JUPYTER_DEFAULT_COLUMNS = 115
JUPYTER_DEFAULT_LINES = 100
WINDOWS = sys.platform == "win32"
HighlighterType = Callable[[Union[str, "Text"]], "Text"]
JustifyMethod = Literal["default", "left", "center", "right", "full"]
OverflowMethod = Literal["fold", "crop", "ellipsis", "ignore"]
class NoChange:
pass
NO_CHANGE = NoChange()
try:
_STDIN_FILENO = sys.__stdin__.fileno() # type: ignore[union-attr]
except Exception:
_STDIN_FILENO = 0
try:
_STDOUT_FILENO = sys.__stdout__.fileno() # type: ignore[union-attr]
except Exception:
_STDOUT_FILENO = 1
try:
_STDERR_FILENO = sys.__stderr__.fileno() # type: ignore[union-attr]
except Exception:
_STDERR_FILENO = 2
_STD_STREAMS = (_STDIN_FILENO, _STDOUT_FILENO, _STDERR_FILENO)
_STD_STREAMS_OUTPUT = (_STDOUT_FILENO, _STDERR_FILENO)
_TERM_COLORS = {
"kitty": ColorSystem.EIGHT_BIT,
"256color": ColorSystem.EIGHT_BIT,
"16color": ColorSystem.STANDARD,
}
class ConsoleDimensions(NamedTuple):
width: int
"""The width of the console in 'cells'."""
height: int
"""The height of the console in lines."""
@dataclass
class ConsoleOptions:
size: ConsoleDimensions
"""Size of console."""
legacy_windows: bool
"""legacy_windows: flag for legacy windows."""
min_width: int
"""Minimum width of renderable."""
max_width: int
"""Maximum width of renderable."""
is_terminal: bool
"""True if the target is a terminal, otherwise False."""
encoding: str
"""Encoding of terminal."""
max_height: int
"""Height of container (starts as terminal)"""
justify: Optional[JustifyMethod] = None
"""Justify value override for renderable."""
overflow: Optional[OverflowMethod] = None
"""Overflow value override for renderable."""
no_wrap: Optional[bool] = False
"""Disable wrapping for text."""
highlight: Optional[bool] = None
"""Highlight override for render_str."""
markup: Optional[bool] = None
"""Enable markup when rendering strings."""
height: Optional[int] = None
@property
def ascii_only(self) -> bool:
return not self.encoding.startswith("utf")
def copy(self) -> "ConsoleOptions":
options: ConsoleOptions = ConsoleOptions.__new__(ConsoleOptions)
options.__dict__ = self.__dict__.copy()
return options
def update(
self,
*,
width: Union[int, NoChange] = NO_CHANGE,
min_width: Union[int, NoChange] = NO_CHANGE,
max_width: Union[int, NoChange] = NO_CHANGE,
justify: Union[Optional[JustifyMethod], NoChange] = NO_CHANGE,
overflow: Union[Optional[OverflowMethod], NoChange] = NO_CHANGE,
no_wrap: Union[Optional[bool], NoChange] = NO_CHANGE,
highlight: Union[Optional[bool], NoChange] = NO_CHANGE,
markup: Union[Optional[bool], NoChange] = NO_CHANGE,
height: Union[Optional[int], NoChange] = NO_CHANGE,
) -> "ConsoleOptions":
options = self.copy()
if not isinstance(width, NoChange):
options.min_width = options.max_width = max(0, width)
if not isinstance(min_width, NoChange):
options.min_width = min_width
if not isinstance(max_width, NoChange):
options.max_width = max_width
if not isinstance(justify, NoChange):
options.justify = justify
if not isinstance(overflow, NoChange):
options.overflow = overflow
if not isinstance(no_wrap, NoChange):
options.no_wrap = no_wrap
if not isinstance(highlight, NoChange):
options.highlight = highlight
if not isinstance(markup, NoChange):
options.markup = markup
if not isinstance(height, NoChange):
if height is not None:
options.max_height = height
options.height = None if height is None else max(0, height)
return options
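# The NO_CHANGE sentinel lets update() touch only the fields a caller
# actually passes; a hypothetical usage (illustrative, not in the original):
#
#     options = console.options
#     narrow = options.update(width=20)         # min_width == max_width == 20
#     unwrapped = options.update(no_wrap=True)  # everything else untouched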
def update_width(self, width: int) -> "ConsoleOptions":
options = self.copy()
options.min_width = options.max_width = max(0, width)
return options
def update_height(self, height: int) -> "ConsoleOptions":
options = self.copy()
options.max_height = options.height = height
return options
def reset_height(self) -> "ConsoleOptions":
options = self.copy()
options.height = None
return options
def update_dimensions(self, width: int, height: int) -> "ConsoleOptions":
options = self.copy()
options.min_width = options.max_width = max(0, width)
options.height = options.max_height = height
return options
@runtime_checkable
class RichCast(Protocol):
def __rich__(
self,
) -> Union["ConsoleRenderable", "RichCast", str]: # pragma: no cover
...
@runtime_checkable
class ConsoleRenderable(Protocol):
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult": # pragma: no cover
...
# A type that may be rendered by Console.
RenderableType = Union[ConsoleRenderable, RichCast, str]
"""A string or any object that may be rendered by Rich."""
# The result of calling a __rich_console__ method.
RenderResult = Iterable[Union[RenderableType, Segment]]
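# Minimal sketch of the ConsoleRenderable protocol in use (illustrative; the
# Greeting class is hypothetical):
#
#     class Greeting:
#         def __rich_console__(
#             self, console: "Console", options: "ConsoleOptions"
#         ) -> "RenderResult":
#             yield Text("Hello, World!", style="bold magenta")
#
#     Console().print(Greeting())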
_null_highlighter = NullHighlighter()
class CaptureError(Exception):
class NewLine:
def __init__(self, count: int = 1) -> None:
self.count = count
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> Iterable[Segment]:
yield Segment("\n" * self.count)
class ScreenUpdate:
def __init__(self, lines: List[List[Segment]], x: int, y: int) -> None:
self._lines = lines
self.x = x
self.y = y
def __rich_console__(
self, console: "Console", options: ConsoleOptions
) -> RenderResult:
x = self.x
move_to = Control.move_to
for offset, line in enumerate(self._lines, self.y):
yield move_to(x, offset)
yield from line
class Capture:
def __init__(self, console: "Console") -> None:
self._console = console
self._result: Optional[str] = None
def __enter__(self) -> "Capture":
self._console.begin_capture()
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
self._result = self._console.end_capture()
def get(self) -> str:
if self._result is None:
raise CaptureError(
"Capture result is not available until context manager exits."
)
return self._result
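# Typical Capture usage via Console.capture() (illustrative sketch):
#
#     console = Console()
#     with console.capture() as capture:
#         console.print("[bold]Hello[/bold]")
#     text = capture.get()  # rendered output as a plain string
#
# Calling get() before the `with` block exits raises CaptureError, as above.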
class ThemeContext:
def __init__(self, console: "Console", theme: Theme, inherit: bool = True) -> None:
self.console = console
self.theme = theme
self.inherit = inherit
def __enter__(self) -> "ThemeContext":
self.console.push_theme(self.theme)
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
self.console.pop_theme()
class PagerContext:
def __init__(
self,
console: "Console",
pager: Optional[Pager] = None,
styles: bool = False,
links: bool = False,
) -> None:
self._console = console
self.pager = SystemPager() if pager is None else pager
self.styles = styles
self.links = links
def __enter__(self) -> "PagerContext":
self._console._enter_buffer()
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
if exc_type is None:
with self._console._lock:
buffer: List[Segment] = self._console._buffer[:]
del self._console._buffer[:]
segments: Iterable[Segment] = buffer
if not self.styles:
segments = Segment.strip_styles(segments)
elif not self.links:
segments = Segment.strip_links(segments)
content = self._console._render_buffer(segments)
self.pager.show(content)
self._console._exit_buffer()
class ScreenContext:
def __init__(
self, console: "Console", hide_cursor: bool, style: StyleType = ""
) -> None:
self.console = console
self.hide_cursor = hide_cursor
self.screen = Screen(style=style)
self._changed = False
def update(
self, *renderables: RenderableType, style: Optional[StyleType] = None
) -> None:
if renderables:
self.screen.renderable = (
Group(*renderables) if len(renderables) > 1 else renderables[0]
)
if style is not None:
self.screen.style = style
self.console.print(self.screen, end="")
def __enter__(self) -> "ScreenContext":
self._changed = self.console.set_alt_screen(True)
if self._changed and self.hide_cursor:
self.console.show_cursor(False)
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
if self._changed:
self.console.set_alt_screen(False)
if self.hide_cursor:
self.console.show_cursor(True)
class Group:
def __init__(self, *renderables: "RenderableType", fit: bool = True) -> None:
self._renderables = renderables
self.fit = fit
self._render: Optional[List[RenderableType]] = None
@property
def renderables(self) -> List["RenderableType"]:
if self._render is None:
self._render = list(self._renderables)
return self._render
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> "Measurement":
if self.fit:
return measure_renderables(console, options, self.renderables)
else:
return Measurement(options.max_width, options.max_width)
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> RenderResult:
yield from self.renderables
def group(fit: bool = True) -> Callable[..., Callable[..., Group]]:
def decorator(
method: Callable[..., Iterable[RenderableType]],
) -> Callable[..., Group]:
@wraps(method)
def _replace(*args: Any, **kwargs: Any) -> Group:
renderables = method(*args, **kwargs)
return Group(*renderables, fit=fit)
return _replace
return decorator
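# Usage sketch for the @group decorator (illustrative; Panel is assumed from
# rich.panel and console is assumed to be a Console instance):
#
#     @group()
#     def get_content():
#         yield Panel("Hello")
#         yield Panel("World")
#
#     console.print(get_content())  # the yielded items render as one Group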
def _is_jupyter() -> bool: # pragma: no cover
try:
get_ipython # type: ignore[name-defined]
except NameError:
return False
ipython = get_ipython() # type: ignore[name-defined]
shell = ipython.__class__.__name__
if (
"google.colab" in str(ipython.__class__)
or os.getenv("DATABRICKS_RUNTIME_VERSION")
or shell == "ZMQInteractiveShell"
):
return True # Jupyter notebook or qtconsole
elif shell == "TerminalInteractiveShell":
return False # Terminal running IPython
else:
return False # Other type (?)
COLOR_SYSTEMS = {
"standard": ColorSystem.STANDARD,
"256": ColorSystem.EIGHT_BIT,
"truecolor": ColorSystem.TRUECOLOR,
"windows": ColorSystem.WINDOWS,
}
_COLOR_SYSTEMS_NAMES = {system: name for name, system in COLOR_SYSTEMS.items()}
@dataclass
class ConsoleThreadLocals(threading.local):
theme_stack: ThemeStack
buffer: List[Segment] = field(default_factory=list)
buffer_index: int = 0
class RenderHook(ABC):
@abstractmethod
def process_renderables(
self, renderables: List[ConsoleRenderable]
) -> List[ConsoleRenderable]:
_windows_console_features: Optional["WindowsConsoleFeatures"] = None
def get_windows_console_features() -> "WindowsConsoleFeatures": # pragma: no cover
global _windows_console_features
if _windows_console_features is not None:
return _windows_console_features
from ._windows import get_windows_console_features
_windows_console_features = get_windows_console_features()
return _windows_console_features
def detect_legacy_windows() -> bool:
return WINDOWS and not get_windows_console_features().vt
class Console:
_environ: Mapping[str, str] = os.environ
def __init__(
self,
*,
color_system: Optional[
Literal["auto", "standard", "256", "truecolor", "windows"]
] = "auto",
force_terminal: Optional[bool] = None,
force_jupyter: Optional[bool] = None,
force_interactive: Optional[bool] = None,
soft_wrap: bool = False,
theme: Optional[Theme] = None,
stderr: bool = False,
file: Optional[IO[str]] = None,
quiet: bool = False,
width: Optional[int] = None,
height: Optional[int] = None,
style: Optional[StyleType] = None,
no_color: Optional[bool] = None,
tab_size: int = 8,
record: bool = False,
markup: bool = True,
emoji: bool = True,
emoji_variant: Optional[EmojiVariant] = None,
highlight: bool = True,
log_time: bool = True,
log_path: bool = True,
log_time_format: Union[str, FormatTimeCallable] = "[%X]",
highlighter: Optional["HighlighterType"] = ReprHighlighter(),
legacy_windows: Optional[bool] = None,
safe_box: bool = True,
get_datetime: Optional[Callable[[], datetime]] = None,
get_time: Optional[Callable[[], float]] = None,
_environ: Optional[Mapping[str, str]] = None,
):
# Copy of os.environ allows us to replace it for testing
if _environ is not None:
self._environ = _environ
self.is_jupyter = _is_jupyter() if force_jupyter is None else force_jupyter
if self.is_jupyter:
if width is None:
jupyter_columns = self._environ.get("JUPYTER_COLUMNS")
if jupyter_columns is not None and jupyter_columns.isdigit():
width = int(jupyter_columns)
else:
width = JUPYTER_DEFAULT_COLUMNS
if height is None:
jupyter_lines = self._environ.get("JUPYTER_LINES")
if jupyter_lines is not None and jupyter_lines.isdigit():
height = int(jupyter_lines)
else:
height = JUPYTER_DEFAULT_LINES
self.tab_size = tab_size
self.record = record
self._markup = markup
self._emoji = emoji
self._emoji_variant: Optional[EmojiVariant] = emoji_variant
self._highlight = highlight
self.legacy_windows: bool = (
(detect_legacy_windows() and not self.is_jupyter)
if legacy_windows is None
else legacy_windows
)
if width is None:
columns = self._environ.get("COLUMNS")
if columns is not None and columns.isdigit():
width = int(columns) - self.legacy_windows
if height is None:
lines = self._environ.get("LINES")
if lines is not None and lines.isdigit():
height = int(lines)
self.soft_wrap = soft_wrap
self._width = width
self._height = height
self._color_system: Optional[ColorSystem]
self._force_terminal = None
if force_terminal is not None:
self._force_terminal = force_terminal
self._file = file
self.quiet = quiet
self.stderr = stderr
if color_system is None:
self._color_system = None
elif color_system == "auto":
self._color_system = self._detect_color_system()
else:
self._color_system = COLOR_SYSTEMS[color_system]
self._lock = threading.RLock()
self._log_render = LogRender(
show_time=log_time,
show_path=log_path,
time_format=log_time_format,
)
self.highlighter: HighlighterType = highlighter or _null_highlighter
self.safe_box = safe_box
self.get_datetime = get_datetime or datetime.now
self.get_time = get_time or monotonic
self.style = style
self.no_color = (
no_color
if no_color is not None
else self._environ.get("NO_COLOR", "") != ""
)
if force_interactive is None:
tty_interactive = self._environ.get("TTY_INTERACTIVE", None)
if tty_interactive is not None:
if tty_interactive == "0":
force_interactive = False
elif tty_interactive == "1":
force_interactive = True
self.is_interactive = (
(self.is_terminal and not self.is_dumb_terminal)
if force_interactive is None
else force_interactive
)
self._record_buffer_lock = threading.RLock()
self._thread_locals = ConsoleThreadLocals(
theme_stack=ThemeStack(themes.DEFAULT if theme is None else theme)
)
self._record_buffer: List[Segment] = []
self._render_hooks: List[RenderHook] = []
self._live_stack: List[Live] = []
self._is_alt_screen = False
def __repr__(self) -> str:
return f"<console width={self.width} {self._color_system!s}>"
@property
def file(self) -> IO[str]:
file = self._file or (sys.stderr if self.stderr else sys.stdout)
file = getattr(file, "rich_proxied_file", file)
if file is None:
file = NULL_FILE
return file
@file.setter
def file(self, new_file: IO[str]) -> None:
self._file = new_file
@property
def _buffer(self) -> List[Segment]:
return self._thread_locals.buffer
@property
def _buffer_index(self) -> int:
return self._thread_locals.buffer_index
@_buffer_index.setter
def _buffer_index(self, value: int) -> None:
self._thread_locals.buffer_index = value
@property
def _theme_stack(self) -> ThemeStack:
return self._thread_locals.theme_stack
def _detect_color_system(self) -> Optional[ColorSystem]:
if self.is_jupyter:
return ColorSystem.TRUECOLOR
if not self.is_terminal or self.is_dumb_terminal:
return None
if WINDOWS: # pragma: no cover
if self.legacy_windows: # pragma: no cover
return ColorSystem.WINDOWS
windows_console_features = get_windows_console_features()
return (
ColorSystem.TRUECOLOR
if windows_console_features.truecolor
else ColorSystem.EIGHT_BIT
)
else:
color_term = self._environ.get("COLORTERM", "").strip().lower()
if color_term in ("truecolor", "24bit"):
return ColorSystem.TRUECOLOR
term = self._environ.get("TERM", "").strip().lower()
_term_name, _hyphen, colors = term.rpartition("-")
color_system = _TERM_COLORS.get(colors, ColorSystem.STANDARD)
return color_system
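# How the detection above maps environment variables (illustrative sketch
# using the _environ test hook accepted by __init__; assumes POSIX and a
# non-Jupyter session):
#
#     Console(force_terminal=True,
#             _environ={"COLORTERM": "truecolor"}).color_system  # "truecolor"
#     Console(force_terminal=True,
#             _environ={"TERM": "xterm-256color"}).color_system  # "256"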
def _enter_buffer(self) -> None:
self._buffer_index += 1
def _exit_buffer(self) -> None:
self._buffer_index -= 1
self._check_buffer()
def set_live(self, live: "Live") -> bool:
with self._lock:
self._live_stack.append(live)
return len(self._live_stack) == 1
def clear_live(self) -> None:
with self._lock:
self._live_stack.pop()
def push_render_hook(self, hook: RenderHook) -> None:
with self._lock:
self._render_hooks.append(hook)
def pop_render_hook(self) -> None:
with self._lock:
self._render_hooks.pop()
def __enter__(self) -> "Console":
self._enter_buffer()
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
self._exit_buffer()
def begin_capture(self) -> None:
self._enter_buffer()
def end_capture(self) -> str:
render_result = self._render_buffer(self._buffer)
del self._buffer[:]
self._exit_buffer()
return render_result
def push_theme(self, theme: Theme, *, inherit: bool = True) -> None:
self._theme_stack.push_theme(theme, inherit=inherit)
def pop_theme(self) -> None:
self._theme_stack.pop_theme()
def use_theme(self, theme: Theme, *, inherit: bool = True) -> ThemeContext:
return ThemeContext(self, theme, inherit)
@property
def color_system(self) -> Optional[str]:
if self._color_system is not None:
return _COLOR_SYSTEMS_NAMES[self._color_system]
else:
return None
@property
def encoding(self) -> str:
return (getattr(self.file, "encoding", "utf-8") or "utf-8").lower()
@property
def is_terminal(self) -> bool:
# If dev has explicitly set this value, return it
if self._force_terminal is not None:
return self._force_terminal
# Fudge for Idle
if hasattr(sys.stdin, "__module__") and sys.stdin.__module__.startswith(
"idlelib"
):
# Return False for Idle which claims to be a tty but can't handle ansi codes
return False
if self.is_jupyter:
# return False for Jupyter, which may have FORCE_COLOR set
return False
environ = self._environ
tty_compatible = environ.get("TTY_COMPATIBLE", "")
# 0 indicates device is not tty compatible
if tty_compatible == "0":
return False
# 1 indicates device is tty compatible
if tty_compatible == "1":
return True
# https://force-color.org/
force_color = environ.get("FORCE_COLOR")
if force_color is not None:
return force_color != ""
# Any other value defaults to auto detect
isatty: Optional[Callable[[], bool]] = getattr(self.file, "isatty", None)
try:
return False if isatty is None else isatty()
except ValueError:
# in some situation (at the end of a pytest run for example) isatty() can raise
# ValueError: I/O operation on closed file
# return False because we aren't in a terminal anymore
return False
@property
def is_dumb_terminal(self) -> bool:
_term = self._environ.get("TERM", "")
is_dumb = _term.lower() in ("dumb", "unknown")
return self.is_terminal and is_dumb
@property
def options(self) -> ConsoleOptions:
size = self.size
return ConsoleOptions(
max_height=size.height,
size=size,
legacy_windows=self.legacy_windows,
min_width=1,
max_width=size.width,
encoding=self.encoding,
is_terminal=self.is_terminal,
)
@property
def size(self) -> ConsoleDimensions:
if self._width is not None and self._height is not None:
return ConsoleDimensions(self._width - self.legacy_windows, self._height)
if self.is_dumb_terminal:
return ConsoleDimensions(80, 25)
width: Optional[int] = None
height: Optional[int] = None
streams = _STD_STREAMS_OUTPUT if WINDOWS else _STD_STREAMS
for file_descriptor in streams:
try:
width, height = os.get_terminal_size(file_descriptor)
except (AttributeError, ValueError, OSError): # Probably not a terminal
pass
else:
break
columns = self._environ.get("COLUMNS")
if columns is not None and columns.isdigit():
width = int(columns)
lines = self._environ.get("LINES")
if lines is not None and lines.isdigit():
height = int(lines)
# get_terminal_size can report 0, 0 if run from pseudo-terminal
width = width or 80
height = height or 25
return ConsoleDimensions(
width - self.legacy_windows if self._width is None else self._width,
height if self._height is None else self._height,
)
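# Size resolution above (illustrative note): explicit width/height win;
# otherwise the terminal is queried and COLUMNS/LINES, when set, override
# the detected size, with 80x25 as the final fallback. For example:
#
#     $ COLUMNS=100 LINES=40 python -c \
#           "from rich.console import Console; print(Console().size)"
#     ConsoleDimensions(width=100, height=40)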
@size.setter
def size(self, new_size: Tuple[int, int]) -> None:
width, height = new_size
self._width = width
self._height = height
@property
def width(self) -> int:
return self.size.width
@width.setter
def width(self, width: int) -> None:
self._width = width
@property
def height(self) -> int:
return self.size.height
@height.setter
def height(self, height: int) -> None:
self._height = height
def bell(self) -> None:
self.control(Control.bell())
def capture(self) -> Capture:
capture = Capture(self)
return capture
def pager(
self, pager: Optional[Pager] = None, styles: bool = False, links: bool = False
) -> PagerContext:
return PagerContext(self, pager=pager, styles=styles, links=links)
def line(self, count: int = 1) -> None:
assert count >= 0, "count must be >= 0"
self.print(NewLine(count))
def clear(self, home: bool = True) -> None:
if home:
self.control(Control.clear(), Control.home())
else:
self.control(Control.clear())
def status(
self,
status: RenderableType,
*,
spinner: str = "dots",
spinner_style: StyleType = "status.spinner",
speed: float = 1.0,
refresh_per_second: float = 12.5,
) -> "Status":
from .status import Status
status_renderable = Status(
status,
console=self,
spinner=spinner,
spinner_style=spinner_style,
speed=speed,
refresh_per_second=refresh_per_second,
)
return status_renderable
def show_cursor(self, show: bool = True) -> bool:
if self.is_terminal:
self.control(Control.show_cursor(show))
return True
return False
def set_alt_screen(self, enable: bool = True) -> bool:
changed = False
if self.is_terminal and not self.legacy_windows:
self.control(Control.alt_screen(enable))
changed = True
self._is_alt_screen = enable
return changed
@property
def is_alt_screen(self) -> bool:
return self._is_alt_screen
def set_window_title(self, title: str) -> bool:
if self.is_terminal:
self.control(Control.title(title))
return True
return False
def screen(
self, hide_cursor: bool = True, style: Optional[StyleType] = None
) -> "ScreenContext":
return ScreenContext(self, hide_cursor=hide_cursor, style=style or "")
def measure(
self, renderable: RenderableType, *, options: Optional[ConsoleOptions] = None
) -> Measurement:
measurement = Measurement.get(self, options or self.options, renderable)
return measurement
def render(
self, renderable: RenderableType, options: Optional[ConsoleOptions] = None
) -> Iterable[Segment]:
_options = options or self.options
if _options.max_width < 1:
# No space to render anything. This prevents potential recursion errors.
return
render_iterable: RenderResult
renderable = rich_cast(renderable)
if hasattr(renderable, "__rich_console__") and not isclass(renderable):
render_iterable = renderable.__rich_console__(self, _options)
elif isinstance(renderable, str):
text_renderable = self.render_str(
renderable, highlight=_options.highlight, markup=_options.markup
)
render_iterable = text_renderable.__rich_console__(self, _options)
else:
raise errors.NotRenderableError(
f"Unable to render {renderable!r}; "
"A str, Segment or object with __rich_console__ method is required"
)
try:
iter_render = iter(render_iterable)
except TypeError:
raise errors.NotRenderableError(
f"object {render_iterable!r} is not renderable"
)
_Segment = Segment
_options = _options.reset_height()
for render_output in iter_render:
if isinstance(render_output, _Segment):
yield render_output
else:
yield from self.render(render_output, _options)
def render_lines(
self,
renderable: RenderableType,
options: Optional[ConsoleOptions] = None,
*,
style: Optional[Style] = None,
pad: bool = True,
new_lines: bool = False,
) -> List[List[Segment]]:
with self._lock:
render_options = options or self.options
_rendered = self.render(renderable, render_options)
if style:
_rendered = Segment.apply_style(_rendered, style)
render_height = render_options.height
if render_height is not None:
render_height = max(0, render_height)
lines = list(
islice(
Segment.split_and_crop_lines(
_rendered,
render_options.max_width,
include_new_lines=new_lines,
pad=pad,
style=style,
),
None,
render_height,
)
)
if render_options.height is not None:
extra_lines = render_options.height - len(lines)
if extra_lines > 0:
pad_line = [
(
[
Segment(" " * render_options.max_width, style),
Segment("\n"),
]
if new_lines
else [Segment(" " * render_options.max_width, style)]
)
]
lines.extend(pad_line * extra_lines)
return lines
def render_str(
self,
text: str,
*,
style: Union[str, Style] = "",
justify: Optional[JustifyMethod] = None,
overflow: Optional[OverflowMethod] = None,
emoji: Optional[bool] = None,
markup: Optional[bool] = None,
highlight: Optional[bool] = None,
highlighter: Optional[HighlighterType] = None,
) -> "Text":
emoji_enabled = emoji or (emoji is None and self._emoji)
markup_enabled = markup or (markup is None and self._markup)
highlight_enabled = highlight or (highlight is None and self._highlight)
if markup_enabled:
rich_text = render_markup(
text,
style=style,
emoji=emoji_enabled,
emoji_variant=self._emoji_variant,
)
rich_text.justify = justify
rich_text.overflow = overflow
else:
rich_text = Text(
(
_emoji_replace(text, default_variant=self._emoji_variant)
if emoji_enabled
else text
),
justify=justify,
overflow=overflow,
style=style,
)
_highlighter = (highlighter or self.highlighter) if highlight_enabled else None
if _highlighter is not None:
highlight_text = _highlighter(str(rich_text))
highlight_text.copy_styles(rich_text)
return highlight_text
return rich_text
def get_style(
self, name: Union[str, Style], *, default: Optional[Union[Style, str]] = None
) -> Style:
if isinstance(name, Style):
return name
try:
style = self._theme_stack.get(name)
if style is None:
style = Style.parse(name)
return style.copy() if style.link else style
except errors.StyleSyntaxError as error:
if default is not None:
return self.get_style(default)
raise errors.MissingStyle(
f"Failed to get style {name!r}; {error}"
) from None
def _collect_renderables(
self,
objects: Iterable[Any],
sep: str,
end: str,
*,
justify: Optional[JustifyMethod] = None,
emoji: Optional[bool] = None,
markup: Optional[bool] = None,
highlight: Optional[bool] = None,
) -> List[ConsoleRenderable]:
renderables: List[ConsoleRenderable] = []
_append = renderables.append
text: List[Text] = []
append_text = text.append
append = _append
if justify in ("left", "center", "right"):
def align_append(renderable: RenderableType) -> None:
_append(Align(renderable, cast(AlignMethod, justify)))
append = align_append
_highlighter: HighlighterType = _null_highlighter
if highlight or (highlight is None and self._highlight):
_highlighter = self.highlighter
def check_text() -> None:
if text:
sep_text = Text(sep, justify=justify, end=end)
append(sep_text.join(text))
text.clear()
for renderable in objects:
renderable = rich_cast(renderable)
if isinstance(renderable, str):
append_text(
self.render_str(
renderable,
emoji=emoji,
markup=markup,
highlight=highlight,
highlighter=_highlighter,
)
)
elif isinstance(renderable, Text):
append_text(renderable)
elif isinstance(renderable, ConsoleRenderable):
check_text()
append(renderable)
elif is_expandable(renderable):
check_text()
append(Pretty(renderable, highlighter=_highlighter))
else:
append_text(_highlighter(str(renderable)))
check_text()
if self.style is not None:
style = self.get_style(self.style)
renderables = [Styled(renderable, style) for renderable in renderables]
return renderables
def rule(
self,
title: TextType = "",
*,
characters: str = "─",
style: Union[str, Style] = "rule.line",
align: AlignMethod = "center",
) -> None:
from .rule import Rule
rule = Rule(title=title, characters=characters, style=style, align=align)
self.print(rule)
def control(self, *control: Control) -> None:
if not self.is_dumb_terminal:
with self:
self._buffer.extend(_control.segment for _control in control)
def out(
self,
*objects: Any,
sep: str = " ",
end: str = "\n",
style: Optional[Union[str, Style]] = None,
highlight: Optional[bool] = None,
) -> None:
raw_output: str = sep.join(str(_object) for _object in objects)
self.print(
raw_output,
style=style,
highlight=highlight,
emoji=False,
markup=False,
no_wrap=True,
overflow="ignore",
crop=False,
end=end,
)
def print(
self,
*objects: Any,
sep: str = " ",
end: str = "\n",
style: Optional[Union[str, Style]] = None,
justify: Optional[JustifyMethod] = None,
overflow: Optional[OverflowMethod] = None,
no_wrap: Optional[bool] = None,
emoji: Optional[bool] = None,
markup: Optional[bool] = None,
highlight: Optional[bool] = None,
width: Optional[int] = None,
height: Optional[int] = None,
crop: bool = True,
soft_wrap: Optional[bool] = None,
new_line_start: bool = False,
) -> None:
if not objects:
objects = (NewLine(),)
if soft_wrap is None:
soft_wrap = self.soft_wrap
if soft_wrap:
if no_wrap is None:
no_wrap = True
if overflow is None:
overflow = "ignore"
crop = False
render_hooks = self._render_hooks[:]
with self:
renderables = self._collect_renderables(
objects,
sep,
end,
justify=justify,
emoji=emoji,
markup=markup,
highlight=highlight,
)
for hook in render_hooks:
renderables = hook.process_renderables(renderables)
render_options = self.options.update(
justify=justify,
overflow=overflow,
width=min(width, self.width) if width is not None else NO_CHANGE,
height=height,
no_wrap=no_wrap,
markup=markup,
highlight=highlight,
)
new_segments: List[Segment] = []
extend = new_segments.extend
render = self.render
if style is None:
for renderable in renderables:
extend(render(renderable, render_options))
else:
render_style = self.get_style(style)
new_line = Segment.line()
for renderable in renderables:
for line, add_new_line in Segment.split_lines_terminator(
render(renderable, render_options)
):
extend(Segment.apply_style(line, render_style))
if add_new_line:
new_segments.append(new_line)
if new_line_start:
if (
len("".join(segment.text for segment in new_segments).splitlines())
> 1
):
new_segments.insert(0, Segment.line())
if crop:
buffer_extend = self._buffer.extend
for line in Segment.split_and_crop_lines(
new_segments, self.width, pad=False
):
buffer_extend(line)
else:
self._buffer.extend(new_segments)
def print_json(
self,
json: Optional[str] = None,
*,
data: Any = None,
indent: Union[None, int, str] = 2,
highlight: bool = True,
skip_keys: bool = False,
ensure_ascii: bool = False,
check_circular: bool = True,
allow_nan: bool = True,
default: Optional[Callable[[Any], Any]] = None,
sort_keys: bool = False,
) -> None:
from rich.json import JSON
if json is None:
json_renderable = JSON.from_data(
data,
indent=indent,
highlight=highlight,
skip_keys=skip_keys,
ensure_ascii=ensure_ascii,
check_circular=check_circular,
allow_nan=allow_nan,
default=default,
sort_keys=sort_keys,
)
else:
if not isinstance(json, str):
raise TypeError(
f"json must be str. Did you mean print_json(data={json!r}) ?"
)
json_renderable = JSON(
json,
indent=indent,
highlight=highlight,
skip_keys=skip_keys,
ensure_ascii=ensure_ascii,
check_circular=check_circular,
allow_nan=allow_nan,
default=default,
sort_keys=sort_keys,
)
self.print(json_renderable, soft_wrap=True)
def update_screen(
self,
renderable: RenderableType,
*,
region: Optional[Region] = None,
options: Optional[ConsoleOptions] = None,
) -> None:
if not self.is_alt_screen:
raise errors.NoAltScreen("Alt screen must be enabled to call update_screen")
render_options = options or self.options
if region is None:
x = y = 0
render_options = render_options.update_dimensions(
render_options.max_width, render_options.height or self.height
)
else:
x, y, width, height = region
render_options = render_options.update_dimensions(width, height)
lines = self.render_lines(renderable, options=render_options)
self.update_screen_lines(lines, x, y)
def update_screen_lines(
self, lines: List[List[Segment]], x: int = 0, y: int = 0
) -> None:
if not self.is_alt_screen:
raise errors.NoAltScreen("Alt screen must be enabled to call update_screen")
screen_update = ScreenUpdate(lines, x, y)
segments = self.render(screen_update)
self._buffer.extend(segments)
self._check_buffer()
def print_exception(
self,
*,
width: Optional[int] = 100,
extra_lines: int = 3,
theme: Optional[str] = None,
word_wrap: bool = False,
show_locals: bool = False,
suppress: Iterable[Union[str, ModuleType]] = (),
max_frames: int = 100,
) -> None:
from .traceback import Traceback
traceback = Traceback(
width=width,
extra_lines=extra_lines,
theme=theme,
word_wrap=word_wrap,
show_locals=show_locals,
suppress=suppress,
max_frames=max_frames,
)
self.print(traceback)
@staticmethod
def _caller_frame_info(
offset: int,
currentframe: Callable[[], Optional[FrameType]] = inspect.currentframe,
) -> Tuple[str, int, Dict[str, Any]]:
# Ignore the frame of this local helper
offset += 1
frame = currentframe()
if frame is not None:
# Use the faster currentframe where implemented
while offset and frame is not None:
frame = frame.f_back
offset -= 1
assert frame is not None
return frame.f_code.co_filename, frame.f_lineno, frame.f_locals
else:
# Fallback to the slower stack
frame_info = inspect.stack()[offset]
return frame_info.filename, frame_info.lineno, frame_info.frame.f_locals
def log(
self,
*objects: Any,
sep: str = " ",
end: str = "\n",
style: Optional[Union[str, Style]] = None,
justify: Optional[JustifyMethod] = None,
emoji: Optional[bool] = None,
markup: Optional[bool] = None,
highlight: Optional[bool] = None,
log_locals: bool = False,
_stack_offset: int = 1,
) -> None:
if not objects:
objects = (NewLine(),)
render_hooks = self._render_hooks[:]
with self:
renderables = self._collect_renderables(
objects,
sep,
end,
justify=justify,
emoji=emoji,
markup=markup,
highlight=highlight,
)
if style is not None:
renderables = [Styled(renderable, style) for renderable in renderables]
filename, line_no, locals = self._caller_frame_info(_stack_offset)
link_path = None if filename.startswith("<") else os.path.abspath(filename)
path = filename.rpartition(os.sep)[-1]
if log_locals:
locals_map = {
key: value
for key, value in locals.items()
if not key.startswith("__")
}
renderables.append(render_scope(locals_map, title="[i]locals"))
renderables = [
self._log_render(
self,
renderables,
log_time=self.get_datetime(),
path=path,
line_no=line_no,
link_path=link_path,
)
]
for hook in render_hooks:
renderables = hook.process_renderables(renderables)
new_segments: List[Segment] = []
extend = new_segments.extend
render = self.render
render_options = self.options
for renderable in renderables:
extend(render(renderable, render_options))
buffer_extend = self._buffer.extend
for line in Segment.split_and_crop_lines(
new_segments, self.width, pad=False
):
buffer_extend(line)
def on_broken_pipe(self) -> None:
self.quiet = True
devnull = os.open(os.devnull, os.O_WRONLY)
os.dup2(devnull, sys.stdout.fileno())
raise SystemExit(1)
def _check_buffer(self) -> None:
if self.quiet:
del self._buffer[:]
return
try:
self._write_buffer()
except BrokenPipeError:
self.on_broken_pipe()
def _write_buffer(self) -> None:
with self._lock:
if self.record and not self._buffer_index:
with self._record_buffer_lock:
self._record_buffer.extend(self._buffer[:])
if self._buffer_index == 0:
if self.is_jupyter: # pragma: no cover
from .jupyter import display
display(self._buffer, self._render_buffer(self._buffer[:]))
del self._buffer[:]
else:
if WINDOWS:
use_legacy_windows_render = False
if self.legacy_windows:
fileno = get_fileno(self.file)
if fileno is not None:
use_legacy_windows_render = (
fileno in _STD_STREAMS_OUTPUT
)
if use_legacy_windows_render:
from rich._win32_console import LegacyWindowsTerm
from rich._windows_renderer import legacy_windows_render
buffer = self._buffer[:]
if self.no_color and self._color_system:
buffer = list(Segment.remove_color(buffer))
legacy_windows_render(buffer, LegacyWindowsTerm(self.file))
else:
# Either a non-std stream on legacy Windows, or modern Windows.
text = self._render_buffer(self._buffer[:])
# https://bugs.python.org/issue37871
# https://github.com/python/cpython/issues/82052
# We need to avoid writing more than 32Kb in a single write, due to the above bug
write = self.file.write
# Worst-case scenario, every character is 4 bytes of utf-8
MAX_WRITE = 32 * 1024 // 4
try:
if len(text) <= MAX_WRITE:
write(text)
else:
batch: List[str] = []
batch_append = batch.append
size = 0
for line in text.splitlines(True):
if size + len(line) > MAX_WRITE and batch:
write("".join(batch))
batch.clear()
size = 0
batch_append(line)
size += len(line)
if batch:
write("".join(batch))
batch.clear()
except UnicodeEncodeError as error:
error.reason = f"{error.reason}\n*** You may need to add PYTHONIOENCODING=utf-8 to your environment ***"
raise
else:
text = self._render_buffer(self._buffer[:])
try:
self.file.write(text)
except UnicodeEncodeError as error:
error.reason = f"{error.reason}\n*** You may need to add PYTHONIOENCODING=utf-8 to your environment ***"
raise
self.file.flush()
del self._buffer[:]
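# The batching above works around https://bugs.python.org/issue37871; a
# hypothetical standalone helper sketching the same idea (write_in_batches
# is not part of the original module):
#
#     MAX_WRITE = 32 * 1024 // 4  # worst case: 4 bytes per character
#
#     def write_in_batches(write, text: str) -> None:
#         batch, size = [], 0
#         for line in text.splitlines(True):
#             if size + len(line) > MAX_WRITE and batch:
#                 write("".join(batch))
#                 batch, size = [], 0
#             batch.append(line)
#             size += len(line)
#         if batch:
#             write("".join(batch))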
def _render_buffer(self, buffer: Iterable[Segment]) -> str:
output: List[str] = []
append = output.append
color_system = self._color_system
legacy_windows = self.legacy_windows
not_terminal = not self.is_terminal
if self.no_color and color_system:
buffer = Segment.remove_color(buffer)
for text, style, control in buffer:
if style:
append(
style.render(
text,
color_system=color_system,
legacy_windows=legacy_windows,
)
)
elif not (not_terminal and control):
append(text)
rendered = "".join(output)
return rendered
def input(
self,
prompt: TextType = "",
*,
markup: bool = True,
emoji: bool = True,
password: bool = False,
stream: Optional[TextIO] = None,
) -> str:
if prompt:
self.print(prompt, markup=markup, emoji=emoji, end="")
if password:
result = getpass("", stream=stream)
else:
if stream:
result = stream.readline()
else:
result = input()
return result
def export_text(self, *, clear: bool = True, styles: bool = False) -> str:
assert (
self.record
), "To export console contents set record=True in the constructor or instance"
with self._record_buffer_lock:
if styles:
text = "".join(
(style.render(text) if style else text)
for text, style, _ in self._record_buffer
)
else:
text = "".join(
segment.text
for segment in self._record_buffer
if not segment.control
)
if clear:
del self._record_buffer[:]
return text
def save_text(self, path: str, *, clear: bool = True, styles: bool = False) -> None:
text = self.export_text(clear=clear, styles=styles)
with open(path, "w", encoding="utf-8") as write_file:
write_file.write(text)
def export_html(
self,
*,
theme: Optional[TerminalTheme] = None,
clear: bool = True,
code_format: Optional[str] = None,
inline_styles: bool = False,
) -> str:
assert (
self.record
), "To export console contents set record=True in the constructor or instance"
fragments: List[str] = []
append = fragments.append
_theme = theme or DEFAULT_TERMINAL_THEME
stylesheet = ""
render_code_format = CONSOLE_HTML_FORMAT if code_format is None else code_format
with self._record_buffer_lock:
if inline_styles:
for text, style, _ in Segment.filter_control(
Segment.simplify(self._record_buffer)
):
text = escape(text)
if style:
rule = style.get_html_style(_theme)
if style.link:
text = f'<a href="{style.link}">{text}</a>'
text = f'<span style="{rule}">{text}</span>' if rule else text
append(text)
else:
styles: Dict[str, int] = {}
for text, style, _ in Segment.filter_control(
Segment.simplify(self._record_buffer)
):
text = escape(text)
if style:
rule = style.get_html_style(_theme)
style_number = styles.setdefault(rule, len(styles) + 1)
if style.link:
text = f'<a class="r{style_number}" href="{style.link}">{text}</a>'
else:
text = f'<span class="r{style_number}">{text}</span>'
append(text)
stylesheet_rules: List[str] = []
stylesheet_append = stylesheet_rules.append
for style_rule, style_number in styles.items():
if style_rule:
stylesheet_append(f".r{style_number} {{{style_rule}}}")
stylesheet = "\n".join(stylesheet_rules)
rendered_code = render_code_format.format(
code="".join(fragments),
stylesheet=stylesheet,
foreground=_theme.foreground_color.hex,
background=_theme.background_color.hex,
)
if clear:
del self._record_buffer[:]
return rendered_code
def save_html(
self,
path: str,
*,
theme: Optional[TerminalTheme] = None,
clear: bool = True,
code_format: str = CONSOLE_HTML_FORMAT,
inline_styles: bool = False,
) -> None:
html = self.export_html(
theme=theme,
clear=clear,
code_format=code_format,
inline_styles=inline_styles,
)
with open(path, "w", encoding="utf-8") as write_file:
write_file.write(html)
def export_svg(
self,
*,
title: str = "Rich",
theme: Optional[TerminalTheme] = None,
clear: bool = True,
code_format: str = CONSOLE_SVG_FORMAT,
font_aspect_ratio: float = 0.61,
unique_id: Optional[str] = None,
) -> str:
from rich.cells import cell_len
style_cache: Dict[Style, str] = {}
def get_svg_style(style: Style) -> str:
if style in style_cache:
return style_cache[style]
css_rules = []
color = (
_theme.foreground_color
if (style.color is None or style.color.is_default)
else style.color.get_truecolor(_theme)
)
bgcolor = (
_theme.background_color
if (style.bgcolor is None or style.bgcolor.is_default)
else style.bgcolor.get_truecolor(_theme)
)
if style.reverse:
color, bgcolor = bgcolor, color
if style.dim:
color = blend_rgb(color, bgcolor, 0.4)
css_rules.append(f"fill: {color.hex}")
if style.bold:
css_rules.append("font-weight: bold")
if style.italic:
css_rules.append("font-style: italic;")
if style.underline:
css_rules.append("text-decoration: underline;")
if style.strike:
css_rules.append("text-decoration: line-through;")
css = ";".join(css_rules)
style_cache[style] = css
return css
_theme = theme or SVG_EXPORT_THEME
width = self.width
char_height = 20
char_width = char_height * font_aspect_ratio
line_height = char_height * 1.22
margin_top = 1
margin_right = 1
margin_bottom = 1
margin_left = 1
padding_top = 40
padding_right = 8
padding_bottom = 8
padding_left = 8
padding_width = padding_left + padding_right
padding_height = padding_top + padding_bottom
margin_width = margin_left + margin_right
margin_height = margin_top + margin_bottom
text_backgrounds: List[str] = []
text_group: List[str] = []
classes: Dict[str, int] = {}
style_no = 1
def escape_text(text: str) -> str:
            return escape(text).replace(" ", "&#160;")
def make_tag(
name: str, content: Optional[str] = None, **attribs: object
) -> str:
def stringify(value: object) -> str:
                if isinstance(value, float):
return format(value, "g")
return str(value)
tag_attribs = " ".join(
f'{k.lstrip("_").replace("_", "-")}="{stringify(v)}"'
for k, v in attribs.items()
)
return (
f"<{name} {tag_attribs}>{content}</{name}>"
if content
else f"<{name} {tag_attribs}/>"
)
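        # Editor's note (illustrative): make_tag maps Python identifiers to
        # SVG/XML attribute names, so for example:
        #
        #     make_tag("text", "hi", _class="r1", x=8.0, shape_rendering="crispEdges")
        #
        # produces '<text class="r1" x="8" shape-rendering="crispEdges">hi</text>':
        # the leading underscore is stripped (avoiding the ``class`` keyword),
        # underscores become hyphens, and floats are formatted with "g" (8.0 -> "8").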
with self._record_buffer_lock:
segments = list(Segment.filter_control(self._record_buffer))
if clear:
self._record_buffer.clear()
if unique_id is None:
unique_id = "terminal-" + str(
zlib.adler32(
("".join(repr(segment) for segment in segments)).encode(
"utf-8",
"ignore",
)
+ title.encode("utf-8", "ignore")
)
)
y = 0
for y, line in enumerate(Segment.split_and_crop_lines(segments, length=width)):
x = 0
for text, style, _control in line:
style = style or Style()
rules = get_svg_style(style)
if rules not in classes:
classes[rules] = style_no
style_no += 1
class_name = f"r{classes[rules]}"
if style.reverse:
has_background = True
background = (
_theme.foreground_color.hex
if style.color is None
else style.color.get_truecolor(_theme).hex
)
else:
bgcolor = style.bgcolor
has_background = bgcolor is not None and not bgcolor.is_default
background = (
_theme.background_color.hex
if style.bgcolor is None
else style.bgcolor.get_truecolor(_theme).hex
)
text_length = cell_len(text)
if has_background:
text_backgrounds.append(
make_tag(
"rect",
fill=background,
x=x * char_width,
y=y * line_height + 1.5,
width=char_width * text_length,
height=line_height + 0.25,
shape_rendering="crispEdges",
)
)
if text != " " * len(text):
text_group.append(
make_tag(
"text",
escape_text(text),
_class=f"{unique_id}-{class_name}",
x=x * char_width,
y=y * line_height + char_height,
textLength=char_width * len(text),
clip_path=f"url(#{unique_id}-line-{y})",
)
)
x += cell_len(text)
line_offsets = [line_no * line_height + 1.5 for line_no in range(y)]
lines = "\n".join(
f"""<clipPath id="{unique_id}-line-{line_no}">
{make_tag("rect", x=0, y=offset, width=char_width * width, height=line_height + 0.25)}
</clipPath>"""
for line_no, offset in enumerate(line_offsets)
)
styles = "\n".join(
f".{unique_id}-r{rule_no} {{ {css} }}" for css, rule_no in classes.items()
)
backgrounds = "".join(text_backgrounds)
matrix = "".join(text_group)
terminal_width = ceil(width * char_width + padding_width)
terminal_height = (y + 1) * line_height + padding_height
chrome = make_tag(
"rect",
fill=_theme.background_color.hex,
stroke="rgba(255,255,255,0.35)",
stroke_width="1",
x=margin_left,
y=margin_top,
width=terminal_width,
height=terminal_height,
rx=8,
)
title_color = _theme.foreground_color.hex
if title:
chrome += make_tag(
"text",
escape_text(title),
_class=f"{unique_id}-title",
fill=title_color,
text_anchor="middle",
x=terminal_width // 2,
y=margin_top + char_height + 6,
)
chrome += f"""
<g transform="translate(26,22)">
<circle cx="0" cy="0" r="7" fill="#ff5f57"/>
<circle cx="22" cy="0" r="7" fill="#febc2e"/>
<circle cx="44" cy="0" r="7" fill="#28c840"/>
</g>
"""
svg = code_format.format(
unique_id=unique_id,
char_width=char_width,
char_height=char_height,
line_height=line_height,
terminal_width=char_width * width - 1,
terminal_height=(y + 1) * line_height - 1,
width=terminal_width + margin_width,
height=terminal_height + margin_height,
terminal_x=margin_left + padding_left,
terminal_y=margin_top + padding_top,
styles=styles,
chrome=chrome,
backgrounds=backgrounds,
matrix=matrix,
lines=lines,
)
return svg
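    # Illustrative usage (editor's addition; not part of the original source):
    #
    #     console = Console(record=True, width=60)
    #     console.print("[green]build passed[/green]")
    #     svg_markup = console.export_svg(title="CI output", unique_id="ci-demo")
    #
    # Pinning ``unique_id`` keeps the generated CSS class and clipPath prefixes
    # stable between runs; when it is omitted, the prefix is derived from an
    # adler32 checksum of the recorded segments, as computed above.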
def save_svg(
self,
path: str,
*,
title: str = "Rich",
theme: Optional[TerminalTheme] = None,
clear: bool = True,
code_format: str = CONSOLE_SVG_FORMAT,
font_aspect_ratio: float = 0.61,
unique_id: Optional[str] = None,
) -> None:
svg = self.export_svg(
title=title,
theme=theme,
clear=clear,
code_format=code_format,
font_aspect_ratio=font_aspect_ratio,
unique_id=unique_id,
)
with open(path, "w", encoding="utf-8") as write_file:
write_file.write(svg)
def _svg_hash(svg_main_code: str) -> str:
return str(zlib.adler32(svg_main_code.encode()))
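# Editor's note (illustrative, not part of the original module): adler32 is a
# fast, deterministic checksum, so identical SVG content always hashes to the
# same value across runs and platforms:
#
#     import zlib
#     zlib.adler32(b"abc")  # -> 38600999, always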
if __name__ == "__main__": # pragma: no cover
console = Console(record=True)
console.log(
"JSONRPC [i]request[/i]",
5,
1.3,
True,
False,
None,
{
"jsonrpc": "2.0",
"method": "subtract",
"params": {"minuend": 42, "subtrahend": 23},
"id": 3,
},
)
console.log("Hello, World!", "{'a': 1}", repr(console))
console.print(
{
"name": None,
"empty": [],
"quiz": {
"sport": {
"answered": True,
"q1": {
"question": "Which one is correct team name in NBA?",
"options": [
"New York Bulls",
"Los Angeles Kings",
"Golden State Warriors",
"Huston Rocket",
],
"answer": "Huston Rocket",
},
},
"maths": {
"answered": False,
"q1": {
"question": "5 + 7 = ?",
"options": [10, 11, 12, 13],
"answer": 12,
},
"q2": {
"question": "12 - 8 = ?",
"options": [1, 2, 3, 4],
"answer": 4,
},
},
},
}
) | --- +++ @@ -107,6 +107,7 @@
class ConsoleDimensions(NamedTuple):
+ """Size of the terminal."""
width: int
"""The width of the console in 'cells'."""
@@ -116,6 +117,7 @@
@dataclass
class ConsoleOptions:
+ """Options for __rich_console__ method."""
size: ConsoleDimensions
"""Size of console."""
@@ -145,9 +147,15 @@
@property
def ascii_only(self) -> bool:
+ """Check if renderables should use ascii only."""
return not self.encoding.startswith("utf")
def copy(self) -> "ConsoleOptions":
+ """Return a copy of the options.
+
+ Returns:
+ ConsoleOptions: a copy of self.
+ """
options: ConsoleOptions = ConsoleOptions.__new__(ConsoleOptions)
options.__dict__ = self.__dict__.copy()
return options
@@ -165,6 +173,7 @@ markup: Union[Optional[bool], NoChange] = NO_CHANGE,
height: Union[Optional[int], NoChange] = NO_CHANGE,
) -> "ConsoleOptions":
+ """Update values, return a copy."""
options = self.copy()
if not isinstance(width, NoChange):
options.min_width = options.max_width = max(0, width)
@@ -189,21 +198,51 @@ return options
def update_width(self, width: int) -> "ConsoleOptions":
+ """Update just the width, return a copy.
+
+ Args:
+ width (int): New width (sets both min_width and max_width)
+
+ Returns:
+ ~ConsoleOptions: New console options instance.
+ """
options = self.copy()
options.min_width = options.max_width = max(0, width)
return options
def update_height(self, height: int) -> "ConsoleOptions":
+ """Update the height, and return a copy.
+
+ Args:
+ height (int): New height
+
+ Returns:
+ ~ConsoleOptions: New Console options instance.
+ """
options = self.copy()
options.max_height = options.height = height
return options
def reset_height(self) -> "ConsoleOptions":
+ """Return a copy of the options with height set to ``None``.
+
+ Returns:
+ ~ConsoleOptions: New console options instance.
+ """
options = self.copy()
options.height = None
return options
def update_dimensions(self, width: int, height: int) -> "ConsoleOptions":
+ """Update the width and height, and return a copy.
+
+ Args:
+ width (int): New width (sets both min_width and max_width).
+ height (int): New height.
+
+ Returns:
+ ~ConsoleOptions: New console options instance.
+ """
options = self.copy()
options.min_width = options.max_width = max(0, width)
options.height = options.max_height = height
@@ -212,6 +251,7 @@
@runtime_checkable
class RichCast(Protocol):
+ """An object that may be 'cast' to a console renderable."""
def __rich__(
self,
@@ -221,6 +261,7 @@
@runtime_checkable
class ConsoleRenderable(Protocol):
+ """An object that supports the console protocol."""
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
@@ -239,9 +280,11 @@
class CaptureError(Exception):
+ """An error in the Capture context manager."""
class NewLine:
+ """A renderable to generate new line(s)"""
def __init__(self, count: int = 1) -> None:
self.count = count
@@ -253,6 +296,7 @@
class ScreenUpdate:
+ """Render a list of lines at a given offset."""
def __init__(self, lines: List[List[Segment]], x: int, y: int) -> None:
self._lines = lines
@@ -270,6 +314,12 @@
class Capture:
+ """Context manager to capture the result of printing to the console.
+ See :meth:`~rich.console.Console.capture` for how to use.
+
+ Args:
+ console (Console): A console instance to capture output.
+ """
def __init__(self, console: "Console") -> None:
self._console = console
@@ -288,6 +338,7 @@ self._result = self._console.end_capture()
def get(self) -> str:
+ """Get the result of the capture."""
if self._result is None:
raise CaptureError(
"Capture result is not available until context manager exits."
@@ -296,6 +347,7 @@
class ThemeContext:
+ """A context manager to use a temporary theme. See :meth:`~rich.console.Console.use_theme` for usage."""
def __init__(self, console: "Console", theme: Theme, inherit: bool = True) -> None:
self.console = console
@@ -316,6 +368,7 @@
class PagerContext:
+ """A context manager that 'pages' content. See :meth:`~rich.console.Console.pager` for usage."""
def __init__(
self,
@@ -354,6 +407,7 @@
class ScreenContext:
+ """A context manager that enables an alternative screen. See :meth:`~rich.console.Console.screen` for usage."""
def __init__(
self, console: "Console", hide_cursor: bool, style: StyleType = ""
@@ -366,6 +420,13 @@ def update(
self, *renderables: RenderableType, style: Optional[StyleType] = None
) -> None:
+ """Update the screen.
+
+ Args:
+ renderable (RenderableType, optional): Optional renderable to replace current renderable,
+ or None for no change. Defaults to None.
+            style (Style, optional): Replacement style, or None for no change. Defaults to None.
+ """
if renderables:
self.screen.renderable = (
Group(*renderables) if len(renderables) > 1 else renderables[0]
@@ -393,6 +454,12 @@
class Group:
+ """Takes a group of renderables and returns a renderable object that renders the group.
+
+ Args:
+ renderables (Iterable[RenderableType]): An iterable of renderable objects.
+ fit (bool, optional): Fit dimension of group to contents, or fill available space. Defaults to True.
+ """
def __init__(self, *renderables: "RenderableType", fit: bool = True) -> None:
self._renderables = renderables
@@ -420,10 +487,16 @@
def group(fit: bool = True) -> Callable[..., Callable[..., Group]]:
+ """A decorator that turns an iterable of renderables in to a group.
+
+ Args:
+ fit (bool, optional): Fit dimension of group to contents, or fill available space. Defaults to True.
+ """
def decorator(
method: Callable[..., Iterable[RenderableType]],
) -> Callable[..., Group]:
+ """Convert a method that returns an iterable of renderables in to a Group."""
@wraps(method)
def _replace(*args: Any, **kwargs: Any) -> Group:
@@ -436,6 +509,7 @@
def _is_jupyter() -> bool: # pragma: no cover
+ """Check if we're running in a Jupyter notebook."""
try:
get_ipython # type: ignore[name-defined]
except NameError:
@@ -466,6 +540,7 @@
@dataclass
class ConsoleThreadLocals(threading.local):
+ """Thread local values for Console context."""
theme_stack: ThemeStack
buffer: List[Segment] = field(default_factory=list)
@@ -473,11 +548,22 @@
class RenderHook(ABC):
+ """Provides hooks in to the render process."""
@abstractmethod
def process_renderables(
self, renderables: List[ConsoleRenderable]
) -> List[ConsoleRenderable]:
+ """Called with a list of objects to render.
+
+ This method can return a new list of renderables, or modify and return the same list.
+
+ Args:
+ renderables (List[ConsoleRenderable]): A number of renderable objects.
+
+ Returns:
+ List[ConsoleRenderable]: A replacement list of renderables.
+ """
_windows_console_features: Optional["WindowsConsoleFeatures"] = None
@@ -494,10 +580,45 @@
def detect_legacy_windows() -> bool:
+ """Detect legacy Windows."""
return WINDOWS and not get_windows_console_features().vt
class Console:
+ """A high level console interface.
+
+ Args:
+ color_system (str, optional): The color system supported by your terminal,
+ either ``"standard"``, ``"256"`` or ``"truecolor"``. Leave as ``"auto"`` to autodetect.
+ force_terminal (Optional[bool], optional): Enable/disable terminal control codes, or None to auto-detect terminal. Defaults to None.
+ force_jupyter (Optional[bool], optional): Enable/disable Jupyter rendering, or None to auto-detect Jupyter. Defaults to None.
+ force_interactive (Optional[bool], optional): Enable/disable interactive mode, or None to auto detect. Defaults to None.
+ soft_wrap (Optional[bool], optional): Set soft wrap default on print method. Defaults to False.
+ theme (Theme, optional): An optional style theme object, or ``None`` for default theme.
+ stderr (bool, optional): Use stderr rather than stdout if ``file`` is not specified. Defaults to False.
+ file (IO, optional): A file object where the console should write to. Defaults to stdout.
+        quiet (bool, optional): Boolean to suppress all output. Defaults to False.
+ width (int, optional): The width of the terminal. Leave as default to auto-detect width.
+ height (int, optional): The height of the terminal. Leave as default to auto-detect height.
+ style (StyleType, optional): Style to apply to all output, or None for no style. Defaults to None.
+        no_color (Optional[bool], optional): Enable no color mode, or None to auto detect. Defaults to None.
+ tab_size (int, optional): Number of spaces used to replace a tab character. Defaults to 8.
+ record (bool, optional): Boolean to enable recording of terminal output,
+ required to call :meth:`export_html`, :meth:`export_svg`, and :meth:`export_text`. Defaults to False.
+ markup (bool, optional): Boolean to enable :ref:`console_markup`. Defaults to True.
+ emoji (bool, optional): Enable emoji code. Defaults to True.
+ emoji_variant (str, optional): Optional emoji variant, either "text" or "emoji". Defaults to None.
+ highlight (bool, optional): Enable automatic highlighting. Defaults to True.
+ log_time (bool, optional): Boolean to enable logging of time by :meth:`log` methods. Defaults to True.
+ log_path (bool, optional): Boolean to enable the logging of the caller by :meth:`log`. Defaults to True.
+ log_time_format (Union[str, TimeFormatterCallable], optional): If ``log_time`` is enabled, either string for strftime or callable that formats the time. Defaults to "[%X] ".
+ highlighter (HighlighterType, optional): Default highlighter.
+ legacy_windows (bool, optional): Enable legacy Windows mode, or ``None`` to auto detect. Defaults to ``None``.
+ safe_box (bool, optional): Restrict box options that don't render on legacy Windows.
+ get_datetime (Callable[[], datetime], optional): Callable that gets the current time as a datetime.datetime object (used by Console.log),
+ or None for datetime.now.
+ get_time (Callable[[], time], optional): Callable that gets the current time in seconds, default uses time.monotonic.
+ """
_environ: Mapping[str, str] = os.environ
@@ -640,6 +761,7 @@
@property
def file(self) -> IO[str]:
+ """Get the file object to write to."""
file = self._file or (sys.stderr if self.stderr else sys.stdout)
file = getattr(file, "rich_proxied_file", file)
if file is None:
@@ -648,14 +770,17 @@
@file.setter
def file(self, new_file: IO[str]) -> None:
+ """Set a new file object."""
self._file = new_file
@property
def _buffer(self) -> List[Segment]:
+ """Get a thread local buffer."""
return self._thread_locals.buffer
@property
def _buffer_index(self) -> int:
+        """Get a thread local buffer index."""
return self._thread_locals.buffer_index
@_buffer_index.setter
@@ -664,9 +789,11 @@
@property
def _theme_stack(self) -> ThemeStack:
+ """Get the thread local theme stack."""
return self._thread_locals.theme_stack
def _detect_color_system(self) -> Optional[ColorSystem]:
+ """Detect color system from env vars."""
if self.is_jupyter:
return ColorSystem.TRUECOLOR
if not self.is_terminal or self.is_dumb_terminal:
@@ -690,56 +817,107 @@ return color_system
def _enter_buffer(self) -> None:
+ """Enter in to a buffer context, and buffer all output."""
self._buffer_index += 1
def _exit_buffer(self) -> None:
+ """Leave buffer context, and render content if required."""
self._buffer_index -= 1
self._check_buffer()
def set_live(self, live: "Live") -> bool:
+ """Set Live instance. Used by Live context manager (no need to call directly).
+
+ Args:
+ live (Live): Live instance using this Console.
+
+ Returns:
+            Boolean that indicates if this Live is the topmost in the stack.
+
+ Raises:
+ errors.LiveError: If this Console has a Live context currently active.
+ """
with self._lock:
self._live_stack.append(live)
return len(self._live_stack) == 1
def clear_live(self) -> None:
+ """Clear the Live instance. Used by the Live context manager (no need to call directly)."""
with self._lock:
self._live_stack.pop()
def push_render_hook(self, hook: RenderHook) -> None:
+ """Add a new render hook to the stack.
+
+ Args:
+ hook (RenderHook): Render hook instance.
+ """
with self._lock:
self._render_hooks.append(hook)
def pop_render_hook(self) -> None:
+        """Pop the last render hook from the stack."""
with self._lock:
self._render_hooks.pop()
def __enter__(self) -> "Console":
+ """Own context manager to enter buffer context."""
self._enter_buffer()
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
+ """Exit buffer context."""
self._exit_buffer()
def begin_capture(self) -> None:
+ """Begin capturing console output. Call :meth:`end_capture` to exit capture mode and return output."""
self._enter_buffer()
def end_capture(self) -> str:
+ """End capture mode and return captured string.
+
+ Returns:
+ str: Console output.
+ """
render_result = self._render_buffer(self._buffer)
del self._buffer[:]
self._exit_buffer()
return render_result
def push_theme(self, theme: Theme, *, inherit: bool = True) -> None:
+ """Push a new theme on to the top of the stack, replacing the styles from the previous theme.
+ Generally speaking, you should call :meth:`~rich.console.Console.use_theme` to get a context manager, rather
+ than calling this method directly.
+
+ Args:
+ theme (Theme): A theme instance.
+ inherit (bool, optional): Inherit existing styles. Defaults to True.
+ """
self._theme_stack.push_theme(theme, inherit=inherit)
def pop_theme(self) -> None:
+ """Remove theme from top of stack, restoring previous theme."""
self._theme_stack.pop_theme()
def use_theme(self, theme: Theme, *, inherit: bool = True) -> ThemeContext:
+ """Use a different theme for the duration of the context manager.
+
+ Args:
+            theme (Theme): Theme instance to use.
+ inherit (bool, optional): Inherit existing console styles. Defaults to True.
+
+ Returns:
+            ThemeContext: A context manager that applies the theme and restores the previous theme on exit.
+ """
return ThemeContext(self, theme, inherit)
@property
def color_system(self) -> Optional[str]:
+ """Get color system string.
+
+ Returns:
+ Optional[str]: "standard", "256" or "truecolor".
+ """
if self._color_system is not None:
return _COLOR_SYSTEMS_NAMES[self._color_system]
@@ -748,10 +926,21 @@
@property
def encoding(self) -> str:
+ """Get the encoding of the console file, e.g. ``"utf-8"``.
+
+ Returns:
+ str: A standard encoding string.
+ """
return (getattr(self.file, "encoding", "utf-8") or "utf-8").lower()
@property
def is_terminal(self) -> bool:
+ """Check if the console is writing to a terminal.
+
+ Returns:
+            bool: True if the console is writing to a device capable of
+ understanding escape sequences, otherwise False.
+ """
# If dev has explicitly set this value, return it
if self._force_terminal is not None:
return self._force_terminal
@@ -794,12 +983,19 @@
@property
def is_dumb_terminal(self) -> bool:
+ """Detect dumb terminal.
+
+ Returns:
+ bool: True if writing to a dumb terminal, otherwise False.
+
+ """
_term = self._environ.get("TERM", "")
is_dumb = _term.lower() in ("dumb", "unknown")
return self.is_terminal and is_dumb
@property
def options(self) -> ConsoleOptions:
+ """Get default console options."""
size = self.size
return ConsoleOptions(
max_height=size.height,
@@ -813,6 +1009,11 @@
@property
def size(self) -> ConsoleDimensions:
+ """Get the size of the console.
+
+ Returns:
+ ConsoleDimensions: A named tuple containing the dimensions.
+ """
if self._width is not None and self._height is not None:
return ConsoleDimensions(self._width - self.legacy_windows, self._height)
@@ -849,44 +1050,111 @@
@size.setter
def size(self, new_size: Tuple[int, int]) -> None:
+ """Set a new size for the terminal.
+
+ Args:
+ new_size (Tuple[int, int]): New width and height.
+ """
width, height = new_size
self._width = width
self._height = height
@property
def width(self) -> int:
+ """Get the width of the console.
+
+ Returns:
+ int: The width (in characters) of the console.
+ """
return self.size.width
@width.setter
def width(self, width: int) -> None:
+ """Set width.
+
+ Args:
+ width (int): New width.
+ """
self._width = width
@property
def height(self) -> int:
+ """Get the height of the console.
+
+ Returns:
+ int: The height (in lines) of the console.
+ """
return self.size.height
@height.setter
def height(self, height: int) -> None:
+ """Set height.
+
+ Args:
+ height (int): new height.
+ """
self._height = height
def bell(self) -> None:
+ """Play a 'bell' sound (if supported by the terminal)."""
self.control(Control.bell())
def capture(self) -> Capture:
+ """A context manager to *capture* the result of print() or log() in a string,
+ rather than writing it to the console.
+
+ Example:
+ >>> from rich.console import Console
+ >>> console = Console()
+ >>> with console.capture() as capture:
+ ... console.print("[bold magenta]Hello World[/]")
+ >>> print(capture.get())
+
+ Returns:
+            Capture: Context manager that disables writing to the terminal.
+ """
capture = Capture(self)
return capture
def pager(
self, pager: Optional[Pager] = None, styles: bool = False, links: bool = False
) -> PagerContext:
+ """A context manager to display anything printed within a "pager". The pager application
+ is defined by the system and will typically support at least pressing a key to scroll.
+
+ Args:
+ pager (Pager, optional): A pager object, or None to use :class:`~rich.pager.SystemPager`. Defaults to None.
+ styles (bool, optional): Show styles in pager. Defaults to False.
+ links (bool, optional): Show links in pager. Defaults to False.
+
+ Example:
+ >>> from rich.console import Console
+ >>> from rich.__main__ import make_test_card
+ >>> console = Console()
+ >>> with console.pager():
+            ...     console.print(make_test_card())
+
+ Returns:
+ PagerContext: A context manager.
+ """
return PagerContext(self, pager=pager, styles=styles, links=links)
def line(self, count: int = 1) -> None:
+ """Write new line(s).
+
+ Args:
+ count (int, optional): Number of new lines. Defaults to 1.
+ """
assert count >= 0, "count must be >= 0"
self.print(NewLine(count))
def clear(self, home: bool = True) -> None:
+ """Clear the screen.
+
+ Args:
+ home (bool, optional): Also move the cursor to 'home' position. Defaults to True.
+ """
if home:
self.control(Control.clear(), Control.home())
else:
@@ -901,6 +1169,18 @@ speed: float = 1.0,
refresh_per_second: float = 12.5,
) -> "Status":
+ """Display a status and spinner.
+
+ Args:
+ status (RenderableType): A status renderable (str or Text typically).
+ spinner (str, optional): Name of spinner animation (see python -m rich.spinner). Defaults to "dots".
+ spinner_style (StyleType, optional): Style of spinner. Defaults to "status.spinner".
+ speed (float, optional): Speed factor for spinner animation. Defaults to 1.0.
+ refresh_per_second (float, optional): Number of refreshes per second. Defaults to 12.5.
+
+ Returns:
+ Status: A Status object that may be used as a context manager.
+ """
from .status import Status
status_renderable = Status(
@@ -914,12 +1194,30 @@ return status_renderable
def show_cursor(self, show: bool = True) -> bool:
+ """Show or hide the cursor.
+
+ Args:
+ show (bool, optional): Set visibility of the cursor.
+ """
if self.is_terminal:
self.control(Control.show_cursor(show))
return True
return False
def set_alt_screen(self, enable: bool = True) -> bool:
+        """Enable alternative screen mode.
+
+        Note, if you enable this mode, you should ensure that it is disabled before
+ the application exits. See :meth:`~rich.Console.screen` for a context manager
+ that handles this for you.
+
+ Args:
+ enable (bool, optional): Enable (True) or disable (False) alternate screen. Defaults to True.
+
+ Returns:
+ bool: True if the control codes were written.
+
+ """
changed = False
if self.is_terminal and not self.legacy_windows:
self.control(Control.alt_screen(enable))
@@ -929,9 +1227,40 @@
@property
def is_alt_screen(self) -> bool:
+ """Check if the alt screen was enabled.
+
+ Returns:
+ bool: True if the alt screen was enabled, otherwise False.
+ """
return self._is_alt_screen
def set_window_title(self, title: str) -> bool:
+ """Set the title of the console terminal window.
+
+ Warning: There is no means within Rich of "resetting" the window title to its
+ previous value, meaning the title you set will persist even after your application
+ exits.
+
+ ``fish`` shell resets the window title before and after each command by default,
+ negating this issue. Windows Terminal and command prompt will also reset the title for you.
+ Most other shells and terminals, however, do not do this.
+
+ Some terminals may require configuration changes before you can set the title.
+ Some terminals may not support setting the title at all.
+
+ Other software (including the terminal itself, the shell, custom prompts, plugins, etc.)
+ may also set the terminal window title. This could result in whatever value you write
+ using this method being overwritten.
+
+ Args:
+ title (str): The new title of the terminal window.
+
+ Returns:
+ bool: True if the control code to change the terminal title was
+ written, otherwise False. Note that a return value of True
+ does not guarantee that the window title has actually changed,
+ since the feature may be unsupported/disabled in some terminals.
+ """
if self.is_terminal:
self.control(Control.title(title))
return True
@@ -940,17 +1269,50 @@ def screen(
self, hide_cursor: bool = True, style: Optional[StyleType] = None
) -> "ScreenContext":
+ """Context manager to enable and disable 'alternative screen' mode.
+
+ Args:
+            hide_cursor (bool, optional): Also hide the cursor. Defaults to True.
+ style (Style, optional): Optional style for screen. Defaults to None.
+
+ Returns:
+ ~ScreenContext: Context which enables alternate screen on enter, and disables it on exit.
+ """
return ScreenContext(self, hide_cursor=hide_cursor, style=style or "")
def measure(
self, renderable: RenderableType, *, options: Optional[ConsoleOptions] = None
) -> Measurement:
+ """Measure a renderable. Returns a :class:`~rich.measure.Measurement` object which contains
+ information regarding the number of characters required to print the renderable.
+
+ Args:
+ renderable (RenderableType): Any renderable or string.
+ options (Optional[ConsoleOptions], optional): Options to use when measuring, or None
+ to use default options. Defaults to None.
+
+ Returns:
+ Measurement: A measurement of the renderable.
+ """
measurement = Measurement.get(self, options or self.options, renderable)
return measurement
def render(
self, renderable: RenderableType, options: Optional[ConsoleOptions] = None
) -> Iterable[Segment]:
+ """Render an object in to an iterable of `Segment` instances.
+
+ This method contains the logic for rendering objects with the console protocol.
+ You are unlikely to need to use it directly, unless you are extending the library.
+
+ Args:
+ renderable (RenderableType): An object supporting the console protocol, or
+ an object that may be converted to a string.
+ options (ConsoleOptions, optional): An options object, or None to use self.options. Defaults to None.
+
+ Returns:
+ Iterable[Segment]: An iterable of segments that may be rendered.
+ """
_options = options or self.options
if _options.max_width < 1:
@@ -995,6 +1357,21 @@ pad: bool = True,
new_lines: bool = False,
) -> List[List[Segment]]:
+ """Render objects in to a list of lines.
+
+ The output of render_lines is useful when further formatting of rendered console text
+ is required, such as the Panel class which draws a border around any renderable object.
+
+ Args:
+ renderable (RenderableType): Any object renderable in the console.
+            options (Optional[ConsoleOptions], optional): Console options, or None to use self.options. Defaults to ``None``.
+ style (Style, optional): Optional style to apply to renderables. Defaults to ``None``.
+ pad (bool, optional): Pad lines shorter than render width. Defaults to ``True``.
+ new_lines (bool, optional): Include "\n" characters at end of lines.
+
+ Returns:
+ List[List[Segment]]: A list of lines, where a line is a list of Segment objects.
+ """
with self._lock:
render_options = options or self.options
_rendered = self.render(renderable, render_options)
@@ -1047,6 +1424,22 @@ highlight: Optional[bool] = None,
highlighter: Optional[HighlighterType] = None,
) -> "Text":
+ """Convert a string to a Text instance. This is called automatically if
+ you print or log a string.
+
+ Args:
+ text (str): Text to render.
+ style (Union[str, Style], optional): Style to apply to rendered text.
+ justify (str, optional): Justify method: "default", "left", "center", "full", or "right". Defaults to ``None``.
+ overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to ``None``.
+ emoji (Optional[bool], optional): Enable emoji, or ``None`` to use Console default.
+ markup (Optional[bool], optional): Enable markup, or ``None`` to use Console default.
+ highlight (Optional[bool], optional): Enable highlighting, or ``None`` to use Console default.
+ highlighter (HighlighterType, optional): Optional highlighter to apply.
+            highlighter (HighlighterType, optional): Optional highlighter to apply.
+
+        Returns:
+
+ """
emoji_enabled = emoji or (emoji is None and self._emoji)
markup_enabled = markup or (markup is None and self._markup)
highlight_enabled = highlight or (highlight is None and self._highlight)
@@ -1083,6 +1476,18 @@ def get_style(
self, name: Union[str, Style], *, default: Optional[Union[Style, str]] = None
) -> Style:
+ """Get a Style instance by its theme name or parse a definition.
+
+ Args:
+ name (str): The name of a style or a style definition.
+
+ Returns:
+ Style: A Style object.
+
+ Raises:
+ MissingStyle: If no style could be parsed from name.
+
+ """
if isinstance(name, Style):
return name
@@ -1109,6 +1514,20 @@ markup: Optional[bool] = None,
highlight: Optional[bool] = None,
) -> List[ConsoleRenderable]:
+ """Combine a number of renderables and text into one renderable.
+
+ Args:
+ objects (Iterable[Any]): Anything that Rich can render.
+ sep (str): String to write between print data.
+ end (str): String to write at end of print data.
+ justify (str, optional): One of "left", "right", "center", or "full". Defaults to ``None``.
+ emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default.
+ markup (Optional[bool], optional): Enable markup, or ``None`` to use console default.
+ highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default.
+
+ Returns:
+ List[ConsoleRenderable]: A list of things to render.
+ """
renderables: List[ConsoleRenderable] = []
_append = renderables.append
text: List[Text] = []
@@ -1171,12 +1590,25 @@ style: Union[str, Style] = "rule.line",
align: AlignMethod = "center",
) -> None:
+ """Draw a line with optional centered title.
+
+ Args:
+ title (str, optional): Text to render over the rule. Defaults to "".
+ characters (str, optional): Character(s) to form the line. Defaults to "─".
+ style (str, optional): Style of line. Defaults to "rule.line".
+ align (str, optional): How to align the title, one of "left", "center", or "right". Defaults to "center".
+ """
from .rule import Rule
rule = Rule(title=title, characters=characters, style=style, align=align)
self.print(rule)
def control(self, *control: Control) -> None:
+ """Insert non-printing control codes.
+
+ Args:
+ control_codes (str): Control codes, such as those that may move the cursor.
+ """
if not self.is_dumb_terminal:
with self:
self._buffer.extend(_control.segment for _control in control)
@@ -1189,6 +1621,17 @@ style: Optional[Union[str, Style]] = None,
highlight: Optional[bool] = None,
) -> None:
+ """Output to the terminal. This is a low-level way of writing to the terminal which unlike
+ :meth:`~rich.console.Console.print` won't pretty print, wrap text, or apply markup, but will
+ optionally apply highlighting and a basic style.
+
+ Args:
+ sep (str, optional): String to write between print data. Defaults to " ".
+            end (str, optional): String to write at end of print data. Defaults to "\\n".
+ style (Union[str, Style], optional): A style to apply to output. Defaults to None.
+ highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use
+ console default. Defaults to ``None``.
+ """
raw_output: str = sep.join(str(_object) for _object in objects)
self.print(
raw_output,
@@ -1220,6 +1663,25 @@ soft_wrap: Optional[bool] = None,
new_line_start: bool = False,
) -> None:
+ """Print to the console.
+
+ Args:
+            objects (positional args): Objects to print to the terminal.
+ sep (str, optional): String to write between print data. Defaults to " ".
+            end (str, optional): String to write at end of print data. Defaults to "\\n".
+ style (Union[str, Style], optional): A style to apply to output. Defaults to None.
+ justify (str, optional): Justify method: "default", "left", "right", "center", or "full". Defaults to ``None``.
+ overflow (str, optional): Overflow method: "ignore", "crop", "fold", or "ellipsis". Defaults to None.
+ no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to None.
+ emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default. Defaults to ``None``.
+ markup (Optional[bool], optional): Enable markup, or ``None`` to use console default. Defaults to ``None``.
+ highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default. Defaults to ``None``.
+ width (Optional[int], optional): Width of output, or ``None`` to auto-detect. Defaults to ``None``.
+ crop (Optional[bool], optional): Crop output to width of terminal. Defaults to True.
+ soft_wrap (bool, optional): Enable soft wrap mode which disables word wrapping and cropping of text or ``None`` for
+ Console default. Defaults to ``None``.
+            new_line_start (bool, optional): Insert a new line at the start if the output contains more than one line. Defaults to ``False``.
+ """
if not objects:
objects = (NewLine(),)
@@ -1300,6 +1762,21 @@ default: Optional[Callable[[Any], Any]] = None,
sort_keys: bool = False,
) -> None:
+ """Pretty prints JSON. Output will be valid JSON.
+
+ Args:
+ json (Optional[str]): A string containing JSON.
+ data (Any): If json is not supplied, then encode this data.
+ indent (Union[None, int, str], optional): Number of spaces to indent. Defaults to 2.
+            highlight (bool, optional): Enable highlighting of output. Defaults to True.
+ skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False.
+ ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to False.
+ check_circular (bool, optional): Check for circular references. Defaults to True.
+ allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True.
+            default (Callable, optional): A callable that converts values that cannot be encoded
+                into something that can be JSON encoded. Defaults to None.
+ sort_keys (bool, optional): Sort dictionary keys. Defaults to False.
+ """
from rich.json import JSON
if json is None:
@@ -1339,6 +1816,18 @@ region: Optional[Region] = None,
options: Optional[ConsoleOptions] = None,
) -> None:
+ """Update the screen at a given offset.
+
+ Args:
+ renderable (RenderableType): A Rich renderable.
+ region (Region, optional): Region of screen to update, or None for entire screen. Defaults to None.
+ x (int, optional): x offset. Defaults to 0.
+ y (int, optional): y offset. Defaults to 0.
+
+ Raises:
+ errors.NoAltScreen: If the Console isn't in alt screen mode.
+
+ """
if not self.is_alt_screen:
raise errors.NoAltScreen("Alt screen must be enabled to call update_screen")
render_options = options or self.options
@@ -1357,6 +1846,16 @@ def update_screen_lines(
self, lines: List[List[Segment]], x: int = 0, y: int = 0
) -> None:
+ """Update lines of the screen at a given offset.
+
+ Args:
+ lines (List[List[Segment]]): Rendered lines (as produced by :meth:`~rich.Console.render_lines`).
+ x (int, optional): x offset (column no). Defaults to 0.
+            y (int, optional): y offset (line no). Defaults to 0.
+
+ Raises:
+ errors.NoAltScreen: If the Console isn't in alt screen mode.
+ """
if not self.is_alt_screen:
raise errors.NoAltScreen("Alt screen must be enabled to call update_screen")
screen_update = ScreenUpdate(lines, x, y)
@@ -1375,6 +1874,17 @@ suppress: Iterable[Union[str, ModuleType]] = (),
max_frames: int = 100,
) -> None:
+ """Prints a rich render of the last exception and traceback.
+
+ Args:
+ width (Optional[int], optional): Number of characters used to render code. Defaults to 100.
+ extra_lines (int, optional): Additional lines of code to render. Defaults to 3.
+ theme (str, optional): Override pygments theme used in traceback
+ word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False.
+ show_locals (bool, optional): Enable display of local variables. Defaults to False.
+ suppress (Iterable[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback.
+ max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100.
+ """
from .traceback import Traceback
traceback = Traceback(
@@ -1393,6 +1903,20 @@ offset: int,
currentframe: Callable[[], Optional[FrameType]] = inspect.currentframe,
) -> Tuple[str, int, Dict[str, Any]]:
+ """Get caller frame information.
+
+ Args:
+ offset (int): the caller offset within the current frame stack.
+ currentframe (Callable[[], Optional[FrameType]], optional): the callable to use to
+ retrieve the current frame. Defaults to ``inspect.currentframe``.
+
+ Returns:
+ Tuple[str, int, Dict[str, Any]]: A tuple containing the filename, the line number and
+ the dictionary of local variables associated with the caller frame.
+
+ Raises:
+ RuntimeError: If the stack offset is invalid.
+ """
# Ignore the frame of this local helper
offset += 1
@@ -1422,6 +1946,21 @@ log_locals: bool = False,
_stack_offset: int = 1,
) -> None:
+ """Log rich content to the terminal.
+
+ Args:
+ objects (positional args): Objects to log to the terminal.
+ sep (str, optional): String to write between print data. Defaults to " ".
+            end (str, optional): String to write at end of print data. Defaults to "\\n".
+ style (Union[str, Style], optional): A style to apply to output. Defaults to None.
+ justify (str, optional): One of "left", "right", "center", or "full". Defaults to ``None``.
+ emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default. Defaults to None.
+ markup (Optional[bool], optional): Enable markup, or ``None`` to use console default. Defaults to None.
+ highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default. Defaults to None.
+ log_locals (bool, optional): Boolean to enable logging of locals where ``log()``
+ was called. Defaults to False.
+ _stack_offset (int, optional): Offset of caller from end of call stack. Defaults to 1.
+ """
if not objects:
objects = (NewLine(),)
@@ -1476,12 +2015,25 @@ buffer_extend(line)
def on_broken_pipe(self) -> None:
+ """This function is called when a `BrokenPipeError` is raised.
+
+ This can occur when piping Textual output in Linux and macOS.
+ The default implementation is to exit the app, but you could implement
+ this method in a subclass to change the behavior.
+
+ See https://docs.python.org/3/library/signal.html#note-on-sigpipe for details.
+ """
self.quiet = True
devnull = os.open(os.devnull, os.O_WRONLY)
os.dup2(devnull, sys.stdout.fileno())
raise SystemExit(1)
def _check_buffer(self) -> None:
+        """Check if the buffer may be rendered. Render it if it can (e.g. Console.quiet is False).
+ Rendering is supported on Windows, Unix and Jupyter environments. For
+ legacy Windows consoles, the win32 API is called directly.
+ This method will also record what it renders if recording is enabled via Console.record.
+ """
if self.quiet:
del self._buffer[:]
return
@@ -1492,6 +2044,7 @@ self.on_broken_pipe()
def _write_buffer(self) -> None:
+ """Write the buffer to the output file."""
with self._lock:
if self.record and not self._buffer_index:
@@ -1564,6 +2117,7 @@ del self._buffer[:]
def _render_buffer(self, buffer: Iterable[Segment]) -> str:
+        """Render buffered output to a string."""
output: List[str] = []
append = output.append
color_system = self._color_system
@@ -1595,6 +2149,20 @@ password: bool = False,
stream: Optional[TextIO] = None,
) -> str:
+ """Displays a prompt and waits for input from the user. The prompt may contain color / style.
+
+ It works in the same way as Python's builtin :func:`input` function and provides elaborate line editing and history features if Python's builtin :mod:`readline` module is previously loaded.
+
+ Args:
+ prompt (Union[str, Text]): Text to render in the prompt.
+ markup (bool, optional): Enable console markup (requires a str prompt). Defaults to True.
+ emoji (bool, optional): Enable emoji (requires a str prompt). Defaults to True.
+            password (bool, optional): Hide typed text. Defaults to False.
+            stream (TextIO, optional): Optional file to read input from (rather than stdin). Defaults to None.
+
+ Returns:
+ str: Text read from stdin.
+ """
if prompt:
self.print(prompt, markup=markup, emoji=emoji, end="")
if password:
@@ -1607,6 +2175,17 @@ return result
def export_text(self, *, clear: bool = True, styles: bool = False) -> str:
+ """Generate text from console contents (requires record=True argument in constructor).
+
+ Args:
+ clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``.
+ styles (bool, optional): If ``True``, ansi escape codes will be included. ``False`` for plain text.
+ Defaults to ``False``.
+
+ Returns:
+ str: String containing console contents.
+
+ """
assert (
self.record
), "To export console contents set record=True in the constructor or instance"
@@ -1628,6 +2207,15 @@ return text
def save_text(self, path: str, *, clear: bool = True, styles: bool = False) -> None:
+ """Generate text from console and save to a given location (requires record=True argument in constructor).
+
+ Args:
+ path (str): Path to write text files.
+ clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``.
+ styles (bool, optional): If ``True``, ansi style codes will be included. ``False`` for plain text.
+ Defaults to ``False``.
+
+ """
text = self.export_text(clear=clear, styles=styles)
with open(path, "w", encoding="utf-8") as write_file:
write_file.write(text)
@@ -1640,6 +2228,20 @@ code_format: Optional[str] = None,
inline_styles: bool = False,
) -> str:
+ """Generate HTML from console contents (requires record=True argument in constructor).
+
+ Args:
+ theme (TerminalTheme, optional): TerminalTheme object containing console colors.
+ clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``.
+ code_format (str, optional): Format string to render HTML. In addition to '{foreground}',
+ '{background}', and '{code}', should contain '{stylesheet}' if inline_styles is ``False``.
+ inline_styles (bool, optional): If ``True`` styles will be inlined in to spans, which makes files
+ larger but easier to cut and paste markup. If ``False``, styles will be embedded in a style tag.
+ Defaults to False.
+
+ Returns:
+ str: String containing console contents as HTML.
+ """
assert (
self.record
), "To export console contents set record=True in the constructor or instance"
@@ -1702,6 +2304,19 @@ code_format: str = CONSOLE_HTML_FORMAT,
inline_styles: bool = False,
) -> None:
+ """Generate HTML from console contents and write to a file (requires record=True argument in constructor).
+
+ Args:
+ path (str): Path to write html file.
+ theme (TerminalTheme, optional): TerminalTheme object containing console colors.
+ clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``.
+ code_format (str, optional): Format string to render HTML. In addition to '{foreground}',
+ '{background}', and '{code}', should contain '{stylesheet}' if inline_styles is ``False``.
+ inline_styles (bool, optional): If ``True`` styles will be inlined in to spans, which makes files
+ larger but easier to cut and paste markup. If ``False``, styles will be embedded in a style tag.
+ Defaults to False.
+
+ """
html = self.export_html(
theme=theme,
clear=clear,
@@ -1721,12 +2336,29 @@ font_aspect_ratio: float = 0.61,
unique_id: Optional[str] = None,
) -> str:
+ """
+ Generate an SVG from the console contents (requires record=True in Console constructor).
+
+ Args:
+ title (str, optional): The title of the tab in the output image
+ theme (TerminalTheme, optional): The ``TerminalTheme`` object to use to style the terminal
+ clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``
+ code_format (str, optional): Format string used to generate the SVG. Rich will inject a number of variables
+ into the string in order to form the final SVG output. The default template used and the variables
+ injected by Rich can be found by inspecting the ``console.CONSOLE_SVG_FORMAT`` variable.
+ font_aspect_ratio (float, optional): The width to height ratio of the font used in the ``code_format``
+ string. Defaults to 0.61, which is the width to height ratio of Fira Code (the default font).
+ If you aren't specifying a different font inside ``code_format``, you probably don't need this.
+ unique_id (str, optional): unique id that is used as the prefix for various elements (CSS styles, node
+ ids). If not set, this defaults to a computed value based on the recorded content.
+ """
from rich.cells import cell_len
style_cache: Dict[Style, str] = {}
def get_svg_style(style: Style) -> str:
+ """Convert a Style to CSS rules for SVG."""
if style in style_cache:
return style_cache[style]
css_rules = []
@@ -1786,11 +2418,13 @@ style_no = 1
def escape_text(text: str) -> str:
+ """HTML escape text and replace spaces with nbsp."""
            return escape(text).replace(" ", "&#160;")
def make_tag(
name: str, content: Optional[str] = None, **attribs: object
) -> str:
+ """Make a tag from name, content, and attributes."""
def stringify(value: object) -> str:
                if isinstance(value, float):
@@ -1954,6 +2588,22 @@ font_aspect_ratio: float = 0.61,
unique_id: Optional[str] = None,
) -> None:
+ """Generate an SVG file from the console contents (requires record=True in Console constructor).
+
+ Args:
+ path (str): The path to write the SVG to.
+ title (str, optional): The title of the tab in the output image
+ theme (TerminalTheme, optional): The ``TerminalTheme`` object to use to style the terminal
+ clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``
+ code_format (str, optional): Format string used to generate the SVG. Rich will inject a number of variables
+ into the string in order to form the final SVG output. The default template used and the variables
+ injected by Rich can be found by inspecting the ``console.CONSOLE_SVG_FORMAT`` variable.
+ font_aspect_ratio (float, optional): The width to height ratio of the font used in the ``code_format``
+ string. Defaults to 0.61, which is the width to height ratio of Fira Code (the default font).
+ If you aren't specifying a different font inside ``code_format``, you probably don't need this.
+ unique_id (str, optional): unique id that is used as the prefix for various elements (CSS styles, node
+ ids). If not set, this defaults to a computed value based on the recorded content.
+ """
svg = self.export_svg(
title=title,
theme=theme,
@@ -1967,6 +2617,14 @@
def _svg_hash(svg_main_code: str) -> str:
+ """Returns a unique hash for the given SVG main code.
+
+ Args:
+ svg_main_code (str): The content we're going to inject in the SVG envelope.
+
+ Returns:
+ str: a hash of the given content
+ """
return str(zlib.adler32(svg_main_code.encode()))
@@ -2023,4 +2681,4 @@ },
},
}
-    )
+    )
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/console.py |
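The diff above applies Google-style docstrings throughout: a one-line summary, then indented Args/Returns/Raises sections with per-parameter types. A minimal sketch of the same convention, applied to a hypothetical helper (the function below is an editor's illustration, not part of rich):

def clamp(value: int, low: int, high: int) -> int:
    """Clamp a value to an inclusive range.

    Args:
        value (int): Value to clamp.
        low (int): Minimum allowed value.
        high (int): Maximum allowed value.

    Returns:
        int: ``value`` limited to the range ``low``..``high``.

    Raises:
        ValueError: If ``low`` is greater than ``high``.
    """
    if low > high:
        raise ValueError("low must be <= high")
    return max(low, min(value, high))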
Generate helpful docstrings for debugging | from typing import TYPE_CHECKING, List, Optional, Tuple, Union
if TYPE_CHECKING:
from .console import (
Console,
ConsoleOptions,
RenderableType,
RenderResult,
)
from .jupyter import JupyterMixin
from .measure import Measurement
from .segment import Segment
from .style import Style
PaddingDimensions = Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int, int]]
class Padding(JupyterMixin):
def __init__(
self,
renderable: "RenderableType",
pad: "PaddingDimensions" = (0, 0, 0, 0),
*,
style: Union[str, Style] = "none",
expand: bool = True,
):
self.renderable = renderable
self.top, self.right, self.bottom, self.left = self.unpack(pad)
self.style = style
self.expand = expand
@classmethod
def indent(cls, renderable: "RenderableType", level: int) -> "Padding":
return Padding(renderable, pad=(0, 0, 0, level), expand=False)
@staticmethod
def unpack(pad: "PaddingDimensions") -> Tuple[int, int, int, int]:
if isinstance(pad, int):
return (pad, pad, pad, pad)
if len(pad) == 1:
_pad = pad[0]
return (_pad, _pad, _pad, _pad)
if len(pad) == 2:
pad_top, pad_right = pad
return (pad_top, pad_right, pad_top, pad_right)
if len(pad) == 4:
top, right, bottom, left = pad
return (top, right, bottom, left)
raise ValueError(f"1, 2 or 4 integers required for padding; {len(pad)} given")
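    # Editor's note: the branches above mirror CSS shorthand. Verified against
    # the logic in this method:
    #
    #     Padding.unpack(1)             -> (1, 1, 1, 1)  # all four sides
    #     Padding.unpack((1,))          -> (1, 1, 1, 1)
    #     Padding.unpack((1, 2))        -> (1, 2, 1, 2)  # (vertical, horizontal)
    #     Padding.unpack((1, 2, 3, 4))  -> (1, 2, 3, 4)  # top, right, bottom, left
    #     Padding.unpack((1, 2, 3))     -> ValueError    # 3 values are rejected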
def __repr__(self) -> str:
return f"Padding({self.renderable!r}, ({self.top},{self.right},{self.bottom},{self.left}))"
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
style = console.get_style(self.style)
if self.expand:
width = options.max_width
else:
width = min(
Measurement.get(console, options, self.renderable).maximum
+ self.left
+ self.right,
options.max_width,
)
render_options = options.update_width(width - self.left - self.right)
if render_options.height is not None:
render_options = render_options.update_height(
height=render_options.height - self.top - self.bottom
)
lines = console.render_lines(
self.renderable, render_options, style=style, pad=True
)
_Segment = Segment
left = _Segment(" " * self.left, style) if self.left else None
right = (
[_Segment(f'{" " * self.right}', style), _Segment.line()]
if self.right
else [_Segment.line()]
)
blank_line: Optional[List[Segment]] = None
if self.top:
blank_line = [_Segment(f'{" " * width}\n', style)]
yield from blank_line * self.top
if left:
for line in lines:
yield left
yield from line
yield from right
else:
for line in lines:
yield from line
yield from right
if self.bottom:
blank_line = blank_line or [_Segment(f'{" " * width}\n', style)]
yield from blank_line * self.bottom
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> "Measurement":
max_width = options.max_width
extra_width = self.left + self.right
if max_width - extra_width < 1:
return Measurement(max_width, max_width)
measure_min, measure_max = Measurement.get(console, options, self.renderable)
measurement = Measurement(measure_min + extra_width, measure_max + extra_width)
measurement = measurement.with_maximum(max_width)
return measurement
if __name__ == "__main__": # pragma: no cover
from rich import print
print(Padding("Hello, World", (2, 4), style="on blue")) | --- +++ @@ -17,6 +17,18 @@
class Padding(JupyterMixin):
+ """Draw space around content.
+
+ Example:
+ >>> print(Padding("Hello", (2, 4), style="on blue"))
+
+ Args:
+ renderable (RenderableType): String or other renderable.
+ pad (Union[int, Tuple[int]]): Padding for top, right, bottom, and left borders.
+ May be specified with 1, 2, or 4 integers (CSS style).
+ style (Union[str, Style], optional): Style for padding characters. Defaults to "none".
+ expand (bool, optional): Expand padding to fit available width. Defaults to True.
+ """
def __init__(
self,
@@ -33,11 +45,21 @@
@classmethod
def indent(cls, renderable: "RenderableType", level: int) -> "Padding":
+ """Make padding instance to render an indent.
+
+ Args:
+ renderable (RenderableType): String or other renderable.
+ level (int): Number of characters to indent.
+
+ Returns:
+ Padding: A Padding instance.
+ """
return Padding(renderable, pad=(0, 0, 0, level), expand=False)
@staticmethod
def unpack(pad: "PaddingDimensions") -> Tuple[int, int, int, int]:
+ """Unpack padding specified in CSS style."""
if isinstance(pad, int):
return (pad, pad, pad, pad)
if len(pad) == 1:
@@ -116,4 +138,4 @@ if __name__ == "__main__": # pragma: no cover
from rich import print
-    print(Padding("Hello, World", (2, 4), style="on blue"))
+    print(Padding("Hello, World", (2, 4), style="on blue"))
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/padding.py |
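A short usage sketch for the module above (editor's addition; it assumes only the public rich API shown in this row):

from rich import print
from rich.padding import Padding

# Two values behave like CSS shorthand: 1 line of vertical padding,
# 4 cells of horizontal padding, styled across the padded area.
print(Padding("Hello, World", (1, 4), style="on blue"))

# indent() pads only the left edge and, because expand=False, the
# padded block hugs its content rather than filling the terminal width.
print(Padding.indent("indented by eight cells", level=8))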
Write docstrings including parameters and return values | from __future__ import annotations
import sys
from dataclasses import dataclass
from typing import ClassVar, Iterable, get_args
from markdown_it import MarkdownIt
from markdown_it.token import Token
from rich.table import Table
from . import box
from ._loop import loop_first
from ._stack import Stack
from .console import Console, ConsoleOptions, JustifyMethod, RenderResult
from .containers import Renderables
from .jupyter import JupyterMixin
from .rule import Rule
from .segment import Segment
from .style import Style, StyleStack
from .syntax import Syntax
from .text import Text, TextType
class MarkdownElement:
new_line: ClassVar[bool] = True
@classmethod
def create(cls, markdown: Markdown, token: Token) -> MarkdownElement:
return cls()
    def on_enter(self, context: MarkdownContext) -> None:
        pass

    def on_text(self, context: MarkdownContext, text: TextType) -> None:
        pass

    def on_leave(self, context: MarkdownContext) -> None:
        pass
def on_child_close(self, context: MarkdownContext, child: MarkdownElement) -> bool:
return True
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
return ()
class UnknownElement(MarkdownElement):
class TextElement(MarkdownElement):
style_name = "none"
def on_enter(self, context: MarkdownContext) -> None:
self.style = context.enter_style(self.style_name)
self.text = Text(justify="left")
def on_text(self, context: MarkdownContext, text: TextType) -> None:
self.text.append(text, context.current_style if isinstance(text, str) else None)
def on_leave(self, context: MarkdownContext) -> None:
context.leave_style()
class Paragraph(TextElement):
style_name = "markdown.paragraph"
justify: JustifyMethod
@classmethod
def create(cls, markdown: Markdown, token: Token) -> Paragraph:
return cls(justify=markdown.justify or "left")
def __init__(self, justify: JustifyMethod) -> None:
self.justify = justify
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
self.text.justify = self.justify
yield self.text
@dataclass
class HeadingFormat:
justify: JustifyMethod = "left"
style: str = ""
class Heading(TextElement):
LEVEL_ALIGN: ClassVar[dict[str, JustifyMethod]] = {
"h1": "center",
"h2": "left",
"h3": "left",
"h4": "left",
"h5": "left",
"h6": "left",
}
@classmethod
def create(cls, markdown: Markdown, token: Token) -> Heading:
return cls(token.tag)
def on_enter(self, context: MarkdownContext) -> None:
self.text = Text()
context.enter_style(self.style_name)
def __init__(self, tag: str) -> None:
self.tag = tag
self.style_name = f"markdown.{tag}"
super().__init__()
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
text = self.text.copy()
heading_justify = self.LEVEL_ALIGN.get(self.tag, "left")
text.justify = heading_justify
yield text
class CodeBlock(TextElement):
style_name = "markdown.code_block"
@classmethod
def create(cls, markdown: Markdown, token: Token) -> CodeBlock:
node_info = token.info or ""
lexer_name = node_info.partition(" ")[0]
return cls(lexer_name or "text", markdown.code_theme)
def __init__(self, lexer_name: str, theme: str) -> None:
self.lexer_name = lexer_name
self.theme = theme
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
code = str(self.text).rstrip()
syntax = Syntax(
code, self.lexer_name, theme=self.theme, word_wrap=True, padding=1
)
yield syntax
class BlockQuote(TextElement):
style_name = "markdown.block_quote"
def __init__(self) -> None:
self.elements: Renderables = Renderables()
def on_child_close(self, context: MarkdownContext, child: MarkdownElement) -> bool:
self.elements.append(child)
return False
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
render_options = options.update(width=options.max_width - 4)
lines = console.render_lines(self.elements, render_options, style=self.style)
style = self.style
new_line = Segment("\n")
padding = Segment("▌ ", style)
for line in lines:
yield padding
yield from line
yield new_line
class HorizontalRule(MarkdownElement):
new_line = False
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
style = console.get_style("markdown.hr", default="none")
yield Rule(style=style, characters="-")
yield Text()
class TableElement(MarkdownElement):
def __init__(self) -> None:
self.header: TableHeaderElement | None = None
self.body: TableBodyElement | None = None
def on_child_close(self, context: MarkdownContext, child: MarkdownElement) -> bool:
if isinstance(child, TableHeaderElement):
self.header = child
elif isinstance(child, TableBodyElement):
self.body = child
else:
raise RuntimeError("Couldn't process markdown table.")
return False
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
table = Table(
box=box.SIMPLE,
pad_edge=False,
style="markdown.table.border",
show_edge=True,
collapse_padding=True,
)
if self.header is not None and self.header.row is not None:
for column in self.header.row.cells:
heading = column.content.copy()
heading.stylize("markdown.table.header")
table.add_column(heading)
if self.body is not None:
for row in self.body.rows:
row_content = [element.content for element in row.cells]
table.add_row(*row_content)
yield table
class TableHeaderElement(MarkdownElement):
def __init__(self) -> None:
self.row: TableRowElement | None = None
def on_child_close(self, context: MarkdownContext, child: MarkdownElement) -> bool:
assert isinstance(child, TableRowElement)
self.row = child
return False
class TableBodyElement(MarkdownElement):
def __init__(self) -> None:
self.rows: list[TableRowElement] = []
def on_child_close(self, context: MarkdownContext, child: MarkdownElement) -> bool:
assert isinstance(child, TableRowElement)
self.rows.append(child)
return False
class TableRowElement(MarkdownElement):
def __init__(self) -> None:
self.cells: list[TableDataElement] = []
def on_child_close(self, context: MarkdownContext, child: MarkdownElement) -> bool:
assert isinstance(child, TableDataElement)
self.cells.append(child)
return False
class TableDataElement(MarkdownElement):
@classmethod
def create(cls, markdown: Markdown, token: Token) -> MarkdownElement:
style = str(token.attrs.get("style")) or ""
justify: JustifyMethod
if "text-align:right" in style:
justify = "right"
elif "text-align:center" in style:
justify = "center"
elif "text-align:left" in style:
justify = "left"
else:
justify = "default"
assert justify in get_args(JustifyMethod)
return cls(justify=justify)
def __init__(self, justify: JustifyMethod) -> None:
self.content: Text = Text("", justify=justify)
self.justify = justify
def on_text(self, context: MarkdownContext, text: TextType) -> None:
text = Text(text) if isinstance(text, str) else text
text.stylize(context.current_style)
self.content.append_text(text)
class ListElement(MarkdownElement):
@classmethod
def create(cls, markdown: Markdown, token: Token) -> ListElement:
return cls(token.type, int(token.attrs.get("start", 1)))
def __init__(self, list_type: str, list_start: int | None) -> None:
self.items: list[ListItem] = []
self.list_type = list_type
self.list_start = list_start
def on_child_close(self, context: MarkdownContext, child: MarkdownElement) -> bool:
assert isinstance(child, ListItem)
self.items.append(child)
return False
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
if self.list_type == "bullet_list_open":
for item in self.items:
yield from item.render_bullet(console, options)
else:
number = 1 if self.list_start is None else self.list_start
last_number = number + len(self.items)
for index, item in enumerate(self.items):
yield from item.render_number(
console, options, number + index, last_number
)
class ListItem(TextElement):
style_name = "markdown.item"
def __init__(self) -> None:
self.elements: Renderables = Renderables()
def on_child_close(self, context: MarkdownContext, child: MarkdownElement) -> bool:
self.elements.append(child)
return False
def render_bullet(self, console: Console, options: ConsoleOptions) -> RenderResult:
render_options = options.update(width=options.max_width - 3)
lines = console.render_lines(self.elements, render_options, style=self.style)
bullet_style = console.get_style("markdown.item.bullet", default="none")
bullet = Segment(" • ", bullet_style)
padding = Segment(" " * 3, bullet_style)
new_line = Segment("\n")
for first, line in loop_first(lines):
yield bullet if first else padding
yield from line
yield new_line
def render_number(
self, console: Console, options: ConsoleOptions, number: int, last_number: int
) -> RenderResult:
number_width = len(str(last_number)) + 2
render_options = options.update(width=options.max_width - number_width)
lines = console.render_lines(self.elements, render_options, style=self.style)
number_style = console.get_style("markdown.item.number", default="none")
new_line = Segment("\n")
padding = Segment(" " * number_width, number_style)
numeral = Segment(f"{number}".rjust(number_width - 1) + " ", number_style)
for first, line in loop_first(lines):
yield numeral if first else padding
yield from line
yield new_line
class Link(TextElement):
@classmethod
def create(cls, markdown: Markdown, token: Token) -> MarkdownElement:
url = token.attrs.get("href", "#")
return cls(token.content, str(url))
def __init__(self, text: str, href: str):
self.text = Text(text)
self.href = href
class ImageItem(TextElement):
new_line = False
@classmethod
def create(cls, markdown: Markdown, token: Token) -> MarkdownElement:
return cls(str(token.attrs.get("src", "")), markdown.hyperlinks)
def __init__(self, destination: str, hyperlinks: bool) -> None:
self.destination = destination
self.hyperlinks = hyperlinks
self.link: str | None = None
super().__init__()
def on_enter(self, context: MarkdownContext) -> None:
self.link = context.current_style.link
self.text = Text(justify="left")
super().on_enter(context)
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
link_style = Style(link=self.link or self.destination or None)
title = self.text or Text(self.destination.strip("/").rsplit("/", 1)[-1])
if self.hyperlinks:
title.stylize(link_style)
text = Text.assemble("🌆 ", title, " ", end="")
yield text
class MarkdownContext:
def __init__(
self,
console: Console,
options: ConsoleOptions,
style: Style,
inline_code_lexer: str | None = None,
inline_code_theme: str = "monokai",
) -> None:
self.console = console
self.options = options
self.style_stack: StyleStack = StyleStack(style)
self.stack: Stack[MarkdownElement] = Stack()
self._syntax: Syntax | None = None
if inline_code_lexer is not None:
self._syntax = Syntax("", inline_code_lexer, theme=inline_code_theme)
@property
def current_style(self) -> Style:
return self.style_stack.current
def on_text(self, text: str, node_type: str) -> None:
if node_type in {"fence", "code_inline"} and self._syntax is not None:
highlight_text = self._syntax.highlight(text)
highlight_text.rstrip()
self.stack.top.on_text(
self, Text.assemble(highlight_text, style=self.style_stack.current)
)
else:
self.stack.top.on_text(self, text)
def enter_style(self, style_name: str | Style) -> Style:
style = self.console.get_style(style_name, default="none")
self.style_stack.push(style)
return self.current_style
def leave_style(self) -> Style:
style = self.style_stack.pop()
return style
class Markdown(JupyterMixin):
elements: ClassVar[dict[str, type[MarkdownElement]]] = {
"paragraph_open": Paragraph,
"heading_open": Heading,
"fence": CodeBlock,
"code_block": CodeBlock,
"blockquote_open": BlockQuote,
"hr": HorizontalRule,
"bullet_list_open": ListElement,
"ordered_list_open": ListElement,
"list_item_open": ListItem,
"image": ImageItem,
"table_open": TableElement,
"tbody_open": TableBodyElement,
"thead_open": TableHeaderElement,
"tr_open": TableRowElement,
"td_open": TableDataElement,
"th_open": TableDataElement,
}
inlines = {"em", "strong", "code", "s"}
def __init__(
self,
markup: str,
code_theme: str = "monokai",
justify: JustifyMethod | None = None,
style: str | Style = "none",
hyperlinks: bool = True,
inline_code_lexer: str | None = None,
inline_code_theme: str | None = None,
) -> None:
parser = MarkdownIt().enable("strikethrough").enable("table")
self.markup = markup
self.parsed = parser.parse(markup)
self.code_theme = code_theme
self.justify: JustifyMethod | None = justify
self.style = style
self.hyperlinks = hyperlinks
self.inline_code_lexer = inline_code_lexer
self.inline_code_theme = inline_code_theme or code_theme
def _flatten_tokens(self, tokens: Iterable[Token]) -> Iterable[Token]:
for token in tokens:
is_fence = token.type == "fence"
is_image = token.tag == "img"
if token.children and not (is_image or is_fence):
yield from self._flatten_tokens(token.children)
else:
yield token
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
style = console.get_style(self.style, default="none")
options = options.update(height=None)
context = MarkdownContext(
console,
options,
style,
inline_code_lexer=self.inline_code_lexer,
inline_code_theme=self.inline_code_theme,
)
tokens = self.parsed
inline_style_tags = self.inlines
new_line = False
_new_line_segment = Segment.line()
for token in self._flatten_tokens(tokens):
node_type = token.type
tag = token.tag
entering = token.nesting == 1
exiting = token.nesting == -1
self_closing = token.nesting == 0
if node_type == "text":
context.on_text(token.content, node_type)
elif node_type == "hardbreak":
context.on_text("\n", node_type)
elif node_type == "softbreak":
context.on_text(" ", node_type)
elif node_type == "link_open":
href = str(token.attrs.get("href", ""))
if self.hyperlinks:
link_style = console.get_style("markdown.link_url", default="none")
link_style += Style(link=href)
context.enter_style(link_style)
else:
context.stack.push(Link.create(self, token))
elif node_type == "link_close":
if self.hyperlinks:
context.leave_style()
else:
element = context.stack.pop()
assert isinstance(element, Link)
link_style = console.get_style("markdown.link", default="none")
context.enter_style(link_style)
context.on_text(element.text.plain, node_type)
context.leave_style()
context.on_text(" (", node_type)
link_url_style = console.get_style(
"markdown.link_url", default="none"
)
context.enter_style(link_url_style)
context.on_text(element.href, node_type)
context.leave_style()
context.on_text(")", node_type)
elif (
tag in inline_style_tags
and node_type != "fence"
and node_type != "code_block"
):
if entering:
# If it's an opening inline token e.g. strong, em, etc.
# Then we move into a style context i.e. push to stack.
context.enter_style(f"markdown.{tag}")
elif exiting:
# If it's a closing inline style, then we pop the style
# off of the stack, to move out of the context of it...
context.leave_style()
else:
# If it's a self-closing inline style e.g. `code_inline`
context.enter_style(f"markdown.{tag}")
if token.content:
context.on_text(token.content, node_type)
context.leave_style()
else:
# Map the markdown tag -> MarkdownElement renderable
element_class = self.elements.get(token.type) or UnknownElement
element = element_class.create(self, token)
if entering or self_closing:
context.stack.push(element)
element.on_enter(context)
if exiting: # CLOSING tag
element = context.stack.pop()
should_render = not context.stack or (
context.stack
and context.stack.top.on_child_close(context, element)
)
if should_render:
if new_line:
yield _new_line_segment
yield from console.render(element, context.options)
elif self_closing: # SELF-CLOSING tags (e.g. text, code, image)
context.stack.pop()
text = token.content
if text is not None:
element.on_text(context, text)
should_render = (
not context.stack
or context.stack
and context.stack.top.on_child_close(context, element)
)
if should_render:
if new_line and node_type != "inline":
yield _new_line_segment
yield from console.render(element, context.options)
if exiting or self_closing:
element.on_leave(context)
new_line = element.new_line
if __name__ == "__main__": # pragma: no cover
import argparse
import sys
parser = argparse.ArgumentParser(
description="Render Markdown to the console with Rich"
)
parser.add_argument(
"path",
metavar="PATH",
help="path to markdown file, or - for stdin",
)
parser.add_argument(
"-c",
"--force-color",
dest="force_color",
action="store_true",
default=None,
help="force color for non-terminals",
)
parser.add_argument(
"-t",
"--code-theme",
dest="code_theme",
default="monokai",
help="pygments code theme",
)
parser.add_argument(
"-i",
"--inline-code-lexer",
dest="inline_code_lexer",
default=None,
help="inline_code_lexer",
)
parser.add_argument(
"-y",
"--hyperlinks",
dest="hyperlinks",
action="store_true",
help="enable hyperlinks",
)
parser.add_argument(
"-w",
"--width",
type=int,
dest="width",
default=None,
help="width of output (default will auto-detect)",
)
parser.add_argument(
"-j",
"--justify",
dest="justify",
action="store_true",
help="enable full text justify",
)
parser.add_argument(
"-p",
"--page",
dest="page",
action="store_true",
help="use pager to scroll output",
)
args = parser.parse_args()
from rich.console import Console
if args.path == "-":
markdown_body = sys.stdin.read()
else:
with open(args.path, encoding="utf-8") as markdown_file:
markdown_body = markdown_file.read()
markdown = Markdown(
markdown_body,
justify="full" if args.justify else "left",
code_theme=args.code_theme,
hyperlinks=args.hyperlinks,
inline_code_lexer=args.inline_code_lexer,
)
if args.page:
import io
import pydoc
fileio = io.StringIO()
console = Console(
file=fileio, force_terminal=args.force_color, width=args.width
)
console.print(markdown)
pydoc.pager(fileio.getvalue())
else:
console = Console(
force_terminal=args.force_color, width=args.width, record=True
)
console.print(markdown) | --- +++ @@ -27,15 +27,50 @@
@classmethod
def create(cls, markdown: Markdown, token: Token) -> MarkdownElement:
+ """Factory to create markdown element,
+
+ Args:
+ markdown (Markdown): The parent Markdown object.
+ token (Token): A node from markdown-it.
+
+ Returns:
+ MarkdownElement: A new markdown element
+ """
return cls()
def on_enter(self, context: MarkdownContext) -> None:
+ """Called when the node is entered.
+
+ Args:
+ context (MarkdownContext): The markdown context.
+ """
def on_text(self, context: MarkdownContext, text: TextType) -> None:
+ """Called when text is parsed.
+
+ Args:
+ context (MarkdownContext): The markdown context.
+ text (TextType): A str or Text to be added to the element.
+ """
def on_leave(self, context: MarkdownContext) -> None:
+ """Called when the parser leaves the element.
+
+ Args:
+ context (MarkdownContext): The markdown context.
+ """
def on_child_close(self, context: MarkdownContext, child: MarkdownElement) -> bool:
+ """Called when a child element is closed.
+
+ This method allows a parent element to take over rendering of its children.
+
+ Args:
+ context (MarkdownContext): The markdown context.
+ child (MarkdownElement): The child markdown element.
+
+ Returns:
+ bool: Return True to render the element, or False to not render the element.
+ """
return True
def __rich_console__(
@@ -45,9 +80,16 @@
class UnknownElement(MarkdownElement):
+ """An unknown element.
+
+ Hopefully there will be no unknown elements, and we will have a MarkdownElement for
+ everything in the document.
+
+ """
class TextElement(MarkdownElement):
+ """Base class for elements that render text."""
style_name = "none"
@@ -63,6 +105,7 @@
class Paragraph(TextElement):
+ """A Paragraph."""
style_name = "markdown.paragraph"
justify: JustifyMethod
@@ -88,6 +131,7 @@
class Heading(TextElement):
+ """A heading."""
LEVEL_ALIGN: ClassVar[dict[str, JustifyMethod]] = {
"h1": "center",
@@ -121,6 +165,7 @@
class CodeBlock(TextElement):
+ """A code block with syntax highlighting."""
style_name = "markdown.code_block"
@@ -145,6 +190,7 @@
class BlockQuote(TextElement):
+ """A block quote."""
style_name = "markdown.block_quote"
@@ -170,6 +216,7 @@
class HorizontalRule(MarkdownElement):
+ """A horizontal rule to divide sections."""
new_line = False
@@ -182,6 +229,7 @@
class TableElement(MarkdownElement):
+ """MarkdownElement corresponding to `table_open`."""
def __init__(self) -> None:
self.header: TableHeaderElement | None = None
@@ -222,6 +270,7 @@
class TableHeaderElement(MarkdownElement):
+ """MarkdownElement corresponding to `thead_open` and `thead_close`."""
def __init__(self) -> None:
self.row: TableRowElement | None = None
@@ -233,6 +282,7 @@
class TableBodyElement(MarkdownElement):
+ """MarkdownElement corresponding to `tbody_open` and `tbody_close`."""
def __init__(self) -> None:
self.rows: list[TableRowElement] = []
@@ -244,6 +294,7 @@
class TableRowElement(MarkdownElement):
+ """MarkdownElement corresponding to `tr_open` and `tr_close`."""
def __init__(self) -> None:
self.cells: list[TableDataElement] = []
@@ -255,6 +306,8 @@
class TableDataElement(MarkdownElement):
+ """MarkdownElement corresponding to `td_open` and `td_close`
+ and `th_open` and `th_close`."""
@classmethod
def create(cls, markdown: Markdown, token: Token) -> MarkdownElement:
@@ -284,6 +337,7 @@
class ListElement(MarkdownElement):
+ """A list element."""
@classmethod
def create(cls, markdown: Markdown, token: Token) -> ListElement:
@@ -315,6 +369,7 @@
class ListItem(TextElement):
+ """An item in a list."""
style_name = "markdown.item"
@@ -367,11 +422,21 @@
class ImageItem(TextElement):
+ """Renders a placeholder for an image."""
new_line = False
@classmethod
def create(cls, markdown: Markdown, token: Token) -> MarkdownElement:
+ """Factory to create markdown element,
+
+ Args:
+ markdown (Markdown): The parent Markdown object.
+ token (Any): A token from markdown-it.
+
+ Returns:
+ MarkdownElement: A new markdown element
+ """
return cls(str(token.attrs.get("src", "")), markdown.hyperlinks)
def __init__(self, destination: str, hyperlinks: bool) -> None:
@@ -397,6 +462,7 @@
class MarkdownContext:
+ """Manages the console render state."""
def __init__(
self,
@@ -417,9 +483,11 @@
@property
def current_style(self) -> Style:
+ """Current style which is the product of all styles on the stack."""
return self.style_stack.current
def on_text(self, text: str, node_type: str) -> None:
+ """Called when the parser visits text."""
if node_type in {"fence", "code_inline"} and self._syntax is not None:
highlight_text = self._syntax.highlight(text)
highlight_text.rstrip()
@@ -430,16 +498,31 @@ self.stack.top.on_text(self, text)
def enter_style(self, style_name: str | Style) -> Style:
+ """Enter a style context."""
style = self.console.get_style(style_name, default="none")
self.style_stack.push(style)
return self.current_style
def leave_style(self) -> Style:
+ """Leave a style context."""
style = self.style_stack.pop()
return style
class Markdown(JupyterMixin):
+ """A Markdown renderable.
+
+ Args:
+ markup (str): A string containing markdown.
+ code_theme (str, optional): Pygments theme for code blocks. Defaults to "monokai". See https://pygments.org/styles/ for code themes.
+ justify (JustifyMethod, optional): Justify value for paragraphs. Defaults to None.
+ style (Union[str, Style], optional): Optional style to apply to markdown.
+ hyperlinks (bool, optional): Enable hyperlinks. Defaults to ``True``.
+ inline_code_lexer (str, optional): Lexer to use if inline code highlighting is
+ enabled. Defaults to None.
+ inline_code_theme (str, optional): Pygments theme for inline code
+ highlighting, or None for no highlighting. Defaults to None.
+ """
elements: ClassVar[dict[str, type[MarkdownElement]]] = {
"paragraph_open": Paragraph,
@@ -483,6 +566,7 @@ self.inline_code_theme = inline_code_theme or code_theme
def _flatten_tokens(self, tokens: Iterable[Token]) -> Iterable[Token]:
+ """Flattens the token stream."""
for token in tokens:
is_fence = token.type == "fence"
is_image = token.tag == "img"
@@ -494,6 +578,7 @@ def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
+ """Render markdown to the console."""
style = console.get_style(self.style, default="none")
options = options.update(height=None)
context = MarkdownContext(
@@ -705,4 +790,4 @@ console = Console(
force_terminal=args.force_color, width=args.width, record=True
)
- console.print(markdown)
+ console.print(markdown)
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/markdown.py |
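A short rendering sketch for the Markdown class documented above, assuming rich and its markdown-it-py dependency are installed; the markup string is illustrative.

from rich.console import Console
from rich.markdown import Markdown

console = Console()
markup = "# Heading\n\nSome *emphasis*, a [link](https://example.org), and `inline code`."
# hyperlinks=False takes the Link element path and prints the URL in parentheses
console.print(Markdown(markup, code_theme="monokai", hyperlinks=False))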
Add docstrings that explain purpose and usage | from pathlib import Path
from json import loads, dumps
from typing import Any, Callable, Optional, Union
from .text import Text
from .highlighter import JSONHighlighter, NullHighlighter
class JSON:
def __init__(
self,
json: str,
indent: Union[None, int, str] = 2,
highlight: bool = True,
skip_keys: bool = False,
ensure_ascii: bool = False,
check_circular: bool = True,
allow_nan: bool = True,
default: Optional[Callable[[Any], Any]] = None,
sort_keys: bool = False,
) -> None:
data = loads(json)
json = dumps(
data,
indent=indent,
skipkeys=skip_keys,
ensure_ascii=ensure_ascii,
check_circular=check_circular,
allow_nan=allow_nan,
default=default,
sort_keys=sort_keys,
)
highlighter = JSONHighlighter() if highlight else NullHighlighter()
self.text = highlighter(json)
self.text.no_wrap = True
self.text.overflow = None
@classmethod
def from_data(
cls,
data: Any,
indent: Union[None, int, str] = 2,
highlight: bool = True,
skip_keys: bool = False,
ensure_ascii: bool = False,
check_circular: bool = True,
allow_nan: bool = True,
default: Optional[Callable[[Any], Any]] = None,
sort_keys: bool = False,
) -> "JSON":
json_instance: "JSON" = cls.__new__(cls)
json = dumps(
data,
indent=indent,
skipkeys=skip_keys,
ensure_ascii=ensure_ascii,
check_circular=check_circular,
allow_nan=allow_nan,
default=default,
sort_keys=sort_keys,
)
highlighter = JSONHighlighter() if highlight else NullHighlighter()
json_instance.text = highlighter(json)
json_instance.text.no_wrap = True
json_instance.text.overflow = None
return json_instance
def __rich__(self) -> Text:
return self.text
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser(description="Pretty print json")
parser.add_argument(
"path",
metavar="PATH",
help="path to file, or - for stdin",
)
parser.add_argument(
"-i",
"--indent",
metavar="SPACES",
type=int,
help="Number of spaces in an indent",
default=2,
)
args = parser.parse_args()
from rich.console import Console
console = Console()
error_console = Console(stderr=True)
try:
if args.path == "-":
json_data = sys.stdin.read()
else:
json_data = Path(args.path).read_text()
except Exception as error:
error_console.print(f"Unable to read {args.path!r}; {error}")
sys.exit(-1)
console.print(JSON(json_data, indent=args.indent), soft_wrap=True) | --- +++ @@ -7,6 +7,20 @@
class JSON:
+ """A renderable which pretty prints JSON.
+
+ Args:
+ json (str): JSON encoded data.
+ indent (Union[None, int, str], optional): Number of characters to indent by. Defaults to 2.
+ highlight (bool, optional): Enable highlighting. Defaults to True.
+ skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False.
+ ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to False.
+ check_circular (bool, optional): Check for circular references. Defaults to True.
+ allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True.
+ default (Callable, optional): A callable that converts values that cannot be encoded
+ into something that can be JSON encoded. Defaults to None.
+ sort_keys (bool, optional): Sort dictionary keys. Defaults to False.
+ """
def __init__(
self,
@@ -49,6 +63,24 @@ default: Optional[Callable[[Any], Any]] = None,
sort_keys: bool = False,
) -> "JSON":
+ """Encodes a JSON object from arbitrary data.
+
+ Args:
+ data (Any): An object that may be encoded into JSON
+ indent (Union[None, int, str], optional): Number of characters to indent by. Defaults to 2.
+ highlight (bool, optional): Enable highlighting. Defaults to True.
+ skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False.
+ ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to False.
+ check_circular (bool, optional): Check for circular references. Defaults to True.
+ allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True.
+ default (Callable, optional): A callable that converts values that cannot be encoded
+ into something that can be JSON encoded. Defaults to None.
+ sort_keys (bool, optional): Sort dictionary keys. Defaults to False.
+
+ Returns:
+ JSON: New JSON object from the given data.
+ """
json_instance: "JSON" = cls.__new__(cls)
json = dumps(
data,
@@ -104,4 +136,4 @@ error_console.print(f"Unable to read {args.path!r}; {error}")
sys.exit(-1)
- console.print(JSON(json_data, indent=args.indent), soft_wrap=True)
+ console.print(JSON(json_data, indent=args.indent), soft_wrap=True)
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/json.py |
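A sketch of the two documented ways to build the JSON renderable, assuming rich is installed; the sample payload is made up.

from rich.console import Console
from rich.json import JSON

console = Console()
console.print(JSON('{"name": "apple", "count": 1}'))  # from an encoded string
console.print(JSON.from_data({"name": "apple", "count": 1}, sort_keys=True))  # from Python data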
Write proper docstrings for these functions | from operator import itemgetter
from typing import TYPE_CHECKING, Callable, NamedTuple, Optional, Sequence
from . import errors
from .protocol import is_renderable, rich_cast
if TYPE_CHECKING:
from .console import Console, ConsoleOptions, RenderableType
class Measurement(NamedTuple):
minimum: int
"""Minimum number of cells required to render."""
maximum: int
"""Maximum number of cells required to render."""
@property
def span(self) -> int:
return self.maximum - self.minimum
def normalize(self) -> "Measurement":
minimum, maximum = self
minimum = min(max(0, minimum), maximum)
return Measurement(max(0, minimum), max(0, max(minimum, maximum)))
def with_maximum(self, width: int) -> "Measurement":
minimum, maximum = self
return Measurement(min(minimum, width), min(maximum, width))
def with_minimum(self, width: int) -> "Measurement":
minimum, maximum = self
width = max(0, width)
return Measurement(max(minimum, width), max(maximum, width))
def clamp(
self, min_width: Optional[int] = None, max_width: Optional[int] = None
) -> "Measurement":
measurement = self
if min_width is not None:
measurement = measurement.with_minimum(min_width)
if max_width is not None:
measurement = measurement.with_maximum(max_width)
return measurement
@classmethod
def get(
cls, console: "Console", options: "ConsoleOptions", renderable: "RenderableType"
) -> "Measurement":
_max_width = options.max_width
if _max_width < 1:
return Measurement(0, 0)
if isinstance(renderable, str):
renderable = console.render_str(
renderable, markup=options.markup, highlight=False
)
renderable = rich_cast(renderable)
if is_renderable(renderable):
get_console_width: Optional[
Callable[["Console", "ConsoleOptions"], "Measurement"]
] = getattr(renderable, "__rich_measure__", None)
if get_console_width is not None:
render_width = (
get_console_width(console, options)
.normalize()
.with_maximum(_max_width)
)
if render_width.maximum < 1:
return Measurement(0, 0)
return render_width.normalize()
else:
return Measurement(0, _max_width)
else:
raise errors.NotRenderableError(
f"Unable to get render width for {renderable!r}; "
"a str, Segment, or object with __rich_console__ method is required"
)
def measure_renderables(
console: "Console",
options: "ConsoleOptions",
renderables: Sequence["RenderableType"],
) -> "Measurement":
if not renderables:
return Measurement(0, 0)
get_measurement = Measurement.get
measurements = [
get_measurement(console, options, renderable) for renderable in renderables
]
measured_width = Measurement(
max(measurements, key=itemgetter(0)).minimum,
max(measurements, key=itemgetter(1)).maximum,
)
return measured_width | --- +++ @@ -9,6 +9,7 @@
class Measurement(NamedTuple):
+ """Stores the minimum and maximum widths (in characters) required to render an object."""
minimum: int
"""Minimum number of cells required to render."""
@@ -17,18 +18,40 @@
@property
def span(self) -> int:
+ """Get difference between maximum and minimum."""
return self.maximum - self.minimum
def normalize(self) -> "Measurement":
+ """Get measurement that ensures that minimum <= maximum and minimum >= 0
+
+ Returns:
+ Measurement: A normalized measurement.
+ """
minimum, maximum = self
minimum = min(max(0, minimum), maximum)
return Measurement(max(0, minimum), max(0, max(minimum, maximum)))
def with_maximum(self, width: int) -> "Measurement":
+ """Get a RenderableWith where the widths are <= width.
+
+ Args:
+ width (int): Maximum desired width.
+
+ Returns:
+ Measurement: New Measurement object.
+ """
minimum, maximum = self
return Measurement(min(minimum, width), min(maximum, width))
def with_minimum(self, width: int) -> "Measurement":
+ """Get a RenderableWith where the widths are >= width.
+
+ Args:
+ width (int): Minimum desired width.
+
+ Returns:
+ Measurement: New Measurement object.
+ """
minimum, maximum = self
width = max(0, width)
return Measurement(max(minimum, width), max(maximum, width))
@@ -36,6 +59,15 @@ def clamp(
self, min_width: Optional[int] = None, max_width: Optional[int] = None
) -> "Measurement":
+ """Clamp a measurement within the specified range.
+
+ Args:
+ min_width (int): Minimum desired width, or ``None`` for no minimum. Defaults to None.
+ max_width (int): Maximum desired width, or ``None`` for no maximum. Defaults to None.
+
+ Returns:
+ Measurement: New Measurement object.
+ """
measurement = self
if min_width is not None:
measurement = measurement.with_minimum(min_width)
@@ -47,6 +79,19 @@ def get(
cls, console: "Console", options: "ConsoleOptions", renderable: "RenderableType"
) -> "Measurement":
+ """Get a measurement for a renderable.
+
+ Args:
+ console (~rich.console.Console): Console instance.
+ options (~rich.console.ConsoleOptions): Console options.
+ renderable (RenderableType): An object that may be rendered with Rich.
+
+ Raises:
+ errors.NotRenderableError: If the object is not renderable.
+
+ Returns:
+ Measurement: Measurement object containing range of character widths required to render the object.
+ """
_max_width = options.max_width
if _max_width < 1:
return Measurement(0, 0)
@@ -82,6 +127,17 @@ options: "ConsoleOptions",
renderables: Sequence["RenderableType"],
) -> "Measurement":
+ """Get a measurement that would fit a number of renderables.
+
+ Args:
+ console (~rich.console.Console): Console instance.
+ options (~rich.console.ConsoleOptions): Console options.
+ renderables (Sequence[RenderableType]): One or more renderable objects.
+
+ Returns:
+ Measurement: Measurement object containing range of character widths required to
+ contain all given renderables.
+ """
if not renderables:
return Measurement(0, 0)
get_measurement = Measurement.get
@@ -92,4 +148,4 @@ max(measurements, key=itemgetter(0)).minimum,
max(measurements, key=itemgetter(1)).maximum,
)
- return measured_width
+ return measured_width
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/measure.py |
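A sketch exercising the documented Measurement helpers, assuming rich is installed; the values printed by Measurement.get depend on the terminal width, so the comments are only indicative.

from rich.console import Console
from rich.measure import Measurement, measure_renderables

console = Console()
options = console.options
print(Measurement.get(console, options, "Hello, World"))  # e.g. Measurement(minimum=6, maximum=12)
print(Measurement(3, 20).with_maximum(10))                # Measurement(minimum=3, maximum=10)
print(measure_renderables(console, options, ["foo", "a longer renderable"]))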
Add professional docstrings to my codebase | import re
from abc import ABC, abstractmethod
from typing import ClassVar, Sequence, Union
from .text import Span, Text
def _combine_regex(*regexes: str) -> str:
return "|".join(regexes)
class Highlighter(ABC):
def __call__(self, text: Union[str, Text]) -> Text:
if isinstance(text, str):
highlight_text = Text(text)
elif isinstance(text, Text):
highlight_text = text.copy()
else:
raise TypeError(f"str or Text instance required, not {text!r}")
self.highlight(highlight_text)
return highlight_text
@abstractmethod
def highlight(self, text: Text) -> None:
class NullHighlighter(Highlighter):
def highlight(self, text: Text) -> None:
class RegexHighlighter(Highlighter):
highlights: ClassVar[Sequence[str]] = []
base_style: ClassVar[str] = ""
def highlight(self, text: Text) -> None:
highlight_regex = text.highlight_regex
for re_highlight in self.highlights:
highlight_regex(re_highlight, style_prefix=self.base_style)
class ReprHighlighter(RegexHighlighter):
base_style = "repr."
highlights: ClassVar[Sequence[str]] = [
r"(?P<tag_start><)(?P<tag_name>[-\w.:|]*)(?P<tag_contents>[\w\W]*)(?P<tag_end>>)",
r'(?P<attrib_name>[\w_]{1,50})=(?P<attrib_value>"?[\w_]+"?)?',
r"(?P<brace>[][{}()])",
_combine_regex(
r"(?P<ipv4>[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})",
r"(?P<ipv6>([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4})",
r"(?P<eui64>(?:[0-9A-Fa-f]{1,2}-){7}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{1,2}:){7}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{4}\.){3}[0-9A-Fa-f]{4})",
r"(?P<eui48>(?:[0-9A-Fa-f]{1,2}-){5}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{1,2}:){5}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{4}\.){2}[0-9A-Fa-f]{4})",
r"(?P<uuid>[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12})",
r"(?P<call>[\w.]*?)\(",
r"\b(?P<bool_true>True)\b|\b(?P<bool_false>False)\b|\b(?P<none>None)\b",
r"(?P<ellipsis>\.\.\.)",
r"(?P<number_complex>(?<!\w)(?:\-?[0-9]+\.?[0-9]*(?:e[-+]?\d+?)?)(?:[-+](?:[0-9]+\.?[0-9]*(?:e[-+]?\d+)?))?j)",
r"(?P<number>(?<!\w)\-?[0-9]+\.?[0-9]*(e[-+]?\d+?)?\b|0x[0-9a-fA-F]*)",
r"(?P<path>\B(/[-\w._+]+)*\/)(?P<filename>[-\w._+]*)?",
r"(?<![\\\w])(?P<str>b?'''.*?(?<!\\)'''|b?'.*?(?<!\\)'|b?\"\"\".*?(?<!\\)\"\"\"|b?\".*?(?<!\\)\")",
r"(?P<url>(file|https|http|ws|wss)://[-0-9a-zA-Z$_+!`(),.?/;:&=%#~@]*)",
),
]
class JSONHighlighter(RegexHighlighter):
# Captures the start and end of JSON strings, handling escaped quotes
JSON_STR = r"(?<![\\\w])(?P<str>b?\".*?(?<!\\)\")"
JSON_WHITESPACE = {" ", "\n", "\r", "\t"}
base_style: ClassVar[str] = "json."
highlights: ClassVar[Sequence[str]] = [
_combine_regex(
r"(?P<brace>[\{\[\(\)\]\}])",
r"\b(?P<bool_true>true)\b|\b(?P<bool_false>false)\b|\b(?P<null>null)\b",
r"(?P<number>(?<!\w)\-?[0-9]+\.?[0-9]*(e[\-\+]?\d+?)?\b|0x[0-9a-fA-F]*)",
JSON_STR,
),
]
def highlight(self, text: Text) -> None:
super().highlight(text)
# Additional work to handle highlighting JSON keys
plain = text.plain
append = text.spans.append
whitespace = self.JSON_WHITESPACE
for match in re.finditer(self.JSON_STR, plain):
start, end = match.span()
cursor = end
while cursor < len(plain):
char = plain[cursor]
cursor += 1
if char == ":":
append(Span(start, end, "json.key"))
elif char in whitespace:
continue
break
class ISO8601Highlighter(RegexHighlighter):
base_style: ClassVar[str] = "iso8601."
highlights: ClassVar[Sequence[str]] = [
#
# Dates
#
# Calendar month (e.g. 2008-08). The hyphen is required
r"^(?P<year>[0-9]{4})-(?P<month>1[0-2]|0[1-9])$",
# Calendar date w/o hyphens (e.g. 20080830)
r"^(?P<date>(?P<year>[0-9]{4})(?P<month>1[0-2]|0[1-9])(?P<day>3[01]|0[1-9]|[12][0-9]))$",
# Ordinal date (e.g. 2008-243). The hyphen is optional
r"^(?P<date>(?P<year>[0-9]{4})-?(?P<day>36[0-6]|3[0-5][0-9]|[12][0-9]{2}|0[1-9][0-9]|00[1-9]))$",
#
# Weeks
#
# Week of the year (e.g., 2008-W35). The hyphen is optional
r"^(?P<date>(?P<year>[0-9]{4})-?W(?P<week>5[0-3]|[1-4][0-9]|0[1-9]))$",
# Week date (e.g., 2008-W35-6). The hyphens are optional
r"^(?P<date>(?P<year>[0-9]{4})-?W(?P<week>5[0-3]|[1-4][0-9]|0[1-9])-?(?P<day>[1-7]))$",
#
# Times
#
# Hours and minutes (e.g., 17:21). The colon is optional
r"^(?P<time>(?P<hour>2[0-3]|[01][0-9]):?(?P<minute>[0-5][0-9]))$",
# Hours, minutes, and seconds w/o colons (e.g., 172159)
r"^(?P<time>(?P<hour>2[0-3]|[01][0-9])(?P<minute>[0-5][0-9])(?P<second>[0-5][0-9]))$",
# Time zone designator (e.g., Z, +07 or +07:00). The colons and the minutes are optional
r"^(?P<timezone>(Z|[+-](?:2[0-3]|[01][0-9])(?::?(?:[0-5][0-9]))?))$",
# Hours, minutes, and seconds with time zone designator (e.g., 17:21:59+07:00).
# All the colons are optional. The minutes in the time zone designator are also optional
r"^(?P<time>(?P<hour>2[0-3]|[01][0-9])(?P<minute>[0-5][0-9])(?P<second>[0-5][0-9]))(?P<timezone>Z|[+-](?:2[0-3]|[01][0-9])(?::?(?:[0-5][0-9]))?)$",
#
# Date and Time
#
# Calendar date with hours, minutes, and seconds (e.g., 2008-08-30 17:21:59 or 20080830 172159).
# A space is required between the date and the time. The hyphens and colons are optional.
# This regex matches dates and times that specify some hyphens or colons but omit others.
# This does not follow ISO 8601
r"^(?P<date>(?P<year>[0-9]{4})(?P<hyphen>-)?(?P<month>1[0-2]|0[1-9])(?(hyphen)-)(?P<day>3[01]|0[1-9]|[12][0-9])) (?P<time>(?P<hour>2[0-3]|[01][0-9])(?(hyphen):)(?P<minute>[0-5][0-9])(?(hyphen):)(?P<second>[0-5][0-9]))$",
#
# XML Schema dates and times
#
# Date, with optional time zone (e.g., 2008-08-30 or 2008-08-30+07:00).
# Hyphens are required. This is the XML Schema 'date' type
r"^(?P<date>(?P<year>-?(?:[1-9][0-9]*)?[0-9]{4})-(?P<month>1[0-2]|0[1-9])-(?P<day>3[01]|0[1-9]|[12][0-9]))(?P<timezone>Z|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$",
# Time, with optional fractional seconds and time zone (e.g., 01:45:36 or 01:45:36.123+07:00).
# There is no limit on the number of digits for the fractional seconds. This is the XML Schema 'time' type
r"^(?P<time>(?P<hour>2[0-3]|[01][0-9]):(?P<minute>[0-5][0-9]):(?P<second>[0-5][0-9])(?P<frac>\.[0-9]+)?)(?P<timezone>Z|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$",
# Date and time, with optional fractional seconds and time zone (e.g., 2008-08-30T01:45:36 or 2008-08-30T01:45:36.123Z).
# This is the XML Schema 'dateTime' type
r"^(?P<date>(?P<year>-?(?:[1-9][0-9]*)?[0-9]{4})-(?P<month>1[0-2]|0[1-9])-(?P<day>3[01]|0[1-9]|[12][0-9]))T(?P<time>(?P<hour>2[0-3]|[01][0-9]):(?P<minute>[0-5][0-9]):(?P<second>[0-5][0-9])(?P<ms>\.[0-9]+)?)(?P<timezone>Z|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$",
]
if __name__ == "__main__": # pragma: no cover
from .console import Console
console = Console()
console.print("[bold green]hello world![/bold green]")
console.print("'[bold green]hello world![/bold green]'")
console.print(" /foo")
console.print("/foo/")
console.print("/foo/bar")
console.print("foo/bar/baz")
console.print("/foo/bar/baz?foo=bar+egg&egg=baz")
console.print("/foo/bar/baz/")
console.print("/foo/bar/baz/egg")
console.print("/foo/bar/baz/egg.py")
console.print("/foo/bar/baz/egg.py word")
console.print(" /foo/bar/baz/egg.py word")
console.print("foo /foo/bar/baz/egg.py word")
console.print("foo /foo/bar/ba._++z/egg+.py word")
console.print("https://example.org?foo=bar#header")
console.print(1234567.34)
console.print(1 / 2)
console.print(-1 / 123123123123)
console.print(
"127.0.1.1 bar 192.168.1.4 2001:0db8:85a3:0000:0000:8a2e:0370:7334 foo"
)
import json
console.print_json(json.dumps(obj={"name": "apple", "count": 1}), indent=None) | --- +++ @@ -6,12 +6,29 @@
def _combine_regex(*regexes: str) -> str:
+ """Combine a number of regexes in to a single regex.
+
+ Returns:
+ str: New regex with all regexes ORed together.
+ """
return "|".join(regexes)
class Highlighter(ABC):
+ """Abstract base class for highlighters."""
def __call__(self, text: Union[str, Text]) -> Text:
+ """Highlight a str or Text instance.
+
+ Args:
+ text (Union[str, ~Text]): Text to highlight.
+
+ Raises:
+ TypeError: If not called with text or str.
+
+ Returns:
+ Text: A Text instance with highlighting applied.
+ """
if isinstance(text, str):
highlight_text = Text(text)
elif isinstance(text, Text):
@@ -23,19 +40,37 @@
@abstractmethod
def highlight(self, text: Text) -> None:
+ """Apply highlighting in place to text.
+
+ Args:
+ text (~Text): A text object to highlight.
+ """
class NullHighlighter(Highlighter):
-
- def highlight(self, text: Text) -> None:
+ """A highlighter object that doesn't highlight.
+
+ May be used to disable highlighting entirely.
+
+ """
+
+ def highlight(self, text: Text) -> None:
+ """Nothing to do"""
class RegexHighlighter(Highlighter):
+ """Applies highlighting from a list of regular expressions."""
highlights: ClassVar[Sequence[str]] = []
base_style: ClassVar[str] = ""
def highlight(self, text: Text) -> None:
+ """Highlight :class:`rich.text.Text` using regular expressions.
+
+ Args:
+ text (~Text): Text to be highlighted.
+
+ """
highlight_regex = text.highlight_regex
for re_highlight in self.highlights:
@@ -43,6 +78,7 @@
class ReprHighlighter(RegexHighlighter):
+ """Highlights the text typically produced from ``__repr__`` methods."""
base_style = "repr."
highlights: ClassVar[Sequence[str]] = [
@@ -68,6 +104,7 @@
class JSONHighlighter(RegexHighlighter):
+ """Highlights JSON"""
# Captures the start and end of JSON strings, handling escaped quotes
JSON_STR = r"(?<![\\\w])(?P<str>b?\".*?(?<!\\)\")"
@@ -104,6 +141,9 @@
class ISO8601Highlighter(RegexHighlighter):
+ """Highlights the ISO8601 date time strings.
+ Regex reference: https://www.oreilly.com/library/view/regular-expressions-cookbook/9781449327453/ch04s07.html
+ """
base_style: ClassVar[str] = "iso8601."
highlights: ClassVar[Sequence[str]] = [
@@ -189,4 +229,4 @@ )
import json
- console.print_json(json.dumps(obj={"name": "apple", "count": 1}), indent=None)
+ console.print_json(json.dumps(obj={"name": "apple", "count": 1}), indent=None)
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/highlighter.py |
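A sketch of subclassing the documented RegexHighlighter; the "example.email" style name and the e-mail regex are illustrative assumptions, not part of the row above.

from rich.console import Console
from rich.highlighter import RegexHighlighter
from rich.theme import Theme

class EmailHighlighter(RegexHighlighter):
    """Apply the example.email style to anything resembling an e-mail address."""

    base_style = "example."
    highlights = [r"(?P<email>[\w.+-]+@[\w-]+\.[\w.-]+)"]

console = Console(highlighter=EmailHighlighter(), theme=Theme({"example.email": "bold magenta"}))
console.print("Send the bug report to jane.doe@example.com")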
Generate docstrings for exported functions | from collections.abc import Mapping
from typing import TYPE_CHECKING, Any, Optional, Tuple
from .highlighter import ReprHighlighter
from .panel import Panel
from .pretty import Pretty
from .table import Table
from .text import Text, TextType
if TYPE_CHECKING:
from .console import ConsoleRenderable, OverflowMethod
def render_scope(
scope: "Mapping[str, Any]",
*,
title: Optional[TextType] = None,
sort_keys: bool = True,
indent_guides: bool = False,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
max_depth: Optional[int] = None,
overflow: Optional["OverflowMethod"] = None,
) -> "ConsoleRenderable":
highlighter = ReprHighlighter()
items_table = Table.grid(padding=(0, 1), expand=False)
items_table.add_column(justify="right")
def sort_items(item: Tuple[str, Any]) -> Tuple[bool, str]:
key, _ = item
return (not key.startswith("__"), key.lower())
items = sorted(scope.items(), key=sort_items) if sort_keys else scope.items()
for key, value in items:
key_text = Text.assemble(
(key, "scope.key.special" if key.startswith("__") else "scope.key"),
(" =", "scope.equals"),
)
items_table.add_row(
key_text,
Pretty(
value,
highlighter=highlighter,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
max_depth=max_depth,
overflow=overflow,
),
)
return Panel.fit(
items_table,
title=title,
border_style="scope.border",
padding=(0, 1),
)
if __name__ == "__main__": # pragma: no cover
from rich import print
print()
def test(foo: float, bar: float) -> None:
list_of_things = [1, 2, 3, None, 4, True, False, "Hello World"]
dict_of_things = {
"version": "1.1",
"method": "confirmFruitPurchase",
"params": [["apple", "orange", "mangoes", "pomelo"], 1.123],
"id": "194521489",
}
print(render_scope(locals(), title="[i]locals", sort_keys=False))
test(20.3423, 3.1427)
print() | --- +++ @@ -22,11 +22,28 @@ max_depth: Optional[int] = None,
overflow: Optional["OverflowMethod"] = None,
) -> "ConsoleRenderable":
+ """Render python variables in a given scope.
+
+ Args:
+ scope (Mapping): A mapping containing variable names and values.
+ title (str, optional): Optional title. Defaults to None.
+ sort_keys (bool, optional): Enable sorting of items. Defaults to True.
+ indent_guides (bool, optional): Enable indentation guides. Defaults to False.
+ max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+ Defaults to None.
+ max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
+ max_depth (int, optional): Maximum depth of locals before truncating, or None to disable. Defaults to None.
+ overflow (OverflowMethod, optional): How to handle overflowing locals, or None to disable. Defaults to None.
+
+ Returns:
+ ConsoleRenderable: A renderable object.
+ """
highlighter = ReprHighlighter()
items_table = Table.grid(padding=(0, 1), expand=False)
items_table.add_column(justify="right")
def sort_items(item: Tuple[str, Any]) -> Tuple[bool, str]:
+ """Sort special variables first, then alphabetically."""
key, _ = item
return (not key.startswith("__"), key.lower())
@@ -72,4 +89,4 @@ print(render_scope(locals(), title="[i]locals", sort_keys=False))
test(20.3423, 3.1427)
- print()
+ print()
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/scope.py |
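A usage sketch for the documented render_scope helper; the checkout() function is a hypothetical stand-in whose locals get rendered.

from rich import print
from rich.scope import render_scope

def checkout(price: float, quantity: int) -> None:
    total = price * quantity
    print(render_scope(locals(), title="[i]locals", indent_guides=True))

checkout(3.5, 4)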
Generate consistent documentation across files | import time
from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Union, Final
from .segment import ControlCode, ControlType, Segment
if TYPE_CHECKING:
from .console import Console, ConsoleOptions, RenderResult
STRIP_CONTROL_CODES: Final = [
7, # Bell
8, # Backspace
11, # Vertical tab
12, # Form feed
13, # Carriage return
]
_CONTROL_STRIP_TRANSLATE: Final = {
_codepoint: None for _codepoint in STRIP_CONTROL_CODES
}
CONTROL_ESCAPE: Final = {
7: "\\a",
8: "\\b",
11: "\\v",
12: "\\f",
13: "\\r",
}
CONTROL_CODES_FORMAT: Dict[int, Callable[..., str]] = {
ControlType.BELL: lambda: "\x07",
ControlType.CARRIAGE_RETURN: lambda: "\r",
ControlType.HOME: lambda: "\x1b[H",
ControlType.CLEAR: lambda: "\x1b[2J",
ControlType.ENABLE_ALT_SCREEN: lambda: "\x1b[?1049h",
ControlType.DISABLE_ALT_SCREEN: lambda: "\x1b[?1049l",
ControlType.SHOW_CURSOR: lambda: "\x1b[?25h",
ControlType.HIDE_CURSOR: lambda: "\x1b[?25l",
ControlType.CURSOR_UP: lambda param: f"\x1b[{param}A",
ControlType.CURSOR_DOWN: lambda param: f"\x1b[{param}B",
ControlType.CURSOR_FORWARD: lambda param: f"\x1b[{param}C",
ControlType.CURSOR_BACKWARD: lambda param: f"\x1b[{param}D",
ControlType.CURSOR_MOVE_TO_COLUMN: lambda param: f"\x1b[{param+1}G",
ControlType.ERASE_IN_LINE: lambda param: f"\x1b[{param}K",
ControlType.CURSOR_MOVE_TO: lambda x, y: f"\x1b[{y+1};{x+1}H",
ControlType.SET_WINDOW_TITLE: lambda title: f"\x1b]0;{title}\x07",
}
class Control:
__slots__ = ["segment"]
def __init__(self, *codes: Union[ControlType, ControlCode]) -> None:
control_codes: List[ControlCode] = [
(code,) if isinstance(code, ControlType) else code for code in codes
]
_format_map = CONTROL_CODES_FORMAT
rendered_codes = "".join(
_format_map[code](*parameters) for code, *parameters in control_codes
)
self.segment = Segment(rendered_codes, None, control_codes)
@classmethod
def bell(cls) -> "Control":
return cls(ControlType.BELL)
@classmethod
def home(cls) -> "Control":
return cls(ControlType.HOME)
@classmethod
def move(cls, x: int = 0, y: int = 0) -> "Control":
def get_codes() -> Iterable[ControlCode]:
control = ControlType
if x:
yield (
control.CURSOR_FORWARD if x > 0 else control.CURSOR_BACKWARD,
abs(x),
)
if y:
yield (
control.CURSOR_DOWN if y > 0 else control.CURSOR_UP,
abs(y),
)
control = cls(*get_codes())
return control
@classmethod
def move_to_column(cls, x: int, y: int = 0) -> "Control":
return (
cls(
(ControlType.CURSOR_MOVE_TO_COLUMN, x),
(
ControlType.CURSOR_DOWN if y > 0 else ControlType.CURSOR_UP,
abs(y),
),
)
if y
else cls((ControlType.CURSOR_MOVE_TO_COLUMN, x))
)
@classmethod
def move_to(cls, x: int, y: int) -> "Control":
return cls((ControlType.CURSOR_MOVE_TO, x, y))
@classmethod
def clear(cls) -> "Control":
return cls(ControlType.CLEAR)
@classmethod
def show_cursor(cls, show: bool) -> "Control":
return cls(ControlType.SHOW_CURSOR if show else ControlType.HIDE_CURSOR)
@classmethod
def alt_screen(cls, enable: bool) -> "Control":
if enable:
return cls(ControlType.ENABLE_ALT_SCREEN, ControlType.HOME)
else:
return cls(ControlType.DISABLE_ALT_SCREEN)
@classmethod
def title(cls, title: str) -> "Control":
return cls((ControlType.SET_WINDOW_TITLE, title))
def __str__(self) -> str:
return self.segment.text
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
if self.segment.text:
yield self.segment
def strip_control_codes(
text: str, _translate_table: Dict[int, None] = _CONTROL_STRIP_TRANSLATE
) -> str:
return text.translate(_translate_table)
def escape_control_codes(
text: str,
_translate_table: Dict[int, str] = CONTROL_ESCAPE,
) -> str:
return text.translate(_translate_table)
if __name__ == "__main__": # pragma: no cover
from rich.console import Console
console = Console()
console.print("Look at the title of your terminal window ^")
# console.print(Control((ControlType.SET_WINDOW_TITLE, "Hello, world!")))
for i in range(10):
console.set_window_title("🚀 Loading" + "." * i)
time.sleep(0.5) | --- +++ @@ -46,6 +46,12 @@
class Control:
+ """A renderable that inserts a control code (non printable but may move cursor).
+
+ Args:
+ *codes (str): Positional arguments are either a :class:`~rich.segment.ControlType` enum or a
+ tuple of ControlType and an integer parameter
+ """
__slots__ = ["segment"]
@@ -61,14 +67,26 @@
@classmethod
def bell(cls) -> "Control":
+ """Ring the 'bell'."""
return cls(ControlType.BELL)
@classmethod
def home(cls) -> "Control":
+ """Move cursor to 'home' position."""
return cls(ControlType.HOME)
@classmethod
def move(cls, x: int = 0, y: int = 0) -> "Control":
+ """Move cursor relative to current position.
+
+ Args:
+ x (int): X offset.
+ y (int): Y offset.
+
+ Returns:
+ ~Control: Control object.
+
+ """
def get_codes() -> Iterable[ControlCode]:
control = ControlType
@@ -88,6 +106,15 @@
@classmethod
def move_to_column(cls, x: int, y: int = 0) -> "Control":
+ """Move to the given column, optionally add offset to row.
+
+ Args:
+ x (int): Absolute x (column).
+ y (int): Optional y offset (row).
+
+ Returns:
+ ~Control: Control object.
+ """
return (
cls(
@@ -103,18 +130,30 @@
@classmethod
def move_to(cls, x: int, y: int) -> "Control":
+ """Move cursor to absolute position.
+
+ Args:
+ x (int): x offset (column)
+ y (int): y offset (row)
+
+ Returns:
+ ~Control: Control object.
+ """
return cls((ControlType.CURSOR_MOVE_TO, x, y))
@classmethod
def clear(cls) -> "Control":
+ """Clear the screen."""
return cls(ControlType.CLEAR)
@classmethod
def show_cursor(cls, show: bool) -> "Control":
+ """Show or hide the cursor."""
return cls(ControlType.SHOW_CURSOR if show else ControlType.HIDE_CURSOR)
@classmethod
def alt_screen(cls, enable: bool) -> "Control":
+ """Enable or disable alt screen."""
if enable:
return cls(ControlType.ENABLE_ALT_SCREEN, ControlType.HOME)
else:
@@ -122,6 +161,11 @@
@classmethod
def title(cls, title: str) -> "Control":
+ """Set the terminal window title
+
+ Args:
+ title (str): The new terminal window title
+ """
return cls((ControlType.SET_WINDOW_TITLE, title))
def __str__(self) -> str:
@@ -137,6 +181,14 @@ def strip_control_codes(
text: str, _translate_table: Dict[int, None] = _CONTROL_STRIP_TRANSLATE
) -> str:
+ """Remove control codes from text.
+
+ Args:
+ text (str): A string possibly contain control codes.
+
+ Returns:
+ str: String with control codes removed.
+ """
return text.translate(_translate_table)
@@ -144,6 +196,15 @@ text: str,
_translate_table: Dict[int, str] = CONTROL_ESCAPE,
) -> str:
+ """Replace control codes with their "escaped" equivalent in the given text.
+ (e.g. "\b" becomes "\\b")
+
+ Args:
+ text (str): A string possibly containing control codes.
+
+ Returns:
+ str: String with control codes replaced with their escaped version.
+ """
return text.translate(_translate_table)
@@ -155,4 +216,4 @@ # console.print(Control((ControlType.SET_WINDOW_TITLE, "Hello, world!")))
for i in range(10):
console.set_window_title("🚀 Loading" + "." * i)
- time.sleep(0.5)
+ time.sleep(0.5)
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/control.py |
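A sketch driving the documented Control classmethods and module helpers, assuming rich is installed; cursor movement is only observable in an interactive terminal.

from rich.console import Console
from rich.control import Control, escape_control_codes, strip_control_codes

console = Console()
console.control(Control.show_cursor(False), Control.move(x=4, y=1))  # relative move
console.control(Control.show_cursor(True))
print(strip_control_codes("progress\rdone"))  # carriage return removed -> "progressdone"
print(escape_control_codes("bell\x07"))       # bell byte escaped -> prints bell\a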
Add docstrings for better understanding | import builtins
import collections
import dataclasses
import inspect
import os
import reprlib
import sys
from array import array
from collections import Counter, UserDict, UserList, defaultdict, deque
from dataclasses import dataclass, fields, is_dataclass
from inspect import isclass
from itertools import islice
from types import MappingProxyType
from typing import (
TYPE_CHECKING,
Any,
Callable,
DefaultDict,
Deque,
Dict,
Iterable,
List,
Optional,
Sequence,
Set,
Tuple,
Union,
)
from rich.repr import RichReprResult
try:
import attr as _attr_module
_has_attrs = hasattr(_attr_module, "ib")
except ImportError: # pragma: no cover
_has_attrs = False
from . import get_console
from ._loop import loop_last
from ._pick import pick_bool
from .abc import RichRenderable
from .cells import cell_len
from .highlighter import ReprHighlighter
from .jupyter import JupyterMixin, JupyterRenderable
from .measure import Measurement
from .text import Text
if TYPE_CHECKING:
from .console import (
Console,
ConsoleOptions,
HighlighterType,
JustifyMethod,
OverflowMethod,
RenderResult,
)
def _is_attr_object(obj: Any) -> bool:
return _has_attrs and _attr_module.has(type(obj))
def _get_attr_fields(obj: Any) -> Sequence["_attr_module.Attribute[Any]"]:
return _attr_module.fields(type(obj)) if _has_attrs else []
def _is_dataclass_repr(obj: object) -> bool:
# Digging in to a lot of internals here
# Catching all exceptions in case something is missing on a non CPython implementation
try:
return obj.__repr__.__code__.co_filename in (
dataclasses.__file__,
reprlib.__file__,
)
except Exception: # pragma: no coverage
return False
_dummy_namedtuple = collections.namedtuple("_dummy_namedtuple", [])
def _has_default_namedtuple_repr(obj: object) -> bool:
obj_file = None
try:
obj_file = inspect.getfile(obj.__repr__)
except (OSError, TypeError):
# OSError handles case where object is defined in __main__ scope, e.g. REPL - no filename available.
# TypeError trapped defensively, in case of object without filename slips through.
pass
default_repr_file = inspect.getfile(_dummy_namedtuple.__repr__)
return obj_file == default_repr_file
def _ipy_display_hook(
value: Any,
console: Optional["Console"] = None,
overflow: "OverflowMethod" = "ignore",
crop: bool = False,
indent_guides: bool = False,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
max_depth: Optional[int] = None,
expand_all: bool = False,
) -> Union[str, None]:
# needed here to prevent circular import:
from .console import ConsoleRenderable
# always skip rich generated jupyter renderables or None values
if _safe_isinstance(value, JupyterRenderable) or value is None:
return None
console = console or get_console()
with console.capture() as capture:
# certain renderables should start on a new line
if _safe_isinstance(value, ConsoleRenderable):
console.line()
console.print(
(
value
if _safe_isinstance(value, RichRenderable)
else Pretty(
value,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
max_depth=max_depth,
expand_all=expand_all,
margin=12,
)
),
crop=crop,
new_line_start=True,
end="",
)
# strip trailing newline, not usually part of a text repr
# I'm not sure if this should be prevented at a lower level
return capture.get().rstrip("\n")
def _safe_isinstance(
obj: object, class_or_tuple: Union[type, Tuple[type, ...]]
) -> bool:
try:
return isinstance(obj, class_or_tuple)
except Exception:
return False
def install(
console: Optional["Console"] = None,
overflow: "OverflowMethod" = "ignore",
crop: bool = False,
indent_guides: bool = False,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
max_depth: Optional[int] = None,
expand_all: bool = False,
) -> None:
from rich import get_console
console = console or get_console()
assert console is not None
def display_hook(value: Any) -> None:
if value is not None:
assert console is not None
builtins._ = None # type: ignore[attr-defined]
console.print(
(
value
if _safe_isinstance(value, RichRenderable)
else Pretty(
value,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
max_depth=max_depth,
expand_all=expand_all,
)
),
crop=crop,
)
builtins._ = value # type: ignore[attr-defined]
try:
ip = get_ipython() # type: ignore[name-defined]
except NameError:
sys.displayhook = display_hook
else:
from IPython.core.formatters import BaseFormatter
class RichFormatter(BaseFormatter): # type: ignore[misc]
pprint: bool = True
def __call__(self, value: Any) -> Any:
if self.pprint:
return _ipy_display_hook(
value,
console=console,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
max_depth=max_depth,
expand_all=expand_all,
)
else:
return repr(value)
# replace plain text formatter with rich formatter
rich_formatter = RichFormatter()
ip.display_formatter.formatters["text/plain"] = rich_formatter
class Pretty(JupyterMixin):
def __init__(
self,
_object: Any,
highlighter: Optional["HighlighterType"] = None,
*,
indent_size: int = 4,
justify: Optional["JustifyMethod"] = None,
overflow: Optional["OverflowMethod"] = None,
no_wrap: Optional[bool] = False,
indent_guides: bool = False,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
max_depth: Optional[int] = None,
expand_all: bool = False,
margin: int = 0,
insert_line: bool = False,
) -> None:
self._object = _object
self.highlighter = highlighter or ReprHighlighter()
self.indent_size = indent_size
self.justify: Optional["JustifyMethod"] = justify
self.overflow: Optional["OverflowMethod"] = overflow
self.no_wrap = no_wrap
self.indent_guides = indent_guides
self.max_length = max_length
self.max_string = max_string
self.max_depth = max_depth
self.expand_all = expand_all
self.margin = margin
self.insert_line = insert_line
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width - self.margin,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
max_depth=self.max_depth,
expand_all=self.expand_all,
)
pretty_text = Text.from_ansi(
pretty_str,
justify=self.justify or options.justify,
overflow=self.overflow or options.overflow,
no_wrap=pick_bool(self.no_wrap, options.no_wrap),
style="pretty",
)
pretty_text = (
self.highlighter(pretty_text)
if pretty_text
else Text(
f"{type(self._object)}.__repr__ returned empty string",
style="dim italic",
)
)
if self.indent_guides and not options.ascii_only:
pretty_text = pretty_text.with_indent_guides(
self.indent_size, style="repr.indent"
)
if self.insert_line and "\n" in pretty_text:
yield ""
yield pretty_text
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> "Measurement":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
max_depth=self.max_depth,
expand_all=self.expand_all,
)
text_width = (
max(cell_len(line) for line in pretty_str.splitlines()) if pretty_str else 0
)
return Measurement(text_width, text_width)
def _get_braces_for_defaultdict(_object: DefaultDict[Any, Any]) -> Tuple[str, str, str]:
return (
f"defaultdict({_object.default_factory!r}, {{",
"})",
f"defaultdict({_object.default_factory!r}, {{}})",
)
def _get_braces_for_deque(_object: Deque[Any]) -> Tuple[str, str, str]:
if _object.maxlen is None:
return ("deque([", "])", "deque()")
return (
"deque([",
f"], maxlen={_object.maxlen})",
f"deque(maxlen={_object.maxlen})",
)
def _get_braces_for_array(_object: "array[Any]") -> Tuple[str, str, str]:
return (f"array({_object.typecode!r}, [", "])", f"array({_object.typecode!r})")
_BRACES: Dict[type, Callable[[Any], Tuple[str, str, str]]] = {
os._Environ: lambda _object: ("environ({", "})", "environ({})"),
array: _get_braces_for_array,
defaultdict: _get_braces_for_defaultdict,
Counter: lambda _object: ("Counter({", "})", "Counter()"),
deque: _get_braces_for_deque,
dict: lambda _object: ("{", "}", "{}"),
UserDict: lambda _object: ("{", "}", "{}"),
frozenset: lambda _object: ("frozenset({", "})", "frozenset()"),
list: lambda _object: ("[", "]", "[]"),
UserList: lambda _object: ("[", "]", "[]"),
set: lambda _object: ("{", "}", "set()"),
tuple: lambda _object: ("(", ")", "()"),
MappingProxyType: lambda _object: ("mappingproxy({", "})", "mappingproxy({})"),
}
_CONTAINERS = tuple(_BRACES.keys())
_MAPPING_CONTAINERS = (dict, os._Environ, MappingProxyType, UserDict)
def is_expandable(obj: Any) -> bool:
return (
_safe_isinstance(obj, _CONTAINERS)
or (is_dataclass(obj))
or (hasattr(obj, "__rich_repr__"))
or _is_attr_object(obj)
) and not isclass(obj)
@dataclass
class Node:
key_repr: str = ""
value_repr: str = ""
open_brace: str = ""
close_brace: str = ""
empty: str = ""
last: bool = False
is_tuple: bool = False
is_namedtuple: bool = False
children: Optional[List["Node"]] = None
key_separator: str = ": "
separator: str = ", "
def iter_tokens(self) -> Iterable[str]:
if self.key_repr:
yield self.key_repr
yield self.key_separator
if self.value_repr:
yield self.value_repr
elif self.children is not None:
if self.children:
yield self.open_brace
if self.is_tuple and not self.is_namedtuple and len(self.children) == 1:
yield from self.children[0].iter_tokens()
yield ","
else:
for child in self.children:
yield from child.iter_tokens()
if not child.last:
yield self.separator
yield self.close_brace
else:
yield self.empty
def check_length(self, start_length: int, max_length: int) -> bool:
total_length = start_length
for token in self.iter_tokens():
total_length += cell_len(token)
if total_length > max_length:
return False
return True
def __str__(self) -> str:
repr_text = "".join(self.iter_tokens())
return repr_text
def render(
self, max_width: int = 80, indent_size: int = 4, expand_all: bool = False
) -> str:
lines = [_Line(node=self, is_root=True)]
line_no = 0
while line_no < len(lines):
line = lines[line_no]
if line.expandable and not line.expanded:
if expand_all or not line.check_length(max_width):
lines[line_no : line_no + 1] = line.expand(indent_size)
line_no += 1
repr_str = "\n".join(str(line) for line in lines)
return repr_str
@dataclass
class _Line:
parent: Optional["_Line"] = None
is_root: bool = False
node: Optional[Node] = None
text: str = ""
suffix: str = ""
whitespace: str = ""
expanded: bool = False
last: bool = False
@property
def expandable(self) -> bool:
return bool(self.node is not None and self.node.children)
def check_length(self, max_length: int) -> bool:
start_length = (
len(self.whitespace) + cell_len(self.text) + cell_len(self.suffix)
)
assert self.node is not None
return self.node.check_length(start_length, max_length)
def expand(self, indent_size: int) -> Iterable["_Line"]:
node = self.node
assert node is not None
whitespace = self.whitespace
assert node.children
if node.key_repr:
new_line = yield _Line(
text=f"{node.key_repr}{node.key_separator}{node.open_brace}",
whitespace=whitespace,
)
else:
new_line = yield _Line(text=node.open_brace, whitespace=whitespace)
child_whitespace = self.whitespace + " " * indent_size
tuple_of_one = node.is_tuple and len(node.children) == 1
for last, child in loop_last(node.children):
separator = "," if tuple_of_one else node.separator
line = _Line(
parent=new_line,
node=child,
whitespace=child_whitespace,
suffix=separator,
last=last and not tuple_of_one,
)
yield line
yield _Line(
text=node.close_brace,
whitespace=whitespace,
suffix=self.suffix,
last=self.last,
)
def __str__(self) -> str:
if self.last:
return f"{self.whitespace}{self.text}{self.node or ''}"
else:
return (
f"{self.whitespace}{self.text}{self.node or ''}{self.suffix.rstrip()}"
)
def _is_namedtuple(obj: Any) -> bool:
try:
fields = getattr(obj, "_fields", None)
except Exception:
# Being very defensive - if we cannot get the attr then it's not a namedtuple
return False
return isinstance(obj, tuple) and isinstance(fields, tuple)
def traverse(
_object: Any,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
max_depth: Optional[int] = None,
) -> Node:
def to_repr(obj: Any) -> str:
if (
max_string is not None
and _safe_isinstance(obj, (bytes, str))
and len(obj) > max_string
):
truncated = len(obj) - max_string
obj_repr = f"{obj[:max_string]!r}+{truncated}"
else:
try:
obj_repr = repr(obj)
except Exception as error:
obj_repr = f"<repr-error {str(error)!r}>"
return obj_repr
visited_ids: Set[int] = set()
push_visited = visited_ids.add
pop_visited = visited_ids.remove
def _traverse(obj: Any, root: bool = False, depth: int = 0) -> Node:
obj_id = id(obj)
if obj_id in visited_ids:
# Recursion detected
return Node(value_repr="...")
obj_type = type(obj)
children: List[Node]
reached_max_depth = max_depth is not None and depth >= max_depth
def iter_rich_args(rich_args: Any) -> Iterable[Union[Any, Tuple[str, Any]]]:
for arg in rich_args:
if _safe_isinstance(arg, tuple):
if len(arg) == 3:
key, child, default = arg
if default == child:
continue
yield key, child
elif len(arg) == 2:
key, child = arg
yield key, child
elif len(arg) == 1:
yield arg[0]
else:
yield arg
try:
fake_attributes = hasattr(
obj, "awehoi234_wdfjwljet234_234wdfoijsdfmmnxpi492"
)
except Exception:
fake_attributes = False
rich_repr_result: Optional[RichReprResult] = None
if not fake_attributes:
try:
if hasattr(obj, "__rich_repr__") and not isclass(obj):
rich_repr_result = obj.__rich_repr__()
except Exception:
pass
if rich_repr_result is not None:
push_visited(obj_id)
angular = getattr(obj.__rich_repr__, "angular", False)
args = list(iter_rich_args(rich_repr_result))
class_name = obj.__class__.__name__
if args:
children = []
append = children.append
if reached_max_depth:
if angular:
node = Node(value_repr=f"<{class_name}...>")
else:
node = Node(value_repr=f"{class_name}(...)")
else:
if angular:
node = Node(
open_brace=f"<{class_name} ",
close_brace=">",
children=children,
last=root,
separator=" ",
)
else:
node = Node(
open_brace=f"{class_name}(",
close_brace=")",
children=children,
last=root,
)
for last, arg in loop_last(args):
if _safe_isinstance(arg, tuple):
key, child = arg
child_node = _traverse(child, depth=depth + 1)
child_node.last = last
child_node.key_repr = key
child_node.key_separator = "="
append(child_node)
else:
child_node = _traverse(arg, depth=depth + 1)
child_node.last = last
append(child_node)
else:
node = Node(
value_repr=f"<{class_name}>" if angular else f"{class_name}()",
children=[],
last=root,
)
pop_visited(obj_id)
elif _is_attr_object(obj) and not fake_attributes:
push_visited(obj_id)
children = []
append = children.append
attr_fields = _get_attr_fields(obj)
if attr_fields:
if reached_max_depth:
node = Node(value_repr=f"{obj.__class__.__name__}(...)")
else:
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
def iter_attrs() -> (
Iterable[Tuple[str, Any, Optional[Callable[[Any], str]]]]
):
for attr in attr_fields:
if attr.repr:
try:
value = getattr(obj, attr.name)
except Exception as error:
# Can happen, albeit rarely
yield (attr.name, error, None)
else:
yield (
attr.name,
value,
attr.repr if callable(attr.repr) else None,
)
for last, (name, value, repr_callable) in loop_last(iter_attrs()):
if repr_callable:
child_node = Node(value_repr=str(repr_callable(value)))
else:
child_node = _traverse(value, depth=depth + 1)
child_node.last = last
child_node.key_repr = name
child_node.key_separator = "="
append(child_node)
else:
node = Node(
value_repr=f"{obj.__class__.__name__}()", children=[], last=root
)
pop_visited(obj_id)
elif (
is_dataclass(obj)
and not _safe_isinstance(obj, type)
and not fake_attributes
and _is_dataclass_repr(obj)
):
push_visited(obj_id)
children = []
append = children.append
if reached_max_depth:
node = Node(value_repr=f"{obj.__class__.__name__}(...)")
else:
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
empty=f"{obj.__class__.__name__}()",
)
for last, field in loop_last(
field
for field in fields(obj)
if field.repr and hasattr(obj, field.name)
):
child_node = _traverse(getattr(obj, field.name), depth=depth + 1)
child_node.key_repr = field.name
child_node.last = last
child_node.key_separator = "="
append(child_node)
pop_visited(obj_id)
elif _is_namedtuple(obj) and _has_default_namedtuple_repr(obj):
push_visited(obj_id)
class_name = obj.__class__.__name__
if reached_max_depth:
# If we've reached the max depth, we still show the class name, but not its contents
node = Node(
value_repr=f"{class_name}(...)",
)
else:
children = []
append = children.append
node = Node(
open_brace=f"{class_name}(",
close_brace=")",
children=children,
empty=f"{class_name}()",
)
for last, (key, value) in loop_last(obj._asdict().items()):
child_node = _traverse(value, depth=depth + 1)
child_node.key_repr = key
child_node.last = last
child_node.key_separator = "="
append(child_node)
pop_visited(obj_id)
elif _safe_isinstance(obj, _CONTAINERS):
for container_type in _CONTAINERS:
if _safe_isinstance(obj, container_type):
obj_type = container_type
break
push_visited(obj_id)
open_brace, close_brace, empty = _BRACES[obj_type](obj)
if reached_max_depth:
node = Node(value_repr=f"{open_brace}...{close_brace}")
elif obj_type.__repr__ != type(obj).__repr__:
node = Node(value_repr=to_repr(obj), last=root)
elif obj:
children = []
node = Node(
open_brace=open_brace,
close_brace=close_brace,
children=children,
last=root,
)
append = children.append
num_items = len(obj)
last_item_index = num_items - 1
if _safe_isinstance(obj, _MAPPING_CONTAINERS):
iter_items = iter(obj.items())
if max_length is not None:
iter_items = islice(iter_items, max_length)
for index, (key, child) in enumerate(iter_items):
child_node = _traverse(child, depth=depth + 1)
child_node.key_repr = to_repr(key)
child_node.last = index == last_item_index
append(child_node)
else:
iter_values = iter(obj)
if max_length is not None:
iter_values = islice(iter_values, max_length)
for index, child in enumerate(iter_values):
child_node = _traverse(child, depth=depth + 1)
child_node.last = index == last_item_index
append(child_node)
if max_length is not None and num_items > max_length:
append(Node(value_repr=f"... +{num_items - max_length}", last=True))
else:
node = Node(empty=empty, children=[], last=root)
pop_visited(obj_id)
else:
node = Node(value_repr=to_repr(obj), last=root)
node.is_tuple = type(obj) == tuple
node.is_namedtuple = _is_namedtuple(obj)
return node
node = _traverse(_object, root=True)
return node
def pretty_repr(
_object: Any,
*,
max_width: int = 80,
indent_size: int = 4,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
max_depth: Optional[int] = None,
expand_all: bool = False,
) -> str:
if _safe_isinstance(_object, Node):
node = _object
else:
node = traverse(
_object, max_length=max_length, max_string=max_string, max_depth=max_depth
)
repr_str: str = node.render(
max_width=max_width, indent_size=indent_size, expand_all=expand_all
)
return repr_str
def pprint(
_object: Any,
*,
console: Optional["Console"] = None,
indent_guides: bool = True,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
max_depth: Optional[int] = None,
expand_all: bool = False,
) -> None:
_console = get_console() if console is None else console
_console.print(
Pretty(
_object,
max_length=max_length,
max_string=max_string,
max_depth=max_depth,
indent_guides=indent_guides,
expand_all=expand_all,
overflow="ignore",
),
soft_wrap=True,
)
if __name__ == "__main__": # pragma: no cover
class BrokenRepr:
def __repr__(self) -> str:
1 / 0
return "this will fail"
from typing import NamedTuple
class StockKeepingUnit(NamedTuple):
name: str
description: str
price: float
category: str
reviews: List[str]
d = defaultdict(int)
d["foo"] = 5
data = {
"foo": [
1,
"Hello World!",
100.123,
323.232,
432324.0,
{5, 6, 7, (1, 2, 3, 4), 8},
],
"bar": frozenset({1, 2, 3}),
"defaultdict": defaultdict(
list, {"crumble": ["apple", "rhubarb", "butter", "sugar", "flour"]}
),
"counter": Counter(
[
"apple",
"orange",
"pear",
"kumquat",
"kumquat",
"durian" * 100,
]
),
"atomic": (False, True, None),
"namedtuple": StockKeepingUnit(
"Sparkling British Spring Water",
"Carbonated spring water",
0.9,
"water",
["its amazing!", "its terrible!"],
),
"Broken": BrokenRepr(),
}
data["foo"].append(data) # type: ignore[attr-defined]
from rich import print
print(Pretty(data, indent_guides=True, max_string=20))
class Thing:
def __repr__(self) -> str:
return "Hello\x1b[38;5;239m World!"
print(Pretty(Thing())) | --- +++ @@ -58,14 +58,24 @@
def _is_attr_object(obj: Any) -> bool:
+ """Check if an object was created with attrs module."""
return _has_attrs and _attr_module.has(type(obj))
def _get_attr_fields(obj: Any) -> Sequence["_attr_module.Attribute[Any]"]:
+ """Get fields for an attrs object."""
return _attr_module.fields(type(obj)) if _has_attrs else []
def _is_dataclass_repr(obj: object) -> bool:
+ """Check if an instance of a dataclass contains the default repr.
+
+ Args:
+ obj (object): A dataclass instance.
+
+ Returns:
+ bool: True if the default repr is used, False if there is a custom repr.
+ """
# Digging in to a lot of internals here
# Catching all exceptions in case something is missing on a non CPython implementation
try:
@@ -81,6 +91,14 @@
def _has_default_namedtuple_repr(obj: object) -> bool:
+ """Check if an instance of namedtuple contains the default repr
+
+ Args:
+ obj (object): A namedtuple
+
+ Returns:
+ bool: True if the default repr is used, False if there's a custom repr.
+ """
obj_file = None
try:
obj_file = inspect.getfile(obj.__repr__)
@@ -143,6 +161,7 @@ def _safe_isinstance(
obj: object, class_or_tuple: Union[type, Tuple[type, ...]]
) -> bool:
+ """isinstance can fail in rare cases, for example types with no __class__"""
try:
return isinstance(obj, class_or_tuple)
except Exception:
@@ -159,12 +178,27 @@ max_depth: Optional[int] = None,
expand_all: bool = False,
) -> None:
+ """Install automatic pretty printing in the Python REPL.
+
+ Args:
+ console (Console, optional): Console instance or ``None`` to use global console. Defaults to None.
+ overflow (Optional[OverflowMethod], optional): Overflow method. Defaults to "ignore".
+ crop (Optional[bool], optional): Enable cropping of long lines. Defaults to False.
+ indent_guides (bool, optional): Enable indentation guides. Defaults to False.
+ max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+ Defaults to None.
+ max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
+ max_depth (int, optional): Maximum depth of nested data structures, or None for no maximum. Defaults to None.
+ expand_all (bool, optional): Expand all containers. Defaults to False.
+ """
from rich import get_console
console = console or get_console()
assert console is not None
def display_hook(value: Any) -> None:
+ """Replacement sys.displayhook which prettifies objects with Rich."""
if value is not None:
assert console is not None
builtins._ = None # type: ignore[attr-defined]
@@ -217,6 +251,24 @@
class Pretty(JupyterMixin):
+ """A rich renderable that pretty prints an object.
+
+ Args:
+ _object (Any): An object to pretty print.
+ highlighter (HighlighterType, optional): Highlighter object to apply to result, or None for ReprHighlighter. Defaults to None.
+ indent_size (int, optional): Number of spaces in indent. Defaults to 4.
+ justify (JustifyMethod, optional): Justify method, or None for default. Defaults to None.
+ overflow (OverflowMethod, optional): Overflow method, or None for default. Defaults to None.
+ no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to False.
+ indent_guides (bool, optional): Enable indentation guides. Defaults to False.
+ max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+ Defaults to None.
+ max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
+ max_depth (int, optional): Maximum depth of nested data structures, or None for no maximum. Defaults to None.
+ expand_all (bool, optional): Expand all containers. Defaults to False.
+ margin (int, optional): Subtract a margin from the width to force containers to expand earlier. Defaults to 0.
+ insert_line (bool, optional): Insert a new line if the output has multiple new lines. Defaults to False.
+ """
def __init__(
self,
@@ -344,6 +396,7 @@
def is_expandable(obj: Any) -> bool:
+ """Check if an object may be expanded by pretty print."""
return (
_safe_isinstance(obj, _CONTAINERS)
or (is_dataclass(obj))
@@ -354,6 +407,7 @@
@dataclass
class Node:
+ """A node in a repr tree. May be atomic or a container."""
key_repr: str = ""
value_repr: str = ""
@@ -368,6 +422,7 @@ separator: str = ", "
def iter_tokens(self) -> Iterable[str]:
+ """Generate tokens for this node."""
if self.key_repr:
yield self.key_repr
yield self.key_separator
@@ -389,6 +444,15 @@ yield self.empty
def check_length(self, start_length: int, max_length: int) -> bool:
+ """Check the length fits within a limit.
+
+ Args:
+ start_length (int): Starting length of the line (indent, prefix, suffix).
+ max_length (int): Maximum length.
+
+ Returns:
+ bool: True if the node can be rendered within max length, otherwise False.
+ """
total_length = start_length
for token in self.iter_tokens():
total_length += cell_len(token)
@@ -403,6 +467,16 @@ def render(
self, max_width: int = 80, indent_size: int = 4, expand_all: bool = False
) -> str:
+ """Render the node to a pretty repr.
+
+ Args:
+ max_width (int, optional): Maximum width of the repr. Defaults to 80.
+ indent_size (int, optional): Size of indents. Defaults to 4.
+ expand_all (bool, optional): Expand all levels. Defaults to False.
+
+ Returns:
+ str: A repr string of the original object.
+ """
lines = [_Line(node=self, is_root=True)]
line_no = 0
while line_no < len(lines):
@@ -418,6 +492,7 @@
@dataclass
class _Line:
+ """A line in repr output."""
parent: Optional["_Line"] = None
is_root: bool = False
@@ -430,9 +505,11 @@
@property
def expandable(self) -> bool:
+ """Check if the line may be expanded."""
return bool(self.node is not None and self.node.children)
def check_length(self, max_length: int) -> bool:
+ """Check this line fits within a given number of cells."""
start_length = (
len(self.whitespace) + cell_len(self.text) + cell_len(self.suffix)
)
@@ -440,6 +517,7 @@ return self.node.check_length(start_length, max_length)
def expand(self, indent_size: int) -> Iterable["_Line"]:
+ """Expand this line by adding children on their own line."""
node = self.node
assert node is not None
whitespace = self.whitespace
@@ -481,6 +559,16 @@
def _is_namedtuple(obj: Any) -> bool:
+ """Checks if an object is most likely a namedtuple. It is possible
+ to craft an object that passes this check and isn't a namedtuple, but
+ there is only a minuscule chance of this happening unintentionally.
+
+ Args:
+ obj (Any): The object to test
+
+ Returns:
+ bool: True if the object is a namedtuple. False otherwise.
+ """
try:
fields = getattr(obj, "_fields", None)
except Exception:
@@ -495,8 +583,23 @@ max_string: Optional[int] = None,
max_depth: Optional[int] = None,
) -> Node:
+ """Traverse object and generate a tree.
+
+ Args:
+ _object (Any): Object to be traversed.
+ max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+ Defaults to None.
+ max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
+ Defaults to None.
+ max_depth (int, optional): Maximum depth of data structures, or None for no maximum.
+ Defaults to None.
+
+ Returns:
+ Node: The root of a tree structure which can be used to render a pretty repr.
+ """
def to_repr(obj: Any) -> str:
+ """Get repr string for an object, but catch errors."""
if (
max_string is not None
and _safe_isinstance(obj, (bytes, str))
@@ -516,6 +619,7 @@ pop_visited = visited_ids.remove
def _traverse(obj: Any, root: bool = False, depth: int = 0) -> Node:
+ """Walk the object depth first."""
obj_id = id(obj)
if obj_id in visited_ids:
@@ -627,6 +731,7 @@ def iter_attrs() -> (
Iterable[Tuple[str, Any, Optional[Callable[[Any], str]]]]
):
+ """Iterate over attr fields and values."""
for attr in attr_fields:
if attr.repr:
try:
@@ -780,6 +885,23 @@ max_depth: Optional[int] = None,
expand_all: bool = False,
) -> str:
+ """Prettify repr string by expanding on to new lines to fit within a given width.
+
+ Args:
+ _object (Any): Object to repr.
+ max_width (int, optional): Desired maximum width of repr string. Defaults to 80.
+ indent_size (int, optional): Number of spaces to indent. Defaults to 4.
+ max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+ Defaults to None.
+ max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
+ Defaults to None.
+ max_depth (int, optional): Maximum depth of nested data structures, or None for no maximum.
+ Defaults to None.
+ expand_all (bool, optional): Expand all containers regardless of available width. Defaults to False.
+
+ Returns:
+ str: A possibly multi-line representation of the object.
+ """
if _safe_isinstance(_object, Node):
node = _object
@@ -803,6 +925,18 @@ max_depth: Optional[int] = None,
expand_all: bool = False,
) -> None:
+ """A convenience function for pretty printing.
+
+ Args:
+ _object (Any): Object to pretty print.
+ console (Console, optional): Console instance, or None to use default. Defaults to None.
+ max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+ Defaults to None.
+ max_string (int, optional): Maximum length of strings before truncating, or None to disable. Defaults to None.
+ max_depth (int, optional): Maximum depth for nested data structures, or None for unlimited depth. Defaults to None.
+ indent_guides (bool, optional): Enable indentation guides. Defaults to True.
+ expand_all (bool, optional): Expand all containers. Defaults to False.
+ """
_console = get_console() if console is None else console
_console.print(
Pretty(
@@ -879,4 +1013,4 @@ def __repr__(self) -> str:
return "Hello\x1b[38;5;239m World!"
- print(Pretty(Thing()))
+ print(Pretty(Thing()))
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/pretty.py |
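The functions documented in the diff above are part of rich's public pretty API; a short usage sketch, assuming rich is installed:

from rich.pretty import pretty_repr, pprint

data = {"fruits": ["apple", "orange", "kumquat"], "total": 3}
print(pretty_repr(data, max_width=30))  # expands onto new lines to fit 30 cells
pprint(data, max_length=2)              # abbreviates containers beyond two items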
Add docstrings for utility scripts | from typing import Any, cast, Set, TYPE_CHECKING
from inspect import isclass
if TYPE_CHECKING:
from rich.console import RenderableType
_GIBBERISH = """aihwerij235234ljsdnp34ksodfipwoe234234jlskjdf"""
def is_renderable(check_object: Any) -> bool:
return (
isinstance(check_object, str)
or hasattr(check_object, "__rich__")
or hasattr(check_object, "__rich_console__")
)
def rich_cast(renderable: object) -> "RenderableType":
from rich.console import RenderableType
rich_visited_set: Set[type] = set() # Prevent potential infinite loop
while hasattr(renderable, "__rich__") and not isclass(renderable):
# Detect object which claim to have all the attributes
if hasattr(renderable, _GIBBERISH):
return repr(renderable)
cast_method = getattr(renderable, "__rich__")
renderable = cast_method()
renderable_type = type(renderable)
if renderable_type in rich_visited_set:
break
rich_visited_set.add(renderable_type)
return cast(RenderableType, renderable) | --- +++ @@ -8,6 +8,7 @@
def is_renderable(check_object: Any) -> bool:
+ """Check if an object may be rendered by Rich."""
return (
isinstance(check_object, str)
or hasattr(check_object, "__rich__")
@@ -16,6 +17,14 @@
def rich_cast(renderable: object) -> "RenderableType":
+ """Cast an object to a renderable by calling __rich__ if present.
+
+ Args:
+ renderable (object): A potentially renderable object
+
+ Returns:
+ object: The result of recursively calling __rich__.
+ """
from rich.console import RenderableType
rich_visited_set: Set[type] = set() # Prevent potential infinite loop
@@ -30,4 +39,4 @@ break
rich_visited_set.add(renderable_type)
- return cast(RenderableType, renderable)
+ return cast(RenderableType, renderable)
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/protocol.py |
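A short sketch of the __rich__ protocol described above, assuming rich is installed; Weather is a hypothetical class:

from rich import print
from rich.protocol import is_renderable, rich_cast

class Weather:
    def __rich__(self) -> str:
        return "[bold blue]Rainy[/bold blue]"

weather = Weather()
print(is_renderable(weather))  # True: the object exposes __rich__
print(rich_cast(weather))      # resolves __rich__ to the markup string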
Add docstrings to improve collaboration | import math
from functools import lru_cache
from time import monotonic
from typing import Iterable, List, Optional
from .color import Color, blend_rgb
from .color_triplet import ColorTriplet
from .console import Console, ConsoleOptions, RenderResult
from .jupyter import JupyterMixin
from .measure import Measurement
from .segment import Segment
from .style import Style, StyleType
# Number of characters before 'pulse' animation repeats
PULSE_SIZE = 20
class ProgressBar(JupyterMixin):
def __init__(
self,
total: Optional[float] = 100.0,
completed: float = 0,
width: Optional[int] = None,
pulse: bool = False,
style: StyleType = "bar.back",
complete_style: StyleType = "bar.complete",
finished_style: StyleType = "bar.finished",
pulse_style: StyleType = "bar.pulse",
animation_time: Optional[float] = None,
):
self.total = total
self.completed = completed
self.width = width
self.pulse = pulse
self.style = style
self.complete_style = complete_style
self.finished_style = finished_style
self.pulse_style = pulse_style
self.animation_time = animation_time
self._pulse_segments: Optional[List[Segment]] = None
def __repr__(self) -> str:
return f"<Bar {self.completed!r} of {self.total!r}>"
@property
def percentage_completed(self) -> Optional[float]:
if self.total is None:
return None
completed = (self.completed / self.total) * 100.0
completed = min(100, max(0.0, completed))
return completed
@lru_cache(maxsize=16)
def _get_pulse_segments(
self,
fore_style: Style,
back_style: Style,
color_system: str,
no_color: bool,
ascii: bool = False,
) -> List[Segment]:
bar = "-" if ascii else "━"
segments: List[Segment] = []
if color_system not in ("standard", "eight_bit", "truecolor") or no_color:
segments += [Segment(bar, fore_style)] * (PULSE_SIZE // 2)
segments += [Segment(" " if no_color else bar, back_style)] * (
PULSE_SIZE - (PULSE_SIZE // 2)
)
return segments
append = segments.append
fore_color = (
fore_style.color.get_truecolor()
if fore_style.color
else ColorTriplet(255, 0, 255)
)
back_color = (
back_style.color.get_truecolor()
if back_style.color
else ColorTriplet(0, 0, 0)
)
cos = math.cos
pi = math.pi
_Segment = Segment
_Style = Style
from_triplet = Color.from_triplet
for index in range(PULSE_SIZE):
position = index / PULSE_SIZE
fade = 0.5 + cos(position * pi * 2) / 2.0
color = blend_rgb(fore_color, back_color, cross_fade=fade)
append(_Segment(bar, _Style(color=from_triplet(color))))
return segments
def update(self, completed: float, total: Optional[float] = None) -> None:
self.completed = completed
self.total = total if total is not None else self.total
def _render_pulse(
self, console: Console, width: int, ascii: bool = False
) -> Iterable[Segment]:
fore_style = console.get_style(self.pulse_style, default="white")
back_style = console.get_style(self.style, default="black")
pulse_segments = self._get_pulse_segments(
fore_style, back_style, console.color_system, console.no_color, ascii=ascii
)
segment_count = len(pulse_segments)
current_time = (
monotonic() if self.animation_time is None else self.animation_time
)
segments = pulse_segments * (int(width / segment_count) + 2)
offset = int(-current_time * 15) % segment_count
segments = segments[offset : offset + width]
yield from segments
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
width = min(self.width or options.max_width, options.max_width)
ascii = options.legacy_windows or options.ascii_only
should_pulse = self.pulse or self.total is None
if should_pulse:
yield from self._render_pulse(console, width, ascii=ascii)
return
completed: Optional[float] = (
min(self.total, max(0, self.completed)) if self.total is not None else None
)
bar = "-" if ascii else "━"
half_bar_right = " " if ascii else "╸"
half_bar_left = " " if ascii else "╺"
complete_halves = (
int(width * 2 * completed / self.total)
if self.total and completed is not None
else width * 2
)
bar_count = complete_halves // 2
half_bar_count = complete_halves % 2
style = console.get_style(self.style)
is_finished = self.total is None or self.completed >= self.total
complete_style = console.get_style(
self.finished_style if is_finished else self.complete_style
)
_Segment = Segment
if bar_count:
yield _Segment(bar * bar_count, complete_style)
if half_bar_count:
yield _Segment(half_bar_right * half_bar_count, complete_style)
if not console.no_color:
remaining_bars = width - bar_count - half_bar_count
if remaining_bars and console.color_system is not None:
if not half_bar_count and bar_count:
yield _Segment(half_bar_left, style)
remaining_bars -= 1
if remaining_bars:
yield _Segment(bar * remaining_bars, style)
def __rich_measure__(
self, console: Console, options: ConsoleOptions
) -> Measurement:
return (
Measurement(self.width, self.width)
if self.width is not None
else Measurement(4, options.max_width)
)
if __name__ == "__main__": # pragma: no cover
console = Console()
bar = ProgressBar(width=50, total=100)
import time
console.show_cursor(False)
for n in range(0, 101, 1):
bar.update(n)
console.print(bar)
console.file.write("\r")
time.sleep(0.05)
console.show_cursor(True)
console.print() | --- +++ @@ -16,6 +16,19 @@
class ProgressBar(JupyterMixin):
+ """Renders a (progress) bar. Used by rich.progress.
+
+ Args:
+ total (float, optional): Number of steps in the bar. Defaults to 100. Set to None to render a pulsing animation.
+ completed (float, optional): Number of steps completed. Defaults to 0.
+ width (int, optional): Width of the bar, or ``None`` for maximum width. Defaults to None.
+ pulse (bool, optional): Enable pulse effect. Defaults to False. Will pulse if a None total was passed.
+ style (StyleType, optional): Style for the bar background. Defaults to "bar.back".
+ complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete".
+ finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.finished".
+ pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse".
+ animation_time (Optional[float], optional): Time in seconds to use for animation, or None to use system time.
+ """
def __init__(
self,
@@ -46,6 +59,7 @@
@property
def percentage_completed(self) -> Optional[float]:
+ """Calculate percentage complete."""
if self.total is None:
return None
completed = (self.completed / self.total) * 100.0
@@ -61,6 +75,11 @@ no_color: bool,
ascii: bool = False,
) -> List[Segment]:
+ """Get a list of segments to render a pulse animation.
+
+ Returns:
+ List[Segment]: A list of segments, one segment per character.
+ """
bar = "-" if ascii else "━"
segments: List[Segment] = []
if color_system not in ("standard", "eight_bit", "truecolor") or no_color:
@@ -95,12 +114,30 @@ return segments
def update(self, completed: float, total: Optional[float] = None) -> None:
+ """Update progress with new values.
+
+ Args:
+ completed (float): Number of steps completed.
+ total (float, optional): Total number of steps, or ``None`` to not change. Defaults to None.
+ """
self.completed = completed
self.total = total if total is not None else self.total
def _render_pulse(
self, console: Console, width: int, ascii: bool = False
) -> Iterable[Segment]:
+ """Renders the pulse animation.
+
+ Args:
+ console (Console): Console instance.
+ width (int): Width in characters of pulse animation.
+
+ Yields:
+ Iterator[Segment]: Segments to render the pulse animation.
+ """
fore_style = console.get_style(self.pulse_style, default="white")
back_style = console.get_style(self.style, default="black")
@@ -183,4 +220,4 @@ console.file.write("\r")
time.sleep(0.05)
console.show_cursor(True)
- console.print()
+ console.print()
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/progress_bar.py |
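A short usage sketch for the ProgressBar renderable documented above, assuming rich is installed:

from rich.console import Console
from rich.progress_bar import ProgressBar

console = Console()
bar = ProgressBar(total=100, width=40)
bar.update(65)  # completed steps; total is left unchanged
console.print(bar)

pulse = ProgressBar(total=None, width=40)  # a None total renders the pulse animation
console.print(pulse)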
Add detailed docstrings explaining each function | import inspect
from functools import partial
from typing import (
Any,
Callable,
Iterable,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
overload,
)
T = TypeVar("T")
Result = Iterable[Union[Any, Tuple[Any], Tuple[str, Any], Tuple[str, Any, Any]]]
RichReprResult = Result
class ReprError(Exception):
@overload
def auto(cls: Optional[Type[T]]) -> Type[T]:
...
@overload
def auto(*, angular: bool = False) -> Callable[[Type[T]], Type[T]]:
...
def auto(
cls: Optional[Type[T]] = None, *, angular: Optional[bool] = None
) -> Union[Type[T], Callable[[Type[T]], Type[T]]]:
def do_replace(cls: Type[T], angular: Optional[bool] = None) -> Type[T]:
def auto_repr(self: T) -> str:
repr_str: List[str] = []
append = repr_str.append
angular: bool = getattr(self.__rich_repr__, "angular", False) # type: ignore[attr-defined]
for arg in self.__rich_repr__(): # type: ignore[attr-defined]
if isinstance(arg, tuple):
if len(arg) == 1:
append(repr(arg[0]))
else:
key, value, *default = arg
if key is None:
append(repr(value))
else:
if default and default[0] == value:
continue
append(f"{key}={value!r}")
else:
append(repr(arg))
if angular:
return f"<{self.__class__.__name__} {' '.join(repr_str)}>"
else:
return f"{self.__class__.__name__}({', '.join(repr_str)})"
def auto_rich_repr(self: Type[T]) -> Result:
try:
signature = inspect.signature(self.__init__)
for name, param in signature.parameters.items():
if param.kind == param.POSITIONAL_ONLY:
yield getattr(self, name)
elif param.kind in (
param.POSITIONAL_OR_KEYWORD,
param.KEYWORD_ONLY,
):
if param.default is param.empty:
yield getattr(self, param.name)
else:
yield param.name, getattr(self, param.name), param.default
except Exception as error:
raise ReprError(
f"Failed to auto generate __rich_repr__; {error}"
) from None
if not hasattr(cls, "__rich_repr__"):
auto_rich_repr.__doc__ = "Build a rich repr"
cls.__rich_repr__ = auto_rich_repr # type: ignore[attr-defined]
auto_repr.__doc__ = "Return repr(self)"
cls.__repr__ = auto_repr # type: ignore[assignment]
if angular is not None:
cls.__rich_repr__.angular = angular # type: ignore[attr-defined]
return cls
if cls is None:
return partial(do_replace, angular=angular)
else:
return do_replace(cls, angular=angular)
@overload
def rich_repr(cls: Optional[Type[T]]) -> Type[T]:
...
@overload
def rich_repr(*, angular: bool = False) -> Callable[[Type[T]], Type[T]]:
...
def rich_repr(
cls: Optional[Type[T]] = None, *, angular: bool = False
) -> Union[Type[T], Callable[[Type[T]], Type[T]]]:
if cls is None:
return auto(angular=angular)
else:
return auto(cls)
if __name__ == "__main__":
@auto
class Foo:
def __rich_repr__(self) -> Result:
yield "foo"
yield "bar", {"shopping": ["eggs", "ham", "pineapple"]}
yield "buy", "hand sanitizer"
foo = Foo()
from rich.console import Console
console = Console()
console.rule("Standard repr")
console.print(foo)
console.print(foo, width=60)
console.print(foo, width=30)
console.rule("Angular repr")
Foo.__rich_repr__.angular = True # type: ignore[attr-defined]
console.print(foo)
console.print(foo, width=60)
console.print(foo, width=30) | --- +++ @@ -21,6 +21,7 @@
class ReprError(Exception):
+ """An error occurred when attempting to build a repr."""
@overload
@@ -36,9 +37,11 @@ def auto(
cls: Optional[Type[T]] = None, *, angular: Optional[bool] = None
) -> Union[Type[T], Callable[[Type[T]], Type[T]]]:
+ """Class decorator to create __repr__ from __rich_repr__"""
def do_replace(cls: Type[T], angular: Optional[bool] = None) -> Type[T]:
def auto_repr(self: T) -> str:
+ """Create repr string from __rich_repr__"""
repr_str: List[str] = []
append = repr_str.append
@@ -63,6 +66,7 @@ return f"{self.__class__.__name__}({', '.join(repr_str)})"
def auto_rich_repr(self: Type[T]) -> Result:
+ """Auto generate __rich_rep__ from signature of __init__"""
try:
signature = inspect.signature(self.__init__)
for name, param in signature.parameters.items():
@@ -142,4 +146,4 @@ console.print(foo)
console.print(foo, width=60)
- console.print(foo, width=30)
+ console.print(foo, width=30)
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/repr.py |
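A short sketch of the auto decorator documented above, assuming rich is installed; Point is a hypothetical class:

import rich.repr

@rich.repr.auto
class Point:
    def __init__(self, x: int, y: int, label: str = "origin") -> None:
        self.x = x
        self.y = y
        self.label = label

print(repr(Point(3, 4)))  # Point(3, 4) -- label equals its default, so it is omitted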
Improve documentation using docstrings | from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Sequence
if TYPE_CHECKING:
from rich.console import ConsoleRenderable
from . import get_console
from .segment import Segment
from .terminal_theme import DEFAULT_TERMINAL_THEME
if TYPE_CHECKING:
from rich.console import ConsoleRenderable
JUPYTER_HTML_FORMAT = """\
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">{code}</pre>
"""
class JupyterRenderable:
def __init__(self, html: str, text: str) -> None:
self.html = html
self.text = text
def _repr_mimebundle_(
self, include: Sequence[str], exclude: Sequence[str], **kwargs: Any
) -> Dict[str, str]:
data = {"text/plain": self.text, "text/html": self.html}
if include:
data = {k: v for (k, v) in data.items() if k in include}
if exclude:
data = {k: v for (k, v) in data.items() if k not in exclude}
return data
class JupyterMixin:
__slots__ = ()
def _repr_mimebundle_(
self: "ConsoleRenderable",
include: Sequence[str],
exclude: Sequence[str],
**kwargs: Any,
) -> Dict[str, str]:
console = get_console()
segments = list(console.render(self, console.options))
html = _render_segments(segments)
text = console._render_buffer(segments)
data = {"text/plain": text, "text/html": html}
if include:
data = {k: v for (k, v) in data.items() if k in include}
if exclude:
data = {k: v for (k, v) in data.items() if k not in exclude}
return data
def _render_segments(segments: Iterable[Segment]) -> str:
def escape(text: str) -> str:
return text.replace("&", "&").replace("<", "<").replace(">", ">")
fragments: List[str] = []
append_fragment = fragments.append
theme = DEFAULT_TERMINAL_THEME
for text, style, control in Segment.simplify(segments):
if control:
continue
text = escape(text)
if style:
rule = style.get_html_style(theme)
text = f'<span style="{rule}">{text}</span>' if rule else text
if style.link:
text = f'<a href="{style.link}" target="_blank">{text}</a>'
append_fragment(text)
code = "".join(fragments)
html = JUPYTER_HTML_FORMAT.format(code=code)
return html
def display(segments: Iterable[Segment], text: str) -> None:
html = _render_segments(segments)
jupyter_renderable = JupyterRenderable(html, text)
try:
from IPython.display import display as ipython_display
ipython_display(jupyter_renderable)
except ModuleNotFoundError:
# Handle the case where the Console has force_jupyter=True,
# but IPython is not installed.
pass
def print(*args: Any, **kwargs: Any) -> None:
console = get_console()
return console.print(*args, **kwargs) | --- +++ @@ -16,6 +16,7 @@
class JupyterRenderable:
+ """A shim to write html to Jupyter notebook."""
def __init__(self, html: str, text: str) -> None:
self.html = html
@@ -33,6 +34,7 @@
class JupyterMixin:
+ """Add to an Rich renderable to make it render in Jupyter notebook."""
__slots__ = ()
@@ -56,6 +58,7 @@
def _render_segments(segments: Iterable[Segment]) -> str:
def escape(text: str) -> str:
+ """Escape html."""
return text.replace("&", "&").replace("<", "<").replace(">", ">")
fragments: List[str] = []
@@ -79,6 +82,7 @@
def display(segments: Iterable[Segment], text: str) -> None:
+ """Render segments to Jupyter."""
html = _render_segments(segments)
jupyter_renderable = JupyterRenderable(html, text)
try:
@@ -92,5 +96,6 @@
def print(*args: Any, **kwargs: Any) -> None:
+ """Proxy for Console print."""
console = get_console()
- return console.print(*args, **kwargs)
+ return console.print(*args, **kwargs)
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/jupyter.py |
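A short sketch of the mimebundle filtering documented above, assuming rich is installed:

from rich.jupyter import JupyterRenderable

renderable = JupyterRenderable(html="<pre>hi</pre>", text="hi")
bundle = renderable._repr_mimebundle_(include=["text/html"], exclude=[])
print(bundle)  # {'text/html': '<pre>hi</pre>'}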
Add missing documentation to my Python functions | from abc import ABC, abstractmethod
from typing import Any
class Pager(ABC):
@abstractmethod
def show(self, content: str) -> None:
class SystemPager(Pager):
def _pager(self, content: str) -> Any: # pragma: no cover
return __import__("pydoc").pager(content)
def show(self, content: str) -> None:
self._pager(content)
if __name__ == "__main__": # pragma: no cover
from .__main__ import make_test_card
from .console import Console
console = Console()
with console.pager(styles=True):
console.print(make_test_card()) | --- +++ @@ -3,17 +3,25 @@
class Pager(ABC):
+ """Base class for a pager."""
@abstractmethod
def show(self, content: str) -> None:
+ """Show content in pager.
+
+ Args:
+ content (str): Content to be displayed.
+ """
class SystemPager(Pager):
+ """Uses the pager installed on the system."""
def _pager(self, content: str) -> Any: # pragma: no cover
return __import__("pydoc").pager(content)
def show(self, content: str) -> None:
+ """Use the same pager used by pydoc."""
self._pager(content)
@@ -23,4 +31,4 @@
console = Console()
with console.pager(styles=True):
- console.print(make_test_card())
+ console.print(make_test_card())
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/pager.py |
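A short usage sketch for the pager documented above, assuming rich is installed; output is piped through the system pager via pydoc:

from rich.console import Console

console = Console()
with console.pager(styles=False):  # SystemPager shells out to pydoc's pager
    for n in range(100):
        console.print(f"line {n}")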
Generate docstrings with examples | from enum import IntEnum
from functools import lru_cache
from itertools import filterfalse
from logging import getLogger
from operator import attrgetter
from typing import (
TYPE_CHECKING,
Dict,
Iterable,
List,
NamedTuple,
Optional,
Sequence,
Tuple,
Type,
Union,
)
from .cells import (
_is_single_cell_widths,
cached_cell_len,
cell_len,
get_character_cell_size,
set_cell_size,
)
from .repr import Result, rich_repr
from .style import Style
if TYPE_CHECKING:
from .console import Console, ConsoleOptions, RenderResult
log = getLogger("rich")
class ControlType(IntEnum):
BELL = 1
CARRIAGE_RETURN = 2
HOME = 3
CLEAR = 4
SHOW_CURSOR = 5
HIDE_CURSOR = 6
ENABLE_ALT_SCREEN = 7
DISABLE_ALT_SCREEN = 8
CURSOR_UP = 9
CURSOR_DOWN = 10
CURSOR_FORWARD = 11
CURSOR_BACKWARD = 12
CURSOR_MOVE_TO_COLUMN = 13
CURSOR_MOVE_TO = 14
ERASE_IN_LINE = 15
SET_WINDOW_TITLE = 16
ControlCode = Union[
Tuple[ControlType],
Tuple[ControlType, Union[int, str]],
Tuple[ControlType, int, int],
]
@rich_repr()
class Segment(NamedTuple):
text: str
style: Optional[Style] = None
control: Optional[Sequence[ControlCode]] = None
@property
def cell_length(self) -> int:
text, _style, control = self
return 0 if control else cell_len(text)
def __rich_repr__(self) -> Result:
yield self.text
if self.control is None:
if self.style is not None:
yield self.style
else:
yield self.style
yield self.control
def __bool__(self) -> bool:
return bool(self.text)
@property
def is_control(self) -> bool:
return self.control is not None
@classmethod
@lru_cache(1024 * 16)
def _split_cells(cls, segment: "Segment", cut: int) -> Tuple["Segment", "Segment"]:
text, style, control = segment
_Segment = Segment
cell_length = segment.cell_length
if cut >= cell_length:
return segment, _Segment("", style, control)
cell_size = get_character_cell_size
pos = int((cut / cell_length) * len(text))
while True:
before = text[:pos]
cell_pos = cell_len(before)
out_by = cell_pos - cut
if not out_by:
return (
_Segment(before, style, control),
_Segment(text[pos:], style, control),
)
if out_by == -1 and cell_size(text[pos]) == 2:
return (
_Segment(text[:pos] + " ", style, control),
_Segment(" " + text[pos + 1 :], style, control),
)
if out_by == +1 and cell_size(text[pos - 1]) == 2:
return (
_Segment(text[: pos - 1] + " ", style, control),
_Segment(" " + text[pos:], style, control),
)
if cell_pos < cut:
pos += 1
else:
pos -= 1
def split_cells(self, cut: int) -> Tuple["Segment", "Segment"]:
text, style, control = self
assert cut >= 0
if _is_single_cell_widths(text):
# Fast path with all 1 cell characters
if cut >= len(text):
return self, Segment("", style, control)
return (
Segment(text[:cut], style, control),
Segment(text[cut:], style, control),
)
return self._split_cells(self, cut)
@classmethod
def line(cls) -> "Segment":
return cls("\n")
@classmethod
def apply_style(
cls,
segments: Iterable["Segment"],
style: Optional[Style] = None,
post_style: Optional[Style] = None,
) -> Iterable["Segment"]:
result_segments = segments
if style:
apply = style.__add__
result_segments = (
cls(text, None if control else apply(_style), control)
for text, _style, control in result_segments
)
if post_style:
result_segments = (
cls(
text,
(
None
if control
else (_style + post_style if _style else post_style)
),
control,
)
for text, _style, control in result_segments
)
return result_segments
@classmethod
def filter_control(
cls, segments: Iterable["Segment"], is_control: bool = False
) -> Iterable["Segment"]:
if is_control:
return filter(attrgetter("control"), segments)
else:
return filterfalse(attrgetter("control"), segments)
@classmethod
def split_lines(cls, segments: Iterable["Segment"]) -> Iterable[List["Segment"]]:
line: List[Segment] = []
append = line.append
for segment in segments:
if "\n" in segment.text and not segment.control:
text, style, _ = segment
while text:
_text, new_line, text = text.partition("\n")
if _text:
append(cls(_text, style))
if new_line:
yield line
line = []
append = line.append
else:
append(segment)
if line:
yield line
@classmethod
def split_lines_terminator(
cls, segments: Iterable["Segment"]
) -> Iterable[Tuple[List["Segment"], bool]]:
line: List[Segment] = []
append = line.append
for segment in segments:
if "\n" in segment.text and not segment.control:
text, style, _ = segment
while text:
_text, new_line, text = text.partition("\n")
if _text:
append(cls(_text, style))
if new_line:
yield (line, True)
line = []
append = line.append
else:
append(segment)
if line:
yield (line, False)
@classmethod
def split_and_crop_lines(
cls,
segments: Iterable["Segment"],
length: int,
style: Optional[Style] = None,
pad: bool = True,
include_new_lines: bool = True,
) -> Iterable[List["Segment"]]:
line: List[Segment] = []
append = line.append
adjust_line_length = cls.adjust_line_length
new_line_segment = cls("\n")
for segment in segments:
if "\n" in segment.text and not segment.control:
text, segment_style, _ = segment
while text:
_text, new_line, text = text.partition("\n")
if _text:
append(cls(_text, segment_style))
if new_line:
cropped_line = adjust_line_length(
line, length, style=style, pad=pad
)
if include_new_lines:
cropped_line.append(new_line_segment)
yield cropped_line
line.clear()
else:
append(segment)
if line:
yield adjust_line_length(line, length, style=style, pad=pad)
@classmethod
def adjust_line_length(
cls,
line: List["Segment"],
length: int,
style: Optional[Style] = None,
pad: bool = True,
) -> List["Segment"]:
line_length = sum(segment.cell_length for segment in line)
new_line: List[Segment]
if line_length < length:
if pad:
new_line = line + [cls(" " * (length - line_length), style)]
else:
new_line = line[:]
elif line_length > length:
new_line = []
append = new_line.append
line_length = 0
for segment in line:
segment_length = segment.cell_length
if line_length + segment_length < length or segment.control:
append(segment)
line_length += segment_length
else:
text, segment_style, _ = segment
text = set_cell_size(text, length - line_length)
append(cls(text, segment_style))
break
else:
new_line = line[:]
return new_line
@classmethod
def get_line_length(cls, line: List["Segment"]) -> int:
_cell_len = cell_len
return sum(_cell_len(text) for text, style, control in line if not control)
@classmethod
def get_shape(cls, lines: List[List["Segment"]]) -> Tuple[int, int]:
get_line_length = cls.get_line_length
max_width = max(get_line_length(line) for line in lines) if lines else 0
return (max_width, len(lines))
@classmethod
def set_shape(
cls,
lines: List[List["Segment"]],
width: int,
height: Optional[int] = None,
style: Optional[Style] = None,
new_lines: bool = False,
) -> List[List["Segment"]]:
_height = height or len(lines)
blank = (
[cls(" " * width + "\n", style)] if new_lines else [cls(" " * width, style)]
)
adjust_line_length = cls.adjust_line_length
shaped_lines = lines[:_height]
shaped_lines[:] = [
adjust_line_length(line, width, style=style) for line in lines
]
if len(shaped_lines) < _height:
shaped_lines.extend([blank] * (_height - len(shaped_lines)))
return shaped_lines
@classmethod
def align_top(
cls: Type["Segment"],
lines: List[List["Segment"]],
width: int,
height: int,
style: Style,
new_lines: bool = False,
) -> List[List["Segment"]]:
extra_lines = height - len(lines)
if not extra_lines:
return lines[:]
lines = lines[:height]
blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style)
lines = lines + [[blank]] * extra_lines
return lines
@classmethod
def align_bottom(
cls: Type["Segment"],
lines: List[List["Segment"]],
width: int,
height: int,
style: Style,
new_lines: bool = False,
) -> List[List["Segment"]]:
extra_lines = height - len(lines)
if not extra_lines:
return lines[:]
lines = lines[:height]
blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style)
lines = [[blank]] * extra_lines + lines
return lines
@classmethod
def align_middle(
cls: Type["Segment"],
lines: List[List["Segment"]],
width: int,
height: int,
style: Style,
new_lines: bool = False,
) -> List[List["Segment"]]:
extra_lines = height - len(lines)
if not extra_lines:
return lines[:]
lines = lines[:height]
blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style)
top_lines = extra_lines // 2
bottom_lines = extra_lines - top_lines
lines = [[blank]] * top_lines + lines + [[blank]] * bottom_lines
return lines
@classmethod
def simplify(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
iter_segments = iter(segments)
try:
last_segment = next(iter_segments)
except StopIteration:
return
_Segment = Segment
for segment in iter_segments:
if last_segment.style == segment.style and not segment.control:
last_segment = _Segment(
last_segment.text + segment.text, last_segment.style
)
else:
yield last_segment
last_segment = segment
yield last_segment
@classmethod
def strip_links(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
for segment in segments:
if segment.control or segment.style is None:
yield segment
else:
text, style, _control = segment
yield cls(text, style.update_link(None) if style else None)
@classmethod
def strip_styles(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
for text, _style, control in segments:
yield cls(text, None, control)
@classmethod
def remove_color(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
cache: Dict[Style, Style] = {}
for text, style, control in segments:
if style:
colorless_style = cache.get(style)
if colorless_style is None:
colorless_style = style.without_color
cache[style] = colorless_style
yield cls(text, colorless_style, control)
else:
yield cls(text, None, control)
@classmethod
def divide(
cls, segments: Iterable["Segment"], cuts: Iterable[int]
) -> Iterable[List["Segment"]]:
split_segments: List["Segment"] = []
add_segment = split_segments.append
iter_cuts = iter(cuts)
while True:
cut = next(iter_cuts, -1)
if cut == -1:
return
if cut != 0:
break
yield []
pos = 0
segments_clear = split_segments.clear
segments_copy = split_segments.copy
_cell_len = cached_cell_len
for segment in segments:
text, _style, control = segment
while text:
end_pos = pos if control else pos + _cell_len(text)
if end_pos < cut:
add_segment(segment)
pos = end_pos
break
if end_pos == cut:
add_segment(segment)
yield segments_copy()
segments_clear()
pos = end_pos
cut = next(iter_cuts, -1)
if cut == -1:
if split_segments:
yield segments_copy()
return
break
else:
before, segment = segment.split_cells(cut - pos)
text, _style, control = segment
add_segment(before)
yield segments_copy()
segments_clear()
pos = cut
cut = next(iter_cuts, -1)
if cut == -1:
if split_segments:
yield segments_copy()
return
yield segments_copy()
class Segments:
def __init__(self, segments: Iterable[Segment], new_lines: bool = False) -> None:
self.segments = list(segments)
self.new_lines = new_lines
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
if self.new_lines:
line = Segment.line()
for segment in self.segments:
yield segment
yield line
else:
yield from self.segments
class SegmentLines:
def __init__(self, lines: Iterable[List[Segment]], new_lines: bool = False) -> None:
self.lines = list(lines)
self.new_lines = new_lines
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
if self.new_lines:
new_line = Segment.line()
for line in self.lines:
yield from line
yield new_line
else:
for line in self.lines:
yield from line
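# A minimal sketch (illustrative; assumes rich is importable): Segments lets
# you re-print segments captured from a render, outside __rich_console__.
#
# from rich.console import Console
# console = Console()
# pieces = list(console.render("Hello"))
# console.print(Segments(pieces))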
if __name__ == "__main__": # pragma: no cover
from rich.console import Console
from rich.syntax import Syntax
from rich.text import Text
code = """from rich.console import Console
console = Console()
text = Text.from_markup("Hello, [bold magenta]World[/]!")
console.print(text)"""
text = Text.from_markup("Hello, [bold magenta]World[/]!")
console = Console()
console.rule("rich.Segment")
console.print(
"A Segment is the last step in the Rich render process before generating text with ANSI codes."
)
console.print("\nConsider the following code:\n")
console.print(Syntax(code, "python", line_numbers=True))
console.print()
console.print(
"When you call [b]print()[/b], Rich [i]renders[/i] the object in to the following:\n"
)
fragments = list(console.render(text))
console.print(fragments)
console.print()
console.print("The Segments are then processed to produce the following output:\n")
console.print(text)
console.print(
"\nYou will only need to know this if you are implementing your own Rich renderables."
) | --- +++ @@ -33,6 +33,7 @@
class ControlType(IntEnum):
+ """Non-printable control codes which typically translate to ANSI codes."""
BELL = 1
CARRIAGE_RETURN = 2
@@ -61,6 +62,17 @@
@rich_repr()
class Segment(NamedTuple):
+ """A piece of text with associated style. Segments are produced by the Console render process and
+ are ultimately converted into strings to be written to the terminal.
+
+ Args:
+ text (str): A piece of text.
+ style (:class:`~rich.style.Style`, optional): An optional style to apply to the text.
+ control (Tuple[ControlCode], optional): Optional sequence of control codes.
+
+ Attributes:
+ cell_length (int): The cell length of this Segment.
+ """
text: str
style: Optional[Style] = None
@@ -68,6 +80,11 @@
@property
def cell_length(self) -> int:
+ """The number of terminal cells required to display self.text.
+
+ Returns:
+ int: A number of cells.
+ """
text, _style, control = self
return 0 if control else cell_len(text)
@@ -81,15 +98,29 @@ yield self.control
def __bool__(self) -> bool:
+ """Check if the segment contains text."""
return bool(self.text)
@property
def is_control(self) -> bool:
+ """Check if the segment contains control codes."""
return self.control is not None
@classmethod
@lru_cache(1024 * 16)
def _split_cells(cls, segment: "Segment", cut: int) -> Tuple["Segment", "Segment"]:
+ """Split a segment in to two at a given cell position.
+
+ Note that splitting a double-width character, may result in that character turning
+ into two spaces.
+
+ Args:
+ segment (Segment): A segment to split.
+ cut (int): A cell position to cut on.
+
+ Returns:
+ Tuple[Segment, Segment]: A tuple of two segments.
+ """
text, style, control = segment
_Segment = Segment
cell_length = segment.cell_length
@@ -125,6 +156,17 @@ pos -= 1
def split_cells(self, cut: int) -> Tuple["Segment", "Segment"]:
+ """Split segment in to two segments at the specified column.
+
+ If the cut point falls in the middle of a 2-cell wide character then it is replaced
+ by two spaces, to preserve the display width of the parent segment.
+
+ Args:
+ cut (int): Offset within the segment to cut.
+
+ Returns:
+ Tuple[Segment, Segment]: Two segments.
+ """
text, style, control = self
assert cut >= 0
@@ -141,6 +183,7 @@
@classmethod
def line(cls) -> "Segment":
+ """Make a new line segment."""
return cls("\n")
@classmethod
@@ -150,6 +193,18 @@ style: Optional[Style] = None,
post_style: Optional[Style] = None,
) -> Iterable["Segment"]:
+ """Apply style(s) to an iterable of segments.
+
+ Returns an iterable of segments where the style is replaced by ``style + segment.style + post_style``.
+
+ Args:
+ segments (Iterable[Segment]): Segments to process.
+ style (Style, optional): Base style. Defaults to None.
+ post_style (Style, optional): Style to apply on top of segment style. Defaults to None.
+
+ Returns:
+ Iterable[Segment]: A new iterable of segments (possibly the same iterable).
+ """
result_segments = segments
if style:
apply = style.__add__
@@ -176,6 +231,16 @@ def filter_control(
cls, segments: Iterable["Segment"], is_control: bool = False
) -> Iterable["Segment"]:
+ """Filter segments by ``is_control`` attribute.
+
+ Args:
+ segments (Iterable[Segment]): An iterable of Segment instances.
+ is_control (bool, optional): is_control flag to match in search.
+
+ Returns:
+ Iterable[Segment]: An iterable of Segment instances.
+ """
if is_control:
return filter(attrgetter("control"), segments)
else:
@@ -183,6 +248,14 @@
@classmethod
def split_lines(cls, segments: Iterable["Segment"]) -> Iterable[List["Segment"]]:
+ """Split a sequence of segments in to a list of lines.
+
+ Args:
+ segments (Iterable[Segment]): Segments potentially containing line feeds.
+
+ Yields:
+ Iterable[List[Segment]]: Iterable of segment lists, one per line.
+ """
line: List[Segment] = []
append = line.append
@@ -206,6 +279,14 @@ def split_lines_terminator(
cls, segments: Iterable["Segment"]
) -> Iterable[Tuple[List["Segment"], bool]]:
+ """Split a sequence of segments in to a list of lines and a boolean to indicate if there was a new line.
+
+ Args:
+ segments (Iterable[Segment]): Segments potentially containing line feeds.
+
+ Yields:
+ Tuple[List[Segment], bool]: A line of segments, plus a flag for a trailing new line.
+ """
line: List[Segment] = []
append = line.append
@@ -234,6 +315,18 @@ pad: bool = True,
include_new_lines: bool = True,
) -> Iterable[List["Segment"]]:
+ """Split segments in to lines, and crop lines greater than a given length.
+
+ Args:
+ segments (Iterable[Segment]): An iterable of segments, probably
+ generated from console.render.
+ length (int): Desired line length.
+ style (Style, optional): Style to use for any padding.
+ pad (bool): Enable padding of lines that are less than `length`.
+
+ Returns:
+ Iterable[List[Segment]]: An iterable of lines of segments.
+ """
line: List[Segment] = []
append = line.append
@@ -268,6 +361,17 @@ style: Optional[Style] = None,
pad: bool = True,
) -> List["Segment"]:
+ """Adjust a line to a given width (cropping or padding as required).
+
+ Args:
+ line (List[Segment]): A list of segments in a single line.
+ length (int): The desired width of the line.
+ style (Style, optional): The style of padding if used (space on the end). Defaults to None.
+ pad (bool, optional): Pad lines with spaces if they are shorter than `length`. Defaults to True.
+
+ Returns:
+ List[Segment]: A line of segments with the desired length.
+ """
line_length = sum(segment.cell_length for segment in line)
new_line: List[Segment]
@@ -296,11 +400,27 @@
@classmethod
def get_line_length(cls, line: List["Segment"]) -> int:
+ """Get the length of list of segments.
+
+ Args:
+ line (List[Segment]): A line encoded as a list of Segments (assumes no '\\\\n' characters).
+
+ Returns:
+ int: The length of the line.
+ """
_cell_len = cell_len
return sum(_cell_len(text) for text, style, control in line if not control)
@classmethod
def get_shape(cls, lines: List[List["Segment"]]) -> Tuple[int, int]:
+ """Get the shape (enclosing rectangle) of a list of lines.
+
+ Args:
+ lines (List[List[Segment]]): A list of lines (no '\\\\n' characters).
+
+ Returns:
+ Tuple[int, int]: Width and height in characters.
+ """
get_line_length = cls.get_line_length
max_width = max(get_line_length(line) for line in lines) if lines else 0
return (max_width, len(lines))
@@ -314,6 +434,18 @@ style: Optional[Style] = None,
new_lines: bool = False,
) -> List[List["Segment"]]:
+ """Set the shape of a list of lines (enclosing rectangle).
+
+ Args:
+ lines (List[List[Segment]]): A list of lines.
+ width (int): Desired width.
+ height (int, optional): Desired height or None for no change.
+ style (Style, optional): Style of any padding added.
+ new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
+
+ Returns:
+ List[List[Segment]]: New list of lines.
+ """
_height = height or len(lines)
blank = (
@@ -338,6 +470,18 @@ style: Style,
new_lines: bool = False,
) -> List[List["Segment"]]:
+ """Aligns lines to top (adds extra lines to bottom as required).
+
+ Args:
+ lines (List[List[Segment]]): A list of lines.
+ width (int): Desired width.
+ height (int): Desired height.
+ style (Style): Style of any padding added.
+ new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
+
+ Returns:
+ List[List[Segment]]: New list of lines.
+ """
extra_lines = height - len(lines)
if not extra_lines:
return lines[:]
@@ -355,6 +499,18 @@ style: Style,
new_lines: bool = False,
) -> List[List["Segment"]]:
+ """Aligns render to bottom (adds extra lines above as required).
+
+ Args:
+ lines (List[List[Segment]]): A list of lines.
+ width (int): Desired width.
+ height (int): Desired height.
+ style (Style): Style of any padding added.
+ new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
+
+ Returns:
+ List[List[Segment]]: New list of lines.
+ """
extra_lines = height - len(lines)
if not extra_lines:
return lines[:]
@@ -372,6 +528,18 @@ style: Style,
new_lines: bool = False,
) -> List[List["Segment"]]:
+ """Aligns lines to middle (adds extra lines to above and below as required).
+
+ Args:
+ lines (List[List[Segment]]): A list of lines.
+ width (int): Desired width.
+ height (int): Desired height.
+ style (Style): Style of any padding added.
+ new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
+
+ Returns:
+ List[List[Segment]]: New list of lines.
+ """
extra_lines = height - len(lines)
if not extra_lines:
return lines[:]
@@ -384,6 +552,14 @@
@classmethod
def simplify(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
+ """Simplify an iterable of segments by combining contiguous segments with the same style.
+
+ Args:
+ segments (Iterable[Segment]): An iterable of segments.
+
+ Returns:
+ Iterable[Segment]: A possibly smaller iterable of segments that will render the same way.
+ """
iter_segments = iter(segments)
try:
last_segment = next(iter_segments)
@@ -403,6 +579,14 @@
@classmethod
def strip_links(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
+ """Remove all links from an iterable of styles.
+
+ Args:
+ segments (Iterable[Segment]): An iterable of segments.
+
+ Yields:
+ Segment: Segments with links removed.
+ """
for segment in segments:
if segment.control or segment.style is None:
yield segment
@@ -412,11 +596,27 @@
@classmethod
def strip_styles(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
+ """Remove all styles from an iterable of segments.
+
+ Args:
+ segments (Iterable[Segment]): An iterable of segments.
+
+ Yields:
+ Segment: Segments with styles replaced with None.
+ """
for text, _style, control in segments:
yield cls(text, None, control)
@classmethod
def remove_color(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
+ """Remove all color from an iterable of segments.
+
+ Args:
+ segments (Iterable[Segment]): An iterable of segments.
+
+ Yields:
+ Segment: Segments with colorless style.
+ """
cache: Dict[Style, Style] = {}
for text, style, control in segments:
@@ -433,6 +633,14 @@ def divide(
cls, segments: Iterable["Segment"], cuts: Iterable[int]
) -> Iterable[List["Segment"]]:
+ """Divides an iterable of segments in to portions.
+
+ Args:
+ cuts (Iterable[int]): Cell positions where to divide.
+
+ Yields:
+ [Iterable[List[Segment]]]: An iterable of Segments in List.
+ """
split_segments: List["Segment"] = []
add_segment = split_segments.append
@@ -492,6 +700,13 @@
class Segments:
+ """A simple renderable to render an iterable of segments. This class may be useful if
+ you want to print segments outside of a __rich_console__ method.
+
+ Args:
+ segments (Iterable[Segment]): An iterable of segments.
+ new_lines (bool, optional): Add new lines between segments. Defaults to False.
+ """
def __init__(self, segments: Iterable[Segment], new_lines: bool = False) -> None:
self.segments = list(segments)
@@ -511,6 +726,13 @@
class SegmentLines:
def __init__(self, lines: Iterable[List[Segment]], new_lines: bool = False) -> None:
+ """A simple renderable containing a number of lines of segments. May be used as an intermediate
+ in the rendering process.
+
+ Args:
+ lines (Iterable[List[Segment]]): Lists of segments forming lines.
+ new_lines (bool, optional): Insert new lines after each line. Defaults to False.
+ """
self.lines = list(lines)
self.new_lines = new_lines
@@ -558,4 +780,4 @@ console.print(text)
console.print(
"\nYou will only need to know this if you are implementing your own Rich renderables."
- )
+ )
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/segment.py |
Add docstrings to improve collaboration | from __future__ import annotations
import sys
from threading import Event, RLock, Thread
from types import TracebackType
from typing import IO, TYPE_CHECKING, Any, Callable, List, Optional, TextIO, Type, cast
from . import get_console
from .console import Console, ConsoleRenderable, Group, RenderableType, RenderHook
from .control import Control
from .file_proxy import FileProxy
from .jupyter import JupyterMixin
from .live_render import LiveRender, VerticalOverflowMethod
from .screen import Screen
from .text import Text
if TYPE_CHECKING:
# Can be replaced with `from typing import Self` in Python 3.11+
from typing_extensions import Self # pragma: no cover
class _RefreshThread(Thread):
def __init__(self, live: "Live", refresh_per_second: float) -> None:
self.live = live
self.refresh_per_second = refresh_per_second
self.done = Event()
super().__init__(daemon=True)
def stop(self) -> None:
self.done.set()
def run(self) -> None:
while not self.done.wait(1 / self.refresh_per_second):
with self.live._lock:
if not self.done.is_set():
self.live.refresh()
class Live(JupyterMixin, RenderHook):
def __init__(
self,
renderable: Optional[RenderableType] = None,
*,
console: Optional[Console] = None,
screen: bool = False,
auto_refresh: bool = True,
refresh_per_second: float = 4,
transient: bool = False,
redirect_stdout: bool = True,
redirect_stderr: bool = True,
vertical_overflow: VerticalOverflowMethod = "ellipsis",
get_renderable: Optional[Callable[[], RenderableType]] = None,
) -> None:
assert refresh_per_second > 0, "refresh_per_second must be > 0"
self._renderable = renderable
self.console = console if console is not None else get_console()
self._screen = screen
self._alt_screen = False
self._redirect_stdout = redirect_stdout
self._redirect_stderr = redirect_stderr
self._restore_stdout: Optional[IO[str]] = None
self._restore_stderr: Optional[IO[str]] = None
self._lock = RLock()
self.ipy_widget: Optional[Any] = None
self.auto_refresh = auto_refresh
self._started: bool = False
self.transient = True if screen else transient
self._refresh_thread: Optional[_RefreshThread] = None
self.refresh_per_second = refresh_per_second
self.vertical_overflow = vertical_overflow
self._get_renderable = get_renderable
self._live_render = LiveRender(
self.get_renderable(), vertical_overflow=vertical_overflow
)
self._nested = False
@property
def is_started(self) -> bool:
return self._started
def get_renderable(self) -> RenderableType:
renderable = (
self._get_renderable()
if self._get_renderable is not None
else self._renderable
)
return renderable or ""
def start(self, refresh: bool = False) -> None:
with self._lock:
if self._started:
return
self._started = True
if not self.console.set_live(self):
self._nested = True
return
if self._screen:
self._alt_screen = self.console.set_alt_screen(True)
self.console.show_cursor(False)
self._enable_redirect_io()
self.console.push_render_hook(self)
if refresh:
try:
self.refresh()
except Exception:
# If refresh fails, we want to stop the redirection of sys.stderr,
# so the error stacktrace is properly displayed in the terminal.
# (or, if the code that calls Rich captures the exception and wants to display something,
# let this be displayed in the terminal).
self.stop()
raise
if self.auto_refresh:
self._refresh_thread = _RefreshThread(self, self.refresh_per_second)
self._refresh_thread.start()
def stop(self) -> None:
with self._lock:
if not self._started:
return
self._started = False
self.console.clear_live()
if self._nested:
if not self.transient:
self.console.print(self.renderable)
return
if self.auto_refresh and self._refresh_thread is not None:
self._refresh_thread.stop()
self._refresh_thread = None
# allow it to fully render on the last refresh, even if it overflows
self.vertical_overflow = "visible"
with self.console:
try:
if not self._alt_screen and not self.console.is_jupyter:
self.refresh()
finally:
self._disable_redirect_io()
self.console.pop_render_hook()
if (
not self._alt_screen
and self.console.is_terminal
and self._live_render.last_render_height
):
self.console.line()
self.console.show_cursor(True)
if self._alt_screen:
self.console.set_alt_screen(False)
if self.transient and not self._alt_screen:
self.console.control(self._live_render.restore_cursor())
if self.ipy_widget is not None and self.transient:
self.ipy_widget.close() # pragma: no cover
def __enter__(self) -> Self:
self.start(refresh=self._renderable is not None)
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
self.stop()
def _enable_redirect_io(self) -> None:
if self.console.is_terminal or self.console.is_jupyter:
if self._redirect_stdout and not isinstance(sys.stdout, FileProxy):
self._restore_stdout = sys.stdout
sys.stdout = cast("TextIO", FileProxy(self.console, sys.stdout))
if self._redirect_stderr and not isinstance(sys.stderr, FileProxy):
self._restore_stderr = sys.stderr
sys.stderr = cast("TextIO", FileProxy(self.console, sys.stderr))
def _disable_redirect_io(self) -> None:
if self._restore_stdout:
sys.stdout = cast("TextIO", self._restore_stdout)
self._restore_stdout = None
if self._restore_stderr:
sys.stderr = cast("TextIO", self._restore_stderr)
self._restore_stderr = None
@property
def renderable(self) -> RenderableType:
live_stack = self.console._live_stack
renderable: RenderableType
if live_stack and self is live_stack[0]:
# The first Live instance will render everything in the Live stack
renderable = Group(*[live.get_renderable() for live in live_stack])
else:
renderable = self.get_renderable()
return Screen(renderable) if self._alt_screen else renderable
def update(self, renderable: RenderableType, *, refresh: bool = False) -> None:
if isinstance(renderable, str):
renderable = self.console.render_str(renderable)
with self._lock:
self._renderable = renderable
if refresh:
self.refresh()
def refresh(self) -> None:
with self._lock:
self._live_render.set_renderable(self.renderable)
if self._nested:
if self.console._live_stack:
self.console._live_stack[0].refresh()
return
if self.console.is_jupyter: # pragma: no cover
try:
from IPython.display import display
from ipywidgets import Output
except ImportError:
import warnings
warnings.warn('install "ipywidgets" for Jupyter support')
else:
if self.ipy_widget is None:
self.ipy_widget = Output()
display(self.ipy_widget)
with self.ipy_widget:
self.ipy_widget.clear_output(wait=True)
self.console.print(self._live_render.renderable)
elif self.console.is_terminal and not self.console.is_dumb_terminal:
with self.console:
self.console.print(Control())
elif (
not self._started and not self.transient
): # if it is finished allow files or dumb-terminals to see final result
with self.console:
self.console.print(Control())
def process_renderables(
self, renderables: List[ConsoleRenderable]
) -> List[ConsoleRenderable]:
self._live_render.vertical_overflow = self.vertical_overflow
if self.console.is_interactive:
# lock needs acquiring as user can modify live_render renderable at any time unlike in Progress.
with self._lock:
reset = (
Control.home()
if self._alt_screen
else self._live_render.position_cursor()
)
renderables = [reset, *renderables, self._live_render]
elif (
not self._started and not self.transient
): # if it is finished render the final output for files or dumb_terminals
renderables = [*renderables, self._live_render]
return renderables
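# A minimal usage sketch of Live (illustrative; assumes a terminal): the
# context manager starts and stops the display, and update() swaps in a new
# renderable between refreshes.
#
# import time
# with Live("working...", refresh_per_second=4) as live:
#     for step in range(3):
#         time.sleep(0.25)
#         live.update(f"step {step}")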
if __name__ == "__main__": # pragma: no cover
import random
import time
from itertools import cycle
from typing import Dict, List, Tuple
from .align import Align
from .console import Console
from .live import Live as Live
from .panel import Panel
from .rule import Rule
from .syntax import Syntax
from .table import Table
console = Console()
syntax = Syntax(
'''def loop_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
"""Iterate and generate a tuple with a flag for last value."""
iter_values = iter(values)
try:
previous_value = next(iter_values)
except StopIteration:
return
for value in iter_values:
yield False, previous_value
previous_value = value
yield True, previous_value''',
"python",
line_numbers=True,
)
table = Table("foo", "bar", "baz")
table.add_row("1", "2", "3")
progress_renderables = [
"You can make the terminal shorter and taller to see the live table hide"
"Text may be printed while the progress bars are rendering.",
Panel("In fact, [i]any[/i] renderable will work"),
"Such as [magenta]tables[/]...",
table,
"Pretty printed structures...",
{"type": "example", "text": "Pretty printed"},
"Syntax...",
syntax,
Rule("Give it a try!"),
]
examples = cycle(progress_renderables)
exchanges = [
"SGD",
"MYR",
"EUR",
"USD",
"AUD",
"JPY",
"CNH",
"HKD",
"CAD",
"INR",
"DKK",
"GBP",
"RUB",
"NZD",
"MXN",
"IDR",
"TWD",
"THB",
"VND",
]
with Live(console=console) as live_table:
exchange_rate_dict: Dict[Tuple[str, str], float] = {}
for index in range(100):
select_exchange = exchanges[index % len(exchanges)]
for exchange in exchanges:
if exchange == select_exchange:
continue
time.sleep(0.4)
if random.randint(0, 10) < 1:
console.log(next(examples))
exchange_rate_dict[(select_exchange, exchange)] = 200 / (
(random.random() * 320) + 1
)
if len(exchange_rate_dict) > len(exchanges) - 1:
exchange_rate_dict.pop(list(exchange_rate_dict.keys())[0])
table = Table(title="Exchange Rates")
table.add_column("Source Currency")
table.add_column("Destination Currency")
table.add_column("Exchange Rate")
for (source, dest), exchange_rate in exchange_rate_dict.items():
table.add_row(
source,
dest,
Text(
f"{exchange_rate:.4f}",
style="red" if exchange_rate < 1.0 else "green",
),
)
live_table.update(Align.center(table)) | --- +++ @@ -20,6 +20,7 @@
class _RefreshThread(Thread):
+ """A thread that calls refresh() at regular intervals."""
def __init__(self, live: "Live", refresh_per_second: float) -> None:
self.live = live
@@ -38,6 +39,20 @@
class Live(JupyterMixin, RenderHook):
+ """Renders an auto-updating live display of any given renderable.
+
+ Args:
+ renderable (RenderableType, optional): The renderable to live display. Defaults to displaying nothing.
+ console (Console, optional): Optional Console instance. Defaults to an internal Console instance writing to stdout.
+ screen (bool, optional): Enable alternate screen mode. Defaults to False.
+ auto_refresh (bool, optional): Enable auto refresh. If disabled, you will need to call `refresh()` or `update()` with the refresh flag. Defaults to True.
+ refresh_per_second (float, optional): Number of times per second to refresh the live display. Defaults to 4.
+ transient (bool, optional): Clear the renderable on exit (has no effect when screen=True). Defaults to False.
+ redirect_stdout (bool, optional): Enable redirection of stdout, so ``print`` may be used. Defaults to True.
+ redirect_stderr (bool, optional): Enable redirection of stderr. Defaults to True.
+ vertical_overflow (VerticalOverflowMethod, optional): How to handle renderable when it is too tall for the console. Defaults to "ellipsis".
+ get_renderable (Callable[[], RenderableType], optional): Optional callable to get renderable. Defaults to None.
+ """
def __init__(
self,
@@ -82,6 +97,7 @@
@property
def is_started(self) -> bool:
+ """Check if live display has been started."""
return self._started
def get_renderable(self) -> RenderableType:
@@ -93,6 +109,11 @@ return renderable or ""
def start(self, refresh: bool = False) -> None:
+ """Start live rendering display.
+
+ Args:
+ refresh (bool, optional): Also refresh. Defaults to False.
+ """
with self._lock:
if self._started:
return
@@ -122,6 +143,7 @@ self._refresh_thread.start()
def stop(self) -> None:
+ """Stop live rendering display."""
with self._lock:
if not self._started:
return
@@ -171,6 +193,7 @@ self.stop()
def _enable_redirect_io(self) -> None:
+ """Enable redirecting of stdout / stderr."""
if self.console.is_terminal or self.console.is_jupyter:
if self._redirect_stdout and not isinstance(sys.stdout, FileProxy):
self._restore_stdout = sys.stdout
@@ -180,6 +203,7 @@ sys.stderr = cast("TextIO", FileProxy(self.console, sys.stderr))
def _disable_redirect_io(self) -> None:
+ """Disable redirecting of stdout / stderr."""
if self._restore_stdout:
sys.stdout = cast("TextIO", self._restore_stdout)
self._restore_stdout = None
@@ -189,6 +213,11 @@
@property
def renderable(self) -> RenderableType:
+ """Get the renderable that is being displayed
+
+ Returns:
+ RenderableType: Displayed renderable.
+ """
live_stack = self.console._live_stack
renderable: RenderableType
if live_stack and self is live_stack[0]:
@@ -199,6 +228,12 @@ return Screen(renderable) if self._alt_screen else renderable
def update(self, renderable: RenderableType, *, refresh: bool = False) -> None:
+ """Update the renderable that is being displayed
+
+ Args:
+ renderable (RenderableType): New renderable to use.
+ refresh (bool, optional): Refresh the display. Defaults to False.
+ """
if isinstance(renderable, str):
renderable = self.console.render_str(renderable)
with self._lock:
@@ -207,6 +242,7 @@ self.refresh()
def refresh(self) -> None:
+ """Update the display of the Live Render."""
with self._lock:
self._live_render.set_renderable(self.renderable)
if self._nested:
@@ -242,6 +278,7 @@ def process_renderables(
self, renderables: List[ConsoleRenderable]
) -> List[ConsoleRenderable]:
+ """Process renderables to restore cursor and display progress."""
self._live_render.vertical_overflow = self.vertical_overflow
if self.console.is_interactive:
# lock needs acquiring as user can modify live_render renderable at any time unlike in Progress.
@@ -364,4 +401,4 @@ ),
)
- live_table.update(Align.center(table))
+ live_table.update(Align.center(table))
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/live.py |
Add docstrings to meet PEP guidelines |
import ctypes
import sys
from typing import Any
windll: Any = None
if sys.platform == "win32":
windll = ctypes.LibraryLoader(ctypes.WinDLL)
else:
raise ImportError(f"{__name__} can only be imported on Windows")
import time
from ctypes import Structure, byref, wintypes
from typing import IO, NamedTuple, Type, cast
from rich.color import ColorSystem
from rich.style import Style
STDOUT = -11
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4
COORD = wintypes._COORD
class LegacyWindowsError(Exception):
pass
class WindowsCoordinates(NamedTuple):
row: int
col: int
@classmethod
def from_param(cls, value: "WindowsCoordinates") -> COORD:
return COORD(value.col, value.row)
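# A minimal sketch (illustrative): WindowsCoordinates is (row, col), and
# from_param converts it to the Windows-native COORD, which is (X=col, Y=row).
#
# cursor = WindowsCoordinates(row=5, col=10)
# coord = WindowsCoordinates.from_param(cursor)  # COORD with X=10, Y=5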
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
_fields_ = [
("dwSize", COORD),
("dwCursorPosition", COORD),
("wAttributes", wintypes.WORD),
("srWindow", wintypes.SMALL_RECT),
("dwMaximumWindowSize", COORD),
]
class CONSOLE_CURSOR_INFO(ctypes.Structure):
_fields_ = [("dwSize", wintypes.DWORD), ("bVisible", wintypes.BOOL)]
_GetStdHandle = windll.kernel32.GetStdHandle
_GetStdHandle.argtypes = [
wintypes.DWORD,
]
_GetStdHandle.restype = wintypes.HANDLE
def GetStdHandle(handle: int = STDOUT) -> wintypes.HANDLE:
return cast(wintypes.HANDLE, _GetStdHandle(handle))
_GetConsoleMode = windll.kernel32.GetConsoleMode
_GetConsoleMode.argtypes = [wintypes.HANDLE, wintypes.LPDWORD]
_GetConsoleMode.restype = wintypes.BOOL
def GetConsoleMode(std_handle: wintypes.HANDLE) -> int:
console_mode = wintypes.DWORD()
success = bool(_GetConsoleMode(std_handle, console_mode))
if not success:
raise LegacyWindowsError("Unable to get legacy Windows Console Mode")
return console_mode.value
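# A minimal sketch (illustrative; Windows only): a console handle supports VT
# sequences when the ENABLE_VIRTUAL_TERMINAL_PROCESSING bit is set in its mode.
#
# handle = GetStdHandle(STDOUT)
# vt = bool(GetConsoleMode(handle) & ENABLE_VIRTUAL_TERMINAL_PROCESSING)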
_FillConsoleOutputCharacterW = windll.kernel32.FillConsoleOutputCharacterW
_FillConsoleOutputCharacterW.argtypes = [
wintypes.HANDLE,
ctypes.c_char,
wintypes.DWORD,
cast(Type[COORD], WindowsCoordinates),
ctypes.POINTER(wintypes.DWORD),
]
_FillConsoleOutputCharacterW.restype = wintypes.BOOL
def FillConsoleOutputCharacter(
std_handle: wintypes.HANDLE,
char: str,
length: int,
start: WindowsCoordinates,
) -> int:
character = ctypes.c_char(char.encode())
num_characters = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
_FillConsoleOutputCharacterW(
std_handle,
character,
num_characters,
start,
byref(num_written),
)
return num_written.value
_FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
_FillConsoleOutputAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
wintypes.DWORD,
cast(Type[COORD], WindowsCoordinates),
ctypes.POINTER(wintypes.DWORD),
]
_FillConsoleOutputAttribute.restype = wintypes.BOOL
def FillConsoleOutputAttribute(
std_handle: wintypes.HANDLE,
attributes: int,
length: int,
start: WindowsCoordinates,
) -> int:
num_cells = wintypes.DWORD(length)
style_attrs = wintypes.WORD(attributes)
num_written = wintypes.DWORD(0)
_FillConsoleOutputAttribute(
std_handle, style_attrs, num_cells, start, byref(num_written)
)
return num_written.value
_SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
_SetConsoleTextAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
]
_SetConsoleTextAttribute.restype = wintypes.BOOL
def SetConsoleTextAttribute(
std_handle: wintypes.HANDLE, attributes: wintypes.WORD
) -> bool:
return bool(_SetConsoleTextAttribute(std_handle, attributes))
_GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
_GetConsoleScreenBufferInfo.argtypes = [
wintypes.HANDLE,
ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO),
]
_GetConsoleScreenBufferInfo.restype = wintypes.BOOL
def GetConsoleScreenBufferInfo(
std_handle: wintypes.HANDLE,
) -> CONSOLE_SCREEN_BUFFER_INFO:
console_screen_buffer_info = CONSOLE_SCREEN_BUFFER_INFO()
_GetConsoleScreenBufferInfo(std_handle, byref(console_screen_buffer_info))
return console_screen_buffer_info
_SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
_SetConsoleCursorPosition.argtypes = [
wintypes.HANDLE,
cast(Type[COORD], WindowsCoordinates),
]
_SetConsoleCursorPosition.restype = wintypes.BOOL
def SetConsoleCursorPosition(
std_handle: wintypes.HANDLE, coords: WindowsCoordinates
) -> bool:
return bool(_SetConsoleCursorPosition(std_handle, coords))
_GetConsoleCursorInfo = windll.kernel32.GetConsoleCursorInfo
_GetConsoleCursorInfo.argtypes = [
wintypes.HANDLE,
ctypes.POINTER(CONSOLE_CURSOR_INFO),
]
_GetConsoleCursorInfo.restype = wintypes.BOOL
def GetConsoleCursorInfo(
std_handle: wintypes.HANDLE, cursor_info: CONSOLE_CURSOR_INFO
) -> bool:
return bool(_GetConsoleCursorInfo(std_handle, byref(cursor_info)))
_SetConsoleCursorInfo = windll.kernel32.SetConsoleCursorInfo
_SetConsoleCursorInfo.argtypes = [
wintypes.HANDLE,
ctypes.POINTER(CONSOLE_CURSOR_INFO),
]
_SetConsoleCursorInfo.restype = wintypes.BOOL
def SetConsoleCursorInfo(
std_handle: wintypes.HANDLE, cursor_info: CONSOLE_CURSOR_INFO
) -> bool:
return bool(_SetConsoleCursorInfo(std_handle, byref(cursor_info)))
_SetConsoleTitle = windll.kernel32.SetConsoleTitleW
_SetConsoleTitle.argtypes = [wintypes.LPCWSTR]
_SetConsoleTitle.restype = wintypes.BOOL
def SetConsoleTitle(title: str) -> bool:
return bool(_SetConsoleTitle(title))
class LegacyWindowsTerm:
BRIGHT_BIT = 8
# Indices are ANSI color numbers, values are the corresponding Windows Console API color numbers
ANSI_TO_WINDOWS = [
0, # black The Windows colours are defined in wincon.h as follows:
4, # red define FOREGROUND_BLUE 0x0001 -- 0000 0001
2, # green define FOREGROUND_GREEN 0x0002 -- 0000 0010
6, # yellow define FOREGROUND_RED 0x0004 -- 0000 0100
1, # blue define FOREGROUND_INTENSITY 0x0008 -- 0000 1000
5, # magenta define BACKGROUND_BLUE 0x0010 -- 0001 0000
3, # cyan define BACKGROUND_GREEN 0x0020 -- 0010 0000
7, # white define BACKGROUND_RED 0x0040 -- 0100 0000
8, # bright black (grey) define BACKGROUND_INTENSITY 0x0080 -- 1000 0000
12, # bright red
10, # bright green
14, # bright yellow
9, # bright blue
13, # bright magenta
11, # bright cyan
15, # bright white
]
def __init__(self, file: "IO[str]") -> None:
handle = GetStdHandle(STDOUT)
self._handle = handle
default_text = GetConsoleScreenBufferInfo(handle).wAttributes
self._default_text = default_text
self._default_fore = default_text & 7
self._default_back = (default_text >> 4) & 7
self._default_attrs = self._default_fore | (self._default_back << 4)
self._file = file
self.write = file.write
self.flush = file.flush
@property
def cursor_position(self) -> WindowsCoordinates:
coord: COORD = GetConsoleScreenBufferInfo(self._handle).dwCursorPosition
return WindowsCoordinates(row=coord.Y, col=coord.X)
@property
def screen_size(self) -> WindowsCoordinates:
screen_size: COORD = GetConsoleScreenBufferInfo(self._handle).dwSize
return WindowsCoordinates(row=screen_size.Y, col=screen_size.X)
def write_text(self, text: str) -> None:
self.write(text)
self.flush()
def write_styled(self, text: str, style: Style) -> None:
color = style.color
bgcolor = style.bgcolor
if style.reverse:
color, bgcolor = bgcolor, color
if color:
fore = color.downgrade(ColorSystem.WINDOWS).number
fore = fore if fore is not None else 7 # Default to ANSI 7: White
if style.bold:
fore = fore | self.BRIGHT_BIT
if style.dim:
fore = fore & ~self.BRIGHT_BIT
fore = self.ANSI_TO_WINDOWS[fore]
else:
fore = self._default_fore
if bgcolor:
back = bgcolor.downgrade(ColorSystem.WINDOWS).number
back = back if back is not None else 0 # Default to ANSI 0: Black
back = self.ANSI_TO_WINDOWS[back]
else:
back = self._default_back
assert fore is not None
assert back is not None
SetConsoleTextAttribute(
self._handle, attributes=ctypes.c_ushort(fore | (back << 4))
)
self.write_text(text)
SetConsoleTextAttribute(self._handle, attributes=self._default_text)
def move_cursor_to(self, new_position: WindowsCoordinates) -> None:
if new_position.col < 0 or new_position.row < 0:
return
SetConsoleCursorPosition(self._handle, coords=new_position)
def erase_line(self) -> None:
screen_size = self.screen_size
cursor_position = self.cursor_position
cells_to_erase = screen_size.col
start_coordinates = WindowsCoordinates(row=cursor_position.row, col=0)
FillConsoleOutputCharacter(
self._handle, " ", length=cells_to_erase, start=start_coordinates
)
FillConsoleOutputAttribute(
self._handle,
self._default_attrs,
length=cells_to_erase,
start=start_coordinates,
)
def erase_end_of_line(self) -> None:
cursor_position = self.cursor_position
cells_to_erase = self.screen_size.col - cursor_position.col
FillConsoleOutputCharacter(
self._handle, " ", length=cells_to_erase, start=cursor_position
)
FillConsoleOutputAttribute(
self._handle,
self._default_attrs,
length=cells_to_erase,
start=cursor_position,
)
def erase_start_of_line(self) -> None:
row, col = self.cursor_position
start = WindowsCoordinates(row, 0)
FillConsoleOutputCharacter(self._handle, " ", length=col, start=start)
FillConsoleOutputAttribute(
self._handle, self._default_attrs, length=col, start=start
)
def move_cursor_up(self) -> None:
cursor_position = self.cursor_position
SetConsoleCursorPosition(
self._handle,
coords=WindowsCoordinates(
row=cursor_position.row - 1, col=cursor_position.col
),
)
def move_cursor_down(self) -> None:
cursor_position = self.cursor_position
SetConsoleCursorPosition(
self._handle,
coords=WindowsCoordinates(
row=cursor_position.row + 1,
col=cursor_position.col,
),
)
def move_cursor_forward(self) -> None:
row, col = self.cursor_position
if col == self.screen_size.col - 1:
row += 1
col = 0
else:
col += 1
SetConsoleCursorPosition(
self._handle, coords=WindowsCoordinates(row=row, col=col)
)
def move_cursor_to_column(self, column: int) -> None:
row, _ = self.cursor_position
SetConsoleCursorPosition(self._handle, coords=WindowsCoordinates(row, column))
def move_cursor_backward(self) -> None:
row, col = self.cursor_position
if col == 0:
row -= 1
col = self.screen_size.col - 1
else:
col -= 1
SetConsoleCursorPosition(
self._handle, coords=WindowsCoordinates(row=row, col=col)
)
def hide_cursor(self) -> None:
current_cursor_size = self._get_cursor_size()
invisible_cursor = CONSOLE_CURSOR_INFO(dwSize=current_cursor_size, bVisible=0)
SetConsoleCursorInfo(self._handle, cursor_info=invisible_cursor)
def show_cursor(self) -> None:
current_cursor_size = self._get_cursor_size()
visible_cursor = CONSOLE_CURSOR_INFO(dwSize=current_cursor_size, bVisible=1)
SetConsoleCursorInfo(self._handle, cursor_info=visible_cursor)
def set_title(self, title: str) -> None:
assert len(title) < 255, "Console title must be less than 255 characters"
SetConsoleTitle(title)
def _get_cursor_size(self) -> int:
cursor_info = CONSOLE_CURSOR_INFO()
GetConsoleCursorInfo(self._handle, cursor_info=cursor_info)
return int(cursor_info.dwSize)
if __name__ == "__main__":
handle = GetStdHandle()
from rich.console import Console
console = Console()
term = LegacyWindowsTerm(sys.stdout)
term.set_title("Win32 Console Examples")
style = Style(color="black", bgcolor="red")
heading = Style.parse("black on green")
# Check colour output
console.rule("Checking colour output")
console.print("[on red]on red!")
console.print("[blue]blue!")
console.print("[yellow]yellow!")
console.print("[bold yellow]bold yellow!")
console.print("[bright_yellow]bright_yellow!")
console.print("[dim bright_yellow]dim bright_yellow!")
console.print("[italic cyan]italic cyan!")
console.print("[bold white on blue]bold white on blue!")
console.print("[reverse bold white on blue]reverse bold white on blue!")
console.print("[bold black on cyan]bold black on cyan!")
console.print("[black on green]black on green!")
console.print("[blue on green]blue on green!")
console.print("[white on black]white on black!")
console.print("[black on white]black on white!")
console.print("[#1BB152 on #DA812D]#1BB152 on #DA812D!")
# Check cursor movement
console.rule("Checking cursor movement")
console.print()
term.move_cursor_backward()
term.move_cursor_backward()
term.write_text("went back and wrapped to prev line")
time.sleep(1)
term.move_cursor_up()
term.write_text("we go up")
time.sleep(1)
term.move_cursor_down()
term.write_text("and down")
time.sleep(1)
term.move_cursor_up()
term.move_cursor_backward()
term.move_cursor_backward()
term.write_text("we went up and back 2")
time.sleep(1)
term.move_cursor_down()
term.move_cursor_backward()
term.move_cursor_backward()
term.write_text("we went down and back 2")
time.sleep(1)
# Check erasing of lines
term.hide_cursor()
console.print()
console.rule("Checking line erasing")
console.print("\n...Deleting to the start of the line...")
term.write_text("The red arrow shows the cursor location, and direction of erase")
time.sleep(1)
term.move_cursor_to_column(16)
term.write_styled("<", Style.parse("black on red"))
term.move_cursor_backward()
time.sleep(1)
term.erase_start_of_line()
time.sleep(1)
console.print("\n\n...And to the end of the line...")
term.write_text("The red arrow shows the cursor location, and direction of erase")
time.sleep(1)
term.move_cursor_to_column(16)
term.write_styled(">", Style.parse("black on red"))
time.sleep(1)
term.erase_end_of_line()
time.sleep(1)
console.print("\n\n...Now the whole line will be erased...")
term.write_styled("I'm going to disappear!", style=Style.parse("black on cyan"))
time.sleep(1)
term.erase_line()
term.show_cursor()
print("\n") | --- +++ @@ -1,3 +1,7 @@+"""Light wrapper around the Win32 Console API - this module should only be imported on Windows
+
+The API that this module wraps is documented at https://docs.microsoft.com/en-us/windows/console/console-functions
+"""
import ctypes
import sys
@@ -27,12 +31,26 @@
class WindowsCoordinates(NamedTuple):
+ """Coordinates in the Windows Console API are (y, x), not (x, y).
+ This class is intended to prevent that confusion.
+ Rows and columns are indexed from 0.
+ This class can be used in place of wintypes._COORD in arguments and argtypes.
+ """
row: int
col: int
@classmethod
def from_param(cls, value: "WindowsCoordinates") -> COORD:
+ """Converts a WindowsCoordinates into a wintypes _COORD structure.
+ This classmethod is internally called by ctypes to perform the conversion.
+
+ Args:
+ value (WindowsCoordinates): The input coordinates to convert.
+
+ Returns:
+ wintypes._COORD: The converted coordinates struct.
+ """
return COORD(value.col, value.row)
@@ -58,6 +76,14 @@
def GetStdHandle(handle: int = STDOUT) -> wintypes.HANDLE:
+ """Retrieves a handle to the specified standard device (standard input, standard output, or standard error).
+
+ Args:
+ handle (int): Integer identifier for the handle. Defaults to -11 (stdout).
+
+ Returns:
+ wintypes.HANDLE: The handle
+ """
return cast(wintypes.HANDLE, _GetStdHandle(handle))
@@ -67,6 +93,19 @@
def GetConsoleMode(std_handle: wintypes.HANDLE) -> int:
+ """Retrieves the current input mode of a console's input buffer
+ or the current output mode of a console screen buffer.
+
+ Args:
+ std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
+
+ Raises:
+ LegacyWindowsError: If any error occurs while calling the Windows console API.
+
+ Returns:
+ int: Value representing the current console mode as documented at
+ https://docs.microsoft.com/en-us/windows/console/getconsolemode#parameters
+ """
console_mode = wintypes.DWORD()
success = bool(_GetConsoleMode(std_handle, console_mode))
@@ -92,6 +131,17 @@ length: int,
start: WindowsCoordinates,
) -> int:
+ """Writes a character to the console screen buffer a specified number of times, beginning at the specified coordinates.
+
+ Args:
+ std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
+ char (str): The character to write. Must be a string of length 1.
+ length (int): The number of times to write the character.
+ start (WindowsCoordinates): The coordinates to start writing at.
+
+ Returns:
+ int: The number of characters written.
+ """
character = ctypes.c_char(char.encode())
num_characters = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
@@ -122,6 +172,18 @@ length: int,
start: WindowsCoordinates,
) -> int:
+ """Sets the character attributes for a specified number of character cells,
+ beginning at the specified coordinates in a screen buffer.
+
+ Args:
+ std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
+ attributes (int): Integer value representing the foreground and background colours of the cells.
+ length (int): The number of cells to set the output attribute of.
+ start (WindowsCoordinates): The coordinates of the first cell whose attributes are to be set.
+
+ Returns:
+ int: The number of cells whose attributes were actually set.
+ """
num_cells = wintypes.DWORD(length)
style_attrs = wintypes.WORD(attributes)
num_written = wintypes.DWORD(0)
@@ -142,6 +204,16 @@ def SetConsoleTextAttribute(
std_handle: wintypes.HANDLE, attributes: wintypes.WORD
) -> bool:
+ """Set the colour attributes for all text written after this function is called.
+
+ Args:
+ std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
+ attributes (int): Integer value representing the foreground and background colours.
+
+
+ bool: True if the attribute was set successfully, otherwise False.
+ """
return bool(_SetConsoleTextAttribute(std_handle, attributes))
@@ -156,6 +228,14 @@ def GetConsoleScreenBufferInfo(
std_handle: wintypes.HANDLE,
) -> CONSOLE_SCREEN_BUFFER_INFO:
+ """Retrieves information about the specified console screen buffer.
+
+ Args:
+ std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
+
+ Returns:
+ CONSOLE_SCREEN_BUFFER_INFO: A CONSOLE_SCREEN_BUFFER_INFO ctype struct contain information about
+ screen size, cursor position, colour attributes, and more."""
console_screen_buffer_info = CONSOLE_SCREEN_BUFFER_INFO()
_GetConsoleScreenBufferInfo(std_handle, byref(console_screen_buffer_info))
return console_screen_buffer_info
@@ -172,6 +252,15 @@ def SetConsoleCursorPosition(
std_handle: wintypes.HANDLE, coords: WindowsCoordinates
) -> bool:
+ """Set the position of the cursor in the console screen
+
+ Args:
+ std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
+ coords (WindowsCoordinates): The coordinates to move the cursor to.
+
+ Returns:
+ bool: True if the function succeeds, otherwise False.
+ """
return bool(_SetConsoleCursorPosition(std_handle, coords))
@@ -186,6 +275,16 @@ def GetConsoleCursorInfo(
std_handle: wintypes.HANDLE, cursor_info: CONSOLE_CURSOR_INFO
) -> bool:
+ """Get the cursor info - used to get cursor visibility and width
+
+ Args:
+ std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
+ cursor_info (CONSOLE_CURSOR_INFO): CONSOLE_CURSOR_INFO ctype struct that receives information
+ about the console's cursor.
+
+ Returns:
+ bool: True if the function succeeds, otherwise False.
+ """
return bool(_GetConsoleCursorInfo(std_handle, byref(cursor_info)))
@@ -200,6 +299,15 @@ def SetConsoleCursorInfo(
std_handle: wintypes.HANDLE, cursor_info: CONSOLE_CURSOR_INFO
) -> bool:
+ """Set the cursor info - used for adjusting cursor visibility and width
+
+ Args:
+ std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
+ cursor_info (CONSOLE_CURSOR_INFO): CONSOLE_CURSOR_INFO ctype struct containing the new cursor info.
+
+ Returns:
+ bool: True if the function succeeds, otherwise False.
+ """
return bool(_SetConsoleCursorInfo(std_handle, byref(cursor_info)))
@@ -209,10 +317,25 @@
def SetConsoleTitle(title: str) -> bool:
+ """Sets the title of the current console window
+
+ Args:
+ title (str): The new title of the console window.
+
+ Returns:
+ bool: True if the function succeeds, otherwise False.
+ """
return bool(_SetConsoleTitle(title))
class LegacyWindowsTerm:
+ """This class allows interaction with the legacy Windows Console API. It should only be used in the context
+ of environments where virtual terminal processing is not available. However, if it is used in a Windows environment,
+ the entire API should work.
+
+ Args:
+ file (IO[str]): The file which the Windows Console API HANDLE is retrieved from, defaults to sys.stdout.
+ """
BRIGHT_BIT = 8
@@ -252,19 +375,40 @@
@property
def cursor_position(self) -> WindowsCoordinates:
+ """Returns the current position of the cursor (0-based)
+
+ Returns:
+ WindowsCoordinates: The current cursor position.
+ """
coord: COORD = GetConsoleScreenBufferInfo(self._handle).dwCursorPosition
return WindowsCoordinates(row=coord.Y, col=coord.X)
@property
def screen_size(self) -> WindowsCoordinates:
+ """Returns the current size of the console screen buffer, in character columns and rows
+
+ Returns:
+ WindowsCoordinates: The width and height of the screen as WindowsCoordinates.
+ """
screen_size: COORD = GetConsoleScreenBufferInfo(self._handle).dwSize
return WindowsCoordinates(row=screen_size.Y, col=screen_size.X)
def write_text(self, text: str) -> None:
+ """Write text directly to the terminal without any modification of styles
+
+ Args:
+ text (str): The text to write to the console
+ """
self.write(text)
self.flush()
def write_styled(self, text: str, style: Style) -> None:
+ """Write styled text to the terminal.
+
+ Args:
+ text (str): The text to write
+ style (Style): The style of the text
+ """
color = style.color
bgcolor = style.bgcolor
if style.reverse:
@@ -298,11 +442,17 @@ SetConsoleTextAttribute(self._handle, attributes=self._default_text)
def move_cursor_to(self, new_position: WindowsCoordinates) -> None:
+ """Set the position of the cursor
+
+ Args:
+ new_position (WindowsCoordinates): The WindowsCoordinates representing the new position of the cursor.
+ """
if new_position.col < 0 or new_position.row < 0:
return
SetConsoleCursorPosition(self._handle, coords=new_position)
def erase_line(self) -> None:
+ """Erase all content on the line the cursor is currently located at"""
screen_size = self.screen_size
cursor_position = self.cursor_position
cells_to_erase = screen_size.col
@@ -318,6 +468,7 @@ )
def erase_end_of_line(self) -> None:
+ """Erase all content from the cursor position to the end of that line"""
cursor_position = self.cursor_position
cells_to_erase = self.screen_size.col - cursor_position.col
FillConsoleOutputCharacter(
@@ -331,6 +482,7 @@ )
def erase_start_of_line(self) -> None:
+ """Erase all content from the cursor position to the start of that line"""
row, col = self.cursor_position
start = WindowsCoordinates(row, 0)
FillConsoleOutputCharacter(self._handle, " ", length=col, start=start)
@@ -339,6 +491,7 @@ )
def move_cursor_up(self) -> None:
+ """Move the cursor up a single cell"""
cursor_position = self.cursor_position
SetConsoleCursorPosition(
self._handle,
@@ -348,6 +501,7 @@ )
def move_cursor_down(self) -> None:
+ """Move the cursor down a single cell"""
cursor_position = self.cursor_position
SetConsoleCursorPosition(
self._handle,
@@ -358,6 +512,7 @@ )
def move_cursor_forward(self) -> None:
+ """Move the cursor forward a single cell. Wrap to the next line if required."""
row, col = self.cursor_position
if col == self.screen_size.col - 1:
row += 1
@@ -369,10 +524,16 @@ )
def move_cursor_to_column(self, column: int) -> None:
+ """Move cursor to the column specified by the zero-based column index, staying on the same row
+
+ Args:
+ column (int): The zero-based column index to move the cursor to.
+ """
row, _ = self.cursor_position
SetConsoleCursorPosition(self._handle, coords=WindowsCoordinates(row, column))
def move_cursor_backward(self) -> None:
+ """Move the cursor backward a single cell. Wrap to the previous line if required."""
row, col = self.cursor_position
if col == 0:
row -= 1
@@ -384,20 +545,28 @@ )
def hide_cursor(self) -> None:
+ """Hide the cursor"""
current_cursor_size = self._get_cursor_size()
invisible_cursor = CONSOLE_CURSOR_INFO(dwSize=current_cursor_size, bVisible=0)
SetConsoleCursorInfo(self._handle, cursor_info=invisible_cursor)
def show_cursor(self) -> None:
+ """Show the cursor"""
current_cursor_size = self._get_cursor_size()
visible_cursor = CONSOLE_CURSOR_INFO(dwSize=current_cursor_size, bVisible=1)
SetConsoleCursorInfo(self._handle, cursor_info=visible_cursor)
def set_title(self, title: str) -> None:
+ """Set the title of the terminal window
+
+ Args:
+ title (str): The new title of the console window
+ """
assert len(title) < 255, "Console title must be less than 255 characters"
SetConsoleTitle(title)
def _get_cursor_size(self) -> int:
+ """Get the percentage of the character cell that is filled by the cursor"""
cursor_info = CONSOLE_CURSOR_INFO()
GetConsoleCursorInfo(self._handle, cursor_info=cursor_info)
return int(cursor_info.dwSize)
@@ -489,4 +658,4 @@ term.erase_line()
term.show_cursor()
- print("\n")+ print("\n")
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/_win32_console.py |
Help me comply with documentation standards | from __future__ import annotations
import io
import typing
import warnings
from abc import ABC, abstractmethod
from collections import deque
from dataclasses import dataclass, field
from datetime import timedelta
from io import RawIOBase, UnsupportedOperation
from math import ceil
from mmap import mmap
from operator import length_hint
from os import PathLike, stat
from threading import Event, RLock, Thread
from types import TracebackType
from typing import (
TYPE_CHECKING,
Any,
BinaryIO,
Callable,
ContextManager,
Deque,
Dict,
Generic,
Iterable,
List,
Literal,
NamedTuple,
NewType,
Optional,
TextIO,
Tuple,
Type,
TypeVar,
Union,
)
if TYPE_CHECKING:
# Can be replaced with `from typing import Self` in Python 3.11+
from typing_extensions import Self # pragma: no cover
from . import filesize, get_console
from .console import Console, Group, JustifyMethod, RenderableType
from .highlighter import Highlighter
from .jupyter import JupyterMixin
from .live import Live
from .progress_bar import ProgressBar
from .spinner import Spinner
from .style import StyleType
from .table import Column, Table
from .text import Text, TextType
TaskID = NewType("TaskID", int)
ProgressType = TypeVar("ProgressType")
GetTimeCallable = Callable[[], float]
_I = typing.TypeVar("_I", TextIO, BinaryIO)
class _TrackThread(Thread):
def __init__(self, progress: "Progress", task_id: "TaskID", update_period: float):
self.progress = progress
self.task_id = task_id
self.update_period = update_period
self.done = Event()
self.completed = 0
super().__init__(daemon=True)
def run(self) -> None:
task_id = self.task_id
advance = self.progress.advance
update_period = self.update_period
last_completed = 0
wait = self.done.wait
while not wait(update_period) and self.progress.live.is_started:
completed = self.completed
if last_completed != completed:
advance(task_id, completed - last_completed)
last_completed = completed
self.progress.update(self.task_id, completed=self.completed, refresh=True)
def __enter__(self) -> "_TrackThread":
self.start()
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
self.done.set()
self.join()
def track(
sequence: Iterable[ProgressType],
description: str = "Working...",
total: Optional[float] = None,
completed: int = 0,
auto_refresh: bool = True,
console: Optional[Console] = None,
transient: bool = False,
get_time: Optional[Callable[[], float]] = None,
refresh_per_second: float = 10,
style: StyleType = "bar.back",
complete_style: StyleType = "bar.complete",
finished_style: StyleType = "bar.finished",
pulse_style: StyleType = "bar.pulse",
update_period: float = 0.1,
disable: bool = False,
show_speed: bool = True,
) -> Iterable[ProgressType]:
columns: List["ProgressColumn"] = (
[TextColumn("[progress.description]{task.description}")] if description else []
)
columns.extend(
(
BarColumn(
style=style,
complete_style=complete_style,
finished_style=finished_style,
pulse_style=pulse_style,
),
TaskProgressColumn(show_speed=show_speed),
TimeRemainingColumn(elapsed_when_finished=True),
)
)
progress = Progress(
*columns,
auto_refresh=auto_refresh,
console=console,
transient=transient,
get_time=get_time,
refresh_per_second=refresh_per_second or 10,
disable=disable,
)
with progress:
yield from progress.track(
sequence,
total=total,
completed=completed,
description=description,
update_period=update_period,
)
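# A minimal usage sketch of track (illustrative; assumes a terminal): wrap a
# sized iterable to draw a progress bar while iterating.
#
# import time
# for _ in track(range(20), description="Working..."):
#     time.sleep(0.05)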
class _Reader(RawIOBase, BinaryIO):
def __init__(
self,
handle: BinaryIO,
progress: "Progress",
task: TaskID,
close_handle: bool = True,
) -> None:
self.handle = handle
self.progress = progress
self.task = task
self.close_handle = close_handle
self._closed = False
def __enter__(self) -> "_Reader":
self.handle.__enter__()
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
self.close()
def __iter__(self) -> BinaryIO:
return self
def __next__(self) -> bytes:
line = next(self.handle)
self.progress.advance(self.task, advance=len(line))
return line
@property
def closed(self) -> bool:
return self._closed
def fileno(self) -> int:
return self.handle.fileno()
def isatty(self) -> bool:
return self.handle.isatty()
@property
def mode(self) -> str:
return self.handle.mode
@property
def name(self) -> str:
return self.handle.name
def readable(self) -> bool:
return self.handle.readable()
def seekable(self) -> bool:
return self.handle.seekable()
def writable(self) -> bool:
return False
def read(self, size: int = -1) -> bytes:
block = self.handle.read(size)
self.progress.advance(self.task, advance=len(block))
return block
def readinto(self, b: Union[bytearray, memoryview, mmap]): # type: ignore[no-untyped-def, override]
n = self.handle.readinto(b) # type: ignore[attr-defined]
self.progress.advance(self.task, advance=n)
return n
def readline(self, size: int = -1) -> bytes: # type: ignore[override]
line = self.handle.readline(size)
self.progress.advance(self.task, advance=len(line))
return line
def readlines(self, hint: int = -1) -> List[bytes]:
lines = self.handle.readlines(hint)
self.progress.advance(self.task, advance=sum(map(len, lines)))
return lines
def close(self) -> None:
if self.close_handle:
self.handle.close()
self._closed = True
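    # Seeking synchronizes the bar to the absolute position rather than advancing it.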
def seek(self, offset: int, whence: int = 0) -> int:
pos = self.handle.seek(offset, whence)
self.progress.update(self.task, completed=pos)
return pos
def tell(self) -> int:
return self.handle.tell()
def write(self, s: Any) -> int:
raise UnsupportedOperation("write")
def writelines(self, lines: Iterable[Any]) -> None:
raise UnsupportedOperation("writelines")
class _ReadContext(ContextManager[_I], Generic[_I]):
def __init__(self, progress: "Progress", reader: _I) -> None:
self.progress = progress
self.reader: _I = reader
def __enter__(self) -> _I:
self.progress.start()
return self.reader.__enter__()
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
self.progress.stop()
self.reader.__exit__(exc_type, exc_val, exc_tb)
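# A minimal usage sketch for wrap_file() below (hypothetical path and size),
# as called after ``from rich.progress import wrap_file``:
#
#     with open("data.bin", "rb") as fh:  # builtin open
#         with wrap_file(fh, total=2048) as reader:
#             while reader.read(512):
#                 pass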
def wrap_file(
file: BinaryIO,
total: int,
*,
description: str = "Reading...",
auto_refresh: bool = True,
console: Optional[Console] = None,
transient: bool = False,
get_time: Optional[Callable[[], float]] = None,
refresh_per_second: float = 10,
style: StyleType = "bar.back",
complete_style: StyleType = "bar.complete",
finished_style: StyleType = "bar.finished",
pulse_style: StyleType = "bar.pulse",
disable: bool = False,
) -> ContextManager[BinaryIO]:
columns: List["ProgressColumn"] = (
[TextColumn("[progress.description]{task.description}")] if description else []
)
columns.extend(
(
BarColumn(
style=style,
complete_style=complete_style,
finished_style=finished_style,
pulse_style=pulse_style,
),
DownloadColumn(),
TimeRemainingColumn(),
)
)
progress = Progress(
*columns,
auto_refresh=auto_refresh,
console=console,
transient=transient,
get_time=get_time,
refresh_per_second=refresh_per_second or 10,
disable=disable,
)
reader = progress.wrap_file(file, total=total, description=description)
return _ReadContext(progress, reader)
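# A minimal usage sketch for this module's open() below (hypothetical path);
# it mirrors the builtin open() while displaying read progress:
#
#     with open("report.bin", "rb") as f:
#         data = f.read()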
@typing.overload
def open(
file: Union[str, "PathLike[str]", bytes],
mode: Union[Literal["rt"], Literal["r"]],
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
*,
total: Optional[int] = None,
description: str = "Reading...",
auto_refresh: bool = True,
console: Optional[Console] = None,
transient: bool = False,
get_time: Optional[Callable[[], float]] = None,
refresh_per_second: float = 10,
style: StyleType = "bar.back",
complete_style: StyleType = "bar.complete",
finished_style: StyleType = "bar.finished",
pulse_style: StyleType = "bar.pulse",
disable: bool = False,
) -> ContextManager[TextIO]:
pass
@typing.overload
def open(
file: Union[str, "PathLike[str]", bytes],
mode: Literal["rb"],
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
*,
total: Optional[int] = None,
description: str = "Reading...",
auto_refresh: bool = True,
console: Optional[Console] = None,
transient: bool = False,
get_time: Optional[Callable[[], float]] = None,
refresh_per_second: float = 10,
style: StyleType = "bar.back",
complete_style: StyleType = "bar.complete",
finished_style: StyleType = "bar.finished",
pulse_style: StyleType = "bar.pulse",
disable: bool = False,
) -> ContextManager[BinaryIO]:
pass
def open(
file: Union[str, "PathLike[str]", bytes],
mode: Union[Literal["rb"], Literal["rt"], Literal["r"]] = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
*,
total: Optional[int] = None,
description: str = "Reading...",
auto_refresh: bool = True,
console: Optional[Console] = None,
transient: bool = False,
get_time: Optional[Callable[[], float]] = None,
refresh_per_second: float = 10,
style: StyleType = "bar.back",
complete_style: StyleType = "bar.complete",
finished_style: StyleType = "bar.finished",
pulse_style: StyleType = "bar.pulse",
disable: bool = False,
) -> Union[ContextManager[BinaryIO], ContextManager[TextIO]]:
columns: List["ProgressColumn"] = (
[TextColumn("[progress.description]{task.description}")] if description else []
)
columns.extend(
(
BarColumn(
style=style,
complete_style=complete_style,
finished_style=finished_style,
pulse_style=pulse_style,
),
DownloadColumn(),
TimeRemainingColumn(),
)
)
progress = Progress(
*columns,
auto_refresh=auto_refresh,
console=console,
transient=transient,
get_time=get_time,
refresh_per_second=refresh_per_second or 10,
disable=disable,
)
reader = progress.open(
file,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
total=total,
description=description,
)
return _ReadContext(progress, reader) # type: ignore[return-value, type-var]
class ProgressColumn(ABC):
max_refresh: Optional[float] = None
def __init__(self, table_column: Optional[Column] = None) -> None:
self._table_column = table_column
self._renderable_cache: Dict[TaskID, Tuple[float, RenderableType]] = {}
self._update_time: Optional[float] = None
def get_table_column(self) -> Column:
return self._table_column or Column()
def __call__(self, task: "Task") -> RenderableType:
current_time = task.get_time()
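        # The render cache is only consulted while nothing has completed yet
        # (task.completed == 0); a cached result fresher than max_refresh is
        # reused to avoid re-rendering on every frame.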
if self.max_refresh is not None and not task.completed:
try:
timestamp, renderable = self._renderable_cache[task.id]
except KeyError:
pass
else:
if timestamp + self.max_refresh > current_time:
return renderable
renderable = self.render(task)
self._renderable_cache[task.id] = (current_time, renderable)
return renderable
@abstractmethod
    def render(self, task: "Task") -> RenderableType:
        ...  # implemented by subclasses
class RenderableColumn(ProgressColumn):
def __init__(
self, renderable: RenderableType = "", *, table_column: Optional[Column] = None
):
self.renderable = renderable
super().__init__(table_column=table_column)
def render(self, task: "Task") -> RenderableType:
return self.renderable
class SpinnerColumn(ProgressColumn):
def __init__(
self,
spinner_name: str = "dots",
style: Optional[StyleType] = "progress.spinner",
speed: float = 1.0,
finished_text: TextType = " ",
table_column: Optional[Column] = None,
):
self.spinner = Spinner(spinner_name, style=style, speed=speed)
self.finished_text = (
Text.from_markup(finished_text)
if isinstance(finished_text, str)
else finished_text
)
super().__init__(table_column=table_column)
def set_spinner(
self,
spinner_name: str,
spinner_style: Optional[StyleType] = "progress.spinner",
speed: float = 1.0,
) -> None:
self.spinner = Spinner(spinner_name, style=spinner_style, speed=speed)
def render(self, task: "Task") -> RenderableType:
text = (
self.finished_text
if task.finished
else self.spinner.render(task.get_time())
)
return text
class TextColumn(ProgressColumn):
def __init__(
self,
text_format: str,
style: StyleType = "none",
justify: JustifyMethod = "left",
markup: bool = True,
highlighter: Optional[Highlighter] = None,
table_column: Optional[Column] = None,
) -> None:
self.text_format = text_format
self.justify: JustifyMethod = justify
self.style = style
self.markup = markup
self.highlighter = highlighter
super().__init__(table_column=table_column or Column(no_wrap=True))
def render(self, task: "Task") -> Text:
_text = self.text_format.format(task=task)
if self.markup:
text = Text.from_markup(_text, style=self.style, justify=self.justify)
else:
text = Text(_text, style=self.style, justify=self.justify)
if self.highlighter:
self.highlighter.highlight(text)
return text
class BarColumn(ProgressColumn):
def __init__(
self,
bar_width: Optional[int] = 40,
style: StyleType = "bar.back",
complete_style: StyleType = "bar.complete",
finished_style: StyleType = "bar.finished",
pulse_style: StyleType = "bar.pulse",
table_column: Optional[Column] = None,
) -> None:
self.bar_width = bar_width
self.style = style
self.complete_style = complete_style
self.finished_style = finished_style
self.pulse_style = pulse_style
super().__init__(table_column=table_column)
def render(self, task: "Task") -> ProgressBar:
return ProgressBar(
total=max(0, task.total) if task.total is not None else None,
completed=max(0, task.completed),
width=None if self.bar_width is None else max(1, self.bar_width),
pulse=not task.started,
animation_time=task.get_time(),
style=self.style,
complete_style=self.complete_style,
finished_style=self.finished_style,
pulse_style=self.pulse_style,
)
class TimeElapsedColumn(ProgressColumn):
def render(self, task: "Task") -> Text:
elapsed = task.finished_time if task.finished else task.elapsed
if elapsed is None:
return Text("-:--:--", style="progress.elapsed")
delta = timedelta(seconds=max(0, int(elapsed)))
return Text(str(delta), style="progress.elapsed")
class TaskProgressColumn(TextColumn):
def __init__(
self,
text_format: str = "[progress.percentage]{task.percentage:>3.0f}%",
text_format_no_percentage: str = "",
style: StyleType = "none",
justify: JustifyMethod = "left",
markup: bool = True,
highlighter: Optional[Highlighter] = None,
table_column: Optional[Column] = None,
show_speed: bool = False,
) -> None:
self.text_format_no_percentage = text_format_no_percentage
self.show_speed = show_speed
super().__init__(
text_format=text_format,
style=style,
justify=justify,
markup=markup,
highlighter=highlighter,
table_column=table_column,
)
@classmethod
def render_speed(cls, speed: Optional[float]) -> Text:
if speed is None:
return Text("", style="progress.percentage")
unit, suffix = filesize.pick_unit_and_suffix(
int(speed),
["", "×10³", "×10⁶", "×10⁹", "×10¹²"],
1000,
)
data_speed = speed / unit
return Text(f"{data_speed:.1f}{suffix} it/s", style="progress.percentage")
def render(self, task: "Task") -> Text:
if task.total is None and self.show_speed:
return self.render_speed(task.finished_speed or task.speed)
text_format = (
self.text_format_no_percentage if task.total is None else self.text_format
)
_text = text_format.format(task=task)
if self.markup:
text = Text.from_markup(_text, style=self.style, justify=self.justify)
else:
text = Text(_text, style=self.style, justify=self.justify)
if self.highlighter:
self.highlighter.highlight(text)
return text
class TimeRemainingColumn(ProgressColumn):
# Only refresh twice a second to prevent jitter
max_refresh = 0.5
def __init__(
self,
compact: bool = False,
elapsed_when_finished: bool = False,
table_column: Optional[Column] = None,
):
self.compact = compact
self.elapsed_when_finished = elapsed_when_finished
super().__init__(table_column=table_column)
def render(self, task: "Task") -> Text:
if self.elapsed_when_finished and task.finished:
task_time = task.finished_time
style = "progress.elapsed"
else:
task_time = task.time_remaining
style = "progress.remaining"
if task.total is None:
return Text("", style=style)
if task_time is None:
return Text("--:--" if self.compact else "-:--:--", style=style)
# Based on https://github.com/tqdm/tqdm/blob/master/tqdm/std.py
minutes, seconds = divmod(int(task_time), 60)
hours, minutes = divmod(minutes, 60)
if self.compact and not hours:
formatted = f"{minutes:02d}:{seconds:02d}"
else:
formatted = f"{hours:d}:{minutes:02d}:{seconds:02d}"
return Text(formatted, style=style)
class FileSizeColumn(ProgressColumn):
def render(self, task: "Task") -> Text:
data_size = filesize.decimal(int(task.completed))
return Text(data_size, style="progress.filesize")
class TotalFileSizeColumn(ProgressColumn):
def render(self, task: "Task") -> Text:
data_size = filesize.decimal(int(task.total)) if task.total is not None else ""
return Text(data_size, style="progress.filesize.total")
class MofNCompleteColumn(ProgressColumn):
def __init__(self, separator: str = "/", table_column: Optional[Column] = None):
self.separator = separator
super().__init__(table_column=table_column)
def render(self, task: "Task") -> Text:
completed = int(task.completed)
total = int(task.total) if task.total is not None else "?"
total_width = len(str(total))
return Text(
f"{completed:{total_width}d}{self.separator}{total}",
style="progress.download",
)
class DownloadColumn(ProgressColumn):
def __init__(
self, binary_units: bool = False, table_column: Optional[Column] = None
) -> None:
self.binary_units = binary_units
super().__init__(table_column=table_column)
def render(self, task: "Task") -> Text:
completed = int(task.completed)
unit_and_suffix_calculation_base = (
int(task.total) if task.total is not None else completed
)
if self.binary_units:
unit, suffix = filesize.pick_unit_and_suffix(
unit_and_suffix_calculation_base,
["bytes", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"],
1024,
)
else:
unit, suffix = filesize.pick_unit_and_suffix(
unit_and_suffix_calculation_base,
["bytes", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"],
1000,
)
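        # Bytes are shown as whole numbers; larger units get one decimal place.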
precision = 0 if unit == 1 else 1
completed_ratio = completed / unit
completed_str = f"{completed_ratio:,.{precision}f}"
if task.total is not None:
total = int(task.total)
total_ratio = total / unit
total_str = f"{total_ratio:,.{precision}f}"
else:
total_str = "?"
download_status = f"{completed_str}/{total_str} {suffix}"
download_text = Text(download_status, style="progress.download")
return download_text
class TransferSpeedColumn(ProgressColumn):
def render(self, task: "Task") -> Text:
speed = task.finished_speed or task.speed
if speed is None:
return Text("?", style="progress.data.speed")
data_speed = filesize.decimal(int(speed))
return Text(f"{data_speed}/s", style="progress.data.speed")
class ProgressSample(NamedTuple):
timestamp: float
"""Timestamp of sample."""
completed: float
"""Number of steps completed."""
@dataclass
class Task:
id: TaskID
"""Task ID associated with this task (used in Progress methods)."""
description: str
"""str: Description of the task."""
total: Optional[float]
"""Optional[float]: Total number of steps in this task."""
completed: float
"""float: Number of steps completed"""
_get_time: GetTimeCallable
"""Callable to get the current time."""
finished_time: Optional[float] = None
"""float: Time task was finished."""
visible: bool = True
"""bool: Indicates if this task is visible in the progress display."""
fields: Dict[str, Any] = field(default_factory=dict)
"""dict: Arbitrary fields passed in via Progress.update."""
start_time: Optional[float] = field(default=None, init=False, repr=False)
"""Optional[float]: Time this task was started, or None if not started."""
stop_time: Optional[float] = field(default=None, init=False, repr=False)
"""Optional[float]: Time this task was stopped, or None if not stopped."""
finished_speed: Optional[float] = None
"""Optional[float]: The last speed for a finished task."""
_progress: Deque[ProgressSample] = field(
default_factory=lambda: deque(maxlen=1000), init=False, repr=False
)
_lock: RLock = field(repr=False, default_factory=RLock)
"""Thread lock."""
def get_time(self) -> float:
return self._get_time()
@property
def started(self) -> bool:
return self.start_time is not None
@property
def remaining(self) -> Optional[float]:
if self.total is None:
return None
return self.total - self.completed
@property
def elapsed(self) -> Optional[float]:
if self.start_time is None:
return None
if self.stop_time is not None:
return self.stop_time - self.start_time
return self.get_time() - self.start_time
@property
def finished(self) -> bool:
return self.finished_time is not None
@property
def percentage(self) -> float:
if not self.total:
return 0.0
completed = (self.completed / self.total) * 100.0
completed = min(100.0, max(0.0, completed))
return completed
@property
def speed(self) -> Optional[float]:
if self.start_time is None:
return None
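        # Estimate speed from the sample window: elapsed time spans the oldest
        # to the newest sample, and the oldest sample's count is excluded so
        # the completed steps line up with that interval.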
with self._lock:
progress = self._progress
if not progress:
return None
total_time = progress[-1].timestamp - progress[0].timestamp
if total_time == 0:
return None
iter_progress = iter(progress)
next(iter_progress)
total_completed = sum(sample.completed for sample in iter_progress)
speed = total_completed / total_time
return speed
@property
def time_remaining(self) -> Optional[float]:
if self.finished:
return 0.0
speed = self.speed
if not speed:
return None
remaining = self.remaining
if remaining is None:
return None
estimate = ceil(remaining / speed)
return estimate
def _reset(self) -> None:
self._progress.clear()
self.finished_time = None
self.finished_speed = None
class Progress(JupyterMixin):
def __init__(
self,
*columns: Union[str, ProgressColumn],
console: Optional[Console] = None,
auto_refresh: bool = True,
refresh_per_second: float = 10,
speed_estimate_period: float = 30.0,
transient: bool = False,
redirect_stdout: bool = True,
redirect_stderr: bool = True,
get_time: Optional[GetTimeCallable] = None,
disable: bool = False,
expand: bool = False,
) -> None:
assert refresh_per_second > 0, "refresh_per_second must be > 0"
self._lock = RLock()
self.columns = columns or self.get_default_columns()
self.speed_estimate_period = speed_estimate_period
self.disable = disable
self.expand = expand
self._tasks: Dict[TaskID, Task] = {}
self._task_index: TaskID = TaskID(0)
self.live = Live(
console=console or get_console(),
auto_refresh=auto_refresh,
refresh_per_second=refresh_per_second,
transient=transient,
redirect_stdout=redirect_stdout,
redirect_stderr=redirect_stderr,
get_renderable=self.get_renderable,
)
self.get_time = get_time or self.console.get_time
self.print = self.console.print
self.log = self.console.log
@classmethod
def get_default_columns(cls) -> Tuple[ProgressColumn, ...]:
return (
TextColumn("[progress.description]{task.description}"),
BarColumn(),
TaskProgressColumn(),
TimeRemainingColumn(),
)
@property
def console(self) -> Console:
return self.live.console
@property
def tasks(self) -> List[Task]:
with self._lock:
return list(self._tasks.values())
@property
def task_ids(self) -> List[TaskID]:
with self._lock:
return list(self._tasks.keys())
@property
def finished(self) -> bool:
with self._lock:
if not self._tasks:
return True
return all(task.finished for task in self._tasks.values())
def start(self) -> None:
if not self.disable:
self.live.start(refresh=True)
def stop(self) -> None:
if not self.disable:
self.live.stop()
if not self.console.is_interactive and not self.console.is_jupyter:
self.console.print()
def __enter__(self) -> Self:
self.start()
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
self.stop()
def track(
self,
sequence: Iterable[ProgressType],
total: Optional[float] = None,
completed: int = 0,
task_id: Optional[TaskID] = None,
description: str = "Working...",
update_period: float = 0.1,
) -> Iterable[ProgressType]:
if total is None:
total = float(length_hint(sequence)) or None
if task_id is None:
task_id = self.add_task(description, total=total, completed=completed)
else:
self.update(task_id, total=total, completed=completed)
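        # With auto refresh, a background _TrackThread batches completed counts;
        # otherwise advance and refresh synchronously after every item.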
if self.live.auto_refresh:
with _TrackThread(self, task_id, update_period) as track_thread:
for value in sequence:
yield value
track_thread.completed += 1
else:
advance = self.advance
refresh = self.refresh
for value in sequence:
yield value
advance(task_id, 1)
refresh()
def wrap_file(
self,
file: BinaryIO,
total: Optional[int] = None,
*,
task_id: Optional[TaskID] = None,
description: str = "Reading...",
) -> BinaryIO:
# attempt to recover the total from the task
total_bytes: Optional[float] = None
if total is not None:
total_bytes = total
elif task_id is not None:
with self._lock:
total_bytes = self._tasks[task_id].total
if total_bytes is None:
raise ValueError(
f"unable to get the total number of bytes, please specify 'total'"
)
# update total of task or create new task
if task_id is None:
task_id = self.add_task(description, total=total_bytes)
else:
self.update(task_id, total=total_bytes)
return _Reader(file, self, task_id, close_handle=False)
@typing.overload
def open(
self,
file: Union[str, "PathLike[str]", bytes],
mode: Literal["rb"],
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
*,
total: Optional[int] = None,
task_id: Optional[TaskID] = None,
description: str = "Reading...",
) -> BinaryIO:
pass
@typing.overload
def open(
self,
file: Union[str, "PathLike[str]", bytes],
mode: Union[Literal["r"], Literal["rt"]],
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
*,
total: Optional[int] = None,
task_id: Optional[TaskID] = None,
description: str = "Reading...",
) -> TextIO:
pass
def open(
self,
file: Union[str, "PathLike[str]", bytes],
mode: Union[Literal["rb"], Literal["rt"], Literal["r"]] = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
*,
total: Optional[int] = None,
task_id: Optional[TaskID] = None,
description: str = "Reading...",
) -> Union[BinaryIO, TextIO]:
# normalize the mode (always rb, rt)
_mode = "".join(sorted(mode, reverse=False))
if _mode not in ("br", "rt", "r"):
raise ValueError(f"invalid mode {mode!r}")
# patch buffering to provide the same behaviour as the builtin `open`
line_buffering = buffering == 1
if _mode == "br" and buffering == 1:
warnings.warn(
"line buffering (buffering=1) isn't supported in binary mode, the default buffer size will be used",
RuntimeWarning,
)
buffering = -1
elif _mode in ("rt", "r"):
if buffering == 0:
raise ValueError("can't have unbuffered text I/O")
elif buffering == 1:
buffering = -1
# attempt to get the total with `os.stat`
if total is None:
total = stat(file).st_size
# update total of task or create new task
if task_id is None:
task_id = self.add_task(description, total=total)
else:
self.update(task_id, total=total)
# open the file in binary mode,
handle = io.open(file, "rb", buffering=buffering)
reader = _Reader(handle, self, task_id, close_handle=True)
# wrap the reader in a `TextIOWrapper` if text mode
if mode in ("r", "rt"):
return io.TextIOWrapper(
reader,
encoding=encoding,
errors=errors,
newline=newline,
line_buffering=line_buffering,
)
return reader
def start_task(self, task_id: TaskID) -> None:
with self._lock:
task = self._tasks[task_id]
if task.start_time is None:
task.start_time = self.get_time()
def stop_task(self, task_id: TaskID) -> None:
with self._lock:
task = self._tasks[task_id]
current_time = self.get_time()
if task.start_time is None:
task.start_time = current_time
task.stop_time = current_time
def update(
self,
task_id: TaskID,
*,
total: Optional[float] = None,
completed: Optional[float] = None,
advance: Optional[float] = None,
description: Optional[str] = None,
visible: Optional[bool] = None,
refresh: bool = False,
**fields: Any,
) -> None:
with self._lock:
task = self._tasks[task_id]
completed_start = task.completed
if total is not None and total != task.total:
task.total = total
task._reset()
if advance is not None:
task.completed += advance
if completed is not None:
task.completed = completed
if description is not None:
task.description = description
if visible is not None:
task.visible = visible
task.fields.update(fields)
update_completed = task.completed - completed_start
current_time = self.get_time()
old_sample_time = current_time - self.speed_estimate_period
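            # Drop samples older than speed_estimate_period so the speed
            # estimate reflects only recent progress.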
_progress = task._progress
popleft = _progress.popleft
while _progress and _progress[0].timestamp < old_sample_time:
popleft()
if update_completed > 0:
_progress.append(ProgressSample(current_time, update_completed))
if (
task.total is not None
and task.completed >= task.total
and task.finished_time is None
):
task.finished_time = task.elapsed
if refresh:
self.refresh()
def reset(
self,
task_id: TaskID,
*,
start: bool = True,
total: Optional[float] = None,
completed: int = 0,
visible: Optional[bool] = None,
description: Optional[str] = None,
**fields: Any,
) -> None:
current_time = self.get_time()
with self._lock:
task = self._tasks[task_id]
task._reset()
task.start_time = current_time if start else None
if total is not None:
task.total = total
task.completed = completed
if visible is not None:
task.visible = visible
if fields:
task.fields = fields
if description is not None:
task.description = description
task.finished_time = None
self.refresh()
def advance(self, task_id: TaskID, advance: float = 1) -> None:
current_time = self.get_time()
with self._lock:
task = self._tasks[task_id]
completed_start = task.completed
task.completed += advance
update_completed = task.completed - completed_start
old_sample_time = current_time - self.speed_estimate_period
_progress = task._progress
popleft = _progress.popleft
while _progress and _progress[0].timestamp < old_sample_time:
popleft()
while len(_progress) > 1000:
popleft()
_progress.append(ProgressSample(current_time, update_completed))
if (
task.total is not None
and task.completed >= task.total
and task.finished_time is None
):
task.finished_time = task.elapsed
task.finished_speed = task.speed
def refresh(self) -> None:
if not self.disable and self.live.is_started:
self.live.refresh()
def get_renderable(self) -> RenderableType:
renderable = Group(*self.get_renderables())
return renderable
def get_renderables(self) -> Iterable[RenderableType]:
table = self.make_tasks_table(self.tasks)
yield table
def make_tasks_table(self, tasks: Iterable[Task]) -> Table:
table_columns = (
(
Column(no_wrap=True)
if isinstance(_column, str)
else _column.get_table_column().copy()
)
for _column in self.columns
)
table = Table.grid(*table_columns, padding=(0, 1), expand=self.expand)
for task in tasks:
if task.visible:
table.add_row(
*(
(
column.format(task=task)
if isinstance(column, str)
else column(task)
)
for column in self.columns
)
)
return table
def __rich__(self) -> RenderableType:
with self._lock:
return self.get_renderable()
def add_task(
self,
description: str,
start: bool = True,
total: Optional[float] = 100.0,
completed: int = 0,
visible: bool = True,
**fields: Any,
) -> TaskID:
with self._lock:
task = Task(
self._task_index,
description,
total,
completed,
visible=visible,
fields=fields,
_get_time=self.get_time,
_lock=self._lock,
)
self._tasks[self._task_index] = task
if start:
self.start_task(self._task_index)
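            # Hand out the current index, then bump it for the next task.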
new_task_index = self._task_index
self._task_index = TaskID(int(self._task_index) + 1)
self.refresh()
return new_task_index
def remove_task(self, task_id: TaskID) -> None:
with self._lock:
del self._tasks[task_id]
if __name__ == "__main__": # pragma: no coverage
import random
import time
from .panel import Panel
from .rule import Rule
from .syntax import Syntax
from .table import Table
syntax = Syntax(
'''def loop_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
"""Iterate and generate a tuple with a flag for last value."""
iter_values = iter(values)
try:
previous_value = next(iter_values)
except StopIteration:
return
for value in iter_values:
yield False, previous_value
previous_value = value
yield True, previous_value''',
"python",
line_numbers=True,
)
table = Table("foo", "bar", "baz")
table.add_row("1", "2", "3")
progress_renderables = [
"Text may be printed while the progress bars are rendering.",
Panel("In fact, [i]any[/i] renderable will work"),
"Such as [magenta]tables[/]...",
table,
"Pretty printed structures...",
{"type": "example", "text": "Pretty printed"},
"Syntax...",
syntax,
Rule("Give it a try!"),
]
from itertools import cycle
examples = cycle(progress_renderables)
console = Console(record=True)
with Progress(
SpinnerColumn(),
*Progress.get_default_columns(),
TimeElapsedColumn(),
console=console,
transient=False,
) as progress:
task1 = progress.add_task("[red]Downloading", total=1000)
task2 = progress.add_task("[green]Processing", total=1000)
task3 = progress.add_task("[yellow]Thinking", total=None)
while not progress.finished:
progress.update(task1, advance=0.5)
progress.update(task2, advance=0.3)
time.sleep(0.01)
if random.randint(0, 100) < 1:
progress.log(next(examples)) | ---
+++
@@ -62,6 +62,7 @@
class _TrackThread(Thread):
+ """A thread to periodically update progress."""
def __init__(self, progress: "Progress", task_id: "TaskID", update_period: float):
self.progress = progress
@@ -118,6 +119,30 @@ disable: bool = False,
show_speed: bool = True,
) -> Iterable[ProgressType]:
+ """Track progress by iterating over a sequence.
+
+ You can also track progress of an iterable, which might require that you additionally specify ``total``.
+
+ Args:
+ sequence (Iterable[ProgressType]): Values you wish to iterate over and track progress.
+        description (str, optional): Description of task shown next to progress bar. Defaults to "Working".
+        total (float, optional): Total number of steps. Default is len(sequence).
+        completed (int, optional): Number of steps completed so far. Defaults to 0.
+        auto_refresh (bool, optional): Automatic refresh, disable to force a refresh after each iteration. Default is True.
+        transient (bool, optional): Clear the progress on exit. Defaults to False.
+ console (Console, optional): Console to write to. Default creates internal Console instance.
+ refresh_per_second (float): Number of times per second to refresh the progress information. Defaults to 10.
+ style (StyleType, optional): Style for the bar background. Defaults to "bar.back".
+ complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete".
+ finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.finished".
+ pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse".
+ update_period (float, optional): Minimum time (in seconds) between calls to update(). Defaults to 0.1.
+ disable (bool, optional): Disable display of progress.
+ show_speed (bool, optional): Show speed if total isn't known. Defaults to True.
+ Returns:
+ Iterable[ProgressType]: An iterable of the values in the sequence.
+
+ """
columns: List["ProgressColumn"] = (
[TextColumn("[progress.description]{task.description}")] if description else []
@@ -155,6 +180,7 @@
class _Reader(RawIOBase, BinaryIO):
+ """A reader that tracks progress while it's being read from."""
def __init__(
self,
@@ -257,6 +283,7 @@
class _ReadContext(ContextManager[_I], Generic[_I]):
+ """A utility class to handle a context for both a reader and a progress."""
def __init__(self, progress: "Progress", reader: _I) -> None:
self.progress = progress
@@ -292,6 +319,25 @@ pulse_style: StyleType = "bar.pulse",
disable: bool = False,
) -> ContextManager[BinaryIO]:
+ """Read bytes from a file while tracking progress.
+
+ Args:
+        file (BinaryIO): A file-like object opened in binary mode.
+        total (int): Total number of bytes to read.
+        description (str, optional): Description of task shown next to progress bar. Defaults to "Reading".
+        auto_refresh (bool, optional): Automatic refresh, disable to force a refresh after each iteration. Default is True.
+        transient (bool, optional): Clear the progress on exit. Defaults to False.
+ console (Console, optional): Console to write to. Default creates internal Console instance.
+ refresh_per_second (float): Number of times per second to refresh the progress information. Defaults to 10.
+ style (StyleType, optional): Style for the bar background. Defaults to "bar.back".
+ complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete".
+ finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.finished".
+ pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse".
+ disable (bool, optional): Disable display of progress.
+ Returns:
+ ContextManager[BinaryIO]: A context manager yielding a progress reader.
+
+ """
columns: List["ProgressColumn"] = (
[TextColumn("[progress.description]{task.description}")] if description else []
@@ -393,6 +439,31 @@ pulse_style: StyleType = "bar.pulse",
disable: bool = False,
) -> Union[ContextManager[BinaryIO], ContextManager[TextIO]]:
+ """Read bytes from a file while tracking progress.
+
+ Args:
+        file (Union[str, PathLike[str], bytes]): The path to the file to read.
+ mode (str): The mode to use to open the file. Only supports "r", "rb" or "rt".
+ buffering (int): The buffering strategy to use, see :func:`io.open`.
+ encoding (str, optional): The encoding to use when reading in text mode, see :func:`io.open`.
+ errors (str, optional): The error handling strategy for decoding errors, see :func:`io.open`.
+        newline (str, optional): The strategy for handling newlines in text mode, see :func:`io.open`.
+        total (int, optional): Total number of bytes to read. Defaults to os.stat(file).st_size.
+        description (str, optional): Description of task shown next to progress bar. Defaults to "Reading".
+        auto_refresh (bool, optional): Automatic refresh, disable to force a refresh after each iteration. Default is True.
+        transient (bool, optional): Clear the progress on exit. Defaults to False.
+ console (Console, optional): Console to write to. Default creates internal Console instance.
+ refresh_per_second (float): Number of times per second to refresh the progress information. Defaults to 10.
+ style (StyleType, optional): Style for the bar background. Defaults to "bar.back".
+ complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete".
+ finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.finished".
+ pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse".
+ disable (bool, optional): Disable display of progress.
+
+    Returns:
+        Union[ContextManager[BinaryIO], ContextManager[TextIO]]: A context manager yielding a progress reader.
+
+ """
columns: List["ProgressColumn"] = (
[TextColumn("[progress.description]{task.description}")] if description else []
@@ -433,6 +505,7 @@
class ProgressColumn(ABC):
+ """Base class for a widget to use in progress display."""
max_refresh: Optional[float] = None
@@ -442,9 +515,18 @@ self._update_time: Optional[float] = None
def get_table_column(self) -> Column:
+ """Get a table column, used to build tasks table."""
return self._table_column or Column()
def __call__(self, task: "Task") -> RenderableType:
+ """Called by the Progress object to return a renderable for the given task.
+
+ Args:
+ task (Task): An object containing information regarding the task.
+
+ Returns:
+ RenderableType: Anything renderable (including str).
+ """
current_time = task.get_time()
if self.max_refresh is not None and not task.completed:
try:
@@ -461,9 +543,15 @@
@abstractmethod
def render(self, task: "Task") -> RenderableType:
+ """Should return a renderable object."""
class RenderableColumn(ProgressColumn):
+ """A column to insert an arbitrary column.
+
+ Args:
+ renderable (RenderableType, optional): Any renderable. Defaults to empty string.
+ """
def __init__(
self, renderable: RenderableType = "", *, table_column: Optional[Column] = None
@@ -476,6 +564,14 @@
class SpinnerColumn(ProgressColumn):
+ """A column with a 'spinner' animation.
+
+ Args:
+ spinner_name (str, optional): Name of spinner animation. Defaults to "dots".
+ style (StyleType, optional): Style of spinner. Defaults to "progress.spinner".
+ speed (float, optional): Speed factor of spinner. Defaults to 1.0.
+ finished_text (TextType, optional): Text used when task is finished. Defaults to " ".
+ """
def __init__(
self,
@@ -499,6 +595,13 @@ spinner_style: Optional[StyleType] = "progress.spinner",
speed: float = 1.0,
) -> None:
+ """Set a new spinner.
+
+ Args:
+ spinner_name (str): Spinner name, see python -m rich.spinner.
+ spinner_style (Optional[StyleType], optional): Spinner style. Defaults to "progress.spinner".
+ speed (float, optional): Speed factor of spinner. Defaults to 1.0.
+ """
self.spinner = Spinner(spinner_name, style=spinner_style, speed=speed)
def render(self, task: "Task") -> RenderableType:
@@ -511,6 +614,7 @@
class TextColumn(ProgressColumn):
+ """A column containing text."""
def __init__(
self,
@@ -540,6 +644,15 @@
class BarColumn(ProgressColumn):
+ """Renders a visual progress bar.
+
+ Args:
+ bar_width (Optional[int], optional): Width of bar or None for full width. Defaults to 40.
+ style (StyleType, optional): Style for the bar background. Defaults to "bar.back".
+ complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete".
+ finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.finished".
+ pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse".
+ """
def __init__(
self,
@@ -558,6 +671,7 @@ super().__init__(table_column=table_column)
def render(self, task: "Task") -> ProgressBar:
+ """Gets a progress bar widget for a task."""
return ProgressBar(
total=max(0, task.total) if task.total is not None else None,
completed=max(0, task.completed),
@@ -572,8 +686,10 @@
class TimeElapsedColumn(ProgressColumn):
+ """Renders time elapsed."""
def render(self, task: "Task") -> Text:
+ """Show time elapsed."""
elapsed = task.finished_time if task.finished else task.elapsed
if elapsed is None:
return Text("-:--:--", style="progress.elapsed")
@@ -582,6 +698,18 @@
class TaskProgressColumn(TextColumn):
+ """Show task progress as a percentage.
+
+ Args:
+ text_format (str, optional): Format for percentage display. Defaults to "[progress.percentage]{task.percentage:>3.0f}%".
+ text_format_no_percentage (str, optional): Format if percentage is unknown. Defaults to "".
+ style (StyleType, optional): Style of output. Defaults to "none".
+ justify (JustifyMethod, optional): Text justification. Defaults to "left".
+ markup (bool, optional): Enable markup. Defaults to True.
+ highlighter (Optional[Highlighter], optional): Highlighter to apply to output. Defaults to None.
+ table_column (Optional[Column], optional): Table Column to use. Defaults to None.
+ show_speed (bool, optional): Show speed if total is unknown. Defaults to False.
+ """
def __init__(
self,
@@ -607,6 +735,14 @@
@classmethod
def render_speed(cls, speed: Optional[float]) -> Text:
+ """Render the speed in iterations per second.
+
+ Args:
+            speed (Optional[float]): Speed in iterations per second, or None if unknown.
+
+ Returns:
+ Text: Text object containing the task speed.
+ """
if speed is None:
return Text("", style="progress.percentage")
unit, suffix = filesize.pick_unit_and_suffix(
@@ -634,6 +770,12 @@
class TimeRemainingColumn(ProgressColumn):
+ """Renders estimated time remaining.
+
+ Args:
+ compact (bool, optional): Render MM:SS when time remaining is less than an hour. Defaults to False.
+ elapsed_when_finished (bool, optional): Render time elapsed when the task is finished. Defaults to False.
+ """
# Only refresh twice a second to prevent jitter
max_refresh = 0.5
@@ -649,6 +791,7 @@ super().__init__(table_column=table_column)
def render(self, task: "Task") -> Text:
+ """Show time remaining."""
if self.elapsed_when_finished and task.finished:
task_time = task.finished_time
style = "progress.elapsed"
@@ -675,26 +818,41 @@
class FileSizeColumn(ProgressColumn):
+ """Renders completed filesize."""
def render(self, task: "Task") -> Text:
+ """Show data completed."""
data_size = filesize.decimal(int(task.completed))
return Text(data_size, style="progress.filesize")
class TotalFileSizeColumn(ProgressColumn):
+ """Renders total filesize."""
def render(self, task: "Task") -> Text:
+        """Show total data size."""
data_size = filesize.decimal(int(task.total)) if task.total is not None else ""
return Text(data_size, style="progress.filesize.total")
class MofNCompleteColumn(ProgressColumn):
+ """Renders completed count/total, e.g. ' 10/1000'.
+
+ Best for bounded tasks with int quantities.
+
+ Space pads the completed count so that progress length does not change as task progresses
+ past powers of 10.
+
+ Args:
+ separator (str, optional): Text to separate completed and total values. Defaults to "/".
+ """
def __init__(self, separator: str = "/", table_column: Optional[Column] = None):
self.separator = separator
super().__init__(table_column=table_column)
def render(self, task: "Task") -> Text:
+ """Show completed/total."""
completed = int(task.completed)
total = int(task.total) if task.total is not None else "?"
total_width = len(str(total))
@@ -705,6 +863,11 @@
class DownloadColumn(ProgressColumn):
+ """Renders file size downloaded and total, e.g. '0.5/2.3 GB'.
+
+ Args:
+ binary_units (bool, optional): Use binary units, KiB, MiB etc. Defaults to False.
+ """
def __init__(
self, binary_units: bool = False, table_column: Optional[Column] = None
@@ -713,6 +876,7 @@ super().__init__(table_column=table_column)
def render(self, task: "Task") -> Text:
+        """Show completed and total data size in a common unit."""
completed = int(task.completed)
unit_and_suffix_calculation_base = (
@@ -748,8 +912,10 @@
class TransferSpeedColumn(ProgressColumn):
+ """Renders human readable transfer speed."""
def render(self, task: "Task") -> Text:
+ """Show data transfer speed."""
speed = task.finished_speed or task.speed
if speed is None:
return Text("?", style="progress.data.speed")
@@ -758,6 +924,7 @@
class ProgressSample(NamedTuple):
+ """Sample of progress for a given time."""
timestamp: float
"""Timestamp of sample."""
@@ -767,6 +934,11 @@
@dataclass
class Task:
+ """Information regarding a progress task.
+
+ This object should be considered read-only outside of the :class:`~Progress` class.
+
+ """
id: TaskID
"""Task ID associated with this task (used in Progress methods)."""
@@ -809,20 +981,24 @@ """Thread lock."""
def get_time(self) -> float:
+ """float: Get the current time, in seconds."""
return self._get_time()
@property
def started(self) -> bool:
+        """bool: Check if the task has started."""
return self.start_time is not None
@property
def remaining(self) -> Optional[float]:
+ """Optional[float]: Get the number of steps remaining, if a non-None total was set."""
if self.total is None:
return None
return self.total - self.completed
@property
def elapsed(self) -> Optional[float]:
+ """Optional[float]: Time elapsed since task was started, or ``None`` if the task hasn't started."""
if self.start_time is None:
return None
if self.stop_time is not None:
@@ -831,10 +1007,12 @@
@property
def finished(self) -> bool:
+ """Check if the task has finished."""
return self.finished_time is not None
@property
def percentage(self) -> float:
+        """float: Get progress of task as a percentage. If a None total was set, returns 0."""
if not self.total:
return 0.0
completed = (self.completed / self.total) * 100.0
@@ -843,6 +1021,7 @@
@property
def speed(self) -> Optional[float]:
+ """Optional[float]: Get the estimated speed in steps per second."""
if self.start_time is None:
return None
with self._lock:
@@ -860,6 +1039,7 @@
@property
def time_remaining(self) -> Optional[float]:
+ """Optional[float]: Get estimated time to completion, or ``None`` if no data."""
if self.finished:
return 0.0
speed = self.speed
@@ -872,12 +1052,27 @@ return estimate
def _reset(self) -> None:
+ """Reset progress."""
self._progress.clear()
self.finished_time = None
self.finished_speed = None
class Progress(JupyterMixin):
+    """Renders one or more auto-updating progress bars.
+
+ Args:
+ console (Console, optional): Optional Console instance. Defaults to an internal Console instance writing to stdout.
+ auto_refresh (bool, optional): Enable auto refresh. If disabled, you will need to call `refresh()`.
+ refresh_per_second (float, optional): Number of times per second to refresh the progress information. Defaults to 10.
+        speed_estimate_period (float, optional): Period (in seconds) used to calculate the speed estimate. Defaults to 30.
+        transient (bool, optional): Clear the progress on exit. Defaults to False.
+        redirect_stdout (bool, optional): Enable redirection of stdout, so ``print`` may be used. Defaults to True.
+        redirect_stderr (bool, optional): Enable redirection of stderr. Defaults to True.
+        get_time (Callable, optional): A callable that gets the current time, or None to use Console.get_time. Defaults to None.
+        disable (bool, optional): Disable progress display. Defaults to False.
+ expand (bool, optional): Expand tasks table to fit width. Defaults to False.
+ """
def __init__(
self,
@@ -917,6 +1112,28 @@
@classmethod
def get_default_columns(cls) -> Tuple[ProgressColumn, ...]:
+ """Get the default columns used for a new Progress instance:
+ - a text column for the description (TextColumn)
+ - the bar itself (BarColumn)
+ - a text column showing completion percentage (TextColumn)
+ - an estimated-time-remaining column (TimeRemainingColumn)
+ If the Progress instance is created without passing a columns argument,
+ the default columns defined here will be used.
+
+ You can also create a Progress instance using custom columns before
+ and/or after the defaults, as in this example:
+
+ progress = Progress(
+ SpinnerColumn(),
+ *Progress.get_default_columns(),
+ "Elapsed:",
+ TimeElapsedColumn(),
+ )
+
+ This code shows the creation of a Progress display, containing
+ a spinner to the left, the default columns, and a labeled elapsed
+ time column.
+ """
return (
TextColumn("[progress.description]{task.description}"),
BarColumn(),
@@ -930,26 +1147,31 @@
@property
def tasks(self) -> List[Task]:
+ """Get a list of Task instances."""
with self._lock:
return list(self._tasks.values())
@property
def task_ids(self) -> List[TaskID]:
+ """A list of task IDs."""
with self._lock:
return list(self._tasks.keys())
@property
def finished(self) -> bool:
+ """Check if all tasks have been completed."""
with self._lock:
if not self._tasks:
return True
return all(task.finished for task in self._tasks.values())
def start(self) -> None:
+ """Start the progress display."""
if not self.disable:
self.live.start(refresh=True)
def stop(self) -> None:
+ """Stop the progress display."""
if not self.disable:
self.live.stop()
if not self.console.is_interactive and not self.console.is_jupyter:
@@ -976,6 +1198,21 @@ description: str = "Working...",
update_period: float = 0.1,
) -> Iterable[ProgressType]:
+ """Track progress by iterating over a sequence.
+
+ You can also track progress of an iterable, which might require that you additionally specify ``total``.
+
+ Args:
+ sequence (Iterable[ProgressType]): Values you want to iterate over and track progress.
+            total (float, optional): Total number of steps. Default is len(sequence).
+            completed (int, optional): Number of steps completed so far. Defaults to 0.
+            task_id (TaskID, optional): Task to track. Default is a new task.
+            description (str, optional): Description of task, if a new task is created.
+ update_period (float, optional): Minimum time (in seconds) between calls to update(). Defaults to 0.1.
+
+ Returns:
+ Iterable[ProgressType]: An iterable of values taken from the provided sequence.
+ """
if total is None:
total = float(length_hint(sequence)) or None
@@ -1005,6 +1242,20 @@ task_id: Optional[TaskID] = None,
description: str = "Reading...",
) -> BinaryIO:
+        """Track progress while reading from a binary file.
+
+ Args:
+ file (BinaryIO): A file-like object opened in binary mode.
+ total (int, optional): Total number of bytes to read. This must be provided unless a task with a total is also given.
+ task_id (TaskID): Task to track. Default is new task.
+ description (str, optional): Description of task, if new task is created.
+
+ Returns:
+ BinaryIO: A readable file-like object in binary mode.
+
+ Raises:
+ ValueError: When no total value can be extracted from the arguments or the task.
+ """
# attempt to recover the total from the task
total_bytes: Optional[float] = None
if total is not None:
@@ -1070,6 +1321,25 @@ task_id: Optional[TaskID] = None,
description: str = "Reading...",
) -> Union[BinaryIO, TextIO]:
+ """Track progress while reading from a binary file.
+
+ Args:
+            file (Union[str, PathLike[str], bytes]): The path to the file to read.
+ mode (str): The mode to use to open the file. Only supports "r", "rb" or "rt".
+ buffering (int): The buffering strategy to use, see :func:`io.open`.
+ encoding (str, optional): The encoding to use when reading in text mode, see :func:`io.open`.
+ errors (str, optional): The error handling strategy for decoding errors, see :func:`io.open`.
+ newline (str, optional): The strategy for handling newlines in text mode, see :func:`io.open`.
+ total (int, optional): Total number of bytes to read. If none given, os.stat(path).st_size is used.
+ task_id (TaskID): Task to track. Default is new task.
+ description (str, optional): Description of task, if new task is created.
+
+ Returns:
+            Union[BinaryIO, TextIO]: A readable file-like object in binary or text mode.
+
+ Raises:
+ ValueError: When an invalid mode is given.
+ """
# normalize the mode (always rb, rt)
_mode = "".join(sorted(mode, reverse=False))
if _mode not in ("br", "rt", "r"):
@@ -1116,12 +1386,27 @@ return reader
def start_task(self, task_id: TaskID) -> None:
+ """Start a task.
+
+ Starts a task (used when calculating elapsed time). You may need to call this manually,
+ if you called ``add_task`` with ``start=False``.
+
+ Args:
+ task_id (TaskID): ID of task.
+ """
with self._lock:
task = self._tasks[task_id]
if task.start_time is None:
task.start_time = self.get_time()
def stop_task(self, task_id: TaskID) -> None:
+ """Stop a task.
+
+ This will freeze the elapsed time on the task.
+
+ Args:
+ task_id (TaskID): ID of task.
+ """
with self._lock:
task = self._tasks[task_id]
current_time = self.get_time()
@@ -1141,6 +1426,18 @@ refresh: bool = False,
**fields: Any,
) -> None:
+ """Update information associated with a task.
+
+ Args:
+ task_id (TaskID): Task id (returned by add_task).
+ total (float, optional): Updates task.total if not None.
+ completed (float, optional): Updates task.completed if not None.
+ advance (float, optional): Add a value to task.completed if not None.
+ description (str, optional): Change task description if not None.
+ visible (bool, optional): Set visible flag if not None.
+ refresh (bool): Force a refresh of progress information. Default is False.
+ **fields (Any): Additional data fields required for rendering.
+ """
with self._lock:
task = self._tasks[task_id]
completed_start = task.completed
@@ -1189,6 +1486,17 @@ description: Optional[str] = None,
**fields: Any,
) -> None:
+ """Reset a task so completed is 0 and the clock is reset.
+
+ Args:
+ task_id (TaskID): ID of task.
+ start (bool, optional): Start the task after reset. Defaults to True.
+ total (float, optional): New total steps in task, or None to use current total. Defaults to None.
+ completed (int, optional): Number of steps completed. Defaults to 0.
+ visible (bool, optional): Set visible flag if not None.
+ description (str, optional): Change task description if not None. Defaults to None.
+ **fields (str): Additional data fields required for rendering.
+ """
current_time = self.get_time()
with self._lock:
task = self._tasks[task_id]
@@ -1207,6 +1515,12 @@ self.refresh()
def advance(self, task_id: TaskID, advance: float = 1) -> None:
+ """Advance task by a number of steps.
+
+ Args:
+ task_id (TaskID): ID of task.
+ advance (float): Number of steps to advance. Default is 1.
+ """
current_time = self.get_time()
with self._lock:
task = self._tasks[task_id]
@@ -1231,18 +1545,29 @@ task.finished_speed = task.speed
def refresh(self) -> None:
+ """Refresh (render) the progress information."""
if not self.disable and self.live.is_started:
self.live.refresh()
def get_renderable(self) -> RenderableType:
+ """Get a renderable for the progress display."""
renderable = Group(*self.get_renderables())
return renderable
def get_renderables(self) -> Iterable[RenderableType]:
+ """Get a number of renderables for the progress display."""
table = self.make_tasks_table(self.tasks)
yield table
def make_tasks_table(self, tasks: Iterable[Task]) -> Table:
+ """Get a table to render the Progress display.
+
+ Args:
+ tasks (Iterable[Task]): An iterable of Task instances, one per row of the table.
+
+ Returns:
+ Table: A table instance.
+ """
table_columns = (
(
Column(no_wrap=True)
@@ -1268,6 +1593,7 @@ return table
def __rich__(self) -> RenderableType:
+ """Makes the Progress class itself renderable."""
with self._lock:
return self.get_renderable()
@@ -1280,6 +1606,21 @@ visible: bool = True,
**fields: Any,
) -> TaskID:
+ """Add a new 'task' to the Progress display.
+
+ Args:
+ description (str): A description of the task.
+ start (bool, optional): Start the task immediately (to calculate elapsed time). If set to False,
+ you will need to call `start` manually. Defaults to True.
+ total (float, optional): Number of total steps in the progress if known.
+ Set to None to render a pulsing animation. Defaults to 100.
+ completed (int, optional): Number of steps completed so far. Defaults to 0.
+ visible (bool, optional): Enable display of the task. Defaults to True.
+ **fields (str): Additional data fields required for rendering.
+
+ Returns:
+ TaskID: An ID you can use when calling `update`.
+ """
with self._lock:
task = Task(
self._task_index,
@@ -1300,6 +1641,12 @@ return new_task_index
def remove_task(self, task_id: TaskID) -> None:
+ """Delete a task if it exists.
+
+ Args:
+ task_id (TaskID): A task ID.
+
+ """
with self._lock:
del self._tasks[task_id]
@@ -1366,4 +1713,4 @@ progress.update(task2, advance=0.3)
time.sleep(0.01)
if random.randint(0, 100) < 1:
-        progress.log(next(examples))
+        progress.log(next(examples))
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/progress.py |
Add docstrings to make code maintainable | from typing import Any, Generic, List, Optional, TextIO, TypeVar, Union, overload
from . import get_console
from .console import Console
from .text import Text, TextType
PromptType = TypeVar("PromptType")
DefaultType = TypeVar("DefaultType")
class PromptError(Exception):
    ...
class InvalidResponse(PromptError):
def __init__(self, message: TextType) -> None:
self.message = message
def __rich__(self) -> TextType:
return self.message
class PromptBase(Generic[PromptType]):
response_type: type = str
validate_error_message = "[prompt.invalid]Please enter a valid value"
illegal_choice_message = (
"[prompt.invalid.choice]Please select one of the available options"
)
prompt_suffix = ": "
choices: Optional[List[str]] = None
def __init__(
self,
prompt: TextType = "",
*,
console: Optional[Console] = None,
password: bool = False,
choices: Optional[List[str]] = None,
case_sensitive: bool = True,
show_default: bool = True,
show_choices: bool = True,
) -> None:
self.console = console or get_console()
self.prompt = (
Text.from_markup(prompt, style="prompt")
if isinstance(prompt, str)
else prompt
)
self.password = password
if choices is not None:
self.choices = choices
self.case_sensitive = case_sensitive
self.show_default = show_default
self.show_choices = show_choices
@classmethod
@overload
def ask(
cls,
prompt: TextType = "",
*,
console: Optional[Console] = None,
password: bool = False,
choices: Optional[List[str]] = None,
case_sensitive: bool = True,
show_default: bool = True,
show_choices: bool = True,
default: DefaultType,
stream: Optional[TextIO] = None,
) -> Union[DefaultType, PromptType]:
...
@classmethod
@overload
def ask(
cls,
prompt: TextType = "",
*,
console: Optional[Console] = None,
password: bool = False,
choices: Optional[List[str]] = None,
case_sensitive: bool = True,
show_default: bool = True,
show_choices: bool = True,
stream: Optional[TextIO] = None,
) -> PromptType:
...
@classmethod
def ask(
cls,
prompt: TextType = "",
*,
console: Optional[Console] = None,
password: bool = False,
choices: Optional[List[str]] = None,
case_sensitive: bool = True,
show_default: bool = True,
show_choices: bool = True,
default: Any = ...,
stream: Optional[TextIO] = None,
) -> Any:
_prompt = cls(
prompt,
console=console,
password=password,
choices=choices,
case_sensitive=case_sensitive,
show_default=show_default,
show_choices=show_choices,
)
return _prompt(default=default, stream=stream)
def render_default(self, default: DefaultType) -> Text:
return Text(f"({default})", "prompt.default")
def make_prompt(self, default: DefaultType) -> Text:
prompt = self.prompt.copy()
prompt.end = ""
if self.show_choices and self.choices:
_choices = "/".join(self.choices)
choices = f"[{_choices}]"
prompt.append(" ")
prompt.append(choices, "prompt.choices")
if (
default != ...
and self.show_default
and isinstance(default, (str, self.response_type))
):
prompt.append(" ")
_default = self.render_default(default)
prompt.append(_default)
prompt.append(self.prompt_suffix)
return prompt
@classmethod
def get_input(
cls,
console: Console,
prompt: TextType,
password: bool,
stream: Optional[TextIO] = None,
) -> str:
return console.input(prompt, password=password, stream=stream)
def check_choice(self, value: str) -> bool:
assert self.choices is not None
if self.case_sensitive:
return value.strip() in self.choices
return value.strip().lower() in [choice.lower() for choice in self.choices]
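    # Convert raw input to response_type and validate against choices; raising
    # InvalidResponse here causes __call__ to print the error and re-prompt.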
def process_response(self, value: str) -> PromptType:
value = value.strip()
try:
return_value: PromptType = self.response_type(value)
except ValueError:
raise InvalidResponse(self.validate_error_message)
if self.choices is not None:
if not self.check_choice(value):
raise InvalidResponse(self.illegal_choice_message)
if not self.case_sensitive:
# return the original choice, not the lower case version
return_value = self.response_type(
self.choices[
[choice.lower() for choice in self.choices].index(value.lower())
]
)
return return_value
def on_validate_error(self, value: str, error: InvalidResponse) -> None:
self.console.print(error, markup=True)
def pre_prompt(self) -> None:
@overload
def __call__(self, *, stream: Optional[TextIO] = None) -> PromptType:
...
@overload
def __call__(
self, *, default: DefaultType, stream: Optional[TextIO] = None
) -> Union[PromptType, DefaultType]:
...
def __call__(self, *, default: Any = ..., stream: Optional[TextIO] = None) -> Any:
while True:
self.pre_prompt()
prompt = self.make_prompt(default)
value = self.get_input(self.console, prompt, self.password, stream=stream)
if value == "" and default != ...:
return default
try:
return_value = self.process_response(value)
except InvalidResponse as error:
self.on_validate_error(value, error)
continue
else:
return return_value
class Prompt(PromptBase[str]):
response_type = str
class IntPrompt(PromptBase[int]):
response_type = int
validate_error_message = "[prompt.invalid]Please enter a valid integer number"
class FloatPrompt(PromptBase[float]):
response_type = float
validate_error_message = "[prompt.invalid]Please enter a number"
class Confirm(PromptBase[bool]):
response_type = bool
validate_error_message = "[prompt.invalid]Please enter Y or N"
choices: List[str] = ["y", "n"]
def render_default(self, default: DefaultType) -> Text:
yes, no = self.choices
return Text(f"({yes})" if default else f"({no})", style="prompt.default")
def process_response(self, value: str) -> bool:
value = value.strip().lower()
if value not in self.choices:
raise InvalidResponse(self.validate_error_message)
return value == self.choices[0]
if __name__ == "__main__": # pragma: no cover
from rich import print
if Confirm.ask("Run [i]prompt[/i] tests?", default=True):
while True:
result = IntPrompt.ask(
":rocket: Enter a number between [b]1[/b] and [b]10[/b]", default=5
)
if result >= 1 and result <= 10:
break
print(":pile_of_poo: [prompt.invalid]Number must be between 1 and 10")
print(f"number={result}")
while True:
password = Prompt.ask(
"Please enter a password [cyan](must be at least 5 characters)",
password=True,
)
if len(password) >= 5:
break
print("[prompt.invalid]password too short")
print(f"password={password!r}")
fruit = Prompt.ask("Enter a fruit", choices=["apple", "orange", "pear"])
print(f"fruit={fruit!r}")
doggie = Prompt.ask(
"What's the best Dog? (Case INSENSITIVE)",
choices=["Border Terrier", "Collie", "Labradoodle"],
case_sensitive=False,
)
print(f"doggie={doggie!r}")
else:
print("[b]OK :loudly_crying_face:") | --- +++ @@ -9,9 +9,16 @@
class PromptError(Exception):
+ """Exception base class for prompt related errors."""
class InvalidResponse(PromptError):
+ """Exception to indicate a response was invalid. Raise this within process_response() to indicate an error
+ and provide an error message.
+
+ Args:
+ message (Union[str, Text]): Error message.
+ """
def __init__(self, message: TextType) -> None:
self.message = message
@@ -21,6 +28,18 @@
class PromptBase(Generic[PromptType]):
+ """Ask the user for input until a valid response is received. This is the base class, see one of
+ the concrete classes for examples.
+
+ Args:
+ prompt (TextType, optional): Prompt text. Defaults to "".
+ console (Console, optional): A Console instance or None to use global console. Defaults to None.
+ password (bool, optional): Enable password input. Defaults to False.
+ choices (List[str], optional): A list of valid choices. Defaults to None.
+ case_sensitive (bool, optional): Matching of choices should be case-sensitive. Defaults to True.
+ show_default (bool, optional): Show default in prompt. Defaults to True.
+ show_choices (bool, optional): Show choices in prompt. Defaults to True.
+ """
response_type: type = str
@@ -103,6 +122,21 @@ default: Any = ...,
stream: Optional[TextIO] = None,
) -> Any:
+ """Shortcut to construct and run a prompt loop and return the result.
+
+ Example:
+ >>> filename = Prompt.ask("Enter a filename")
+
+ Args:
+ prompt (TextType, optional): Prompt text. Defaults to "".
+ console (Console, optional): A Console instance or None to use global console. Defaults to None.
+ password (bool, optional): Enable password input. Defaults to False.
+ choices (List[str], optional): A list of valid choices. Defaults to None.
+ case_sensitive (bool, optional): Matching of choices should be case-sensitive. Defaults to True.
+ show_default (bool, optional): Show default in prompt. Defaults to True.
+ show_choices (bool, optional): Show choices in prompt. Defaults to True.
+ stream (TextIO, optional): Optional text file open for reading to get input. Defaults to None.
+ """
_prompt = cls(
prompt,
console=console,
@@ -115,9 +149,25 @@ return _prompt(default=default, stream=stream)
def render_default(self, default: DefaultType) -> Text:
+ """Turn the supplied default in to a Text instance.
+
+ Args:
+ default (DefaultType): Default value.
+
+ Returns:
+ Text: Text containing rendering of default value.
+ """
return Text(f"({default})", "prompt.default")
def make_prompt(self, default: DefaultType) -> Text:
+ """Make prompt text.
+
+ Args:
+ default (DefaultType): Default value.
+
+ Returns:
+ Text: Text to display in prompt.
+ """
prompt = self.prompt.copy()
prompt.end = ""
@@ -148,15 +198,44 @@ password: bool,
stream: Optional[TextIO] = None,
) -> str:
+ """Get input from user.
+
+ Args:
+ console (Console): Console instance.
+ prompt (TextType): Prompt text.
+ password (bool): Enable password entry.
+
+ Returns:
+ str: String from user.
+ """
return console.input(prompt, password=password, stream=stream)
def check_choice(self, value: str) -> bool:
+ """Check value is in the list of valid choices.
+
+ Args:
+ value (str): Value entered by user.
+
+ Returns:
+ bool: True if choice was valid, otherwise False.
+ """
assert self.choices is not None
if self.case_sensitive:
return value.strip() in self.choices
return value.strip().lower() in [choice.lower() for choice in self.choices]
def process_response(self, value: str) -> PromptType:
+ """Process response from user, convert to prompt type.
+
+ Args:
+ value (str): String typed by user.
+
+ Raises:
+ InvalidResponse: If ``value`` is invalid.
+
+ Returns:
+ PromptType: The value to be returned from the ask() method.
+ """
value = value.strip()
try:
return_value: PromptType = self.response_type(value)
@@ -177,9 +256,16 @@ return return_value
def on_validate_error(self, value: str, error: InvalidResponse) -> None:
+ """Called to handle validation error.
+
+ Args:
+ value (str): String entered by user.
+ error (InvalidResponse): Exception instance that initiated the error.
+ """
self.console.print(error, markup=True)
def pre_prompt(self) -> None:
+ """Hook to display something before the prompt."""
@overload
def __call__(self, *, stream: Optional[TextIO] = None) -> PromptType:
@@ -192,6 +278,14 @@ ...
def __call__(self, *, default: Any = ..., stream: Optional[TextIO] = None) -> Any:
+ """Run the prompt loop.
+
+ Args:
+ default (Any, optional): Optional default value.
+
+ Returns:
+ PromptType: Processed value.
+ """
while True:
self.pre_prompt()
prompt = self.make_prompt(default)
@@ -208,33 +302,61 @@
class Prompt(PromptBase[str]):
+ """A prompt that returns a str.
+
+ Example:
+ >>> name = Prompt.ask("Enter your name")
+
+
+ """
response_type = str
class IntPrompt(PromptBase[int]):
+ """A prompt that returns an integer.
+
+ Example:
+ >>> burrito_count = IntPrompt.ask("How many burritos do you want to order")
+
+ """
response_type = int
validate_error_message = "[prompt.invalid]Please enter a valid integer number"
class FloatPrompt(PromptBase[float]):
+ """A prompt that returns a float.
+
+ Example:
+ >>> temperature = FloatPrompt.ask("Enter desired temperature")
+
+ """
response_type = float
validate_error_message = "[prompt.invalid]Please enter a number"
class Confirm(PromptBase[bool]):
+ """A yes / no confirmation prompt.
+
+ Example:
+ >>> if Confirm.ask("Continue"):
+ run_job()
+
+ """
response_type = bool
validate_error_message = "[prompt.invalid]Please enter Y or N"
choices: List[str] = ["y", "n"]
def render_default(self, default: DefaultType) -> Text:
+ """Render the default as (y) or (n) rather than True/False."""
yes, no = self.choices
return Text(f"({yes})" if default else f"({no})", style="prompt.default")
def process_response(self, value: str) -> bool:
+ """Convert choices to a bool."""
value = value.strip().lower()
if value not in self.choices:
raise InvalidResponse(self.validate_error_message)
@@ -275,4 +397,4 @@ print(f"doggie={doggie!r}")
else:
- print("[b]OK :loudly_crying_face:")+ print("[b]OK :loudly_crying_face:")
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/prompt.py |
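A minimal usage sketch for the prompt API documented in this row, assuming the rich package is installed; class names and keyword arguments follow the code above:

from rich.prompt import Confirm, IntPrompt, Prompt

# Free-form string input; the default is rendered by render_default()
name = Prompt.ask("Enter your name", default="guest")

# choices are validated via check_choice(); invalid input re-prompts
color = Prompt.ask("Pick a color", choices=["red", "green", "blue"])

# IntPrompt converts input with response_type (int) and re-prompts
# with validate_error_message on ValueError
count = IntPrompt.ask("How many", default=1)

# Confirm overrides process_response() to map "y"/"n" onto a bool
if Confirm.ask("Proceed?", default=True):
    print(f"{name} picked {count} x {color}")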
Document my Python code with docstrings | from typing import TYPE_CHECKING, Optional
from .align import AlignMethod
from .box import ROUNDED, Box
from .cells import cell_len
from .jupyter import JupyterMixin
from .measure import Measurement, measure_renderables
from .padding import Padding, PaddingDimensions
from .segment import Segment
from .style import Style, StyleType
from .text import Text, TextType
if TYPE_CHECKING:
from .console import Console, ConsoleOptions, RenderableType, RenderResult
class Panel(JupyterMixin):
def __init__(
self,
renderable: "RenderableType",
box: Box = ROUNDED,
*,
title: Optional[TextType] = None,
title_align: AlignMethod = "center",
subtitle: Optional[TextType] = None,
subtitle_align: AlignMethod = "center",
safe_box: Optional[bool] = None,
expand: bool = True,
style: StyleType = "none",
border_style: StyleType = "none",
width: Optional[int] = None,
height: Optional[int] = None,
padding: PaddingDimensions = (0, 1),
highlight: bool = False,
) -> None:
self.renderable = renderable
self.box = box
self.title = title
self.title_align: AlignMethod = title_align
self.subtitle = subtitle
self.subtitle_align = subtitle_align
self.safe_box = safe_box
self.expand = expand
self.style = style
self.border_style = border_style
self.width = width
self.height = height
self.padding = padding
self.highlight = highlight
@classmethod
def fit(
cls,
renderable: "RenderableType",
box: Box = ROUNDED,
*,
title: Optional[TextType] = None,
title_align: AlignMethod = "center",
subtitle: Optional[TextType] = None,
subtitle_align: AlignMethod = "center",
safe_box: Optional[bool] = None,
style: StyleType = "none",
border_style: StyleType = "none",
width: Optional[int] = None,
height: Optional[int] = None,
padding: PaddingDimensions = (0, 1),
highlight: bool = False,
) -> "Panel":
return cls(
renderable,
box,
title=title,
title_align=title_align,
subtitle=subtitle,
subtitle_align=subtitle_align,
safe_box=safe_box,
style=style,
border_style=border_style,
width=width,
height=height,
padding=padding,
highlight=highlight,
expand=False,
)
@property
def _title(self) -> Optional[Text]:
if self.title:
title_text = (
Text.from_markup(self.title)
if isinstance(self.title, str)
else self.title.copy()
)
title_text.end = ""
title_text.plain = title_text.plain.replace("\n", " ")
title_text.no_wrap = True
title_text.expand_tabs()
title_text.pad(1)
return title_text
return None
@property
def _subtitle(self) -> Optional[Text]:
if self.subtitle:
subtitle_text = (
Text.from_markup(self.subtitle)
if isinstance(self.subtitle, str)
else self.subtitle.copy()
)
subtitle_text.end = ""
subtitle_text.plain = subtitle_text.plain.replace("\n", " ")
subtitle_text.no_wrap = True
subtitle_text.expand_tabs()
subtitle_text.pad(1)
return subtitle_text
return None
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
_padding = Padding.unpack(self.padding)
renderable = (
Padding(self.renderable, _padding) if any(_padding) else self.renderable
)
style = console.get_style(self.style)
border_style = style + console.get_style(self.border_style)
width = (
options.max_width
if self.width is None
else min(options.max_width, self.width)
)
safe_box: bool = console.safe_box if self.safe_box is None else self.safe_box
box = self.box.substitute(options, safe=safe_box)
def align_text(
text: Text, width: int, align: str, character: str, style: Style
) -> Text:
text = text.copy()
text.truncate(width)
excess_space = width - cell_len(text.plain)
if text.style:
text.stylize(console.get_style(text.style))
if excess_space:
if align == "left":
return Text.assemble(
text,
(character * excess_space, style),
no_wrap=True,
end="",
)
elif align == "center":
left = excess_space // 2
return Text.assemble(
(character * left, style),
text,
(character * (excess_space - left), style),
no_wrap=True,
end="",
)
else:
return Text.assemble(
(character * excess_space, style),
text,
no_wrap=True,
end="",
)
return text
title_text = self._title
if title_text is not None:
title_text.stylize_before(border_style)
child_width = (
width - 2
if self.expand
else console.measure(
renderable, options=options.update_width(width - 2)
).maximum
)
child_height = self.height or options.height or None
if child_height:
child_height -= 2
if title_text is not None:
child_width = min(
options.max_width - 2, max(child_width, title_text.cell_len + 2)
)
width = child_width + 2
child_options = options.update(
width=child_width, height=child_height, highlight=self.highlight
)
lines = console.render_lines(renderable, child_options, style=style)
line_start = Segment(box.mid_left, border_style)
line_end = Segment(f"{box.mid_right}", border_style)
new_line = Segment.line()
if title_text is None or width <= 4:
yield Segment(box.get_top([width - 2]), border_style)
else:
title_text = align_text(
title_text,
width - 4,
self.title_align,
box.top,
border_style,
)
yield Segment(box.top_left + box.top, border_style)
yield from console.render(title_text, child_options.update_width(width - 4))
yield Segment(box.top + box.top_right, border_style)
yield new_line
for line in lines:
yield line_start
yield from line
yield line_end
yield new_line
subtitle_text = self._subtitle
if subtitle_text is not None:
subtitle_text.stylize_before(border_style)
if subtitle_text is None or width <= 4:
yield Segment(box.get_bottom([width - 2]), border_style)
else:
subtitle_text = align_text(
subtitle_text,
width - 4,
self.subtitle_align,
box.bottom,
border_style,
)
yield Segment(box.bottom_left + box.bottom, border_style)
yield from console.render(
subtitle_text, child_options.update_width(width - 4)
)
yield Segment(box.bottom + box.bottom_right, border_style)
yield new_line
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> "Measurement":
_title = self._title
_, right, _, left = Padding.unpack(self.padding)
padding = left + right
renderables = [self.renderable, _title] if _title else [self.renderable]
if self.width is None:
width = (
measure_renderables(
console,
options.update_width(options.max_width - padding - 2),
renderables,
).maximum
+ padding
+ 2
)
else:
width = self.width
return Measurement(width, width)
if __name__ == "__main__": # pragma: no cover
from .console import Console
c = Console()
from .box import DOUBLE, ROUNDED
from .padding import Padding
p = Panel(
"Hello, World!",
title="rich.Panel",
style="white on blue",
box=DOUBLE,
padding=1,
)
c.print()
c.print(p) | --- +++ @@ -15,6 +15,27 @@
class Panel(JupyterMixin):
+ """A console renderable that draws a border around its contents.
+
+ Example:
+ >>> console.print(Panel("Hello, World!"))
+
+ Args:
+ renderable (RenderableType): A console renderable object.
+ box (Box): A Box instance that defines the look of the border (see :ref:`appendix_box`). Defaults to box.ROUNDED.
+ title (Optional[TextType], optional): Optional title displayed in panel header. Defaults to None.
+ title_align (AlignMethod, optional): Alignment of title. Defaults to "center".
+ subtitle (Optional[TextType], optional): Optional subtitle displayed in panel footer. Defaults to None.
+ subtitle_align (AlignMethod, optional): Alignment of subtitle. Defaults to "center".
+ safe_box (bool, optional): Disable box characters that don't display on Windows legacy terminal with *raster* fonts. Defaults to True.
+ expand (bool, optional): If True the panel will stretch to fill the console width, otherwise it will be sized to fit the contents. Defaults to True.
+ style (str, optional): The style of the panel (border and contents). Defaults to "none".
+ border_style (str, optional): The style of the border. Defaults to "none".
+ width (Optional[int], optional): Optional width of panel. Defaults to None to auto-detect.
+ height (Optional[int], optional): Optional height of panel. Defaults to None to auto-detect.
+ padding (Optional[PaddingDimensions]): Optional padding around renderable. Defaults to 0.
+ highlight (bool, optional): Enable automatic highlighting of panel title (if str). Defaults to False.
+ """
def __init__(
self,
@@ -67,6 +88,7 @@ padding: PaddingDimensions = (0, 1),
highlight: bool = False,
) -> "Panel":
+ """An alternative constructor that sets expand=False."""
return cls(
renderable,
box,
@@ -137,6 +159,18 @@ def align_text(
text: Text, width: int, align: str, character: str, style: Style
) -> Text:
+ """Gets new aligned text.
+
+ Args:
+ text (Text): Title or subtitle text.
+ width (int): Desired width.
+ align (str): Alignment.
+ character (str): Character for alignment.
+ style (Style): Border style.
+
+ Returns:
+ Text: New text instance.
+ """
text = text.copy()
text.truncate(width)
excess_space = width - cell_len(text.plain)
@@ -280,4 +314,4 @@ )
c.print()
- c.print(p)
+ c.print(p)
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/panel.py |
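A short usage sketch for the Panel class documented in this row, assuming rich is installed; the constructor and Panel.fit() arguments match the signatures above:

from rich import box
from rich.console import Console
from rich.panel import Panel

console = Console()

# expand=True (the default) stretches the border to the console width
console.print(Panel("Hello, World!", title="rich.Panel", border_style="cyan"))

# Panel.fit() is the alternative constructor that sets expand=False,
# so the border is sized to the contents instead
console.print(Panel.fit("Sized to fit", box=box.DOUBLE, padding=(1, 2)))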
Add detailed documentation for each class | import re
from ast import literal_eval
from operator import attrgetter
from typing import Callable, Iterable, List, Match, NamedTuple, Optional, Tuple, Union
from ._emoji_replace import _emoji_replace
from .emoji import EmojiVariant
from .errors import MarkupError
from .style import Style
from .text import Span, Text
RE_TAGS = re.compile(
r"""((\\*)\[([a-z#/@][^[]*?)])""",
re.VERBOSE,
)
RE_HANDLER = re.compile(r"^([\w.]*?)(\(.*?\))?$")
class Tag(NamedTuple):
name: str
"""The tag name. e.g. 'bold'."""
parameters: Optional[str]
"""Any additional parameters after the name."""
def __str__(self) -> str:
return (
self.name if self.parameters is None else f"{self.name} {self.parameters}"
)
@property
def markup(self) -> str:
return (
f"[{self.name}]"
if self.parameters is None
else f"[{self.name}={self.parameters}]"
)
_ReStringMatch = Match[str] # regex match object
_ReSubCallable = Callable[[_ReStringMatch], str] # Callable invoked by re.sub
_EscapeSubMethod = Callable[[_ReSubCallable, str], str] # Sub method of a compiled re
def escape(
markup: str,
_escape: _EscapeSubMethod = re.compile(r"(\\*)(\[[a-z#/@][^[]*?])").sub,
) -> str:
def escape_backslashes(match: Match[str]) -> str:
backslashes, text = match.groups()
return f"{backslashes}{backslashes}\\{text}"
markup = _escape(escape_backslashes, markup)
if markup.endswith("\\") and not markup.endswith("\\\\"):
return markup + "\\"
return markup
def _parse(markup: str) -> Iterable[Tuple[int, Optional[str], Optional[Tag]]]:
position = 0
_divmod = divmod
_Tag = Tag
for match in RE_TAGS.finditer(markup):
full_text, escapes, tag_text = match.groups()
start, end = match.span()
if start > position:
yield start, markup[position:start], None
if escapes:
backslashes, escaped = _divmod(len(escapes), 2)
if backslashes:
# Literal backslashes
yield start, "\\" * backslashes, None
start += backslashes * 2
if escaped:
# Escape of tag
yield start, full_text[len(escapes) :], None
position = end
continue
text, equals, parameters = tag_text.partition("=")
yield start, None, _Tag(text, parameters if equals else None)
position = end
if position < len(markup):
yield position, markup[position:], None
def render(
markup: str,
style: Union[str, Style] = "",
emoji: bool = True,
emoji_variant: Optional[EmojiVariant] = None,
) -> Text:
emoji_replace = _emoji_replace
if "[" not in markup:
return Text(
emoji_replace(markup, default_variant=emoji_variant) if emoji else markup,
style=style,
)
text = Text(style=style)
append = text.append
normalize = Style.normalize
style_stack: List[Tuple[int, Tag]] = []
pop = style_stack.pop
spans: List[Span] = []
append_span = spans.append
_Span = Span
_Tag = Tag
def pop_style(style_name: str) -> Tuple[int, Tag]:
for index, (_, tag) in enumerate(reversed(style_stack), 1):
if tag.name == style_name:
return pop(-index)
raise KeyError(style_name)
for position, plain_text, tag in _parse(markup):
if plain_text is not None:
# Handle open brace escapes, where the brace is not part of a tag.
plain_text = plain_text.replace("\\[", "[")
append(emoji_replace(plain_text) if emoji else plain_text)
elif tag is not None:
if tag.name.startswith("/"): # Closing tag
style_name = tag.name[1:].strip()
if style_name: # explicit close
style_name = normalize(style_name)
try:
start, open_tag = pop_style(style_name)
except KeyError:
raise MarkupError(
f"closing tag '{tag.markup}' at position {position} doesn't match any open tag"
) from None
else: # implicit close
try:
start, open_tag = pop()
except IndexError:
raise MarkupError(
f"closing tag '[/]' at position {position} has nothing to close"
) from None
if open_tag.name.startswith("@"):
if open_tag.parameters:
handler_name = ""
parameters = open_tag.parameters.strip()
handler_match = RE_HANDLER.match(parameters)
if handler_match is not None:
handler_name, match_parameters = handler_match.groups()
parameters = (
"()" if match_parameters is None else match_parameters
)
try:
meta_params = literal_eval(parameters)
except SyntaxError as error:
raise MarkupError(
f"error parsing {parameters!r} in {open_tag.parameters!r}; {error.msg}"
)
except Exception as error:
raise MarkupError(
f"error parsing {open_tag.parameters!r}; {error}"
) from None
if handler_name:
meta_params = (
handler_name,
meta_params
if isinstance(meta_params, tuple)
else (meta_params,),
)
else:
meta_params = ()
append_span(
_Span(
start, len(text), Style(meta={open_tag.name: meta_params})
)
)
else:
append_span(_Span(start, len(text), str(open_tag)))
else: # Opening tag
normalized_tag = _Tag(normalize(tag.name), tag.parameters)
style_stack.append((len(text), normalized_tag))
text_length = len(text)
while style_stack:
start, tag = style_stack.pop()
style = str(tag)
if style:
append_span(_Span(start, text_length, style))
text.spans = sorted(spans[::-1], key=attrgetter("start"))
return text
if __name__ == "__main__": # pragma: no cover
MARKUP = [
"[red]Hello World[/red]",
"[magenta]Hello [b]World[/b]",
"[bold]Bold[italic] bold and italic [/bold]italic[/italic]",
"Click [link=https://www.willmcgugan.com]here[/link] to visit my Blog",
":warning-emoji: [bold red blink] DANGER![/]",
]
from rich import print
from rich.table import Table
grid = Table("Markup", "Result", padding=(0, 1))
for markup in MARKUP:
grid.add_row(Text(markup), markup)
print(grid) | --- +++ @@ -18,6 +18,7 @@
class Tag(NamedTuple):
+ """A tag in console markup."""
name: str
"""The tag name. e.g. 'bold'."""
@@ -31,6 +32,7 @@
@property
def markup(self) -> str:
+ """Get the string representation of this tag."""
return (
f"[{self.name}]"
if self.parameters is None
@@ -47,8 +49,17 @@ markup: str,
_escape: _EscapeSubMethod = re.compile(r"(\\*)(\[[a-z#/@][^[]*?])").sub,
) -> str:
+ """Escapes text so that it won't be interpreted as markup.
+
+ Args:
+ markup (str): Content to be inserted in to markup.
+
+ Returns:
+ str: Markup with square brackets escaped.
+ """
def escape_backslashes(match: Match[str]) -> str:
+ """Called by re.sub replace matches."""
backslashes, text = match.groups()
return f"{backslashes}{backslashes}\\{text}"
@@ -60,6 +71,12 @@
def _parse(markup: str) -> Iterable[Tuple[int, Optional[str], Optional[Tag]]]:
+ """Parse markup in to an iterable of tuples of (position, text, tag).
+
+ Args:
+ markup (str): A string containing console markup
+
+ """
position = 0
_divmod = divmod
_Tag = Tag
@@ -92,6 +109,21 @@ emoji: bool = True,
emoji_variant: Optional[EmojiVariant] = None,
) -> Text:
+ """Render console markup in to a Text instance.
+
+ Args:
+ markup (str): A string containing console markup.
+ style (Union[str, Style]): The style to use.
+ emoji (bool, optional): Also render emoji code. Defaults to True.
+ emoji_variant (str, optional): Optional emoji variant, either "text" or "emoji". Defaults to None.
+
+
+ Raises:
+ MarkupError: If there is a syntax error in the markup.
+
+ Returns:
+ Text: A test instance.
+ """
emoji_replace = _emoji_replace
if "[" not in markup:
return Text(
@@ -112,6 +144,7 @@ _Tag = Tag
def pop_style(style_name: str) -> Tuple[int, Tag]:
+ """Pop tag matching given style name."""
for index, (_, tag) in enumerate(reversed(style_stack), 1):
if tag.name == style_name:
return pop(-index)
@@ -215,4 +248,4 @@ for markup in MARKUP:
grid.add_row(Text(markup), markup)
- print(grid)
+ print(grid)
| https://raw.githubusercontent.com/Textualize/rich/HEAD/rich/markup.py |
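A brief sketch exercising the module-level helpers documented in this row, assuming rich is installed; render() and escape() are the functions shown above:

from rich.markup import escape, render

# render() parses console markup and returns a styled Text instance
text = render("[bold red]Alert[/] at [underline]dawn[/underline]")
print(text.plain)  # -> Alert at dawn
print(text.spans)  # one Span per tag, sorted by start offset

# escape() backslash-escapes square brackets so the string is not
# interpreted as markup
print(escape("[this is not a tag]"))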