| python_code | repo_name | file_path |
|---|---|---|
import argparse
import concurrent.futures
import json
import logging
import os
import sys
from enum import Enum
from pathlib import Path
from typing import Any, List, NamedTuple, Optional
from ufmt.core import make_black_config, ufmt_string
from usort import Config as UsortConfig
IS_WINDOWS: bool = os.name == "nt"
def eprint(*args: Any, **kwargs: Any) -> None:
print(*args, file=sys.stderr, flush=True, **kwargs)
class LintSeverity(str, Enum):
ERROR = "error"
WARNING = "warning"
ADVICE = "advice"
DISABLED = "disabled"
class LintMessage(NamedTuple):
path: Optional[str]
line: Optional[int]
char: Optional[int]
code: str
severity: LintSeverity
name: str
original: Optional[str]
replacement: Optional[str]
description: Optional[str]
def as_posix(name: str) -> str:
return name.replace("\\", "/") if IS_WINDOWS else name
def format_error_message(filename: str, err: Exception) -> LintMessage:
return LintMessage(
path=filename,
line=None,
char=None,
code="UFMT",
severity=LintSeverity.ADVICE,
name="command-failed",
original=None,
replacement=None,
description=(f"Failed due to {err.__class__.__name__}:\n{err}"),
)
def check_file(
filename: str,
) -> List[LintMessage]:
with open(filename, "rb") as f:
original = f.read().decode("utf-8")
try:
path = Path(filename)
usort_config = UsortConfig.find(path)
black_config = make_black_config(path)
# Use UFMT API to call both usort and black
replacement = ufmt_string(
path=path,
content=original,
usort_config=usort_config,
black_config=black_config,
)
if original == replacement:
return []
return [
LintMessage(
path=filename,
line=None,
char=None,
code="UFMT",
severity=LintSeverity.WARNING,
name="format",
original=original,
replacement=replacement,
description="Run `lintrunner -a` to apply this patch.",
)
]
except Exception as err:
return [format_error_message(filename, err)]
def main() -> None:
parser = argparse.ArgumentParser(
description="Format files with ufmt (black + usort).",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET
if args.verbose
else logging.DEBUG
if len(args.filenames) < 1000
else logging.INFO,
stream=sys.stderr,
)
with concurrent.futures.ThreadPoolExecutor(
max_workers=os.cpu_count(),
thread_name_prefix="Thread",
) as executor:
futures = {executor.submit(check_file, x): x for x in args.filenames}
for future in concurrent.futures.as_completed(futures):
try:
for lint_message in future.result():
print(json.dumps(lint_message._asdict()), flush=True)
except Exception:
logging.critical('Failed at "%s".', futures[future])
raise
if __name__ == "__main__":
main()
| pytorch-master | tools/linter/adapters/ufmt_linter.py |
#!/usr/bin/env python3
"""
Test ownership was introduced in https://github.com/pytorch/pytorch/issues/66232.
This lint verifies that every Python test file (file that matches test_*.py or *_test.py in the test folder)
has valid ownership information in a comment header. Valid means:
- The format of the header follows the pattern "# Owner(s): ["list", "of owner", "labels"]"
- Each owner label actually exists in PyTorch
- Each owner label starts with "module: " or "oncall: " or is in ACCEPTABLE_OWNER_LABELS
"""
import argparse
import json
from enum import Enum
from typing import Any, List, NamedTuple, Optional
from urllib.request import urlopen
LINTER_CODE = "TESTOWNERS"
class LintSeverity(str, Enum):
ERROR = "error"
WARNING = "warning"
ADVICE = "advice"
DISABLED = "disabled"
class LintMessage(NamedTuple):
path: Optional[str]
line: Optional[int]
char: Optional[int]
code: str
severity: LintSeverity
name: str
original: Optional[str]
replacement: Optional[str]
description: Optional[str]
# Team/owner labels usually start with "module: " or "oncall: ", but the following are acceptable exceptions
ACCEPTABLE_OWNER_LABELS = ["NNC", "high priority"]
OWNERS_PREFIX = "# Owner(s): "
def get_pytorch_labels() -> Any:
labels = (
urlopen("https://ossci-metrics.s3.amazonaws.com/pytorch_labels.json")
.read()
.decode("utf-8")
)
return json.loads(labels)
PYTORCH_LABELS = get_pytorch_labels()
GLOB_EXCEPTIONS = ["**/test/run_test.py"]
def check_labels(
labels: List[str], filename: str, line_number: int
) -> List[LintMessage]:
lint_messages = []
for label in labels:
if label not in PYTORCH_LABELS:
lint_messages.append(
LintMessage(
path=filename,
line=line_number,
char=None,
code=LINTER_CODE,
severity=LintSeverity.ERROR,
name="[invalid-label]",
original=None,
replacement=None,
description=(
f"{label} is not a PyTorch label "
"(please choose from https://github.com/pytorch/pytorch/labels)"
),
)
)
if (
label.startswith("module:")
or label.startswith("oncall:")
or label in ACCEPTABLE_OWNER_LABELS
):
continue
lint_messages.append(
LintMessage(
path=filename,
line=line_number,
char=None,
code=LINTER_CODE,
severity=LintSeverity.ERROR,
name="[invalid-owner]",
original=None,
replacement=None,
description=(
f"{label} is not an acceptable owner "
"(please update to another label or edit ACCEPTABLE_OWNERS_LABELS "
"in tools/linters/adapters/testowners_linter.py"
),
)
)
return lint_messages
def check_file(filename: str) -> List[LintMessage]:
lint_messages = []
has_ownership_info = False
with open(filename) as f:
for idx, line in enumerate(f):
if not line.startswith(OWNERS_PREFIX):
continue
has_ownership_info = True
labels = json.loads(line[len(OWNERS_PREFIX) :])
lint_messages.extend(check_labels(labels, filename, idx + 1))
if has_ownership_info is False:
lint_messages.append(
LintMessage(
path=filename,
line=None,
char=None,
code=LINTER_CODE,
severity=LintSeverity.ERROR,
name="[no-owner-info]",
original=None,
replacement=None,
description="Missing a comment header with ownership information.",
)
)
return lint_messages
def main() -> None:
parser = argparse.ArgumentParser(
description="test ownership linter",
fromfile_prefix_chars="@",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
args = parser.parse_args()
lint_messages = []
for filename in args.filenames:
lint_messages.extend(check_file(filename))
for lint_message in lint_messages:
print(json.dumps(lint_message._asdict()), flush=True)
if __name__ == "__main__":
main()
| pytorch-master | tools/linter/adapters/testowners_linter.py |
"""
Checks that the configuration in .circleci/config.yml has been properly regenerated.
"""
import argparse
import json
import logging
import os
import subprocess
import sys
import time
from enum import Enum
from typing import List, NamedTuple, Optional
CHECKED_IN_FILE = "config.yml"
REGENERATION_SCRIPT = "regenerate.sh"
PARENT_DIR = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
README_PATH = os.path.join(PARENT_DIR, "README.md")
class LintSeverity(str, Enum):
ERROR = "error"
WARNING = "warning"
ADVICE = "advice"
DISABLED = "disabled"
class LintMessage(NamedTuple):
path: Optional[str]
line: Optional[int]
char: Optional[int]
code: str
severity: LintSeverity
name: str
original: Optional[str]
replacement: Optional[str]
description: Optional[str]
IS_WINDOWS: bool = os.name == "nt"
def as_posix(name: str) -> str:
return name.replace("\\", "/") if IS_WINDOWS else name
def run_command(args: List[str], cwd: str) -> "subprocess.CompletedProcess[bytes]":
logging.debug("$ %s", " ".join(args))
start_time = time.monotonic()
try:
return subprocess.run(
args,
cwd=cwd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True,
)
finally:
end_time = time.monotonic()
logging.debug("took %dms", (end_time - start_time) * 1000)
def run_check(
regen_script_working_dir: str, regen_script: str, config_file: str
) -> List[LintMessage]:
try:
proc = run_command(["python3", regen_script], regen_script_working_dir)
except Exception as err:
return [
LintMessage(
path=None,
line=None,
char=None,
code="CIRCLECI",
severity=LintSeverity.ERROR,
name="command-failed",
original=None,
replacement=None,
description=(
f"Failed due to {err.__class__.__name__}:\n{err}"
if not isinstance(err, subprocess.CalledProcessError)
else (
"COMMAND (exit code {returncode})\n"
"{command}\n\n"
"STDERR\n{stderr}\n\n"
"STDOUT\n{stdout}"
).format(
returncode=err.returncode,
command=" ".join(as_posix(x) for x in err.cmd),
stderr=err.stderr.decode("utf-8").strip() or "(empty)",
stdout=err.stdout.decode("utf-8").strip() or "(empty)",
)
),
)
]
with open(config_file, mode="rb") as f:
config = f.read()
if proc.stdout == config:
return []
return [
LintMessage(
path=config_file,
line=None,
char=None,
code="CIRCLECI",
severity=LintSeverity.ERROR,
name="config inconsistency",
original=config.decode("utf-8"),
replacement=proc.stdout.decode("utf-8"),
description=(
"The checked-in CircleCI config.yml file does not match what was generated by the scripts. "
"Re-run with '-a' to accept changes."
),
)
]
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="circleci consistency linter",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--config-yml",
required=True,
help="location of config.yml",
)
parser.add_argument(
"--regen-script-working-dir",
required=True,
help="this script will chdir to this argument before running --regen-script",
)
parser.add_argument(
"--regen-script",
required=True,
help="location of the config generation script, relative to --regen-script-working-dir",
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET if args.verbose else logging.DEBUG,
stream=sys.stderr,
)
for lint_message in run_check(
args.regen_script_working_dir, args.regen_script, args.config_yml
):
print(json.dumps(lint_message._asdict()), flush=True)
| pytorch-master | tools/linter/adapters/circleci_linter.py |
"""
Initializer script that installs packages with pip.
"""
import argparse
import logging
import os
import subprocess
import sys
import time
from typing import List
def run_command(args: List[str]) -> "subprocess.CompletedProcess[bytes]":
logging.debug("$ %s", " ".join(args))
start_time = time.monotonic()
try:
return subprocess.run(args, check=True)
finally:
end_time = time.monotonic()
logging.debug("took %dms", (end_time - start_time) * 1000)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="pip initializer")
parser.add_argument(
"packages",
nargs="+",
help="pip packages to install",
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
parser.add_argument(
"--dry-run", help="do not install anything, just print what would be done."
)
parser.add_argument(
"--no-binary",
help="do not use pre-compiled binaries from pip.",
action="store_true",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET if args.verbose else logging.DEBUG,
stream=sys.stderr,
)
pip_args = ["pip3", "install"]
# If we are in a global install, use `--user` to install so that you do not
# need root access in order to initialize linters.
#
# However, `pip install --user` interacts poorly with virtualenvs (see:
# https://bit.ly/3vD4kvl) and conda (see: https://bit.ly/3KG7ZfU). So in
# these cases perform a regular installation.
in_conda = os.environ.get("CONDA_PREFIX") is not None
in_virtualenv = os.environ.get("VIRTUAL_ENV") is not None
if not in_conda and not in_virtualenv:
pip_args.append("--user")
pip_args.extend(args.packages)
for package in args.packages:
package_name, _, version = package.partition("=")
if version == "":
raise RuntimeError(
"Package {package_name} did not have a version specified. "
"Please specify a version to produce a consistent linting experience."
)
if args.no_binary:
pip_args.append(f"--no-binary={package_name}")
dry_run = args.dry_run == "1"
if dry_run:
print(f"Would have run: {pip_args}")
sys.exit(0)
run_command(pip_args)
| pytorch-master | tools/linter/adapters/pip_init.py |
import argparse
import json
import logging
import os
import re
import subprocess
import sys
import time
from enum import Enum
from typing import Any, Dict, List, NamedTuple, Optional, Pattern, Set
IS_WINDOWS: bool = os.name == "nt"
def eprint(*args: Any, **kwargs: Any) -> None:
print(*args, file=sys.stderr, flush=True, **kwargs)
class LintSeverity(str, Enum):
ERROR = "error"
WARNING = "warning"
ADVICE = "advice"
DISABLED = "disabled"
class LintMessage(NamedTuple):
path: Optional[str]
line: Optional[int]
char: Optional[int]
code: str
severity: LintSeverity
name: str
original: Optional[str]
replacement: Optional[str]
description: Optional[str]
def as_posix(name: str) -> str:
return name.replace("\\", "/") if IS_WINDOWS else name
# fmt: off
# https://www.flake8rules.com/
DOCUMENTED_IN_FLAKE8RULES: Set[str] = {
"E101", "E111", "E112", "E113", "E114", "E115", "E116", "E117",
"E121", "E122", "E123", "E124", "E125", "E126", "E127", "E128", "E129",
"E131", "E133",
"E201", "E202", "E203",
"E211",
"E221", "E222", "E223", "E224", "E225", "E226", "E227", "E228",
"E231",
"E241", "E242",
"E251",
"E261", "E262", "E265", "E266",
"E271", "E272", "E273", "E274", "E275",
"E301", "E302", "E303", "E304", "E305", "E306",
"E401", "E402",
"E501", "E502",
"E701", "E702", "E703", "E704",
"E711", "E712", "E713", "E714",
"E721", "E722",
"E731",
"E741", "E742", "E743",
"E901", "E902", "E999",
"W191",
"W291", "W292", "W293",
"W391",
"W503", "W504",
"W601", "W602", "W603", "W604", "W605",
"F401", "F402", "F403", "F404", "F405",
"F811", "F812",
"F821", "F822", "F823",
"F831",
"F841",
"F901",
"C901",
}
# https://pypi.org/project/flake8-comprehensions/#rules
DOCUMENTED_IN_FLAKE8COMPREHENSIONS: Set[str] = {
"C400", "C401", "C402", "C403", "C404", "C405", "C406", "C407", "C408", "C409",
"C410",
"C411", "C412", "C413", "C413", "C414", "C415", "C416",
}
# https://github.com/PyCQA/flake8-bugbear#list-of-warnings
DOCUMENTED_IN_BUGBEAR: Set[str] = {
"B001", "B002", "B003", "B004", "B005", "B006", "B007", "B008", "B009", "B010",
"B011", "B012", "B013", "B014", "B015",
"B301", "B302", "B303", "B304", "B305", "B306",
"B901", "B902", "B903", "B950",
}
# fmt: on
# stdin:2: W802 undefined name 'foo'
# stdin:3:6: T484 Name 'foo' is not defined
# stdin:3:-100: W605 invalid escape sequence '\/'
# stdin:3:1: E302 expected 2 blank lines, found 1
RESULTS_RE: Pattern[str] = re.compile(
r"""(?mx)
^
(?P<file>.*?):
(?P<line>\d+):
(?:(?P<column>-?\d+):)?
\s(?P<code>\S+?):?
\s(?P<message>.*)
$
"""
)
def _test_results_re() -> None:
"""
>>> def t(s): return RESULTS_RE.search(s).groupdict()
>>> t(r"file.py:80:1: E302 expected 2 blank lines, found 1")
... # doctest: +NORMALIZE_WHITESPACE
{'file': 'file.py', 'line': '80', 'column': '1', 'code': 'E302',
'message': 'expected 2 blank lines, found 1'}
>>> t(r"file.py:7:1: P201: Resource `stdout` is acquired but not always released.")
... # doctest: +NORMALIZE_WHITESPACE
{'file': 'file.py', 'line': '7', 'column': '1', 'code': 'P201',
'message': 'Resource `stdout` is acquired but not always released.'}
>>> t(r"file.py:8:-10: W605 invalid escape sequence '/'")
... # doctest: +NORMALIZE_WHITESPACE
{'file': 'file.py', 'line': '8', 'column': '-10', 'code': 'W605',
'message': "invalid escape sequence '/'"}
"""
pass
def _run_command(
args: List[str],
*,
extra_env: Optional[Dict[str, str]],
) -> "subprocess.CompletedProcess[str]":
logging.debug(
"$ %s",
" ".join(
([f"{k}={v}" for (k, v) in extra_env.items()] if extra_env else []) + args
),
)
start_time = time.monotonic()
try:
return subprocess.run(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True,
encoding="utf-8",
)
finally:
end_time = time.monotonic()
logging.debug("took %dms", (end_time - start_time) * 1000)
def run_command(
args: List[str],
*,
extra_env: Optional[Dict[str, str]],
retries: int,
) -> "subprocess.CompletedProcess[str]":
remaining_retries = retries
while True:
try:
return _run_command(args, extra_env=extra_env)
except subprocess.CalledProcessError as err:
if remaining_retries == 0 or not re.match(
r"^ERROR:1:1: X000 linting with .+ timed out after \d+ seconds",
err.stdout,
):
raise err
remaining_retries -= 1
logging.warning(
"(%s/%s) Retrying because command failed with: %r",
retries - remaining_retries,
retries,
err,
)
time.sleep(1)
def get_issue_severity(code: str) -> LintSeverity:
# "B901": `return x` inside a generator
# "B902": Invalid first argument to a method
# "B903": __slots__ efficiency
# "B950": Line too long
# "C4": Flake8 Comprehensions
# "C9": Cyclomatic complexity
# "E2": PEP8 horizontal whitespace "errors"
# "E3": PEP8 blank line "errors"
# "E5": PEP8 line length "errors"
# "F401": Name imported but unused
# "F403": Star imports used
# "F405": Name possibly from star imports
# "T400": type checking Notes
# "T49": internal type checker errors or unmatched messages
if any(
code.startswith(x)
for x in [
"B9",
"C4",
"C9",
"E2",
"E3",
"E5",
"F401",
"F403",
"F405",
"T400",
"T49",
]
):
return LintSeverity.ADVICE
# "F821": Undefined name
# "E999": syntax error
if any(code.startswith(x) for x in ["F821", "E999"]):
return LintSeverity.ERROR
# "F": PyFlakes Error
# "B": flake8-bugbear Error
# "E": PEP8 "Error"
# "W": PEP8 Warning
# possibly other plugins...
return LintSeverity.WARNING
def get_issue_documentation_url(code: str) -> str:
if code in DOCUMENTED_IN_FLAKE8RULES:
return f"https://www.flake8rules.com/rules/{code}.html"
if code in DOCUMENTED_IN_FLAKE8COMPREHENSIONS:
return "https://pypi.org/project/flake8-comprehensions/#rules"
if code in DOCUMENTED_IN_BUGBEAR:
return "https://github.com/PyCQA/flake8-bugbear#list-of-warnings"
return ""
def check_files(
filenames: List[str],
flake8_plugins_path: Optional[str],
severities: Dict[str, LintSeverity],
retries: int,
) -> List[LintMessage]:
try:
proc = run_command(
[sys.executable, "-mflake8", "--exit-zero"] + filenames,
extra_env={"FLAKE8_PLUGINS_PATH": flake8_plugins_path}
if flake8_plugins_path
else None,
retries=retries,
)
except (OSError, subprocess.CalledProcessError) as err:
return [
LintMessage(
path=None,
line=None,
char=None,
code="FLAKE8",
severity=LintSeverity.ERROR,
name="command-failed",
original=None,
replacement=None,
description=(
f"Failed due to {err.__class__.__name__}:\n{err}"
if not isinstance(err, subprocess.CalledProcessError)
else (
"COMMAND (exit code {returncode})\n"
"{command}\n\n"
"STDERR\n{stderr}\n\n"
"STDOUT\n{stdout}"
).format(
returncode=err.returncode,
command=" ".join(as_posix(x) for x in err.cmd),
stderr=err.stderr.strip() or "(empty)",
stdout=err.stdout.strip() or "(empty)",
)
),
)
]
return [
LintMessage(
path=match["file"],
name=match["code"],
description="{}\nSee {}".format(
match["message"],
get_issue_documentation_url(match["code"]),
),
line=int(match["line"]),
char=int(match["column"])
if match["column"] is not None and not match["column"].startswith("-")
else None,
code="FLAKE8",
severity=severities.get(match["code"]) or get_issue_severity(match["code"]),
original=None,
replacement=None,
)
for match in RESULTS_RE.finditer(proc.stdout)
]
def main() -> None:
parser = argparse.ArgumentParser(
description="Flake8 wrapper linter.",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--flake8-plugins-path",
help="FLAKE8_PLUGINS_PATH env value",
)
parser.add_argument(
"--severity",
action="append",
help="map code to severity (e.g. `B950:advice`)",
)
parser.add_argument(
"--retries",
default=3,
type=int,
help="times to retry timed out flake8",
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET
if args.verbose
else logging.DEBUG
if len(args.filenames) < 1000
else logging.INFO,
stream=sys.stderr,
)
flake8_plugins_path = (
None
if args.flake8_plugins_path is None
else os.path.realpath(args.flake8_plugins_path)
)
severities: Dict[str, LintSeverity] = {}
if args.severity:
for severity in args.severity:
parts = severity.split(":", 1)
assert len(parts) == 2, f"invalid severity `{severity}`"
severities[parts[0]] = LintSeverity(parts[1])
lint_messages = check_files(
args.filenames, flake8_plugins_path, severities, args.retries
)
for lint_message in lint_messages:
print(json.dumps(lint_message._asdict()), flush=True)
if __name__ == "__main__":
main()
| pytorch-master | tools/linter/adapters/flake8_linter.py |
import argparse
import hashlib
import json
import logging
import os
import platform
import stat
import subprocess
import sys
import textwrap
import urllib.error
import urllib.request
from pathlib import Path
# String representing the host platform (e.g. Linux, Darwin).
HOST_PLATFORM = platform.system()
# PyTorch directory root
try:
result = subprocess.run(
["git", "rev-parse", "--show-toplevel"],
stdout=subprocess.PIPE,
check=True,
)
PYTORCH_ROOT = result.stdout.decode("utf-8").strip()
except subprocess.CalledProcessError:
# If git is not installed, compute repo root as 3 folders up from this file
path_ = os.path.abspath(__file__)
for _ in range(4):
path_ = os.path.dirname(path_)
PYTORCH_ROOT = path_
DRY_RUN = False
def compute_file_sha256(path: str) -> str:
"""Compute the SHA256 hash of a file and return it as a hex string."""
# If the file doesn't exist, return an empty string.
if not os.path.exists(path):
return ""
hash = hashlib.sha256()
# Open the file in binary mode and hash it.
with open(path, "rb") as f:
for b in f:
hash.update(b)
# Return the hash as a hexadecimal string.
return hash.hexdigest()
def report_download_progress(
chunk_number: int, chunk_size: int, file_size: int
) -> None:
"""
Pretty printer for file download progress.
"""
if file_size != -1:
percent = min(1, (chunk_number * chunk_size) / file_size)
bar = "#" * int(64 * percent)
sys.stdout.write("\r0% |{:<64}| {}%".format(bar, int(percent * 100)))
def check(binary_path: Path, reference_hash: str) -> bool:
"""Check whether the binary exists and is the right one.
If the hash differs from the reference, delete the existing binary.
"""
if not binary_path.exists():
logging.info(f"{binary_path} does not exist.")
return False
existing_binary_hash = compute_file_sha256(str(binary_path))
if existing_binary_hash == reference_hash:
return True
logging.warning(
textwrap.dedent(
f"""\
Found binary hash does not match reference!
Found hash: {existing_binary_hash}
Reference hash: {reference_hash}
Deleting {binary_path} just to be safe.
"""
)
)
if DRY_RUN:
logging.critical(
"In dry run mode, so not actually deleting the binary. But consider deleting it ASAP!"
)
return False
try:
binary_path.unlink()
except OSError as e:
logging.critical(f"Failed to delete binary: {e}")
logging.critical(
"Delete this binary as soon as possible and do not execute it!"
)
return False
def download(
name: str,
output_dir: str,
url: str,
reference_bin_hash: str,
) -> bool:
"""
Download a platform-appropriate binary if one doesn't already exist at the expected location and verify
that it is the right binary by checking its SHA256 hash against the expected hash.
"""
# First check if we need to do anything
binary_path = Path(output_dir, name)
if check(binary_path, reference_bin_hash):
logging.info(f"Correct binary already exists at {binary_path}. Exiting.")
return True
# Create the output folder
binary_path.parent.mkdir(parents=True, exist_ok=True)
# Download the binary
logging.info(f"Downloading {url} to {binary_path}")
if DRY_RUN:
logging.info("Exiting as there is nothing left to do in dry run mode")
return True
urllib.request.urlretrieve(
url,
binary_path,
reporthook=report_download_progress if sys.stdout.isatty() else None,
)
logging.info(f"Downloaded {name} successfully.")
# Check the downloaded binary
if not check(binary_path, reference_bin_hash):
logging.critical(f"Downloaded binary {name} failed its hash check")
return False
# Ensure that the executable bit is set
mode = os.stat(binary_path).st_mode
mode |= stat.S_IXUSR
os.chmod(binary_path, mode)
logging.info(f"Using {name} located at {binary_path}")
return True
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="downloads and checks binaries from s3",
)
parser.add_argument(
"--config-json",
required=True,
help="Path to config json that describes where to find binaries and hashes",
)
parser.add_argument(
"--linter",
required=True,
help="Which linter to initialize from the config json",
)
parser.add_argument(
"--output-dir",
required=True,
help="place to put the binary",
)
parser.add_argument(
"--output-name",
required=True,
help="name of binary",
)
parser.add_argument(
"--dry-run",
default=False,
help="do not download, just print what would be done",
)
args = parser.parse_args()
if args.dry_run == "0":
DRY_RUN = False
else:
DRY_RUN = True
logging.basicConfig(
format="[DRY_RUN] %(levelname)s: %(message)s"
if DRY_RUN
else "%(levelname)s: %(message)s",
level=logging.INFO,
stream=sys.stderr,
)
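# The config JSON is assumed to look roughly like this (illustrative sketch; only the
# "download_url" and "hash" keys per platform are read below):
#   {
#     "clang-format": {
#       "Linux":  {"download_url": "https://...", "hash": "<sha256>"},
#       "Darwin": {"download_url": "https://...", "hash": "<sha256>"}
#     }
#   }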
config = json.load(open(args.config_json))
config = config[args.linter]
# If the host platform is not in platform_to_hash, it is unsupported.
if HOST_PLATFORM not in config:
logging.error(f"Unsupported platform: {HOST_PLATFORM}")
exit(1)
url = config[HOST_PLATFORM]["download_url"]
hash = config[HOST_PLATFORM]["hash"]
ok = download(args.output_name, args.output_dir, url, hash)
if not ok:
logging.critical(f"Unable to initialize {args.linter}")
sys.exit(1)
| pytorch-master | tools/linter/adapters/s3_init.py |
"""Uploads a new binary to s3 and updates its hash in the config file.
You'll need to have appropriate credentials on the PyTorch AWS buckets, see:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/quickstart.html#configuration
for how to configure them.
"""
import argparse
import hashlib
import json
import logging
import os
import boto3 # type: ignore[import]
def compute_file_sha256(path: str) -> str:
"""Compute the SHA256 hash of a file and return it as a hex string."""
# If the file doesn't exist, return an empty string.
if not os.path.exists(path):
return ""
hash = hashlib.sha256()
# Open the file in binary mode and hash it.
with open(path, "rb") as f:
for b in f:
hash.update(b)
# Return the hash as a hexadecimal string.
return hash.hexdigest()
def main() -> None:
parser = argparse.ArgumentParser(
description="s3 binary updater",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--config-json",
required=True,
help="path to config json that you are trying to update",
)
parser.add_argument(
"--linter",
required=True,
help="name of linter you're trying to update",
)
parser.add_argument(
"--platform",
required=True,
help="which platform you are uploading the binary for",
)
parser.add_argument(
"--file",
required=True,
help="file to upload",
)
parser.add_argument(
"--dry-run",
action="store_true",
help="if set, don't actually upload/write hash",
)
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
config = json.load(open(args.config_json))
linter_config = config[args.linter][args.platform]
bucket = linter_config["s3_bucket"]
object_name = linter_config["object_name"]
# Upload the file
logging.info(
f"Uploading file {args.file} to s3 bucket: {bucket}, object name: {object_name}"
)
if not args.dry_run:
s3_client = boto3.client("s3")
s3_client.upload_file(args.file, bucket, object_name)
# Update hash in repo
hash_of_new_binary = compute_file_sha256(args.file)
logging.info(f"Computed new hash for binary {hash_of_new_binary}")
linter_config["hash"] = hash_of_new_binary
config_dump = json.dumps(config, indent=4, sort_keys=True)
logging.info("Writing out new config:")
logging.info(config_dump)
if not args.dry_run:
with open(args.config_json, "w") as f:
f.write(config_dump)
if __name__ == "__main__":
main()
| pytorch-master | tools/linter/adapters/update_s3.py |
#!/usr/bin/env python3
"""
Verify that it is possible to round-trip native_functions.yaml via ruamel under some
configuration. Keeping native_functions.yaml consistent in this way allows us to
run codemods on the file using ruamel without introducing line noise. Note that we don't
want to normalize the YAML file, as that would lead to lots of spurious lint failures. Anything
that ruamel understands how to roundtrip, e.g., whitespace and comments, is OK!
ruamel is a bit picky about inconsistent indentation, so you will have to indent your
file properly. Also, if you are working on changing the syntax of native_functions.yaml,
you may find that you want to use some format that is not what ruamel prefers. If so,
it is OK to modify this script (instead of reformatting native_functions.yaml)--the point
is simply to make sure that there is *some* configuration of ruamel that can round trip
the YAML, not to be prescriptive about it.
"""
import argparse
import json
import sys
from enum import Enum
from io import StringIO
from typing import NamedTuple, Optional
import ruamel.yaml # type: ignore[import]
class LintSeverity(str, Enum):
ERROR = "error"
WARNING = "warning"
ADVICE = "advice"
DISABLED = "disabled"
class LintMessage(NamedTuple):
path: Optional[str]
line: Optional[int]
char: Optional[int]
code: str
severity: LintSeverity
name: str
original: Optional[str]
replacement: Optional[str]
description: Optional[str]
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="native functions linter",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--native-functions-yml",
required=True,
help="location of native_functions.yaml",
)
args = parser.parse_args()
with open(args.native_functions_yml) as f:
contents = f.read()
yaml = ruamel.yaml.YAML() # type: ignore[attr-defined]
yaml.preserve_quotes = True # type: ignore[assignment]
yaml.width = 1000 # type: ignore[assignment]
yaml.boolean_representation = ["False", "True"] # type: ignore[attr-defined]
try:
r = yaml.load(contents)
except Exception as err:
msg = LintMessage(
path=None,
line=None,
char=None,
code="NATIVEFUNCTIONS",
severity=LintSeverity.ERROR,
name="YAML load failure",
original=None,
replacement=None,
description=f"Failed due to {err.__class__.__name__}:\n{err}",
)
print(json.dumps(msg._asdict()), flush=True)
sys.exit(0)
# ruamel's author intentionally didn't include a dump-to-string helper, so go through StringIO:
# https://stackoverflow.com/questions/47614862/best-way-to-use-ruamel-yaml-to-dump-to-string-not-to-stream
string_stream = StringIO()
yaml.dump(r, string_stream)
new_contents = string_stream.getvalue()
string_stream.close()
if contents != new_contents:
msg = LintMessage(
path=args.native_functions_yml,
line=None,
char=None,
code="NATIVEFUNCTIONS",
severity=LintSeverity.ERROR,
name="roundtrip inconsistency",
original=contents,
replacement=new_contents,
description=(
"YAML roundtrip failed; run `lintrunner --take NATIVEFUNCTIONS -a` to apply the suggested changes. "
"If you think this is in error, please see tools/linter/adapters/nativefunctions_linter.py"
),
)
print(json.dumps(msg._asdict()), flush=True)
| pytorch-master | tools/linter/adapters/nativefunctions_linter.py |
import argparse
import concurrent.futures
import json
import logging
import os
import subprocess
import sys
import time
from enum import Enum
from pathlib import Path
from typing import Any, List, NamedTuple, Optional
IS_WINDOWS: bool = os.name == "nt"
def eprint(*args: Any, **kwargs: Any) -> None:
print(*args, file=sys.stderr, flush=True, **kwargs)
class LintSeverity(str, Enum):
ERROR = "error"
WARNING = "warning"
ADVICE = "advice"
DISABLED = "disabled"
class LintMessage(NamedTuple):
path: Optional[str]
line: Optional[int]
char: Optional[int]
code: str
severity: LintSeverity
name: str
original: Optional[str]
replacement: Optional[str]
description: Optional[str]
def as_posix(name: str) -> str:
return name.replace("\\", "/") if IS_WINDOWS else name
def _run_command(
args: List[str],
*,
timeout: int,
) -> "subprocess.CompletedProcess[bytes]":
logging.debug("$ %s", " ".join(args))
start_time = time.monotonic()
try:
return subprocess.run(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=IS_WINDOWS, # So batch scripts are found.
timeout=timeout,
check=True,
)
finally:
end_time = time.monotonic()
logging.debug("took %dms", (end_time - start_time) * 1000)
def run_command(
args: List[str],
*,
retries: int,
timeout: int,
) -> "subprocess.CompletedProcess[bytes]":
remaining_retries = retries
while True:
try:
return _run_command(args, timeout=timeout)
except subprocess.TimeoutExpired as err:
if remaining_retries == 0:
raise err
remaining_retries -= 1
logging.warning(
"(%s/%s) Retrying because command failed with: %r",
retries - remaining_retries,
retries,
err,
)
time.sleep(1)
def check_file(
filename: str,
binary: str,
retries: int,
timeout: int,
) -> List[LintMessage]:
try:
with open(filename, "rb") as f:
original = f.read()
proc = run_command(
[binary, filename],
retries=retries,
timeout=timeout,
)
except subprocess.TimeoutExpired:
return [
LintMessage(
path=filename,
line=None,
char=None,
code="CLANGFORMAT",
severity=LintSeverity.ERROR,
name="timeout",
original=None,
replacement=None,
description=(
"clang-format timed out while trying to process a file. "
"Please report an issue in pytorch/pytorch with the "
"label 'module: lint'"
),
)
]
except (OSError, subprocess.CalledProcessError) as err:
return [
LintMessage(
path=filename,
line=None,
char=None,
code="CLANGFORMAT",
severity=LintSeverity.ADVICE,
name="command-failed",
original=None,
replacement=None,
description=(
f"Failed due to {err.__class__.__name__}:\n{err}"
if not isinstance(err, subprocess.CalledProcessError)
else (
"COMMAND (exit code {returncode})\n"
"{command}\n\n"
"STDERR\n{stderr}\n\n"
"STDOUT\n{stdout}"
).format(
returncode=err.returncode,
command=" ".join(as_posix(x) for x in err.cmd),
stderr=err.stderr.decode("utf-8").strip() or "(empty)",
stdout=err.stdout.decode("utf-8").strip() or "(empty)",
)
),
)
]
replacement = proc.stdout
if original == replacement:
return []
return [
LintMessage(
path=filename,
line=None,
char=None,
code="CLANGFORMAT",
severity=LintSeverity.WARNING,
name="format",
original=original.decode("utf-8"),
replacement=replacement.decode("utf-8"),
description="See https://clang.llvm.org/docs/ClangFormat.html.\nRun `lintrunner -a` to apply this patch.",
)
]
def main() -> None:
parser = argparse.ArgumentParser(
description="Format files with clang-format.",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--binary",
required=True,
help="clang-format binary path",
)
parser.add_argument(
"--retries",
default=3,
type=int,
help="times to retry timed out clang-format",
)
parser.add_argument(
"--timeout",
default=90,
type=int,
help="seconds to wait for clang-format",
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET
if args.verbose
else logging.DEBUG
if len(args.filenames) < 1000
else logging.INFO,
stream=sys.stderr,
)
binary = os.path.normpath(args.binary) if IS_WINDOWS else args.binary
if not Path(binary).exists():
lint_message = LintMessage(
path=None,
line=None,
char=None,
code="CLANGFORMAT",
severity=LintSeverity.ERROR,
name="init-error",
original=None,
replacement=None,
description=(
f"Could not find clang-format binary at {binary}, "
"did you forget to run `lintrunner init`?"
),
)
print(json.dumps(lint_message._asdict()), flush=True)
sys.exit(0)
with concurrent.futures.ThreadPoolExecutor(
max_workers=os.cpu_count(),
thread_name_prefix="Thread",
) as executor:
futures = {
executor.submit(check_file, x, binary, args.retries, args.timeout): x
for x in args.filenames
}
for future in concurrent.futures.as_completed(futures):
try:
for lint_message in future.result():
print(json.dumps(lint_message._asdict()), flush=True)
except Exception:
logging.critical('Failed at "%s".', futures[future])
raise
if __name__ == "__main__":
main()
| pytorch-master | tools/linter/adapters/clangformat_linter.py |
"""
EXEC: Ensure that source files are not executable.
"""
import argparse
import json
import logging
import os
import sys
from enum import Enum
from typing import NamedTuple, Optional
LINTER_CODE = "EXEC"
class LintSeverity(str, Enum):
ERROR = "error"
WARNING = "warning"
ADVICE = "advice"
DISABLED = "disabled"
class LintMessage(NamedTuple):
path: Optional[str]
line: Optional[int]
char: Optional[int]
code: str
severity: LintSeverity
name: str
original: Optional[str]
replacement: Optional[str]
description: Optional[str]
def check_file(filename: str) -> Optional[LintMessage]:
is_executable = os.access(filename, os.X_OK)
if is_executable:
return LintMessage(
path=filename,
line=None,
char=None,
code=LINTER_CODE,
severity=LintSeverity.ERROR,
name="executable-permissions",
original=None,
replacement=None,
description="This file has executable permission; please remove it by using `chmod -x`.",
)
return None
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="exec linter",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--verbose",
action="store_true",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET
if args.verbose
else logging.DEBUG
if len(args.filenames) < 1000
else logging.INFO,
stream=sys.stderr,
)
lint_messages = []
for filename in args.filenames:
lint_message = check_file(filename)
if lint_message is not None:
lint_messages.append(lint_message)
for lint_message in lint_messages:
print(json.dumps(lint_message._asdict()), flush=True)
| pytorch-master | tools/linter/adapters/exec_linter.py |
import argparse
import concurrent.futures
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import time
from enum import Enum
from pathlib import Path
from sysconfig import get_paths as gp
from typing import Any, List, NamedTuple, Optional, Pattern
# PyTorch directory root
result = subprocess.run(
["git", "rev-parse", "--show-toplevel"],
stdout=subprocess.PIPE,
check=True,
)
PYTORCH_ROOT = result.stdout.decode("utf-8").strip()
IS_WINDOWS: bool = os.name == "nt"
# Returns '/usr/local/include/python<version number>'
def get_python_include_dir() -> str:
return gp()["include"]
def eprint(*args: Any, **kwargs: Any) -> None:
print(*args, file=sys.stderr, flush=True, **kwargs)
class LintSeverity(str, Enum):
ERROR = "error"
WARNING = "warning"
ADVICE = "advice"
DISABLED = "disabled"
class LintMessage(NamedTuple):
path: Optional[str]
line: Optional[int]
char: Optional[int]
code: str
severity: LintSeverity
name: str
original: Optional[str]
replacement: Optional[str]
description: Optional[str]
def as_posix(name: str) -> str:
return name.replace("\\", "/") if IS_WINDOWS else name
# c10/core/DispatchKey.cpp:281:26: error: 'k' used after it was moved [bugprone-use-after-move]
RESULTS_RE: Pattern[str] = re.compile(
r"""(?mx)
^
(?P<file>.*?):
(?P<line>\d+):
(?:(?P<column>-?\d+):)?
\s(?P<severity>\S+?):?
\s(?P<message>.*)
\s(?P<code>\[.*\])
$
"""
)
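# For the example line above, the named groups would be (illustrative):
#   file="c10/core/DispatchKey.cpp", line="281", column="26", severity="error",
#   message="'k' used after it was moved", code="[bugprone-use-after-move]"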
def run_command(
args: List[str],
) -> "subprocess.CompletedProcess[bytes]":
logging.debug("$ %s", " ".join(args))
start_time = time.monotonic()
try:
return subprocess.run(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=False,
)
finally:
end_time = time.monotonic()
logging.debug("took %dms", (end_time - start_time) * 1000)
# clang-tidy reports severity tokens such as "error" and "warning"; anything not in the
# map below falls back to LintSeverity.ERROR.
severities = {
"error": LintSeverity.ERROR,
"warning": LintSeverity.WARNING,
}
def clang_search_dirs() -> List[str]:
# Compilers are ordered based on fallback preference
# We pick the first one that is available on the system
compilers = ["clang", "gcc", "cpp", "cc"]
compilers = [c for c in compilers if shutil.which(c) is not None]
if len(compilers) == 0:
raise RuntimeError(f"None of {compilers} were found")
compiler = compilers[0]
result = subprocess.run(
[compiler, "-E", "-x", "c++", "-", "-v"],
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True,
)
stderr = result.stderr.decode().strip().split("\n")
search_start = r"#include.*search starts here:"
search_end = r"End of search list."
append_path = False
search_paths = []
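# The block being parsed out of the compiler's stderr looks roughly like (illustrative):
#   #include <...> search starts here:
#    /usr/lib/gcc/x86_64-linux-gnu/9/include
#    /usr/include
#   End of search list.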
for line in stderr:
if re.match(search_start, line):
if append_path:
continue
else:
append_path = True
elif re.match(search_end, line):
break
elif append_path:
search_paths.append(line.strip())
return search_paths
include_args = []
include_dir = [
"/usr/lib/llvm-11/include/openmp",
get_python_include_dir(),
os.path.join(PYTORCH_ROOT, "third_party/pybind11/include"),
] + clang_search_dirs()
for dir in include_dir:
include_args += ["--extra-arg", f"-I{dir}"]
def check_file(
filename: str,
binary: str,
build_dir: Path,
) -> List[LintMessage]:
try:
proc = run_command(
[binary, f"-p={build_dir}", *include_args, filename],
)
except OSError as err:
return [
LintMessage(
path=filename,
line=None,
char=None,
code="CLANGTIDY",
severity=LintSeverity.ERROR,
name="command-failed",
original=None,
replacement=None,
description=(f"Failed due to {err.__class__.__name__}:\n{err}"),
)
]
lint_messages = []
try:
# Change the current working directory to the build directory, since
# clang-tidy will report files relative to the build directory.
saved_cwd = os.getcwd()
os.chdir(build_dir)
for match in RESULTS_RE.finditer(proc.stdout.decode()):
# Convert the reported path to an absolute path.
abs_path = str(Path(match["file"]).resolve())
message = LintMessage(
path=abs_path,
name=match["code"],
description=match["message"],
line=int(match["line"]),
char=int(match["column"])
if match["column"] is not None and not match["column"].startswith("-")
else None,
code="CLANGTIDY",
severity=severities.get(match["severity"], LintSeverity.ERROR),
original=None,
replacement=None,
)
lint_messages.append(message)
finally:
os.chdir(saved_cwd)
return lint_messages
def main() -> None:
parser = argparse.ArgumentParser(
description="clang-tidy wrapper linter.",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--binary",
required=True,
help="clang-tidy binary path",
)
parser.add_argument(
"--build_dir",
required=True,
help=(
"Where the compile_commands.json file is located. "
"Gets passed to clang-tidy -p"
),
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET
if args.verbose
else logging.DEBUG
if len(args.filenames) < 1000
else logging.INFO,
stream=sys.stderr,
)
if not os.path.exists(args.binary):
err_msg = LintMessage(
path="<none>",
line=None,
char=None,
code="CLANGTIDY",
severity=LintSeverity.ERROR,
name="command-failed",
original=None,
replacement=None,
description=(
f"Could not find clang-tidy binary at {args.binary},"
" you may need to run `lintrunner init`."
),
)
print(json.dumps(err_msg._asdict()), flush=True)
exit(0)
abs_build_dir = Path(args.build_dir).resolve()
with concurrent.futures.ThreadPoolExecutor(
max_workers=os.cpu_count(),
thread_name_prefix="Thread",
) as executor:
futures = {
executor.submit(
check_file,
filename,
args.binary,
abs_build_dir,
): filename
for filename in args.filenames
}
for future in concurrent.futures.as_completed(futures):
try:
for lint_message in future.result():
print(json.dumps(lint_message._asdict()), flush=True)
except Exception:
logging.critical('Failed at "%s".', futures[future])
raise
if __name__ == "__main__":
main()
| pytorch-master | tools/linter/adapters/clangtidy_linter.py |
import argparse
import json
import logging
import os
import re
import subprocess
import sys
import time
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, NamedTuple, Optional, Pattern
IS_WINDOWS: bool = os.name == "nt"
def eprint(*args: Any, **kwargs: Any) -> None:
print(*args, file=sys.stderr, flush=True, **kwargs)
class LintSeverity(str, Enum):
ERROR = "error"
WARNING = "warning"
ADVICE = "advice"
DISABLED = "disabled"
class LintMessage(NamedTuple):
path: Optional[str]
line: Optional[int]
char: Optional[int]
code: str
severity: LintSeverity
name: str
original: Optional[str]
replacement: Optional[str]
description: Optional[str]
def as_posix(name: str) -> str:
return name.replace("\\", "/") if IS_WINDOWS else name
# tools/linter/flake8_linter.py:15:13: error: Incompatibl...int") [assignment]
RESULTS_RE: Pattern[str] = re.compile(
r"""(?mx)
^
(?P<file>.*?):
(?P<line>\d+):
(?:(?P<column>-?\d+):)?
\s(?P<severity>\S+?):?
\s(?P<message>.*)
\s(?P<code>\[.*\])
$
"""
)
def run_command(
args: List[str],
*,
extra_env: Optional[Dict[str, str]],
retries: int,
) -> "subprocess.CompletedProcess[bytes]":
logging.debug("$ %s", " ".join(args))
start_time = time.monotonic()
try:
return subprocess.run(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
finally:
end_time = time.monotonic()
logging.debug("took %dms", (end_time - start_time) * 1000)
# Severity is either "error" or "note":
# https://github.com/python/mypy/blob/8b47a032e1317fb8e3f9a818005a6b63e9bf0311/mypy/errors.py#L46-L47
severities = {
"error": LintSeverity.ERROR,
"note": LintSeverity.ADVICE,
}
def check_files(
filenames: List[str],
config: str,
retries: int,
) -> List[LintMessage]:
try:
proc = run_command(
[sys.executable, "-mmypy", f"--config={config}"] + filenames,
extra_env={},
retries=retries,
)
except OSError as err:
return [
LintMessage(
path=None,
line=None,
char=None,
code="MYPY",
severity=LintSeverity.ERROR,
name="command-failed",
original=None,
replacement=None,
description=(f"Failed due to {err.__class__.__name__}:\n{err}"),
)
]
stdout = str(proc.stdout, "utf-8").strip()
return [
LintMessage(
path=match["file"],
name=match["code"],
description=match["message"],
line=int(match["line"]),
char=int(match["column"])
if match["column"] is not None and not match["column"].startswith("-")
else None,
code="MYPY",
severity=severities.get(match["severity"], LintSeverity.ERROR),
original=None,
replacement=None,
)
for match in RESULTS_RE.finditer(stdout)
]
def main() -> None:
parser = argparse.ArgumentParser(
description="mypy wrapper linter.",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--retries",
default=3,
type=int,
help="times to retry timed out mypy",
)
parser.add_argument(
"--config",
required=True,
help="path to an mypy .ini config file",
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET
if args.verbose
else logging.DEBUG
if len(args.filenames) < 1000
else logging.INFO,
stream=sys.stderr,
)
# Use a dictionary here to preserve order. mypy cares about order,
# tragically, e.g. https://github.com/python/mypy/issues/2015
filenames: Dict[str, bool] = {}
# If a stub file exists, have mypy check it instead of the original file, in
# accordance with PEP-484 (see https://www.python.org/dev/peps/pep-0484/#stub-files)
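# e.g. (illustrative): if foo.py is passed but foo.pyi exists next to it, mypy is
# pointed at foo.pyi instead; .pyi inputs are always kept as-is.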
for filename in args.filenames:
if filename.endswith(".pyi"):
filenames[filename] = True
continue
stub_filename = filename.replace(".py", ".pyi")
if Path(stub_filename).exists():
filenames[stub_filename] = True
else:
filenames[filename] = True
lint_messages = check_files(list(filenames), args.config, args.retries)
for lint_message in lint_messages:
print(json.dumps(lint_message._asdict()), flush=True)
if __name__ == "__main__":
main()
| pytorch-master | tools/linter/adapters/mypy_linter.py |
"""
NEWLINE: Checks that files end with exactly one trailing newline (no extra blank lines at EOF and no missing final newline).
"""
import argparse
import json
import logging
import os
import sys
from enum import Enum
from typing import NamedTuple, Optional
NEWLINE = 10 # ASCII "\n"
LINTER_CODE = "NEWLINE"
class LintSeverity(str, Enum):
ERROR = "error"
WARNING = "warning"
ADVICE = "advice"
DISABLED = "disabled"
class LintMessage(NamedTuple):
path: Optional[str]
line: Optional[int]
char: Optional[int]
code: str
severity: LintSeverity
name: str
original: Optional[str]
replacement: Optional[str]
description: Optional[str]
def correct_trailing_newlines(filename: str) -> bool:
with open(filename, "rb") as f:
a = len(f.read(2))
if a == 0:
return True
elif a == 1:
# file is wrong whether or not the only byte is a newline
return False
else:
f.seek(-2, os.SEEK_END)
b, c = f.read(2)
# no ASCII byte is part of any non-ASCII character in UTF-8
return b != NEWLINE and c == NEWLINE
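# Illustrative outcomes of the byte checks above (derived from the logic, not exhaustive):
#   b""            -> OK (empty file is left alone)
#   b"hello\n"     -> OK (ends with exactly one newline)
#   b"hello"       -> not OK (missing final newline)
#   b"hello\n\n"   -> not OK (extra trailing newline)
#   b"\n"          -> not OK (single-byte file)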
def check_file(filename: str) -> Optional[LintMessage]:
logging.debug("Checking file %s", filename)
with open(filename, "rb") as f:
a = len(f.read(2))
if a == 0:
# File is empty, just leave it alone.
return None
elif a == 1:
# file is wrong whether or not the only byte is a newline
return LintMessage(
path=filename,
line=None,
char=None,
code=LINTER_CODE,
severity=LintSeverity.ERROR,
name="testestTrailing newline",
original=None,
replacement=None,
description="Trailing newline found. Run `lintrunner --take NEWLINE -a` to apply changes.",
)
else:
# Read the last two bytes
f.seek(-2, os.SEEK_END)
b, c = f.read(2)
# no ASCII byte is part of any non-ASCII character in UTF-8
if b != NEWLINE and c == NEWLINE:
return None
else:
f.seek(0)
try:
original = f.read().decode("utf-8")
except Exception as err:
return LintMessage(
path=filename,
line=None,
char=None,
code=LINTER_CODE,
severity=LintSeverity.ERROR,
name="Decoding failure",
original=None,
replacement=None,
description=f"utf-8 decoding failed due to {err.__class__.__name__}:\n{err}",
)
return LintMessage(
path=filename,
line=None,
char=None,
code=LINTER_CODE,
severity=LintSeverity.ERROR,
name="Trailing newline",
original=original,
replacement=original.rstrip("\n") + "\n",
description="Trailing newline found. Run `lintrunner --take NEWLINE -a` to apply changes.",
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="native functions linter",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--verbose",
action="store_true",
help="location of native_functions.yaml",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET
if args.verbose
else logging.DEBUG
if len(args.filenames) < 1000
else logging.INFO,
stream=sys.stderr,
)
lint_messages = []
for filename in args.filenames:
lint_message = check_file(filename)
if lint_message is not None:
lint_messages.append(lint_message)
for lint_message in lint_messages:
print(json.dumps(lint_message._asdict()), flush=True)
| pytorch-master | tools/linter/adapters/newlines_linter.py |
import argparse
import json
import logging
import shutil
import subprocess
import time
from enum import Enum
from typing import List, NamedTuple, Optional
LINTER_CODE = "SHELLCHECK"
class LintSeverity(str, Enum):
ERROR = "error"
WARNING = "warning"
ADVICE = "advice"
DISABLED = "disabled"
class LintMessage(NamedTuple):
path: Optional[str]
line: Optional[int]
char: Optional[int]
code: str
severity: LintSeverity
name: str
original: Optional[str]
replacement: Optional[str]
description: Optional[str]
def run_command(
args: List[str],
) -> "subprocess.CompletedProcess[bytes]":
logging.debug("$ %s", " ".join(args))
start_time = time.monotonic()
try:
return subprocess.run(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
finally:
end_time = time.monotonic()
logging.debug("took %dms", (end_time - start_time) * 1000)
def check_files(
files: List[str],
) -> List[LintMessage]:
try:
proc = run_command(
["shellcheck", "--external-sources", "--format=json1"] + files
)
except OSError as err:
return [
LintMessage(
path=None,
line=None,
char=None,
code=LINTER_CODE,
severity=LintSeverity.ERROR,
name="command-failed",
original=None,
replacement=None,
description=(f"Failed due to {err.__class__.__name__}:\n{err}"),
)
]
stdout = str(proc.stdout, "utf-8").strip()
results = json.loads(stdout)["comments"]
return [
LintMessage(
path=result["file"],
name=f"SC{result['code']}",
description=result["message"],
line=result["line"],
char=result["column"],
code=LINTER_CODE,
severity=LintSeverity.ERROR,
original=None,
replacement=None,
)
for result in results
]
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="shellcheck runner",
fromfile_prefix_chars="@",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
if shutil.which("shellcheck") is None:
err_msg = LintMessage(
path="<none>",
line=None,
char=None,
code=LINTER_CODE,
severity=LintSeverity.ERROR,
name="command-failed",
original=None,
replacement=None,
description="shellcheck is not installed, did you forget to run `lintrunner init`?",
)
print(json.dumps(err_msg._asdict()), flush=True)
exit(0)
args = parser.parse_args()
lint_messages = check_files(args.filenames)
for lint_message in lint_messages:
print(json.dumps(lint_message._asdict()), flush=True)
| pytorch-master | tools/linter/adapters/shellcheck_linter.py |
import os
import subprocess
import sys
from typing import List
def run_cmd(cmd: List[str]) -> None:
print(f"Running: {cmd}")
result = subprocess.run(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = (
result.stdout.decode("utf-8").strip(),
result.stderr.decode("utf-8").strip(),
)
print(stdout)
print(stderr)
if result.returncode != 0:
print(f"Failed to run {cmd}")
exit(1)
def run_timed_cmd(cmd: List[str]) -> None:
run_cmd(["time"] + cmd)
def update_submodules() -> None:
run_cmd(["git", "submodule", "update", "--init", "--recursive"])
def gen_compile_commands() -> None:
os.environ["USE_NCCL"] = "0"
os.environ["USE_DEPLOY"] = "1"
os.environ["CC"] = "clang"
os.environ["CXX"] = "clang++"
run_timed_cmd([sys.executable, "setup.py", "--cmake-only", "build"])
def run_autogen() -> None:
run_timed_cmd(
[
sys.executable,
"-m",
"torchgen.gen",
"-s",
"aten/src/ATen",
"-d",
"build/aten/src/ATen",
"--per-operator-headers",
]
)
run_timed_cmd(
[
sys.executable,
"tools/setup_helpers/generate_code.py",
"--native-functions-path",
"aten/src/ATen/native/native_functions.yaml",
"--tags-path",
"aten/src/ATen/native/tags.yaml",
"--gen_lazy_ts_backend",
]
)
def generate_build_files() -> None:
update_submodules()
gen_compile_commands()
run_autogen()
if __name__ == "__main__":
generate_build_files()
| pytorch-master | tools/linter/clang_tidy/generate_build_files.py |
| pytorch-master | tools/linter/clang_tidy/__init__.py |
# Parses derivatives.yaml into autograd functions
#
# Each autograd function is represented by `DifferentiabilityInfo` containing
# a list of `Derivative`. See `torchgen.api.autograd` for the data models.
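# A derivatives.yaml entry looks roughly like this (illustrative sketch, simplified):
#   - name: mul.Tensor(Tensor self, Tensor other) -> Tensor
#     self: grad * other
#     other: grad * self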
import re
from collections import defaultdict
from typing import Any, Counter, Dict, List, Match, Optional, Sequence, Set, Tuple
import yaml
from torchgen.api import cpp
from torchgen.api.autograd import (
Derivative,
DifferentiabilityInfo,
ForwardDerivative,
SavedAttribute,
)
from torchgen.api.types import (
BaseCType,
Binding,
boolT,
CppSignatureGroup,
intArrayRefT,
layoutT,
longT,
NamedCType,
OptionalCType,
scalarTypeT,
SpecialArgName,
stringT,
symIntArrayRefT,
tensorGeometryT,
tensorOptionsT,
typeAndSizeT,
VectorCType,
)
from torchgen.context import with_native_function
from torchgen.gen import get_grouped_by_view_native_functions, parse_native_yaml
from torchgen.model import (
AUTOGRAD_KEYS,
FunctionSchema,
NativeFunction,
NativeFunctionsViewGroup,
OperatorName,
Type,
Variant,
)
from torchgen.utils import concatMap, IDENT_REGEX, split_name_params, YamlLoader
_GLOBAL_LOAD_DERIVATIVE_CACHE = {}
_VALID_AUTOGRAD_KEYS = set(AUTOGRAD_KEYS)
# This function directly adds per-dispatchkey derivative entries for {view}_copy variants of each view op.
# Since every {view} and {view}_copy op shares the same derivative formula,
# we generate them here instead of duplicating them in the yaml.
# See Note [Codegen'd {view}_copy Operators]
def add_view_copy_derivatives(
infos: Dict[FunctionSchema, Dict[str, DifferentiabilityInfo]],
view_groups: List[NativeFunctionsViewGroup],
) -> None:
# Get the map from each view op's name to its corresponding view group
view_name_to_group: Dict[OperatorName, NativeFunctionsViewGroup] = {
g.view.func.name: g for g in view_groups
}
view_infos = dict()
for _, info_dispatch_dict in infos.items():
# maybe_view_group only needs to be calculated once per info_dispatch_dict
maybe_view_group = None
view_copy_differentiability_infos = dict()
for dispatch_key, info in info_dispatch_dict.items():
maybe_view_group = view_name_to_group.get(info.func.func.name, None)
if maybe_view_group is not None and maybe_view_group.view_copy is not None:
view_copy_info = info.create_view_copy_from_view_derivative(
maybe_view_group
)
if view_copy_info is not None:
fn_schema = view_copy_info.func.func
view_copy_differentiability_infos[dispatch_key] = view_copy_info
else:
break
if len(view_copy_differentiability_infos) > 0:
assert fn_schema is not None
view_infos[fn_schema] = view_copy_differentiability_infos
infos.update(view_infos)
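# A minimal sketch of the pass above, using plain dicts in place of the real
# torchgen data models (illustrative only, never called; `view_to_copy` is a
# hypothetical lookup from a view op's schema to its generated {view}_copy schema):
def _sketch_add_view_copy_derivatives(
    infos: Dict[Any, Dict[str, Any]], view_to_copy: Dict[Any, Any]
) -> None:
    extra = {}
    for schema, per_dispatch_key in infos.items():
        copy_schema = view_to_copy.get(schema)
        if copy_schema is not None:
            # every {view}_copy op reuses its {view} op's per-dispatch-key entries
            extra[copy_schema] = dict(per_dispatch_key)
    infos.update(extra)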
def load_derivatives(
derivatives_yaml_path: str, native_yaml_path: str, tags_yaml_path: str
) -> Tuple[Dict[FunctionSchema, Dict[str, DifferentiabilityInfo]], Set[str]]:
# Do some caching as this is a deterministic function
global _GLOBAL_LOAD_DERIVATIVE_CACHE
key = (derivatives_yaml_path, native_yaml_path)
if key not in _GLOBAL_LOAD_DERIVATIVE_CACHE:
with open(derivatives_yaml_path, "r") as f:
definitions = yaml.load(f, Loader=YamlLoader)
funcs = parse_native_yaml(native_yaml_path, tags_yaml_path).native_functions
# From the parsed native functions, separate out the (generated) view_copy functions,
# so we can generate derivatives for them separately.
native_functions_with_view_groups = get_grouped_by_view_native_functions(funcs)
native_functions_without_view_copies = concatMap(
# We need to pull out the view_inplace ops too, since they might have their own derivative entries.
lambda g: [g]
if isinstance(g, NativeFunction)
else list(g.functions(include_copy=False)),
native_functions_with_view_groups,
)
view_groups = [
g
for g in native_functions_with_view_groups
if isinstance(g, NativeFunctionsViewGroup)
]
# What's the difference between a function schema and a signature?
# A function schema is the complete declaration, including mutability annotations / default values, etc.
# A signature is the canonical schema for a group of functions (in-place/out/functional variants)
# that are semantically related.
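# For example (illustrative): the "add.Tensor", "add_.Tensor" and "add.out" schemas
# are three distinct function schemas, but signature() canonicalizes away the
# in-place/out differences, so all three land in the same functions_by_signature
# bucket below, while each full schema string gets its own functions_by_schema entry.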
functions_by_signature: Dict[
FunctionSchema, List[NativeFunction]
] = defaultdict(list)
functions_by_schema: Dict[str, NativeFunction] = dict()
for function in native_functions_without_view_copies:
functions_by_signature[function.func.signature()].append(function)
assert str(function.func) not in functions_by_schema
functions_by_schema[str(function.func)] = function
# Keep track of how many of which ops we've seen so we can
# disambiguate them with a numeric suffix.
op_counter = Counter[str]()
# infos is a dict that maps FunctionSchema -> a dict of per dispatch key DifferentiabilityInfos
# this is useful because in tools/autograd/gen_autograd.py:match_differentiability_info
# we ultimately need to categorize the DifferentiabilityInfos by FunctionSchema
infos: Dict[FunctionSchema, Dict[str, DifferentiabilityInfo]] = dict()
used_dispatch_keys: Set[str] = set()
for defn_dict in definitions:
# Ensure that the old derivatives.yaml schema with no dispatch key can be loaded.
if "dispatch" not in defn_dict:
specification = defn_dict.pop("name")
output_differentiability = defn_dict.pop(
"output_differentiability", None
)
defn_dict = {"name": specification, "dispatch": {"Default": defn_dict}}
if output_differentiability:
defn_dict["output_differentiability"] = output_differentiability
name, per_dispatch_diffinfos = create_differentiability_info(
defn_dict,
functions_by_signature,
functions_by_schema,
op_counter,
used_dispatch_keys,
)
infos[name] = per_dispatch_diffinfos
add_view_copy_derivatives(infos, view_groups)
# cache both the loaded infos as well as a set of all the dispatch_keys/aliases
# that appear in derivatives.yaml. used_dispatch_keys is useful for generating
# VariableType.cpp where we need a TORCH_LIBRARY_IMPL for every autograd dispatch key used
_GLOBAL_LOAD_DERIVATIVE_CACHE[key] = infos, used_dispatch_keys
return _GLOBAL_LOAD_DERIVATIVE_CACHE[key]
@with_native_function
def cpp_arguments(f: NativeFunction) -> Sequence[Binding]:
return CppSignatureGroup.from_native_function(f, method=False).signature.arguments()
def create_derivative(
f: NativeFunction,
formula: str,
var_names: Tuple[str, ...],
available_named_gradients: Sequence[str],
) -> Derivative:
original_formula = formula
arguments: List[NamedCType] = [
a.nctype.remove_const_ref() for a in cpp_arguments(f)
]
return_names = tuple(n if n != "self" else "result" for n in cpp.return_names(f))
return_types = tuple(cpp.return_type(r).remove_const_ref() for r in f.func.returns)
named_returns = [
NamedCType(name, type) for name, type in zip(return_names, return_types)
]
formula, saved_inputs = saved_variables(formula, arguments, var_names)
formula, saved_outputs = saved_variables(formula, named_returns, var_names)
used_named_gradients = {
name
for name in available_named_gradients
if re.search(IDENT_REGEX.format(name), formula)
}
# Check that the referenced derivatives in the formula are in bounds
for i in used_gradient_indices(formula):
if i >= len(f.func.returns):
raise RuntimeError(
f"Out of bounds grads access: derivative formula for {cpp.name(f.func)} "
f"used grads[{i}], but the forward only returns {len(f.func.returns)} outputs."
)
return Derivative(
formula=formula,
original_formula=original_formula,
var_names=var_names,
saved_inputs=saved_inputs,
saved_outputs=saved_outputs,
named_gradients=used_named_gradients,
)
def create_forward_derivative(
f: NativeFunction, formula: str, names: Tuple[str, ...]
) -> ForwardDerivative:
var_names = names
var_types: Optional[Tuple[Type, ...]] = None
for r in f.func.returns:
if r.name in var_names:
if var_types is None:
var_types = tuple()
var_types = var_types + (r.type,)
# Handle default return names
if var_types is None:
if var_names == ("result",):
assert len(f.func.returns) == 1
var_types = (f.func.returns[0].type,)
else:
for var_name in var_names:
res = re.findall(r"^result(\d+)$", var_name)
if len(res) == 1:
if var_types is None:
var_types = tuple()
arg_idx = int(res[0])
var_types = var_types + (f.func.returns[arg_idx].type,)
assert var_types is not None, "No matching output for forward derivative definition"
return ForwardDerivative(
formula=formula,
var_names=var_names,
var_types=var_types,
required_inputs_fw_grad=None,
required_inputs_primal=None,
required_original_self_value=False,
is_reusing_outplace_formula=False,
)
def postprocess_forward_derivatives(
f: NativeFunction,
defn_name: str,
all_arg_names: List[str],
derivatives: List[Derivative],
forward_derivatives: List[ForwardDerivative],
args_with_derivatives: Sequence[Binding],
) -> List[ForwardDerivative]:
def find_required_inputs(formula: str, postfix: str) -> Tuple[str, ...]:
required_inputs = set()
for arg in args_with_derivatives:
if arg.type == "at::TensorList":
# The functions taking TensorList handle everything internally
continue
arg_name = arg.name
found = re.search(IDENT_REGEX.format(arg_name), formula)
if found:
raise RuntimeError(
f"The forward formula for {defn_name} is using the base name of the {arg_name} "
f"argument which is ambiguous. You should use {arg_name}_p to access the primal "
f"value and {arg_name}_t to access the tangent."
)
found = re.search(IDENT_REGEX.format(arg_name + postfix), formula)
if found:
required_inputs.add(arg_name)
return tuple(required_inputs)
updated_derivatives: List[ForwardDerivative] = []
for defn in forward_derivatives:
formula = defn.formula
required_inputs_tangent = find_required_inputs(formula, "_t")
if formula == "auto_element_wise":
if (
(not len(args_with_derivatives) == 1)
or len(forward_derivatives) > 1
or len(forward_derivatives[0].var_names) > 1
):
raise RuntimeError(
f"Derivative definition of {defn_name} in derivatives.yaml defines the "
"forward definition of gradient as element_wise but this only "
"works for functions with a single differentiable input and a "
"single differentiable output."
)
if not len(derivatives) == 1:
raise RuntimeError(
f"Derivative definition of {defn_name} in derivatives.yaml defines the "
"forward definition of gradient as element_wise but it does not "
"defines the gradient formula for its argument which is required."
)
# This transformation is based on the observation that for element-wise functions, the Jacobian
# matrix is diagonal and thus doing J * v is the same as (v^T J)^T (in practice, we ignore the transpositions)
# For the complex case, we use hermitian transpose and get (v.conj() J).conj()
# So here we are going to re-use the backward formula and replace three things:
# 1) all occurrences of "grad" with "foo_t.conj()", where foo is the name of the unique differentiable input.
# 2) all usage of an original input "foo" with its primal value "foo_p".
# 3) conjugate the final result
# For example, for abs, the backward formula is:
# grad * self.sgn()
# And this function generates a forward formula that is:
# (self_t.conj() * self_p.sgn()).conj()
backward_formula = derivatives[0].original_formula
input_name = args_with_derivatives[0].name
# Do replacement 1) of the grad
def repl(m: Any) -> str:
return f"{m.group(1)}{input_name}_t.conj(){m.group(2)}"
fw_formula = re.sub(IDENT_REGEX.format("grad"), repl, backward_formula)
# Do replacement 2) of the input variables
for arg in args_with_derivatives:
arg_name = arg.name
def repl(m: Any) -> str:
return f"{m.group(1)}{arg_name}_p{m.group(2)}"
fw_formula = re.sub(IDENT_REGEX.format(arg_name), repl, fw_formula)
# Do the final conjugate 3)
fw_formula = f"({fw_formula}).conj()"
# Since there is a single differentiable input and we necessarily need its tangent, we can
# simply require all differentiable input's tangent.
required_inputs_tangent = tuple(all_arg_names)
formula = fw_formula
elif formula == "auto_linear":
if (
len(forward_derivatives) > 1
or len(forward_derivatives[0].var_names) > 1
):
raise RuntimeError(
f"Derivative definition of {defn_name} in derivatives.yaml defines the "
"forward definition of gradient as linear but this only works "
"for functions with a single differentiable output."
)
# This transformation is based on the observation that linear functions can be written as:
# y = f(x) = A * x
# For some matrix A and the Jacobian of the function f is also A.
# So doing J * v = A * v = f(v).
# Hence to do the jvp, we simply need to evaluate the function at the point v instead of x.
# We do this by calling the forward again, replacing any occurrence of the differentiable
# input "foo" with its tangent "foo_t".
# Note that multiple inputs are not a problem as long as the function is truly linear with respect to
# the vector where all the differentiable inputs are stacked.
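# For example (illustrative): for a linear op `foo(Tensor self, Tensor other)` with
# both inputs differentiable, the code below emits the forward formula
# `at::foo(self_t, other_t)` (or `self_t.foo(other_t)` for a method-only variant),
# i.e. the op itself evaluated at the tangents.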
diff_arg_names = [arg.name for arg in args_with_derivatives]
assert len(diff_arg_names) > 0
# Do replacement of input variables
new_args = []
for arg_name in all_arg_names:
if arg_name in diff_arg_names:
arg_name = arg_name + "_t"
new_args.append(arg_name)
# TODO we are trolling
if f.func.is_symint_fn():
defn_name += "_symint"
# Call into the forward again. We need two cases here to handle both Tensor methods and at:: functions.
if Variant.function in f.variants:
fw_formula = "at::{}({})".format(defn_name, ", ".join(new_args))
else:
assert Variant.method in f.variants
fw_formula = "{}.{}({})".format(
new_args[0], defn_name, ", ".join(new_args[1:])
)
# All of the input tangents are always used so all of them are required here.
required_inputs_tangent = tuple(diff_arg_names)
formula = fw_formula
# At this point, the formula is final and is not modified anymore.
# In the forward formula, we use the primals instead of the input Tensors.
# This call inspects the formula to find which inputs' primals are used.
required_inputs_primal = find_required_inputs(formula, "_p")
updated_derivatives.append(
ForwardDerivative(
formula=formula,
var_names=defn.var_names,
var_types=defn.var_types,
required_inputs_fw_grad=required_inputs_tangent,
required_inputs_primal=required_inputs_primal,
required_original_self_value=False,
is_reusing_outplace_formula=False,
)
)
return updated_derivatives
def is_forward_derivative_definition(
all_arg_names: List[str], names: Tuple[str, ...]
) -> bool:
for name in names:
if name not in all_arg_names:
return True
else:
return False
raise RuntimeError("Expected `names` to be non-empty")
def create_differentiability_info(
defn_dict: Dict[Any, Any],
functions_by_signature: Dict[FunctionSchema, List[NativeFunction]],
functions_by_schema: Dict[str, NativeFunction],
op_counter: Counter[str],
used_dispatch_keys: Set[str],
) -> Tuple[FunctionSchema, Dict[str, DifferentiabilityInfo]]:
"""Processes a single entry `defn` in derivatives.yaml"""
def canonical_function(
functions: Sequence[NativeFunction], name: str
) -> NativeFunction:
for f in functions:
if (
not f.func.is_functional_fn()
and not f.func.is_out_fn()
and name == str(f.func.name.name)
):
return f
# some functions only have in-place variants
assert name + "_" == cpp.name(functions[0].func)
return functions[0]
def split_names(raw_names: str) -> Tuple[str, ...]:
"""Given "foo, bar", return ["foo", "bar"]."""
return tuple(x.strip() for x in raw_names.split(","))
def check_grad_usage(defn_name: str, derivatives: Sequence[Derivative]) -> None:
"""
Check for some subtle mistakes one might make when writing derivatives.
These mistakes will compile, but will be latent until a function is
used with double backwards.
"""
uses_grad = False # true if any derivative uses "grad"
num_grads_uses = 0 # count of uses of "grads" or "grads[INDEX]"
uses_named_grads = False # true if any derivative uses "grad_{name}"
used_grads_indices: List[int] = [] # which indices of grads are used
for d in derivatives:
formula = d.formula
uses_grad = uses_grad or bool(
re.findall(IDENT_REGEX.format("grad"), formula)
)
num_grads_uses += len(re.findall(IDENT_REGEX.format("grads"), formula))
uses_named_grads = uses_named_grads or bool(d.named_gradients)
used_grads_indices.extend(used_gradient_indices(formula))
# This is a basic sanity check: the number of places we see
# "grads" should be no fewer than the number of indices we see
# inside "grads". They may not be equal because we may use
# "grads" without an index.
assert num_grads_uses >= len(used_grads_indices)
# Thus if the number is equal, every use of grads is also
# indexed.
only_used_grads_indices = num_grads_uses == len(used_grads_indices)
if uses_grad and num_grads_uses > 0:
raise RuntimeError(
f"Derivative definition of {defn_name} in derivatives.yaml illegally "
"mixes use of 'grad' and 'grads'. Consider replacing "
"occurrences of 'grad' with 'grads[0]'"
)
if only_used_grads_indices and set(used_grads_indices) == {0}:
raise RuntimeError(
f"Derivative definition of {defn_name} in derivatives.yaml solely "
"refers to 'grads[0]'. If the first output is indeed the "
"only differentiable output, replace 'grads[0]' with 'grad'; "
"otherwise, there is a likely error in your derivatives "
"declaration."
)
if uses_named_grads and (uses_grad or num_grads_uses > 0):
raise RuntimeError(
f"Derivative definition of {defn_name} in derivatives.yaml illegally "
'mixes use of "grad_RETURN_NAME" and "grad" or "grads[x]". Use '
"only one method for identifying gradients."
)
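# Illustrative examples of what check_grad_usage above rejects: a formula like
# "grad * grads[1]" (mixing 'grad' and 'grads'), and an entry whose formulas only
# ever reference 'grads[0]' (which should be spelled 'grad' instead).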
@with_native_function
def set_up_derivatives(
f: NativeFunction,
) -> Tuple[
Sequence[Derivative],
Sequence[ForwardDerivative],
Sequence[Binding],
Sequence[str],
Sequence[str],
]:
# Set up the derivative information
derivatives: List[Derivative] = []
forward_derivatives: List[ForwardDerivative] = []
non_differentiable_arg_names: List[str] = []
args_with_derivatives_set: Set[str] = set()
all_arg_names = [a.name for a in cpp_arguments(f)]
all_ret_names = [
r.name for r in f.func.returns
] # only used for the assert below
# output_differentiability is captured from the enclosed
# scope. Don't modify it.
#
# If it is not present, then no output is explicitly
# undifferentiable.
#
# It may be present and shorter than the length of return
# values. If that's the case, any return value that does not
# have a corresponding entry is considered not differentiable.
differentiability = output_differentiability or [True] * len(f.func.returns)
# A return is available as a named gradient ...
available_named_gradients = [
f"grad_{ret.name}"
for ret, differentiable in zip(f.func.returns, differentiability)
# if it has not been explicitly made undifferentiable
if differentiable
# and if it has a name
and ret.name is not None
# and if its type is differentiable
and ret.type.is_tensor_like()
]
for raw_names in sorted(defn.keys()):
formula = defn[raw_names]
names = split_names(raw_names)
for name in names:
assert not (name in all_arg_names and name in all_ret_names), (
f"While processing the derivative formula for '{f.func.name}' wrt '{name}', "
f"expected '{name}' to not be both an input arg and named return. "
)
if is_forward_derivative_definition(all_arg_names, names):
forward_derivatives.append(create_forward_derivative(f, formula, names))
else:
if formula.lower().strip() == "non_differentiable":
non_differentiable_arg_names += names
else:
derivative = create_derivative(
f, formula, names, available_named_gradients
)
derivatives.append(derivative)
args_with_derivatives_set |= set(names)
overlap = args_with_derivatives_set.intersection(non_differentiable_arg_names)
if overlap:
raise RuntimeError(
f"derivatives definition for {defn} have overlapped non_differentiable "
f"and differentiable variables: {overlap}"
)
# Next, let us determine the list of inputs in order.
# TODO: do we need to eagerly calculate and save it here? Can it be derived
# from NativeFunction and `derivatives` on callsites instead?
args_with_derivatives = [
a for a in cpp_arguments(f) if a.name in args_with_derivatives_set
]
# Postprocess forward derivatives definitions now that we know the differentiable arguments
forward_derivatives = postprocess_forward_derivatives(
f,
defn_name,
all_arg_names,
derivatives,
forward_derivatives,
args_with_derivatives,
)
# Test to see if the use of 'grads' makes sense.
check_grad_usage(defn_name, derivatives)
return (
derivatives,
forward_derivatives,
args_with_derivatives,
non_differentiable_arg_names,
available_named_gradients,
)
# NB: Removes 'name' from defn dictionary
specification = defn_dict.pop("name")
defn_name, _ = split_name_params(specification)
# NB: Removes 'output_differentiability' from defn dictionary
# `None` means all differentiable.
output_differentiability = defn_dict.pop("output_differentiability", None)
output_differentiability_conditions = None
if output_differentiability and any(
[isinstance(diff, str) for diff in output_differentiability]
):
if len(output_differentiability) != 1:
raise RuntimeError(
f"Not supported: for {specification},"
f"output_differentiability must either be "
f"List[bool] or a List[str] where each str is a "
f"condition. In the case where it is a condition, "
f"we only support single-output functions. "
f"Please file us an issue. "
)
output_differentiability_conditions = output_differentiability
output_differentiability = [True]
schema_function = functions_by_schema.get(specification)
if not schema_function:
avail = "\n".join(
k for k, v in functions_by_schema.items() if cpp.name(v.func) == defn_name
)
raise RuntimeError(
f"could not find ATen function for schema: {specification} "
f". Available signatures:\n{avail}"
)
# now map this to the legacy schema; this isn't technically necessary, but we'd need some logic here
# to map in-place schemas to the out-of-place variants.
# TODO: maybe the logic to handle the legacy schema is no longer necessary?
signature = schema_function.func.signature()
functions = functions_by_signature[signature]
if len(functions) == 0:
avail = "\n".join(
str(k)
for k, v in functions_by_signature.items()
if cpp.name(k) == defn_name
)
raise RuntimeError(
f"could not find ATen function for legacy signature: {signature} "
f"corresponding to schema {specification}. Please report a bug to PyTorch. "
f"Available signatures:\n{avail}"
)
canonical = canonical_function(functions, defn_name)
if "grad_input_mask" in (a.name for a in cpp_arguments(canonical)):
raise RuntimeError(
f"Schema for {defn_name} has an argument named grad_input_mask, "
"but this name would be shadowed by our codegen. "
"Please use a different name in native_functions.yaml."
)
if "result" in (a.name for a in cpp_arguments(canonical)):
raise RuntimeError(
f"Schema for {defn_name} has an argument named result, "
"but this is only allowed for outputs."
"Please use a different name in native_functions.yaml."
)
diffinfo_dict = dict()
for key, defn in defn_dict["dispatch"].items():
if key != "Default" and key not in _VALID_AUTOGRAD_KEYS:
raise RuntimeError(
f"Invalid dispatch key {key} in derivatives.yaml for {specification},"
f" expected key to be one of {_VALID_AUTOGRAD_KEYS}"
)
if key not in used_dispatch_keys:
used_dispatch_keys.add(key)
(
derivatives,
forward_derivatives,
args_with_derivatives,
non_differentiable_arg_names,
available_named_gradients,
) = set_up_derivatives(canonical)
used_named_gradients: Set[str] = set()
for d in derivatives:
used_named_gradients |= d.named_gradients
# only assign an op name if we are actually going to calculate a derivative
op = None
if args_with_derivatives:
op_prefix = _create_op_prefix(defn_name)
if key != "Default":
op_prefix = op_prefix + key
op = f"{op_prefix}{op_counter[op_prefix]}"
op_counter[op_prefix] += 1
diffinfo_dict[key] = DifferentiabilityInfo(
name=defn_name,
func=canonical,
op=op,
derivatives=derivatives,
forward_derivatives=forward_derivatives,
all_saved_inputs=dedup_vars(
[v for d in derivatives for v in d.saved_inputs]
),
all_saved_outputs=dedup_vars(
[v for d in derivatives for v in d.saved_outputs]
),
available_named_gradients=available_named_gradients,
used_named_gradients=used_named_gradients,
args_with_derivatives=args_with_derivatives,
non_differentiable_arg_names=non_differentiable_arg_names,
output_differentiability=output_differentiability,
output_differentiability_conditions=output_differentiability_conditions,
)
return canonical.func, diffinfo_dict
GRAD_INDEX_REGEX = r"(?:^|\W)grads\[(\d+)\]"
def used_gradient_indices(formula: str) -> List[int]:
"""Determine a list of gradient indices (the i in grads[i]) that
are used by the formula.
>>> used_gradient_indices("foo(grads[0], grads[1])")
[0, 1]
"""
return [int(i) for i in re.findall(GRAD_INDEX_REGEX, formula)]
def saved_variables(
formula: str,
nctypes: List[NamedCType],
var_names: Tuple[str, ...],
) -> Tuple[str, Tuple[SavedAttribute, ...]]:
def stride_expr(name: str) -> str:
assert var_names == (name,), (
'Replacement for ".strides()" is currently only supported for single derivatives of the same tensor '
'that ".strides()" is being called on.'
)
return f'strides_or_error({name}, "{name}")'
REPLACEMENTS: List[Tuple[str, Dict[str, Any]]] = [
# replace self.sizes() with self_sizes
(
r"{}.sizes\(\)",
{
"suffix": "_sizes",
"nctype": lambda name: NamedCType(name, BaseCType(intArrayRefT)),
},
),
# replace self.sym_sizes() with self_sym_sizes
(
r"{}.sym_sizes\(\)",
{
"suffix": "_sym_sizes",
"nctype": lambda name: NamedCType(name, BaseCType(symIntArrayRefT)),
},
),
# replace self->sizes() with self_sizes_opt
(
r"{}->sizes\(\)",
{
"suffix": "_sizes_opt",
"nctype": lambda name: NamedCType(
name, OptionalCType(BaseCType(intArrayRefT))
),
"expr": lambda name: f"{name}.has_value() ? c10::optional<IntArrayRef>({name}->sizes()) : c10::nullopt",
},
),
# replace self.options() with self_options
(
r"{}.options\(\)",
{
"suffix": "_options",
"nctype": lambda name: NamedCType(name, BaseCType(tensorOptionsT)),
},
),
# replace zeros_like(self) with self_info
(
r"zeros_like\({}\)",
{
"suffix": "_info",
"nctype": lambda name: NamedCType(name, BaseCType(typeAndSizeT)),
"expr": lambda name: name, # at save-time
"res": lambda name: name + "_info.zeros()", # at eval-time
},
),
# replace self.size(2) with self_size_2
(
r"{}.size\((\w+)\)",
{
"suffix": lambda m: "_argsize_{}".format(*m.groups()),
"nctype": lambda name: NamedCType(name, BaseCType(longT)),
},
),
# replace self.numel() with self_numel
(
r"{}.numel\(\)",
{
"suffix": "_numel",
"nctype": lambda name: NamedCType(name, BaseCType(longT)),
},
),
# replace to_args_sizes(self) with self_args_sizes
(
r"to_args_sizes\({}\)",
{
"suffix": "_args_sizes",
"nctype": lambda name: NamedCType(
name, VectorCType(VectorCType(BaseCType(longT)))
),
},
),
# replace to_args_scalartypes(self) with self_args_scalartypes
(
r"to_args_scalartypes\({}\)",
{
"suffix": "_args_scalartypes",
"nctype": lambda name: NamedCType(
name, VectorCType(BaseCType(scalarTypeT))
),
},
),
# replace TensorGeometry(self) with self_geometry
(
r"TensorGeometry\({}\)",
{
"suffix": "_geometry",
"nctype": lambda name: NamedCType(name, BaseCType(tensorGeometryT)),
},
),
(
r"{}.scalar_type\(\)",
{
"suffix": "_scalar_type",
"nctype": lambda name: NamedCType(name, BaseCType(scalarTypeT)),
},
),
# replace self.dim() with self_dim
(
r"{}.dim\(\)",
{
"suffix": "_dim",
"nctype": lambda name: NamedCType(name, BaseCType(longT)),
},
),
# replace self.strides() with self_strides
(
r"{}.strides\(\)",
{
"suffix": "_strides",
"nctype": lambda name: NamedCType(name, BaseCType(intArrayRefT)),
"expr": stride_expr,
},
),
# replace self.layout() with self_layout
(
r"{}.layout\(\)",
{
"suffix": "_layout",
"nctype": lambda name: NamedCType(name, BaseCType(layoutT)),
},
),
# replace self.is_conj() with self_conjugate
(
r"{}.is_conj\(\)",
{
"suffix": "_conjugate",
"nctype": lambda name: NamedCType(name, BaseCType(boolT)),
},
),
]
# find which arguments need to be saved
saved: List[SavedAttribute] = []
for nctype in nctypes:
name = (
nctype.name.name if isinstance(nctype.name, SpecialArgName) else nctype.name
)
# First search the formula for expressions which can be evaluated
# when the autograd Function is created to avoid saving variables
for regex, info in REPLACEMENTS:
def repl(m: Match[str]) -> str:
suffix: str = (
info["suffix"](m) if callable(info["suffix"]) else info["suffix"]
)
expr: str = info["expr"](name) if "expr" in info else m.group(0)
saved.append(
SavedAttribute(
nctype=info["nctype"](name + suffix),
expr=expr,
)
)
if "res" in info:
replacement: str = info["res"](name)
return replacement
return name + suffix
formula = re.sub(regex.format(name), repl, formula)
# c10::optional<std::string> types stored in Backward nodes must be
# converted to c10::optional<c10::string_view> before being passed into
# the backward function
if nctype.type == OptionalCType(BaseCType(stringT)):
formula = re.sub(
rf"\b{name}\b",
f"{name}.has_value() ? c10::optional<c10::string_view>({name}.value()) : c10::nullopt",
formula,
)
# Find any variables which remain in the formula and save them
if re.search(IDENT_REGEX.format(name), formula):
saved.append(
SavedAttribute(
nctype=nctype,
expr=name,
)
)
return formula, tuple(saved)
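# Illustrative example of the rewriting above: with var_names == ("self",) and a
# formula "grad.reshape(self.sizes())", the ".sizes()" replacement rewrites it to
# "grad.reshape(self_sizes)" and records a SavedAttribute whose nctype is
# NamedCType("self_sizes", BaseCType(intArrayRefT)) and whose expr is
# "self.sizes()" (evaluated when the autograd node is created).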
def _create_op_prefix(name: str) -> str:
"""Takes a native function name converts to a op prefix name.
Note that the "name" parameter must be the native function name
without the optional variant suffix, so "add" instead of
"add.out".
OP names correspond to classes, hence the change to title case.
Example::
>>> _create_op_prefix('add')
'AddBackward'
"""
camel_case = "".join([p.title() for p in name.split("_")])
return (camel_case + "Backward").replace("ForwardBackward", "Backward")
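# Note (added for clarity): the "ForwardBackward" -> "Backward" replacement above
# keeps prefixes for names ending in "_forward" sensible; e.g. a hypothetical
# "foo_forward" would otherwise become "FooForwardBackward" and is collapsed to
# "FooBackward" instead.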
def dedup_vars(vars: Sequence[SavedAttribute]) -> Sequence[SavedAttribute]:
seen: Set[str] = set()
saved: List[SavedAttribute] = []
for var in vars:
name = (
var.nctype.name.name
if isinstance(var.nctype.name, SpecialArgName)
else var.nctype.name
)
if name in seen:
continue
seen.add(name)
saved.append(var)
return saved
|
pytorch-master
|
tools/autograd/load_derivatives.py
|
"""
To run this file by hand from the root of the PyTorch
repository, run:
python -m tools.autograd.gen_autograd \
aten/src/ATen/native/native_functions.yaml \
aten/src/ATen/native/tags.yaml \
$OUTPUT_DIR \
tools/autograd
Where $OUTPUT_DIR is where you would like the files to be
generated. In the full build system, OUTPUT_DIR is
torch/csrc/autograd/generated/
"""
# gen_autograd.py generates C++ autograd functions and Python bindings.
#
# It delegates to the following scripts:
#
# gen_autograd_functions.py: generates subclasses of torch::autograd::Node
# gen_variable_type.py: generates VariableType.h which contains all tensor methods
# gen_python_functions.py: generates Python bindings to THPVariable
#
import argparse
import os
from typing import List
from torchgen.api import cpp
from torchgen.api.autograd import (
match_differentiability_info,
NativeFunctionWithDifferentiabilityInfo,
)
from torchgen.gen import parse_native_yaml
from torchgen.selective_build.selector import SelectiveBuilder
from . import gen_python_functions
from .gen_autograd_functions import (
gen_autograd_functions_lib,
gen_autograd_functions_python,
)
from .gen_inplace_or_view_type import gen_inplace_or_view_type
from .gen_trace_type import gen_trace_type
from .gen_variable_factories import gen_variable_factories
from .gen_variable_type import gen_variable_type
from .load_derivatives import load_derivatives
def gen_autograd(
native_functions_path: str,
tags_path: str,
out: str,
autograd_dir: str,
operator_selector: SelectiveBuilder,
disable_autograd: bool = False,
) -> None:
# Parse and load derivatives.yaml
differentiability_infos, used_dispatch_keys = load_derivatives(
os.path.join(autograd_dir, "derivatives.yaml"), native_functions_path, tags_path
)
template_path = os.path.join(autograd_dir, "templates")
native_funcs = parse_native_yaml(native_functions_path, tags_path).native_functions
fns = list(
sorted(
filter(
operator_selector.is_native_function_selected_for_training, native_funcs
),
key=lambda f: cpp.name(f.func),
)
)
fns_with_diff_infos: List[
NativeFunctionWithDifferentiabilityInfo
] = match_differentiability_info(fns, differentiability_infos)
# Generate VariableType.h/cpp
if not disable_autograd:
gen_variable_type(
out,
native_functions_path,
tags_path,
fns_with_diff_infos,
template_path,
used_dispatch_keys,
)
gen_inplace_or_view_type(
out, native_functions_path, tags_path, fns_with_diff_infos, template_path
)
# operator filter not applied as tracing sources are excluded in selective build
gen_trace_type(out, native_funcs, template_path)
# Generate Functions.h/cpp
gen_autograd_functions_lib(out, differentiability_infos, template_path)
# Generate variable_factories.h
gen_variable_factories(out, native_functions_path, tags_path, template_path)
def gen_autograd_python(
native_functions_path: str,
tags_path: str,
out: str,
autograd_dir: str,
) -> None:
differentiability_infos, _ = load_derivatives(
os.path.join(autograd_dir, "derivatives.yaml"), native_functions_path, tags_path
)
template_path = os.path.join(autograd_dir, "templates")
# Generate Functions.h/cpp
gen_autograd_functions_python(out, differentiability_infos, template_path)
# Generate Python bindings
deprecated_path = os.path.join(autograd_dir, "deprecated.yaml")
gen_python_functions.gen(
out, native_functions_path, tags_path, deprecated_path, template_path
)
def main() -> None:
parser = argparse.ArgumentParser(description="Generate autograd C++ files script")
parser.add_argument(
"native_functions", metavar="NATIVE", help="path to native_functions.yaml"
)
parser.add_argument("tags", metavar="NATIVE", help="path to tags.yaml")
parser.add_argument("out", metavar="OUT", help="path to output directory")
parser.add_argument(
"autograd", metavar="AUTOGRAD", help="path to autograd directory"
)
args = parser.parse_args()
gen_autograd(
args.native_functions,
args.tags,
args.out,
args.autograd,
SelectiveBuilder.get_nop_selector(),
)
if __name__ == "__main__":
main()
|
pytorch-master
|
tools/autograd/gen_autograd.py
|
# Generates Python bindings for ATen functions
#
# The bindings are generated as methods on python_variable or functions on the
# torch._C._nn, torch._C._fft, torch._C._linalg, torch._C._sparse or torch._C._special objects.
#
# Code tries to stick to the following rules:
#
# - templates should be colocated with the functions that use them.
# no templates are currently shared between functions, but if that
# happens, maybe put the template with the first one
#
# - don't use environment dictionaries when calling template.substitute().
# pass named arguments directly for everything, otherwise it's much too
# hard to track what's actually being used and by whom
#
# - colocate any new hacks/adjustments with existing ones of the same kind.
# ideally in a data structure rather than code if possible. See e.g.
# SCHEMA_DEFAULT_CONVERSION_HACKS, etc.
#
# - similarly, conversions from one format to another should ideally happen
# all at once in a single place.
#
# - no nontrivial nested functions. couple-liners are ok but please no more.
# especially avoid functions that read/write outer variables defined far away.
#
# - raise RuntimeError instead of asserting, and put as much
# information as is available into the message. I.e. no need to
# plumb in new params whose only purpose is to fill out an error
# message, but use what's there
#
import itertools
import re
from collections import defaultdict
from typing import Callable, Dict, Iterable, List, Optional, Sequence, Set, Tuple
import yaml
from torchgen.api import cpp
from torchgen.api.python import (
arg_parser_output_exprs,
cpp_dispatch_exprs,
cpp_dispatch_target,
dispatch_lambda_args,
dispatch_lambda_exprs,
dispatch_lambda_return_str,
has_tensor_options,
namedtuple_fieldnames,
PythonSignature,
PythonSignatureDeprecated,
PythonSignatureGroup,
PythonSignatureNativeFunctionPair,
signature,
signature_from_schema,
)
from torchgen.code_template import CodeTemplate
from torchgen.context import with_native_function
from torchgen.gen import cpp_string, parse_native_yaml, parse_tags_yaml
from torchgen.model import (
Argument,
BaseOperatorName,
FunctionSchema,
NativeFunction,
Type,
Variant,
)
from torchgen.utils import FileManager, split_name_params, YamlLoader
from .gen_trace_type import should_trace
#
# declarations blocklist
# We skip codegen for these functions, for various reasons.
# Future PRs will categorize this list and eliminate or hoist
# them out of eager-only codegen.
# See https://github.com/pytorch/pytorch/issues/30788
#
# These functions require manual Python bindings or are not exposed to Python
_SKIP_PYTHON_BINDINGS = [
"alias",
"contiguous",
"is_cuda",
"is_sparse",
"is_sparse_csr",
"size",
"stride",
".*_backward",
".*_backward_(out|input|weight|bias)",
".*_forward",
".*_forward_out",
".*_jvp",
"_unsafe_view",
"tensor",
"_?sparse_(coo|compressed|csr|csc|bsr|bsc)_tensor.*",
"_range.*",
"_sparse_add_out",
"_sparse_div.*",
"_sparse_mul.*",
"_sparse_sub.*",
"_sparse_dense_add_out",
"index",
"index_out",
"unique_dim_consecutive",
"_cumsum.*",
"_cumprod.*",
"_sum.*",
"_prod.*",
"_th_.*",
"_thnn_.*",
"range.*",
"_solve.*",
"_inverse.*",
"_cholesky.*",
"_triangular_solve.*",
"_qr.*",
"_symeig.*",
"_svd.*",
"slice",
"item",
"_local_scalar_dense",
"to",
"_to_copy",
"copy_sparse_to_sparse_",
"copy_",
"numpy_T",
"matrix_H",
"mT",
"mH", # these need to be an attributes in Python, not functions
"nonzero(_(out|numpy))?",
"set_data",
".*_overrideable", # overrideable functions for backend extension
"data",
"is_leaf",
"output_nr",
"_version",
"requires_grad_",
"retains_grad",
"set_",
"_fw_primal",
"fake_quantize_per_tensor_affine_cachemask",
"fake_quantize_per_channel_affine_cachemask",
"_new_zeros_with_same_feature_meta",
"_has_same_storage_numel", # used for forward AD internals
"_reshape_alias",
"replace_", # only used by the functionalization pass, doesn't need to be exposed to python
"copy", # only used by the functionalization pass
"fill.Tensor", # only used by the functionalization pass
"fill.Scalar", # only used by the functionalization pass
"lift.*",
"normal_functional", # only used by the functionalization pas
]
SKIP_PYTHON_BINDINGS = list(
map(lambda pattern: re.compile(rf"^{pattern}$"), _SKIP_PYTHON_BINDINGS)
)
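# Note (added for clarity): each pattern above is compiled with ^...$ anchors, so
# ".*_backward" skips e.g. "convolution_backward" but not out-variant names like
# a hypothetical "foo_backward_out"; those are covered by the separate
# ".*_backward_(out|input|weight|bias)" pattern.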
# These function signatures are not exposed to Python. Note that this signature
# list does not support regex.
SKIP_PYTHON_BINDINGS_SIGNATURES = [
"add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor",
"add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)",
"sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor",
"sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)",
"mul.Scalar(Tensor self, Scalar other) -> Tensor",
"mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)",
"div.Scalar(Tensor self, Scalar other) -> Tensor",
"div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)",
]
@with_native_function
def should_generate_py_binding(f: NativeFunction) -> bool:
# So far, all NativeFunctions that are entirely code-generated do not get python bindings.
if "generated" in f.tags:
return False
name = cpp.name(f.func)
for skip_regex in SKIP_PYTHON_BINDINGS:
if skip_regex.match(name):
return False
signature = str(f.func)
for pattern in SKIP_PYTHON_BINDINGS_SIGNATURES:
if pattern == signature:
return False
return True
def get_pycname(name: BaseOperatorName) -> str:
return f"THPVariable_{name}"
def is_noarg(overloads: Sequence[PythonSignatureNativeFunctionPair]) -> bool:
return len(overloads) == 1 and overloads[0].signature.arguments_count() == 0
def is_py_variable_method(f: NativeFunction) -> bool:
return f.python_module is None and Variant.method in f.variants
def is_py_torch_function(f: NativeFunction) -> bool:
return f.python_module is None and Variant.function in f.variants
def is_py_nn_function(f: NativeFunction) -> bool:
return f.python_module == "nn"
def is_py_fft_function(f: NativeFunction) -> bool:
return f.python_module == "fft"
def is_py_linalg_function(f: NativeFunction) -> bool:
return f.python_module == "linalg"
def is_py_sparse_function(f: NativeFunction) -> bool:
return f.python_module == "sparse"
def is_py_special_function(f: NativeFunction) -> bool:
return f.python_module == "special"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# Main Function
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
def gen(
out: str,
native_yaml_path: str,
tags_yaml_path: str,
deprecated_yaml_path: str,
template_path: str,
) -> None:
fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
native_functions = parse_native_yaml(
native_yaml_path, tags_yaml_path
).native_functions
native_functions = list(filter(should_generate_py_binding, native_functions))
methods = load_signatures(native_functions, deprecated_yaml_path, method=True)
create_python_bindings(
fm,
methods,
is_py_variable_method,
None,
"python_variable_methods.cpp",
method=True,
)
# NOTE: num_shards here must be synced with gatherTorchFunctions in
# torch/csrc/autograd/python_torch_functions_manual.cpp
functions = load_signatures(native_functions, deprecated_yaml_path, method=False)
create_python_bindings_sharded(
fm,
functions,
is_py_torch_function,
"torch",
"python_torch_functions.cpp",
method=False,
num_shards=3,
)
create_python_bindings(
fm,
functions,
is_py_nn_function,
"torch.nn",
"python_nn_functions.cpp",
method=False,
)
create_python_bindings(
fm,
functions,
is_py_fft_function,
"torch.fft",
"python_fft_functions.cpp",
method=False,
)
create_python_bindings(
fm,
functions,
is_py_linalg_function,
"torch.linalg",
"python_linalg_functions.cpp",
method=False,
)
create_python_bindings(
fm,
functions,
is_py_sparse_function,
"torch.sparse",
"python_sparse_functions.cpp",
method=False,
)
create_python_bindings(
fm,
functions,
is_py_special_function,
"torch.special",
"python_special_functions.cpp",
method=False,
)
# Currently, we only use `functions` to generate `return_types` bindings.
# All methods which return a namedtuple have a function variant at this point.
# If a method-only operator returning a namedtuple is added in the future,
# we will have to address that.
create_python_return_type_bindings(
fm, functions, lambda fn: True, "python_return_types.cpp"
)
valid_tags = parse_tags_yaml(tags_yaml_path)
def gen_tags_enum() -> Dict[str, str]:
return {
"enum_of_valid_tags": (
"".join([f'\n.value("{tag}", at::Tag::{tag})' for tag in valid_tags])
)
}
fm.write("python_enum_tag.cpp", gen_tags_enum)
def group_filter_overloads(
pairs: Sequence[PythonSignatureNativeFunctionPair],
pred: Callable[[NativeFunction], bool],
) -> Dict[BaseOperatorName, List[PythonSignatureNativeFunctionPair]]:
grouped: Dict[
BaseOperatorName, List[PythonSignatureNativeFunctionPair]
] = defaultdict(list)
for pair in pairs:
if pred(pair.function):
grouped[pair.function.func.name.name].append(pair)
return grouped
def create_python_bindings(
fm: FileManager,
pairs: Sequence[PythonSignatureNativeFunctionPair],
pred: Callable[[NativeFunction], bool],
module: Optional[str],
filename: str,
*,
method: bool,
) -> None:
"""Generates Python bindings to ATen functions"""
py_methods: List[str] = []
ops_headers: List[str] = []
py_method_defs: List[str] = []
py_forwards: List[str] = []
grouped = group_filter_overloads(pairs, pred)
for name in sorted(grouped.keys(), key=lambda x: str(x)):
overloads = grouped[name]
py_methods.append(method_impl(name, module, overloads, method=method))
py_method_defs.append(method_def(name, module, overloads, method=method))
py_forwards.extend(forward_decls(name, overloads, method=method))
ops_headers.append(f"#include <ATen/ops/{name.base}.h>")
fm.write_with_template(
filename,
filename,
lambda: {
"generated_comment": "@" + f"generated from {fm.template_dir}/{filename}",
"ops_headers": ops_headers,
"py_forwards": py_forwards,
"py_methods": py_methods,
"py_method_defs": py_method_defs,
},
)
def create_python_return_type_bindings(
fm: FileManager,
pairs: Sequence[PythonSignatureNativeFunctionPair],
pred: Callable[[NativeFunction], bool],
filename: str,
) -> None:
"""
Generate functions to initialize and return the named tuple types for native
functions which return named tuples, and the relevant entries for the map in
`python_return_types.cpp`.
"""
py_return_types_definition: List[str] = []
py_return_types_map: List[str] = []
grouped = group_filter_overloads(pairs, pred)
for name in sorted(grouped.keys(), key=lambda x: str(x)):
overloads = grouped[name]
definitions, map_entries = generate_return_type_definition_and_map_entry(
overloads
)
py_return_types_definition.append(
"" if not definitions else "\n".join(definitions)
)
py_return_types_map.append("" if not map_entries else "\n".join(map_entries))
fm.write_with_template(
filename,
filename,
lambda: {
"generated_comment": "@" + f"generated from {fm.template_dir}/{filename}",
"py_return_types": py_return_types_definition,
"py_return_types_map": py_return_types_map,
},
)
def create_python_bindings_sharded(
fm: FileManager,
pairs: Sequence[PythonSignatureNativeFunctionPair],
pred: Callable[[NativeFunction], bool],
module: Optional[str],
filename: str,
*,
method: bool,
num_shards: int,
) -> None:
"""Generates Python bindings to ATen functions"""
grouped = group_filter_overloads(pairs, pred)
def key_func(
kv: Tuple[BaseOperatorName, List[PythonSignatureNativeFunctionPair]]
) -> str:
return kv[0].base
def env_func(
kv: Tuple[BaseOperatorName, List[PythonSignatureNativeFunctionPair]]
) -> Dict[str, List[str]]:
name, fn_pairs = kv
return {
"ops_headers": [f"#include <ATen/ops/{name.base}.h>"],
"py_forwards": list(forward_decls(name, fn_pairs, method=method)),
"py_methods": [method_impl(name, module, fn_pairs, method=method)],
"py_method_defs": [method_def(name, module, fn_pairs, method=method)],
}
fm.write_sharded(
filename,
grouped.items(),
base_env={
"generated_comment": "@" + f"generated from {fm.template_dir}/{filename}",
},
key_fn=key_func,
env_callable=env_func,
num_shards=num_shards,
sharded_keys={"ops_headers", "py_forwards", "py_methods", "py_method_defs"},
)
def load_signatures(
native_functions: List[NativeFunction],
deprecated_yaml_path: str,
*,
method: bool,
skip_deprecated: bool = False,
pyi: bool = False,
) -> Sequence[PythonSignatureNativeFunctionPair]:
@with_native_function
def gen_signature_pairs(f: NativeFunction) -> PythonSignatureNativeFunctionPair:
return PythonSignatureNativeFunctionPair(
signature=signature(f, method=method, pyi=pyi),
function=f,
)
pairs = list(map(gen_signature_pairs, native_functions))
deprecated = load_deprecated_signatures(
pairs, deprecated_yaml_path, method=method, pyi=pyi
)
return pairs if skip_deprecated else pairs + deprecated
def load_deprecated_signatures(
pairs: Sequence[PythonSignatureNativeFunctionPair],
deprecated_yaml_path: str,
*,
method: bool,
pyi: bool,
) -> List[PythonSignatureNativeFunctionPair]:
# The deprecated.yaml doesn't have complete type information, so we need to
# find and leverage the original ATen signature (to which it delegates
# the call) to generate the full python signature.
# We join the deprecated and the original signatures using type-only form.
# group the original ATen signatures by name
grouped: Dict[str, List[PythonSignatureNativeFunctionPair]] = defaultdict(list)
for pair in pairs:
grouped[pair.signature.name].append(pair)
# find matching original signatures for each deprecated signature
results: List[PythonSignatureNativeFunctionPair] = []
with open(deprecated_yaml_path, "r") as f:
deprecated_defs = yaml.load(f, Loader=YamlLoader)
for deprecated in deprecated_defs:
schema = FunctionSchema.parse(deprecated["name"])
aten_name, call_args = split_name_params(deprecated["aten"])
is_out = aten_name.endswith("_out")
if is_out:
aten_name = aten_name.replace("_out", "")
# HACK: these are fixed constants used to pass to the aten function.
# The type must be known ahead of time
known_constants = {
"1": Type.parse("Scalar"),
}
schema_args_by_name = {a.name: a for a in schema.arguments.flat_all}
for name in call_args:
assert (
name in schema_args_by_name or name in known_constants
), f"deprecation definiton: Unrecognized value {name}"
# Map deprecated signature arguments to their aten signature and test
# if the types and alias annotation match.
def is_schema_compatible(
aten_schema: FunctionSchema,
) -> bool:
arguments: Iterable[Argument]
if is_out:
arguments = itertools.chain(
aten_schema.arguments.out, aten_schema.arguments.flat_non_out
)
else:
arguments = aten_schema.arguments.flat_all
for i, arg in enumerate(arguments):
if i < len(call_args):
arg_name = call_args[i]
if arg_name in known_constants:
schema_type = known_constants[arg_name]
schema_annotation = None
else:
schema_arg = schema_args_by_name[arg_name]
schema_type = schema_arg.type
schema_annotation = schema_arg.annotation
if schema_type != arg.type or schema_annotation != arg.annotation:
return False
else:
if arg.default is None:
return False
return len(schema.returns) == len(aten_schema.returns) and all(
a == b for a, b in zip(schema.returns, aten_schema.returns)
)
any_schema_found = False
for pair in grouped[aten_name]:
if not is_schema_compatible(pair.function.func):
continue
any_schema_found = True
python_sig = signature_from_schema(
schema,
category_override=pair.function.category_override,
method=method,
pyi=pyi,
)
results.append(
PythonSignatureNativeFunctionPair(
signature=PythonSignatureDeprecated(
name=python_sig.name,
input_args=python_sig.input_args,
input_kwargs=python_sig.input_kwargs,
output_args=python_sig.output_args,
tensor_options_args=python_sig.tensor_options_args,
method=python_sig.method,
deprecated_schema=schema,
deprecated_args_exprs=tuple(call_args),
returns=python_sig.returns,
),
function=pair.function,
)
)
assert (
any_schema_found
), f"No native function with name {aten_name} matched signature:\n {str(schema)}"
return results
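# Note (added for clarity): each deprecated.yaml entry consumed above carries a
# deprecated "name" schema plus an "aten" call expression, roughly of the shape
# (illustrative, not a real entry):
#   - name: somefn(Tensor self, Scalar alpha, Tensor other) -> Tensor
#     aten: somefn(self, other, alpha)
# The deprecated signature is matched against the real ATen overloads via
# is_schema_compatible and re-exposed with the deprecated argument order.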
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# Named Tuple Codegen
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
@with_native_function
def gen_namedtuple_typename_key(f: NativeFunction) -> str:
name = cpp.name(f.func)
fieldnames = namedtuple_fieldnames(f.func.returns)
return "_".join([name] + fieldnames)
def emit_namedtuple_call(
overloads: Sequence[PythonSignatureNativeFunctionPair],
) -> Tuple[List[str], Dict[str, str]]:
"""
Generate a block of named tuple typedef inits, and add typeref snippets
to the declarations that use them.
"""
typenames: Dict[
str, str
] = {} # map from unique name + field name lists to typedef name
typedefs: List[str] = [] # typedef declarations and init code
for overload in overloads:
fieldnames = namedtuple_fieldnames(overload.function.func.returns)
if not fieldnames:
continue
name = cpp.name(overload.function.func) # use @with_native_function?
tn_key = gen_namedtuple_typename_key(overload.function)
typename = typenames.get(tn_key)
if typename is None:
typename = f'NamedTuple{"" if not typedefs else len(typedefs)}'
typenames[tn_key] = typename
typedefs.append(
f"""\
static PyTypeObject* {typename} = get_namedtuple("{name}");"""
)
return typedefs, typenames
def generate_return_type_definition_and_map_entry(
overloads: Sequence[PythonSignatureNativeFunctionPair],
) -> Tuple[List[str], List[str]]:
"""
Generate the block of functions in `python_return_types.cpp` that initialize
and return the named tuple for a native function which returns a named tuple,
plus the relevant entry for the map in the same file.
"""
typenames: Dict[
str, str
] = {} # map from unique name + field name lists to typedef name
definitions: List[str] = [] # function definitions to register the typedefs
map_entries: List[
str
] = [] # C++ map entries of <function_name, function that creates its namedtuple>
for overload in overloads:
fieldnames = namedtuple_fieldnames(overload.function.func.returns)
if not fieldnames:
continue
fields = ", ".join(f'{{"{fn}", ""}}' for fn in fieldnames)
name = cpp.name(overload.function.func) # use @with_native_function?
tn_key = gen_namedtuple_typename_key(overload.function)
typename = typenames.get(tn_key)
if typename is None:
typename = f'{name}NamedTuple{"" if not definitions else len(definitions)}'
typenames[tn_key] = typename
definitions.append(
f"""\
PyTypeObject* get_{name}_namedtuple() {{
static PyStructSequence_Field NamedTuple_fields[] = {{ {fields}, {{nullptr}} }};
static PyTypeObject {typename};
static bool is_initialized = false;
static PyStructSequence_Desc desc = {{ "torch.return_types.{name}", nullptr, NamedTuple_fields, {len(fieldnames)} }};
if (!is_initialized) {{
PyStructSequence_InitType(&{typename}, &desc);
{typename}.tp_repr = (reprfunc)torch::utils::returned_structseq_repr;
is_initialized = true;
}}
return &{typename};
}}
"""
)
map_entries.append(f'{{"{name}", get_{name}_namedtuple()}}, ')
return definitions, map_entries
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# Method Impl Codegen
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# python binding for all overloads of a particular function/method
PY_VARIABLE_METHOD_VARARGS = CodeTemplate(
r"""\
// ${name}
static PyObject * ${pycname}(PyObject* self_, PyObject* args, PyObject* kwargs)
{
${method_header}
static PythonArgParser parser({
${signatures}
}, /*traceable=*/${traceable});
ParsedArgs<${max_args}> parsed_args;
auto _r = parser.parse(${self_}, args, kwargs, parsed_args);
${check_has_torch_function}
switch (_r.idx) {
${dispatch}
}
${method_footer}
}
"""
)
# handler for a single parsed signature - may be a single overload or
# a pair of overloads whose signatures differ only in output params
# (plugged into PY_VARIABLE_METHOD_VARARGS as an item in ${dispatch})
PY_VARIABLE_CASE = CodeTemplate(
"""\
case ${overload_index}: {
${body}
}
"""
)
# python binding for single-overload function/method
PY_VARIABLE_METHOD_VARARGS_SINGLETON = CodeTemplate(
"""\
// ${name}
static PyObject * ${pycname}(PyObject* self_, PyObject* args, PyObject* kwargs)
{
${method_header}
static PythonArgParser parser({
${signatures}
}, /*traceable=*/${traceable});
ParsedArgs<${max_args}> parsed_args;
auto _r = parser.parse(${self_}, args, kwargs, parsed_args);
${check_has_torch_function}
${dispatch}
${method_footer}
}
"""
)
# python binding for a method with no args, shortcuts parsing
PY_VARIABLE_METHOD_NOARGS = CodeTemplate(
"""\
// ${name}
static PyObject * ${pycname}(PyObject* self_, PyObject* args)
{
${method_header}
${check_has_torch_function}
${dispatch}
${method_footer}
}
"""
)
def method_impl(
name: BaseOperatorName,
module: Optional[str],
overloads: Sequence[PythonSignatureNativeFunctionPair],
*,
method: bool,
) -> str:
"""
Generate a python binding for all overloads of an op.
"""
pycname = get_pycname(name)
noarg = is_noarg(overloads)
namedtuple_inits, namedtuple_typenames = emit_namedtuple_call(overloads)
method_header = ["HANDLE_TH_ERRORS"]
method_header += namedtuple_inits
method_header += (
["const Tensor& self = THPVariable_Unpack(self_);"] if method else []
)
method_footer = ([] if noarg else ["Py_RETURN_NONE;"]) + ["END_HANDLE_TH_ERRORS"]
traceable = "true" if all(should_trace(o.function) for o in overloads) else "false"
grouped_overloads: Sequence[PythonSignatureGroup] = group_overloads(overloads)
is_singleton = len(grouped_overloads) == 1
signatures: List[str] = []
dispatch: List[str] = []
for overload_index, overload in enumerate(grouped_overloads):
signature = overload.signature.signature_str()
signatures.append(f"{cpp_string(str(signature))},")
dispatch_body = emit_dispatch_case(overload, namedtuple_typenames)
dispatch.append(
PY_VARIABLE_CASE.substitute(
overload_index=overload_index, body=dispatch_body
)
if not is_singleton
else dispatch_body
)
if noarg:
template = PY_VARIABLE_METHOD_NOARGS
elif is_singleton:
template = PY_VARIABLE_METHOD_VARARGS_SINGLETON
else:
template = PY_VARIABLE_METHOD_VARARGS
return template.substitute(
name=name,
pycname=pycname,
method_header=method_header,
max_args=max(map(lambda o: o.signature.arguments_count(), overloads)),
signatures=signatures,
traceable=traceable,
check_has_torch_function=gen_has_torch_function_check(
name=name,
module=module,
noarg=noarg,
method=method,
),
dispatch=dispatch,
method_footer=method_footer,
self_="self_" if method else "nullptr",
)
def gen_has_torch_function_check(
name: BaseOperatorName, module: Optional[str], *, noarg: bool, method: bool
) -> str:
if noarg:
if method:
return f"""\
if(check_has_torch_function(self_)) {{
return handle_torch_function(self_, "{name}");
}}
"""
else:
return ""
self_ = "self_" if method else "nullptr"
namespace = (
{
"torch": "THPVariableFunctionsModule",
"torch.nn": "THPNNVariableFunctionsModule",
"torch.fft": "THPFFTVariableFunctionsModule",
"torch.linalg": "THPLinalgVariableFunctionsModule",
"torch.sparse": "THPSparseVariableFunctionsModule",
"torch.special": "THPSpecialVariableFunctionsModule",
}[module]
if module
else "THPVariableClass"
)
return f"""\
if(_r.has_torch_function()) {{
return handle_torch_function(_r, {self_}, args, kwargs, {namespace}, "{module or "torch.Tensor"}");
}}
"""
# handler for output/no-output overload pair
PY_VARIABLE_OUT = CodeTemplate(
"""\
if (_r.isNone(${out_idx})) {
${call_dispatch}
} else {
${call_dispatch_out}
}
"""
)
def emit_dispatch_case(
overload: PythonSignatureGroup,
namedtuple_typenames: Dict[str, str],
) -> str:
"""
Emit dispatch code for a single parsed signature. This corresponds to either
a single native function, or a pair that differ only in output params. In the
latter case, a single python signature is used for both and dispatching
switches on the presence/absence of passed output args.
"""
if overload.outplace is not None:
# dispatch output and no-output variants, branch on _r.isNone(<out_idx>)
return PY_VARIABLE_OUT.substitute(
out_idx=overload.signature.output_idx(),
call_dispatch=emit_single_dispatch(
overload.signature, overload.base, namedtuple_typenames
),
call_dispatch_out=emit_single_dispatch(
overload.signature,
overload.outplace,
namedtuple_typenames,
),
)
else:
# no-output version only
return emit_single_dispatch(
overload.signature, overload.base, namedtuple_typenames
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# Forward Declarations Codegen
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
def forward_decls(
name: BaseOperatorName,
overloads: Sequence[PythonSignatureNativeFunctionPair],
*,
method: bool,
) -> Tuple[str, ...]:
if method:
return ()
pycname = get_pycname(name)
if is_noarg(overloads):
return (
f"""\
static PyObject * {pycname}(PyObject* self_, PyObject* args);
""",
)
else:
return (
f"""\
static PyObject * {pycname}(PyObject* self_, PyObject* args, PyObject* kwargs);
""",
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# Method Def (Binding Table Entry) Codegen
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
def method_def(
name: BaseOperatorName,
module: Optional[str],
overloads: Sequence[PythonSignatureNativeFunctionPair],
*,
method: bool,
) -> str:
"""
Generate method def entry.
"""
pycname = get_pycname(name)
if is_noarg(overloads):
pyfunc_cast = ""
flags = "METH_NOARGS" if method else "METH_VARARGS | METH_KEYWORDS"
else:
pyfunc_cast = "castPyCFunctionWithKeywords"
flags = "METH_VARARGS | METH_KEYWORDS"
if module == "torch":
flags += " | METH_STATIC"
if name.dunder_method:
# PyMethodDef entry for binary op, throws not implemented error
return f"""\
{{"{name}", {pyfunc_cast}(TypeError_to_NotImplemented_<{pycname}>), {flags}, NULL}},"""
else:
# PyMethodDef entry
return f"""\
{{"{name}", {pyfunc_cast}({pycname}), {flags}, NULL}},"""
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# Overload Sorting and Grouping
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
def group_overloads(
overloads: Sequence[PythonSignatureNativeFunctionPair],
) -> Sequence[PythonSignatureGroup]:
bases: Dict[str, PythonSignatureNativeFunctionPair] = {}
outplaces: Dict[str, PythonSignatureNativeFunctionPair] = {}
# first group by signature ignoring out arguments
for overload in overloads:
sig = overload.signature.signature_str(skip_outputs=True)
if overload.function.func.is_out_fn():
if sig in outplaces:
raise RuntimeError(
f"Found duplicated function definition:\n- {overload.function.func}.\n"
f"Existing definition:\n- {outplaces[sig].function.func}."
)
outplaces[sig] = overload
else:
if sig in bases:
raise RuntimeError(
f"Found duplicated function definition:\n- {overload.function.func}.\n"
f"Existing definition:\n- {bases[sig].function.func}."
)
bases[sig] = overload
for sig, out in outplaces.items():
if sig not in bases:
candidates: List[str] = []
for overload in overloads:
if (
str(overload.function.func.name.name)
== str(out.function.func.name.name)
and not overload.function.func.is_out_fn()
and not overload.signature.deprecated
):
candidates.append(
overload.signature.signature_str(skip_outputs=True)
)
out_sig = out.signature.signature_str()
raise RuntimeError(
f"While identifying overloads, we found an out schema {out_sig} without a corresponding non-out variant. "
f"We expected the non-out variant to have schema: \n- {sig}\nPlease check that you spelled the schema "
"correctly in native_functions.yaml. We discovered the following candidate(s): \n"
+ "\n".join(f"- {candidate}" for candidate in candidates)
)
grouped = [
PythonSignatureGroup.from_pairs(
functional=base,
out=outplaces.get(sig),
)
for sig, base in bases.items()
]
return sort_overloads(grouped)
# This function declares a partial order on declarations and sorts them according
# to a linear extension of it. This is necessary because there is some ambiguity in
# the choice of overload, so the order in which overloads are tried matters.
# (A small self-contained sketch of the ordering scheme follows sort_overloads below.)
#
# See Note[Order of overloads matters]
#
# A few examples of ambiguous python signature pairs.
#
# All parameters have the same type, except that one takes Tensor where the other
# takes Scalar. A numeric PyObject can be cast to a Tensor, and a zero-dim Tensor
# object can be accepted as a Scalar parameter (see python_arg_parser.cpp).
# Therefore, the same input arguments might be accepted by either python signature.
# We always want to parse the one taking Tensor first.
#
# bitwise_and(Tensor input, Tensor other, *, Tensor out=None)
# bitwise_and(Tensor input, Scalar other, *, Tensor out=None)
#
# If they have a different number of parameters then they are not ambiguous - but
# a difference in the output param can be ignored, as it's optional.
#
# multiply(Tensor input, Tensor other, *, Tensor out=None)
# multiply(Tensor input, Scalar other)
#
# Both positional args and keyword-only args are considered together.
#
# subtract(Tensor other, *, Scalar alpha=1)
# subtract(Scalar other, Scalar alpha=1)
#
# A few ambiguous cases which it does NOT handle yet.
#
# If there is any difference in other parameters besides the Tensor/Scalar
# difference, then they are not considered ambiguous by this method anymore.
# However, the difference could be too trivial to disambiguate.
#
# foo(Tensor input, Scalar other, Scalar bar)
# foo(Tensor input, Tensor other, double bar)
#
# If they take a different number of parameters then they are not considered
# ambiguous anymore, even if the difference is only in optional kwargs.
#
# foo(Scalar other, Scalar alpha=1)
# foo(Tensor other, *, Scalar alpha=1, Scalar beta=1)
#
def sort_overloads(
grouped_overloads: Sequence[PythonSignatureGroup],
) -> Sequence[PythonSignatureGroup]:
# NB: Smaller here means lower priority
def is_arg_smaller(t1: Type, t2: Type) -> bool:
return (
str(t1) == "Scalar"
and str(t2) == "Tensor"
or str(t1) == "Scalar?"
and str(t2) == "Tensor?"
or "Dimname" in str(t1)
and "Dimname" not in str(t2)
or
# The discussion in https://github.com/pytorch/pytorch/issues/54555 explains
# why it is important to prioritize int/int? over int[]
str(t1) == "int[]"
and (str(t2) == "int" or str(t2) == "int?")
or
# TensorList currently throws an error during argument parsing, which is why it needs
# to be last in the signature ordering. See discussion: https://github.com/pytorch/pytorch/issues/58087
str(t1) == "Tensor[]"
and str(t2).find("[]") != -1
or
# Prioritize IntArrayRef overload over SymIntArrayRef
str(t1) == "SymInt[]"
and str(t2) == "int[]"
)
def is_smaller(s1: PythonSignature, s2: PythonSignature) -> bool:
"""Returns True if s1 < s2 in the partial order."""
args1, args2 = s1.arguments(skip_outputs=True), s2.arguments(skip_outputs=True)
if len(args1) != len(args2):
return False
# TODO: should use some canonical form instead of 'str(arg.type)' - see comments
# above. The old codegen used the deprecated 'dynamic_type(arg.type)', which
# ignores the optional annotation, i.e. it treats 'Scalar' and 'Scalar?' the same.
equal = all(arg1.type == arg2.type for arg1, arg2 in zip(args1, args2))
smaller_or_equal = all(
str(arg1.type) == str(arg2.type) or is_arg_smaller(arg1.type, arg2.type)
for arg1, arg2 in zip(args1, args2)
)
return smaller_or_equal and not equal
# First sort by signature
grouped_overloads = sorted(
grouped_overloads, key=lambda x: x.signature.signature_str()
)
# Construct the relation graph
larger_than: Dict[int, Set[int]] = defaultdict(set)
for i1, overload1 in enumerate(grouped_overloads):
for i2, overload2 in enumerate(grouped_overloads):
if is_smaller(overload1.signature, overload2.signature):
larger_than[i1].add(i2)
if not larger_than:
return list(grouped_overloads)
# Use a topological sort to sort overloads according to the partial order.
N = len(grouped_overloads)
sorted_ids: List[int] = list(filter(lambda x: x not in larger_than, range(N)))
for idx in range(N):
# The size of sorted_ids will grow to N eventually.
i = sorted_ids[idx]
for j in sorted(larger_than.keys()):
larger = larger_than[j]
larger.discard(i)
if not larger:
del larger_than[j]
sorted_ids.append(j)
return list(map(lambda x: grouped_overloads[x], sorted_ids))
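# Illustrative, self-contained sketch (not used by the codegen) of the ordering scheme
# above: seed the result with overloads that are not "smaller" than anything, then
# release an overload once everything larger than it has been emitted, mirroring the
# loop in sort_overloads. The signatures below are made up for the example.
def _example_overload_ordering() -> List[str]:
    from collections import defaultdict

    sigs = ["add(Tensor other)", "add(Scalar other)"]
    # larger_than[j] holds the indices that must be emitted before j.
    larger_than: Dict[int, Set[int]] = defaultdict(set)
    larger_than[1].add(0)  # the Scalar overload is smaller than the Tensor one

    ordered = [i for i in range(len(sigs)) if i not in larger_than]
    for idx in range(len(sigs)):
        i = ordered[idx]
        for j in sorted(larger_than.keys()):
            blockers = larger_than[j]
            blockers.discard(i)
            if not blockers:
                del larger_than[j]
                ordered.append(j)
    return [sigs[i] for i in ordered]  # ["add(Tensor other)", "add(Scalar other)"]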
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# Codegen API Integration
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
def emit_single_dispatch(
ps: PythonSignature, f: NativeFunction, namedtuple_typenames: Dict[str, str]
) -> str:
"""
Emit dispatch code for a single native function.
"""
@with_native_function
def go(f: NativeFunction) -> str:
# header comments
if isinstance(ps, PythonSignatureDeprecated):
schema_comment = f"// [deprecated] aten::{ps.deprecated_schema}"
else:
schema_comment = f"// aten::{f.func}"
deprecated = "[deprecated] " if ps.deprecated else ""
# dispatch lambda signature
name = cpp.name(f.func)
lambda_formals = ", ".join(
map(lambda a: f"{a.type_str} {a.name}", dispatch_lambda_args(ps, f))
)
lambda_return = dispatch_lambda_return_str(f)
# dispatch lambda body
dispatch_callee = cpp_dispatch_target(f)
dispatch_args = ", ".join(cpp_dispatch_exprs(f, python_signature=ps))
# from arg parser outputs to dispatch lambda arguments
parser_outputs = arg_parser_output_exprs(ps, f)
lambda_arg_exprs = dispatch_lambda_exprs(ps, f)
inits = "\n".join(lambda_arg_exprs.inits)
lambda_args = ", ".join(lambda_arg_exprs.exprs)
# scatter fields
# TODO: Checking `ps.method and ('requires_grad' in parser_outputs)` is a hacky
# solution for enabling the 'requires_grad' argument for tensor methods
# new_full, new_empty, and new_zeros. A much better but more difficult to
# implement solution involves refactoring according to Ed's description here:
# https://github.com/pytorch/pytorch/issues/36455#issuecomment-614767589
need_set_requires_grad = ps.tensor_options_args and (
not has_tensor_options(f)
or (ps.method and ("requires_grad" in parser_outputs))
)
set_requires_grad = (
f'.set_requires_grad({parser_outputs["requires_grad"].expr})'
if need_set_requires_grad
else ""
)
if lambda_return == "void":
return f"""\
{schema_comment}
{inits}
auto dispatch_{name} = []({lambda_formals}) -> {lambda_return} {{
pybind11::gil_scoped_release no_gil;
{dispatch_callee}({dispatch_args});
}};
dispatch_{name}({lambda_args}){set_requires_grad};
Py_RETURN_NONE;
"""
else:
typename = namedtuple_typenames.get(gen_namedtuple_typename_key(f))
namedtuple_typeref = f"{typename}, " if typename is not None else ""
return f"""\
{schema_comment}
{inits}
auto dispatch_{name} = []({lambda_formals}) -> {lambda_return} {{
pybind11::gil_scoped_release no_gil;
return {dispatch_callee}({dispatch_args});
}};
return wrap({namedtuple_typeref}dispatch_{name}({lambda_args}){set_requires_grad});
"""
return go(f)
|
pytorch-master
|
tools/autograd/gen_python_functions.py
|
# Generates C++ autograd functions for the derivatives of ATen operations
#
# This writes two files:
# Functions.h/cpp: subclasses of autograd::Node
# python_functions.h/cpp: Python bindings for the above classes
#
from typing import Dict, List, Sequence, Tuple
from torchgen.api.autograd import (
Derivative,
DifferentiabilityInfo,
SavedAttribute,
uses_retain_variables,
uses_single_grad,
)
from torchgen.api.types import (
ArrayRefCType,
BaseCType,
Binding,
boolT,
doubleT,
intArrayRefT,
ListCType,
longT,
MutRefCType,
OptionalCType,
optionalIntArrayRefT,
scalarT,
stringT,
symIntArrayRefT,
tensorListT,
tensorT,
)
from torchgen.code_template import CodeTemplate
from torchgen.model import Argument, FunctionSchema
from torchgen.utils import FileManager
from .gen_inplace_or_view_type import VIEW_FUNCTIONS
FUNCTION_DECLARATION = CodeTemplate(
"""\
struct TORCH_API ${op} : public ${superclass} {
using ${superclass}::${superclass};
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "${op}"; }
void release_variables() override {
${thread_lock}
${release_variables}
}
${will_release_variables}
${saved_variables}
${saved_list_sizes}
};
"""
)
WILL_RELEASE_VARIABLES = CodeTemplate(
"""\
bool retain_variables = true;
void will_release_variables() override {
retain_variables = false;
}
"""
)
FUNCTION_DEFINITION = CodeTemplate(
"""\
variable_list ${op}::apply(variable_list&& grads) {
${thread_lock}
${asserts}
IndexRangeGenerator gen;
${compute_index_ranges}
variable_list grad_inputs(gen.size());
${body}
return grad_inputs;
}
"""
)
GRAD_INPUT_MASK = CodeTemplate(
"""\
auto grad_input_mask = std::array<bool, ${n}>{
${masks}
};\
"""
)
DERIVATIVE_SINGLE = CodeTemplate(
"""\
if (task_should_compute_output({ ${name}_ix })) {
auto grad_result = ${derivative};
copy_range(grad_inputs, ${name}_ix, grad_result);
}
"""
)
DERIVATIVE_MULTI_COPY_RANGE = CodeTemplate(
"""\
if (task_should_compute_output({ ${name}_ix })) {
copy_range(grad_inputs, ${name}_ix, std::get<${i}>(grad_result));
}
"""
)
DERIVATIVE_MULTI = CodeTemplate(
"""\
if (task_should_compute_output({ ${idx_ranges} })) {
${grad_input_mask}
auto grad_result = ${derivative};
${copy_ranges}
}
"""
)
# Generates python bindings
#
# This generates the definitions for:
# (1) The PyTypeObject for each backward grad_fn subclassing Node
# (2) The entry for PyTypeObject's tp_getset slot (an array of PyGetSetDef structs)
# We generate one PyGetSetDef struct for each of grad_fn's saved inputs and outputs
# Each PyGetSetDef has a function ptr to a getter, also defined here (3).
# (3) Getters for each of grad_fn's saved inputs and outputs.
#
PY_FUNCTION_DEFINITION = CodeTemplate(
"""\
static PyTypeObject ${op}Class;
addClass<${op}>(${op}Class, "${op}", ${op}_properties);
"""
)
PY_FUNCTION_PROPS_AND_GETTERS = CodeTemplate(
"""\
${all_getter_definitions}
static struct PyGetSetDef ${op}_properties[] = {
THP_FUNCTION_DEFAULT_PROPERTIES,
${all_getsetdef_structs}
{nullptr} /* sentinel */
};
"""
)
PY_GETSETDEF_STRUCT = CodeTemplate(
"""\
{(char*)"_saved_${name}", (getter)THP${op}_${name}_getter, nullptr, nullptr, nullptr}"""
)
PY_RAW_GETSETDEF_STRUCT = CodeTemplate(
"""\
{(char*)"_raw_saved_${name}", (getter)THP${op}_${name}_raw_getter, nullptr, nullptr, nullptr}"""
)
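# Illustrative sketch, not used by the codegen: substituting sample values into the
# struct template above yields a single PyGetSetDef entry. "MulBackward0" and "self"
# are just example op/attribute names.
def _example_getsetdef_entry() -> str:
    # Expected to render roughly as:
    #   {(char*)"_saved_self", (getter)THPMulBackward0_self_getter, nullptr, nullptr, nullptr},
    return PY_GETSETDEF_STRUCT.substitute(op="MulBackward0", name="self")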
# Getter templates
GETTER_DEFINITION = CodeTemplate(
"""\
PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
HANDLE_TH_ERRORS
auto prop = static_cast<${op}*>(self->cdata.get())->${name};
${body}
END_HANDLE_TH_ERRORS
}
"""
)
GETTER_DEFINITION_SAVEDVAR = CodeTemplate(
"""\
PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
HANDLE_TH_ERRORS
const auto& prop = static_cast<${op}*>(self->cdata.get())->${name}_;
${body}
END_HANDLE_TH_ERRORS
}
"""
)
GETTER_DEFINITION_RAW_SAVEDVAR = CodeTemplate(
"""\
PyObject* THP${op}_${name}_raw_getter(THPCppFunction *self, void *_unused) {
HANDLE_TH_ERRORS
const auto& prop = static_cast<${op}*>(self->cdata.get())->${name}_;
${body}
END_HANDLE_TH_ERRORS
}
"""
)
GETTER_DEFINITION_VEC_SAVEDVAR = CodeTemplate(
"""\
PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
HANDLE_TH_ERRORS
const auto *node = static_cast<${op}*>(self->cdata.get());
const auto& prop = node->${name}_;
if (node->${name}_released_) {
PyErr_SetString(PyExc_RuntimeError, ERR_BACKWARD_TWICE);
return nullptr;
}
${body}
END_HANDLE_TH_ERRORS
}
"""
)
GETTER_DEFINITION_RAW_VEC_SAVEDVAR = CodeTemplate(
"""\
PyObject* THP${op}_${name}_raw_getter(THPCppFunction *self, void *_unused) {
HANDLE_TH_ERRORS
const auto *node = static_cast<${op}*>(self->cdata.get());
const auto& prop = node->${name}_;
if (node->${name}_released_) {
PyErr_SetString(PyExc_RuntimeError, ERR_BACKWARD_TWICE);
return nullptr;
}
${body}
END_HANDLE_TH_ERRORS
}
"""
)
GETTER_DEFINITION_OPT = CodeTemplate(
"""\
PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
HANDLE_TH_ERRORS
auto opt_prop = static_cast<${op}*>(self->cdata.get())->${name};
if (!opt_prop.has_value()) {
Py_RETURN_NONE;
}
auto prop = opt_prop.value();
${body}
END_HANDLE_TH_ERRORS
}
"""
)
GETTER_DEFINITION_OPT_ARRAYREF = CodeTemplate(
"""\
PyObject* THP${op}_${name}_getter(THPCppFunction *self, void *_unused) {
HANDLE_TH_ERRORS
auto opt_prop = static_cast<${op}*>(self->cdata.get())->${name};
if (!opt_prop.list.has_value()) {
Py_RETURN_NONE;
}
auto prop = opt_prop.list.value();
${body}
END_HANDLE_TH_ERRORS
}
"""
)
# Getter body
GETTER_BODY_SAVEDVAR = """\
return THPVariable_Wrap(prop.unpack(self->cdata));
"""
GETTER_BODY_RAW_SAVEDVAR = """\
pybind11::object obj = pybind11::cast(prop, pybind11::return_value_policy::reference);
return obj.release().ptr();
"""
GETTER_BODY_VEC_SAVEDVAR = """\
PyObject* tup = PyTuple_New((Py_ssize_t) prop.size());
for (auto i: c10::irange(prop.size())) {
PyTuple_SetItem(tup, (Py_ssize_t) i, THPVariable_Wrap(prop[i].unpack(self->cdata)));
}
return tup;
"""
GETTER_BODY_RAW_VEC_SAVEDVAR = """\
PyObject* tup = PyTuple_New((Py_ssize_t) prop.size());
for (auto i : c10::irange(prop.size())) {
pybind11::object obj = pybind11::cast(prop[i], pybind11::return_value_policy::reference);
PyTuple_SetItem(tup, (Py_ssize_t) i, obj.release().ptr());
}
return tup;
"""
GETTER_BODY_ARRAYREF_LONG = """\
PyObject* tup = PyTuple_New((Py_ssize_t) prop.size());
for (auto i : c10::irange(prop.size())) {
PyTuple_SetItem(tup, (Py_ssize_t) i, PyLong_FromUnsignedLong((uint64_t) prop[i]));
}
return tup;
"""
GETTER_BODY_ARRAYREF_SYMINT = """\
PyObject* tup = PyTuple_New((Py_ssize_t) prop.size());
for (auto i : c10::irange(prop.size())) {
auto si = prop[i];
if (si.is_symbolic()) {
auto py_symint = py::cast(si.toSymIntNodeImpl()).release().ptr();
PyTuple_SetItem(tup, (Py_ssize_t) i, py_symint);
} else {
PyTuple_SetItem(tup, (Py_ssize_t) i, PyLong_FromUnsignedLong(si.as_int_unchecked()));
}
}
return tup;
"""
GETTER_BODY_ARRAYREF_DOUBLE = """\
PyObject* tup = PyTuple_New((Py_ssize_t) prop.size());
for (auto i : c10::irange(prop.size())) {
PyTuple_SetItem(tup, (Py_ssize_t) i, PyFloat_FromDouble((double) prop[i]));
}
return tup;
"""
GETTER_BODY_INT64_T = """\
return PyLong_FromUnsignedLong((int64_t) prop);
"""
GETTER_BODY_DOUBLE = """\
return PyFloat_FromDouble((double) prop);
"""
GETTER_BODY_BOOL = """\
if (prop) {
Py_RETURN_TRUE;
} else {
Py_RETURN_FALSE;
}
"""
GETTER_BODY_STRING = """\
return PyUnicode_FromStringAndSize(prop.data(), prop.size());
"""
GETTER_BODY_SCALAR = """\
if (prop.isComplex()) {
auto cprop = prop.to<c10::complex<double>>();
return PyComplex_FromDoubles(cprop.real(), cprop.imag());
} else if (prop.isFloatingPoint()) {
return PyFloat_FromDouble(prop.to<double>());
} else if (prop.isIntegral(/*includeBool=*/false)) {
return PyLong_FromLong(prop.to<int64_t>());
} else if (prop.isBoolean()) {
if (prop.to<bool>()) {
Py_RETURN_TRUE;
} else {
Py_RETURN_FALSE;
}
} else {
PyErr_SetString(PyExc_RuntimeError, "Unknown scalar type");
return nullptr;
}
"""
MISC_GETTER_DEFS = {
OptionalCType(BaseCType(longT)): (GETTER_DEFINITION_OPT, GETTER_BODY_INT64_T),
BaseCType(doubleT): (GETTER_DEFINITION, GETTER_BODY_DOUBLE),
OptionalCType(BaseCType(doubleT)): (GETTER_DEFINITION_OPT, GETTER_BODY_DOUBLE),
BaseCType(boolT): (GETTER_DEFINITION, GETTER_BODY_BOOL),
BaseCType(scalarT): (GETTER_DEFINITION, GETTER_BODY_SCALAR),
OptionalCType(BaseCType(scalarT)): (GETTER_DEFINITION_OPT, GETTER_BODY_SCALAR),
}
# The backward passes of these functions cannot be traced, and so their
# backward functions must be traced opaquely.
# VIEW_FUNCTIONS are not traceable because they use as_strided, which
# has an untraceable backwards, see
# https://github.com/pytorch/pytorch/issues/4250
# TODO: This is probably not exhaustive, but it's a start
UNTRACEABLE_FUNCTIONS = VIEW_FUNCTIONS
def get_infos_with_derivatives_list(
differentiability_infos: Dict[FunctionSchema, Dict[str, DifferentiabilityInfo]]
) -> List[DifferentiabilityInfo]:
diff_info_list = [
info
for diffinfo_dict in differentiability_infos.values()
for info in diffinfo_dict.values()
]
return list(filter(lambda info: info.args_with_derivatives, diff_info_list))
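# Illustrative sketch, not used by the codegen: the helper above only touches
# .values() and .args_with_derivatives, so simple stand-in objects are enough to show
# the flattening and filtering. The schema keys and names below are made up.
def _example_flatten_infos() -> List[str]:
    from types import SimpleNamespace

    infos = {
        "aten::mul.Tensor": {
            "Default": SimpleNamespace(name="mul", args_with_derivatives=["self", "other"])
        },
        "aten::detach": {
            "Default": SimpleNamespace(name="detach", args_with_derivatives=[])
        },
    }
    flat = get_infos_with_derivatives_list(infos)  # type: ignore[arg-type]
    return [info.name for info in flat]  # expected: ["mul"]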
def gen_autograd_functions_lib(
out: str,
differentiability_infos: Dict[FunctionSchema, Dict[str, DifferentiabilityInfo]],
template_path: str,
) -> None:
"""Functions.h and Functions.cpp body
These contain the auto-generated subclasses of torch::autograd::Node
for every differentiable torch function.
"""
# Get a flat list of diffinfos; we do not need them to be per FunctionSchema/DispatchKey here.
# Infos with different dispatch keys but the same name will still end up in the same shard.
infos = get_infos_with_derivatives_list(differentiability_infos)
declarations = list(map(lambda f: process_function(f, FUNCTION_DECLARATION), infos))
definitions = list(map(lambda f: process_function(f, FUNCTION_DEFINITION), infos))
file_basename = "Functions"
fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
for suffix in [".h", ".cpp"]:
fname = file_basename + suffix
fm.write_with_template(
fname,
fname,
lambda: {
"generated_comment": "@" + f"generated from {fm.template_dir}/" + fname,
"autograd_function_declarations": declarations,
"autograd_function_definitions": definitions,
},
)
def gen_autograd_functions_python(
out: str,
differentiability_infos: Dict[FunctionSchema, Dict[str, DifferentiabilityInfo]],
template_path: str,
) -> None:
fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
num_shards = 5
fm.write(
"python_functions.h",
lambda: {
"generated_comment": f"@generated from {fm.template_dir}/python_functions.h",
"shard_forward_declare": [
f"void initialize_autogenerated_functions_{i}();"
for i in range(num_shards)
],
"shard_call": [
f"initialize_autogenerated_functions_{i}();" for i in range(num_shards)
],
},
)
# Get a flat list of diffinfos; we do not need them to be per FunctionSchema/DispatchKey here.
# Infos with different dispatch keys but the same name will still end up in the same shard.
infos = get_infos_with_derivatives_list(differentiability_infos)
fm.write_sharded(
"python_functions.cpp",
infos,
key_fn=lambda info: info.name,
base_env={
"generated_comment": f"@generated from {fm.template_dir}/python_functions.cpp",
},
env_callable=lambda info: {
"py_function_initializers": [
process_function(info, PY_FUNCTION_DEFINITION)
],
"py_function_props_and_getters": [
process_function(info, PY_FUNCTION_PROPS_AND_GETTERS)
],
},
num_shards=num_shards,
sharded_keys={"py_function_initializers", "py_function_props_and_getters"},
)
def process_function(info: DifferentiabilityInfo, template: CodeTemplate) -> str:
saved_variables: List[str] = []
release_variables: List[str] = []
saved_list_sizes: List[str] = []
unpack: List[str] = []
asserts: List[str] = []
compute_index_ranges: List[str] = []
getter_definitions: List[str] = []
py_getsetdef_structs: List[str] = []
for arg in info.args_with_derivatives:
if (
arg.type == "at::TensorList"
or arg.type == "const c10::List<c10::optional<at::Tensor>> &"
):
size = f"{arg.name}_size_"
saved_list_sizes.append(f"size_t {arg.name}_size_;")
else:
size = "1"
compute_index_ranges.append(f"auto {arg.name}_ix = gen.range({size});")
def save_var(var: SavedAttribute, is_output: bool) -> None:
name = var.nctype.name
type = var.nctype.type
should_append_getsetdef = True
should_append_raw_getsetdef = False
if (
type == BaseCType(tensorT)
or type == OptionalCType(BaseCType(tensorT))
or type == MutRefCType(OptionalCType(BaseCType(tensorT)))
or (type == BaseCType(scalarT) and is_output)
):
saved_variables.append(f"SavedVariable {name}_;")
release_variables.append(f"{name}_.reset_data();")
ptr = "shared_from_this()" if is_output else ""
unpack.append(f"auto {name} = {name}_.unpack({ptr});")
getter_definitions.append(
GETTER_DEFINITION_SAVEDVAR.substitute(
op=info.op, name=name, body=GETTER_BODY_SAVEDVAR
)
)
getter_definitions.append(
GETTER_DEFINITION_RAW_SAVEDVAR.substitute(
op=info.op, name=name, body=GETTER_BODY_RAW_SAVEDVAR
)
)
should_append_raw_getsetdef = True
elif type == BaseCType(tensorListT):
saved_variables.append(f"std::vector<SavedVariable> {name}_;")
saved_variables.append(f"bool {name}_released_ = false;")
# Just clear() is sufficient; we don't need to loop and clear each variable.
# Because the SavedVariable owns a tensor and a grad_fn, removing the SavedVariable makes them go away as well.
release_variables.append(f"{name}_.clear();")
release_variables.append(f"{name}_released_ = true;")
unpack.append(f"auto {name} = unpack_list({name}_);")
asserts.append(f"TORCH_CHECK(!{name}_released_, ERR_BACKWARD_TWICE);")
getter_definitions.append(
GETTER_DEFINITION_VEC_SAVEDVAR.substitute(
op=info.op, name=name, body=GETTER_BODY_VEC_SAVEDVAR
)
)
getter_definitions.append(
GETTER_DEFINITION_RAW_VEC_SAVEDVAR.substitute(
op=info.op, name=name, body=GETTER_BODY_RAW_VEC_SAVEDVAR
)
)
should_append_raw_getsetdef = True
elif type == ListCType(OptionalCType(BaseCType(tensorT))):
saved_variables.append(f"std::vector<SavedVariable> {name}_;")
saved_variables.append(f"bool {name}_released_ = false;")
# Just clear() is sufficient; we don't need to loop and clear each variable.
# Because the SavedVariable owns a tensor and a grad_fn, removing the SavedVariable makes them go away as well.
release_variables.append(f"{name}_.clear();")
release_variables.append(f"{name}_released_ = true;")
unpack.append(f"auto {name} = unpack_opt_list({name}_);")
asserts.append(f"TORCH_CHECK(!{name}_released_, ERR_BACKWARD_TWICE);")
getter_definitions.append(
GETTER_DEFINITION_VEC_SAVEDVAR.substitute(
op=info.op, name=name, body=GETTER_BODY_VEC_SAVEDVAR
)
)
getter_definitions.append(
GETTER_DEFINITION_RAW_VEC_SAVEDVAR.substitute(
op=info.op, name=name, body=GETTER_BODY_RAW_VEC_SAVEDVAR
)
)
should_append_raw_getsetdef = True
elif type == BaseCType(intArrayRefT):
saved_variables.append(f"std::vector<int64_t> {name};")
getter_definitions.append(
GETTER_DEFINITION.substitute(
op=info.op, name=name, body=GETTER_BODY_ARRAYREF_LONG
)
)
elif type == BaseCType(symIntArrayRefT):
saved_variables.append(f"std::vector<c10::SymInt> {name};")
getter_definitions.append(
GETTER_DEFINITION.substitute(
op=info.op, name=name, body=GETTER_BODY_ARRAYREF_SYMINT
)
)
elif type == BaseCType(optionalIntArrayRefT):
saved_variables.append(f"c10::OptionalArray<int64_t> {name};")
getter_definitions.append(
GETTER_DEFINITION_OPT_ARRAYREF.substitute(
op=info.op, name=name, body=GETTER_BODY_ARRAYREF_LONG
)
)
elif type == OptionalCType(BaseCType(intArrayRefT)):
saved_variables.append(f"c10::OptionalArray<int64_t> {name};")
getter_definitions.append(
GETTER_DEFINITION_OPT_ARRAYREF.substitute(
op=info.op, name=name, body=GETTER_BODY_ARRAYREF_LONG
)
)
elif type == OptionalCType(ArrayRefCType(BaseCType(doubleT))):
saved_variables.append(f"c10::OptionalArray<double> {name};")
getter_definitions.append(
GETTER_DEFINITION_OPT_ARRAYREF.substitute(
op=info.op, name=name, body=GETTER_BODY_ARRAYREF_DOUBLE
)
)
elif type == BaseCType(longT):
saved_variables.append(f"{type.cpp_type()} {name} = 0;")
getter_definitions.append(
GETTER_DEFINITION.substitute(
op=info.op, name=name, body=GETTER_BODY_INT64_T
)
)
elif type == BaseCType(stringT):
saved_variables.append(f"std::string {name};")
getter_definitions.append(
GETTER_DEFINITION.substitute(
op=info.op, name=name, body=GETTER_BODY_STRING
)
)
elif type == OptionalCType(BaseCType(stringT)):
saved_variables.append(f"c10::optional<std::string> {name};")
getter_definitions.append(
GETTER_DEFINITION_OPT.substitute(
op=info.op, name=name, body=GETTER_BODY_STRING
)
)
else:
saved_variables.append(f"{type.cpp_type()} {name};")
if type in MISC_GETTER_DEFS:
getter_def, body = MISC_GETTER_DEFS[type]
getter_definitions.append(
getter_def.substitute(op=info.op, name=name, body=body)
)
else:
# Types we don't expose python bindings to yet:
# TypeAndSize, at::ScalarType, TensorOptions, TensorGeometry,
# std::vector<std::vector<int64_t>>, std::vector<at::ScalarType>
should_append_getsetdef = False
if should_append_getsetdef:
py_getsetdef_structs.append(
PY_GETSETDEF_STRUCT.substitute(op=info.op, name=name)
)
if should_append_raw_getsetdef:
py_getsetdef_structs.append(
PY_RAW_GETSETDEF_STRUCT.substitute(op=info.op, name=name)
)
for var in info.all_saved_inputs:
save_var(var, is_output=False)
for var in info.all_saved_outputs:
save_var(var, is_output=True)
# lock the mutex when we release variables and in Node::apply to ensure thread safety
# see Note [Thread Safety on Autograd Node]
if len(release_variables) > 0:
thread_lock = "std::lock_guard<std::mutex> lock(mutex_);"
else:
thread_lock = ""
if uses_retain_variables(info):
will_release_variables = WILL_RELEASE_VARIABLES.substitute()
else:
will_release_variables = ""
body: List[str] = []
if uses_single_grad(info):
body.append("const auto& grad = grads[0];")
else:
# Generate aliases for gradients named for returned values.
body.extend(
f"const auto& {name} = grads[{info.available_named_gradients.index(name)}];"
for name in info.used_named_gradients
)
def emit_derivative(
derivative: Derivative,
args_with_derivatives: Sequence[Binding],
) -> Tuple[bool, str]:
formula = derivative.formula
var_names = derivative.var_names
if len(var_names) == 1:
checks_any_grad_defined = False
if "not_implemented" not in formula:
matching_args = [
arg for arg in args_with_derivatives if arg.name == var_names[0]
]
if len(matching_args) == 1:
# We can add undefined grad support if the input variable is a Tensor
arg = matching_args[0]
if isinstance(arg.argument, Argument) and str(
arg.argument.type
) in ("Tensor", "Tensor?"):
formula = "any_grad_defined ? (" + formula + ") : Tensor()"
checks_any_grad_defined = True
return (
checks_any_grad_defined,
DERIVATIVE_SINGLE.substitute(name=var_names[0], derivative=formula),
)
else:
if "grad_input_mask" in formula:
masks = [
f"task_should_compute_output({{ {n}_ix }})," for n in var_names
]
grad_input_mask = GRAD_INPUT_MASK.substitute(
masks=masks, n=len(var_names)
)
else:
grad_input_mask = ""
idx_ranges = ", ".join(f"{n}_ix" for n in var_names)
copy_ranges: List[str] = []
for i, n in enumerate(var_names):
copy_ranges.append(DERIVATIVE_MULTI_COPY_RANGE.substitute(name=n, i=i))
return False, DERIVATIVE_MULTI.substitute(
idx_ranges=idx_ranges,
copy_ranges=copy_ranges,
derivative=formula,
grad_input_mask=grad_input_mask,
)
body.extend(unpack)
need_any_grad_defined_var = False
for derivative in info.derivatives:
checks_any_grad_defined, derivative_text = emit_derivative(
derivative, info.args_with_derivatives
)
body.append(derivative_text)
need_any_grad_defined_var |= checks_any_grad_defined
# Since single-output derivative formulas need to check if grads are
# defined, only perform the check once, before all the formulas
if need_any_grad_defined_var:
body.insert(
-len(info.derivatives),
"bool any_grad_defined = any_variable_defined(grads);",
)
if info.name in UNTRACEABLE_FUNCTIONS:
superclass = "Node"
else:
superclass = "TraceableFunction"
all_getsetdef_structs = (
",\n".join(py_getsetdef_structs) + "," if len(py_getsetdef_structs) != 0 else ""
)
all_getter_definitions = "\n".join(getter_definitions)
return template.substitute(
op=info.op,
compute_index_ranges=compute_index_ranges,
saved_variables=saved_variables,
release_variables=release_variables,
saved_list_sizes=saved_list_sizes,
asserts=asserts,
thread_lock=thread_lock,
will_release_variables=will_release_variables,
body=body,
superclass=superclass,
all_getter_definitions=all_getter_definitions,
all_getsetdef_structs=all_getsetdef_structs,
)
|
pytorch-master
|
tools/autograd/gen_autograd_functions.py
|
# Generates C++ functions that wrap ATen tensor factory methods to turn them into Variables.
#
# This writes one file: variable_factories.h
import re
from typing import List, Optional
import torchgen.api.python as python
from torchgen.api import cpp
from torchgen.api.types import CppSignatureGroup
from torchgen.context import with_native_function
from torchgen.gen import parse_native_yaml
from torchgen.model import NativeFunction, TensorOptionsArguments, Variant
from torchgen.utils import FileManager, mapMaybe
OPTIONAL_TYPE_PATTERN = re.compile(r"c10::optional<(.+)>")
TYPE_PATTERN = re.compile(r"(?:const\s+)?([A-Z]\w+)")
# Add 'at::' to types defined in the ATen namespace, e.g. Tensor, TensorList, IntArrayRef, etc.
# TODO: maybe update the cpp argument API to take an optional namespace argument?
def fully_qualified_type(argument_type: str) -> str:
def maybe_optional_type(type: str, is_opt: bool) -> str:
return f"c10::optional<{type}>" if is_opt else type
opt_match = OPTIONAL_TYPE_PATTERN.match(argument_type)
is_opt = opt_match is not None
if opt_match:
argument_type = argument_type[opt_match.start(1) : opt_match.end(1)]
match = TYPE_PATTERN.match(argument_type)
if match is None:
return maybe_optional_type(argument_type, is_opt)
index = match.start(1)
qualified_type = f"{argument_type[:index]}at::{argument_type[index:]}"
return maybe_optional_type(qualified_type, is_opt)
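# Illustrative sketch, not used by the codegen: a few mappings expected under the
# patterns above. Capitalized ATen types gain the at:: prefix (inside c10::optional
# as well); lowercase C++ builtins are left untouched.
def _example_fully_qualified_type() -> None:
    assert fully_qualified_type("Tensor") == "at::Tensor"
    assert fully_qualified_type("const Tensor &") == "const at::Tensor &"
    assert fully_qualified_type("c10::optional<Scalar>") == "c10::optional<at::Scalar>"
    assert fully_qualified_type("int64_t") == "int64_t"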
def gen_variable_factories(
out: str, native_yaml_path: str, tags_yaml_path: str, template_path: str
) -> None:
native_functions = parse_native_yaml(
native_yaml_path, tags_yaml_path
).native_functions
factory_functions = [fn for fn in native_functions if is_factory_function(fn)]
fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
fm.write_with_template(
"variable_factories.h",
"variable_factories.h",
lambda: {
"generated_comment": "@"
+ f"generated from {fm.template_dir}/variable_factories.h",
"ops_headers": [
f"#include <ATen/ops/{fn.root_name}.h>" for fn in factory_functions
],
"function_definitions": list(mapMaybe(process_function, factory_functions)),
},
)
@with_native_function
def is_factory_function(f: NativeFunction) -> bool:
if Variant.function not in f.variants:
return False
name = cpp.name(f.func)
has_tensor_options = python.has_tensor_options(f)
return has_tensor_options or name.endswith("_like")
@with_native_function
def process_function(f: NativeFunction) -> Optional[str]:
name = cpp.name(f.func)
has_tensor_options = python.has_tensor_options(f)
is_factory = has_tensor_options or name.endswith("_like")
if Variant.function not in f.variants or not is_factory:
return None
sig = CppSignatureGroup.from_native_function(f, method=False).signature
formals: List[str] = []
exprs: List[str] = []
requires_grad = "false"
for arg in sig.arguments():
qualified_type = fully_qualified_type(arg.type)
if arg.default:
formals.append(f"{qualified_type} {arg.name} = {arg.default}")
else:
formals.append(f"{qualified_type} {arg.name}")
if isinstance(arg.argument, TensorOptionsArguments):
# note: we remove the requires_grad setting from the TensorOptions because
# it is ignored anyway (and we actually have an assertion that it isn't set,
# which would fail otherwise). We handle requires_grad explicitly here
# instead of passing it through to the kernel.
exprs.append(f"at::TensorOptions({arg.name}).requires_grad(c10::nullopt)")
# Manually set the requires_grad bit on the result tensor.
requires_grad = f"{arg.name}.requires_grad()"
else:
exprs.append(arg.name)
return f"""\
inline at::Tensor {name}({', '.join(formals)}) {{
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::{name}({', '.join(exprs)}), /*requires_grad=*/{requires_grad});
}}
"""
|
pytorch-master
|
tools/autograd/gen_variable_factories.py
|
pytorch-master
|
tools/autograd/__init__.py
|
|
# Generates VariableType.h/cpp
#
# **If any changes are being made to the VariableType codegen please also check
# if updates are needed in torch/csrc/autograd/autograd_not_implemented_fallback.cpp
#
# VariableType is a subclass of at::Type that provides the binding code
# necessary to provide a differentiable version of ATen operators. There are a
# number of different things we could mean:
#
# - Given a non-differentiable forward implementation, we might
# directly associate it with a backward implementation to make
# it differentiable. This is the common case.
#
# - Some functions don't need a backwards implementation, because
# backpropagation will never propagate beyond them. There are a
# number of different reasons why this may be the case:
#
# - The function has no differentiable inputs
# - The function's output is not differentiable
# - The function has no data dependency on its input
#
# - Some functions don't need a backwards implementation because they
# are implemented as a composition of other (differentiable) ATen
# functions. These are dispatched directly to the Type superclass,
# which will in turn dispatch back to VariableType for its
# differentiable subcomponents.
#
from typing import Callable, Dict, List, Optional, Sequence, Set, Tuple, Union
from torchgen.api import cpp
from torchgen.api.autograd import (
DifferentiableInput,
dispatch_strategy,
gen_differentiable_outputs,
is_differentiable,
NativeFunctionWithDifferentiabilityInfo,
SavedAttribute,
)
from torchgen.api.types import (
BaseCType,
Binding,
DispatcherSignature,
intArrayRefT,
ListCType,
MutRefCType,
OptionalCType,
scalarT,
SpecialArgName,
stringT,
symIntArrayRefT,
tensorListT,
tensorT,
TupleCType,
VectorCType,
)
from torchgen.code_template import CodeTemplate
from torchgen.context import (
native_function_manager,
with_native_function,
with_native_function_and,
)
from torchgen.model import (
Argument,
BaseType,
ListType,
NativeFunction,
SchemaKind,
SelfArgument,
TensorOptionsArguments,
)
from torchgen.utils import FileManager, mapMaybe
from .context import with_native_function_with_differentiability_info_and_key
from .gen_inplace_or_view_type import (
ALL_VIEW_FUNCTIONS,
ASSIGN_RETURN_VALUE,
AUTOGRAD_NOT_IMPLEMENTED_REGISTRATION,
gen_formals,
get_base_name,
get_view_info,
is_tensor_list_type,
is_tensor_type,
METHOD_DEFINITION,
modifies_arguments,
TMP_VAR,
unpack_args,
unpacked_name,
use_derived,
WRAPPER_REGISTRATION,
)
from .gen_trace_type import (
declare_returned_variables,
get_return_value,
MANUAL_AUTOGRAD_AND_TRACER,
MANUAL_BACKEND,
tie_return_values,
type_wrapper_name,
)
# We don't set or modify grad_fn on these methods. Generally, they return
# tensors that have requires_grad=False. In-place functions listed here will
# not examine or modify requires_grad or grad_fn.
# NB: this does NOT include overload name
DONT_REQUIRE_DERIVATIVE = {
# These only depend on the input Tensor's shape and device, not the data
"empty_like",
"ones_like",
"full_like",
"zeros_like",
"rand_like",
"randn_like",
"new_empty",
"new_empty_strided",
"new_full",
"new_zeros",
"new_ones",
# These are only implemented on integral types
"__and__",
"__iand__",
"__ilshift__",
"__ior__",
"__irshift__",
"__ixor__",
"__lshift__",
"__or__",
"__rshift__",
"__xor__",
# These work on integral data types, and hence don't require derivative
"_sobol_engine_draw",
"_sobol_engine_ff",
"_sobol_engine_scramble_",
"_sobol_engine_initialize_state_",
# This is an unsafe method that is meant to be out of reach of autograd.
"_coalesced_",
# Quantize functions should not record gradients
"quantize_per_tensor",
"quantize_per_channel",
# Functions that return integers should not have outputs that require gradients
"argmax",
"argmin",
"argsort",
"searchsorted",
"bucketize",
# Functions that return booleans are not differentiable
"isnan",
"isposinf",
"isneginf",
"isinf",
"signbit",
"isin",
"allclose",
# Functions that return None are not differentiable
"record_stream",
# These functions are not differentiable
"logical_and",
"logical_xor",
"logical_not",
"logical_or",
# This function returns the nested_tensor shape as a tensor, which is non-differentiable
"_nested_tensor_size",
}
# The C -> R functions were, at the time of adding this, still being audited and tested,
# but they will not error out.
# C -> C and R -> C functions for which backward is correctly implemented and tested:
GRADIENT_IMPLEMENTED_FOR_COMPLEX = {
"fill",
"t",
"view",
"reshape",
"reshape_as",
"view_as",
"roll",
"clone",
"block_diag",
"diag_embed",
"repeat",
"expand",
"flip",
"fliplr",
"flipud",
"rot90",
"transpose",
"permute",
"squeeze",
"unsqueeze",
"resize",
"resize_as",
"tril",
"triu",
"chunk",
"zero_",
"eq_",
"ne_",
"add",
"__radd__",
"sum",
"_conj",
"sin",
"cos",
"mul",
"sinc",
"sinh",
"cosh",
"__rmul__",
"sgn",
"asin",
"acos",
"sub",
"div",
"cat",
"view_as_complex",
"index_put",
"neg",
"complex",
"select",
"where",
"as_strided",
"as_strided_scatter",
"slice",
"constant_pad_nd",
"unbind",
"split",
"split_with_sizes",
"unsafe_split",
"split_with_sizes_backward",
"dot",
"vdot",
"cholesky",
"triangular_solve",
"mm",
"_unsafe_view",
"mv",
"outer",
"bmm",
"diagonal",
"alias",
"atan",
"log",
"log10",
"log1p",
"log2",
"reciprocal",
"tan",
"pow",
"rsqrt",
"tanh",
"tanh_backward",
"asinh",
"acosh",
"atanh",
"take",
"fill_",
"exp",
"nonzero",
"mean",
"std_mean",
"var_mean",
"inverse",
"solve",
"linalg_cholesky",
"addcmul",
"addcdiv",
"matrix_exp",
"linalg_matrix_exp",
"_linalg_eigh",
"cholesky_solve",
"linalg_qr",
"_linalg_svd",
"_fft_c2c",
"_fft_r2c",
"linalg_solve",
"sqrt",
"stack",
"gather",
"index_select",
"index_add_",
"linalg_inv",
"linalg_inv_ex",
"baddbmm",
"addbmm",
"addmm",
"addmv",
"addr",
"linalg_householder_product",
"constant_pad_nd",
"reflection_pad1d",
"reflection_pad2d",
"reflection_pad3d",
"linalg_cholesky_ex",
"linalg_eig",
"select_backward",
"diagonal_backward",
"slice_backward",
"reflection_pad1d_backward",
"reflection_pad2d_backward",
"reflection_pad3d_backward",
"symeig",
"_sparse_sparse_matmul",
"replication_pad1d",
"replication_pad2d",
"replication_pad3d",
"take",
"put",
"put_",
"_to_copy",
"replication_pad1d_backward",
"replication_pad2d_backward",
"replication_pad3d_backward",
"diag",
"masked_scatter",
"masked_select",
"index_add",
"index_fill",
"trace",
"polar",
"cumsum",
"rsub",
"eig",
"lerp",
"linalg_vector_norm",
"cumprod",
"prod",
"index_copy",
"lu",
"unfold",
"unfold_backward",
"index",
"masked_fill",
"linalg_cross",
"lu_unpack",
"renorm",
"_conj_physical",
"linalg_lu_factor_ex",
"scatter",
"scatter_add",
"sigmoid",
"sigmoid_backward",
"trapezoid",
"cumulative_trapezoid",
"conj_physical_",
"_neg_view",
"_reshape_alias",
"_linalg_det",
"lu_solve",
"linalg_solve_triangular",
"linalg_pinv",
"linalg_lstsq",
"col2im",
"col2im_backward",
"im2col",
"im2col_backward",
"cholesky_inverse",
"to_sparse",
"sparse_sampled_addmm",
"linalg_lu",
"pixel_shuffle",
"pixel_unshuffle",
"linalg_lu_solve",
"_linalg_slogdet",
"_linalg_solve_ex",
}
GRADIENT_IMPLEMENTED_FOR_SPARSE_COMPLEX = {
"_to_dense",
"_coalesce",
"coalesce",
"values",
"_sparse_coo_tensor_with_dims_and_tensors",
"sparse_mask_helper_cuda",
"_sparse_addmm",
}
GRADIENT_IMPLEMENTED_FOR_COMPLEX.update(GRADIENT_IMPLEMENTED_FOR_SPARSE_COMPLEX)
# Some operators invalidate the grad_accumulator. Let's reset it.
RESET_GRAD_ACCUMULATOR = {"set_", "resize_"}
# NOTE [ TensorImpl and Storage Pointer Sanity Checks ]
#
# We check the following properties:
# 1) A function should never change the input tensors' underlying c10::TensorImpl
# pointers or c10::Storage pointers, even if it modifies its input tensors (via
# inplace or out-variants)
# If the function does not modify its arguments, we also check the following properties
# pertaining to its output:
# 2) Its TensorImpl has use_count of 1
# 3) If the function is a view function, it has the same StorageImpl as that of
# the input it is aliased with. Otherwise, its StorageImpl has use_count of 1
#
# The following code templates implement the checks for this invariant:
SAVE_TENSOR_STORAGE = CodeTemplate(
"""\
c10::optional<Storage> ${tensor_name}_storage_saved =
${tensor_name}.has_storage() ? c10::optional<Storage>(${tensor_name}.storage()) : c10::nullopt;
"""
)
# If tensor_name == out_tensor_name, used to enforce (1), otherwise used for (2)
ENFORCE_SAME_TENSOR_STORAGE = CodeTemplate(
"""\
if (${tensor_name}_storage_saved.has_value() &&
!at::impl::dispatch_mode_enabled() &&
!at::impl::tensor_has_dispatch(${tensor_name}))
AT_ASSERT(${tensor_name}_storage_saved.value().is_alias_of(${out_tensor_name}.storage()));
"""
)
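# Illustrative sketch, not used by the codegen: instantiating the save/enforce pair
# above for a hypothetical in-place op whose input binding is named "self". The
# enforce half asserts that the storage pointer did not change across the call
# (property (1) in the note above).
def _example_storage_check() -> str:
    save = SAVE_TENSOR_STORAGE.substitute(tensor_name="self")
    enforce = ENFORCE_SAME_TENSOR_STORAGE.substitute(
        tensor_name="self", out_tensor_name="self"
    )
    return save + enforce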
SAVE_TENSORLIST_STORAGE = CodeTemplate(
"""\
std::vector<c10::optional<Storage>> ${tensorlist_name}_storage_saved(${tensorlist_name}.size());
for (const Tensor& tensor : ${tensorlist_name})
${tensorlist_name}_storage_saved.push_back(
tensor.has_storage() ? c10::optional<Storage>(tensor.storage()) : c10::nullopt);
"""
)
ENFORCE_SAME_TENSORLIST_STORAGE = CodeTemplate(
"""\
for (size_t i=0; i<${tensorlist_name}.size() && !at::impl::dispatch_mode_enabled(); i++) {
if (${tensorlist_name}_storage_saved[i].has_value() && !at::impl::tensorlist_has_dispatch(${tensorlist_name}))
AT_ASSERT(${tensorlist_name}_storage_saved[i].value().is_alias_of(${tensorlist_name}[i].storage()));
}
"""
)
SAVE_OPTIONALTENSORLIST_STORAGE = CodeTemplate(
"""\
std::vector<c10::optional<Storage>> ${tensorlist_name}_storage_saved(${tensorlist_name}.size());
for (const c10::optional<Tensor>& tensor : ${tensorlist_name})
${tensorlist_name}_storage_saved.push_back(
tensor.has_value() && tensor->has_storage() ? c10::optional<Storage>(tensor->storage()) : c10::nullopt);
"""
)
ENFORCE_SAME_OPTIONALTENSORLIST_STORAGE = CodeTemplate(
"""\
for (size_t i=0; i<${tensorlist_name}.size() && !at::impl::dispatch_mode_enabled(); i++) {
if (${tensorlist_name}_storage_saved[i].has_value() && !at::impl::tensorlist_has_dispatch(${tensorlist_name}))
AT_ASSERT(${tensorlist_name}_storage_saved[i].value().is_alias_of(
static_cast<c10::optional<Tensor>>(${tensorlist_name}[i])->storage()));
}
"""
)
SAVE_TENSOR_IMPL = CodeTemplate(
"""\
c10::intrusive_ptr<TensorImpl> ${tensor_name}_impl_saved;
if (${tensor_name}.defined()) ${tensor_name}_impl_saved = ${tensor_name}.getIntrusivePtr();
"""
)
ENFORCE_SAME_TENSOR_IMPL = CodeTemplate(
"""\
if (${tensor_name}_impl_saved && !at::impl::dispatch_mode_enabled() && !at::impl::tensor_has_dispatch(${tensor_name}))
AT_ASSERT(${tensor_name}_impl_saved == ${tensor_name}.getIntrusivePtr());
"""
)
ENFORCE_TENSOR_IMPL_USE_COUNT_LT_OR_EQ_ONE = CodeTemplate(
"""\
if (!at::impl::dispatch_mode_enabled() && !at::impl::tensor_has_dispatch(${tensor_name}))
AT_ASSERT(${tensor_name}.use_count() <= 1, "function: ${fn_name}");
"""
)
ENFORCE_TENSOR_STORAGE_USE_COUNT_EQUALS_ONE = CodeTemplate(
"""\
if (${tensor_name}.has_storage() && !at::impl::dispatch_mode_enabled() && !at::impl::tensor_has_dispatch(${tensor_name})) {
AT_ASSERT(${tensor_name}.storage().use_count() == 1, "function: ${fn_name}");
}
"""
)
SAVE_TENSORLIST_IMPL = CodeTemplate(
"""\
std::vector<c10::intrusive_ptr<TensorImpl>> ${tensorlist_name}_impl_saved(${tensorlist_name}.size());
for (size_t i=0; i<${tensorlist_name}.size(); i++)
if (${tensorlist_name}[i].defined()) ${tensorlist_name}_impl_saved[i] = ${tensorlist_name}[i].getIntrusivePtr();
"""
)
ENFORCE_SAME_TENSORLIST_IMPL = CodeTemplate(
"""\
for (size_t i=0; i<${tensorlist_name}.size() && !at::impl::dispatch_mode_enabled(); i++) {
if (${tensorlist_name}_impl_saved[i] && !at::impl::tensorlist_has_dispatch(${tensorlist_name}))
AT_ASSERT(${tensorlist_name}_impl_saved[i] == ${tensorlist_name}[i].getIntrusivePtr());
}
"""
)
SAVE_OPTIONALTENSORLIST_IMPL = CodeTemplate(
"""\
std::vector<c10::intrusive_ptr<TensorImpl>> ${tensorlist_name}_impl_saved(${tensorlist_name}.size());
for (size_t i=0; i<${tensorlist_name}.size(); i++) {
c10::optional<Tensor> t = ${tensorlist_name}[i];
if (t.has_value() && t->defined()) ${tensorlist_name}_impl_saved[i] = t->getIntrusivePtr();
}
"""
)
ENFORCE_SAME_OPTIONALTENSORLIST_IMPL = CodeTemplate(
"""\
for (size_t i=0; i<${tensorlist_name}.size() && !at::impl::dispatch_mode_enabled(); i++) {
if (${tensorlist_name}_impl_saved[i])
AT_ASSERT(${tensorlist_name}_impl_saved[i] == static_cast<c10::optional<Tensor>>(${tensorlist_name}[i])->getIntrusivePtr());
}
"""
)
# The following list contains functions for which we don't enforce the invariant.
DONT_ENFORCE_SAME_TENSOR_IMPL_OR_STORAGE = {
# These functions are expected to change impl or storage of input tensors
"set_",
"_cudnn_rnn_flatten_weight",
}
DONT_ENFORCE_TENSOR_IMPL_USE_COUNT = {
# These non-inplace, non-out functions return tensors with use_count > 1
# Therefore, they MAY (but do not necessarily) return one of their inputs as-is
# See https://github.com/pytorch/pytorch/issues/60426 for more information
"_embedding_bag",
"_embedding_bag_forward_only",
"q_per_channel_scales",
"q_per_channel_zero_points",
"lu_unpack",
"_cudnn_rnn_backward",
# The ops below failed the StorageImpl use_count check, but we skip the tensor_impl
# check too, just in case
"_cudnn_rnn",
"dequantize_self",
# lift() should never actually be called with a requires_grad=True tensor,
"lift",
"lift_fresh",
"lift_fresh_copy",
# Nested Tensors related functions
# _nested_tensor_size() should never actually be called with requires_grad=True tensor
"_nested_tensor_size",
}
DONT_ENFORCE_STORAGE_IMPL_USE_COUNT = {
# These non-view functions return tensors with storage use_count != 1
"_slow_conv2d_forward",
"slow_conv3d_forward",
"channel_shuffle",
# If an input is returned as-is in the output, we cannot guarantee its storage_impl
# use count is 1 either.
*DONT_ENFORCE_TENSOR_IMPL_USE_COUNT,
}
# END CHECKS FOR [ TensorImpl and Storage Pointer Sanity Checks ]
DECLARE_GRAD_FN = CodeTemplate(
"""\
std::shared_ptr<${op}> grad_fn;
"""
)
SETUP_ANY_REQUIRES_GRAD = CodeTemplate(
"""\
auto _any_requires_grad = compute_requires_grad( ${args_with_derivatives} );
${extra_differentiability_conditions}
(void)_any_requires_grad;
"""
)
SETUP_DERIVATIVE = CodeTemplate(
"""\
if (_any_requires_grad) {
${setup}
}
"""
)
SETUP_NONE_REQUIRES_GRAD = CodeTemplate(
"""\
if (compute_requires_grad( ${args_to_check} )) {
throw_error_out_requires_grad("${base_name}");
}
"""
)
ASSIGN_GRAD_FN = CodeTemplate(
"""\
grad_fn = std::shared_ptr<${op}>(new ${op}(${op_ctor}), deleteNode);
grad_fn->set_next_edges(collect_next_edges( ${args_with_derivatives} ));
"""
)
CALL_REDISPATCH = CodeTemplate(
"""\
at::redispatch::${api_name}(${unpacked_args})"""
)
# If the non-variable operation has return values, we use the `tmp` variable to hold the
# values temporarily and pass the values to the return variables outside of the
# `at::AutoDispatchBelowAutograd` guard block.
DISPATCH_TO_NON_VAR_TYPE_WITH_TMP_RETURN_VALUES = CodeTemplate(
"""\
auto ${tmp_var} = ([&]() {
${guard}
return ${base_type_call};
})();
"""
)
DISPATCH_TO_NON_VAR_TYPE_WITHOUT_RETURN_VALUES = CodeTemplate(
"""\
{
${guard}
${base_type_call};
}
"""
)
SET_HISTORY = CodeTemplate(
"""\
if (grad_fn) {
${fn}_history(${differentiable_outputs}, grad_fn);
}
"""
)
CONDITIONAL = CodeTemplate(
"""\
if (${cond}) {
${statements}
}
"""
)
RUN_ONLY_IN_DEBUG_MODE = CodeTemplate(
"""\
#ifndef NDEBUG
${statements}
#endif
"""
)
FW_DERIVATIVE_CHECK_TEMPLATE = CodeTemplate(
"""\
isFwGradDefined(${req_inp})\
"""
)
FW_DERIVATIVE_DEFINED_GRAD_TEMPLATE = CodeTemplate(
"""\
auto ${inp}_t_raw = toNonOptFwGrad(${inp});
auto ${inp}_tensor = toNonOptTensor(${inp});
auto ${inp}_t = (${inp}_t_raw.defined() || !${inp}_tensor.defined())
? ${inp}_t_raw : at::${zeros_fn}(${inp}_tensor.sizes(), ${inp}_tensor.options());
"""
)
FW_DERIVATIVE_DEFINED_PRIMAL_TEMPLATE = CodeTemplate(
"""\
auto ${inp}_p = toNonOptPrimal(${inp});
"""
)
FW_DERIVATIVE_SETTER_TENSOR = CodeTemplate(
"""\
if (${out_arg}_new_fw_grad_opt.has_value() && ${out_arg}_new_fw_grad_opt.value().defined() && ${out_arg}.defined()) {
// The hardcoded 0 here will need to be updated once we support multiple levels.
${out_arg}._set_fw_grad(${out_arg}_new_fw_grad_opt.value(), /* level */ 0, /* is_inplace_op */ ${is_inplace});
}
"""
)
FW_DERIVATIVE_SETTER_MULTI_OUTPUT = CodeTemplate(
"""\
if (${all_res}_new_fw_grad_opt.has_value() && std::get<${idx}>(${all_res}_new_fw_grad_opt.value()).defined()
&& ${out_arg}.defined()) {
${out_arg}._set_fw_grad(std::get<${idx}>(${all_res}_new_fw_grad_opt.value()), /* level */ 0, /* is_inplace_op */ false);
}
"""
)
FW_DERIVATIVE_SETTER_TENSOR_LIST = CodeTemplate(
"""\
if (${out_arg}_new_fw_grad_opt.has_value()) {
auto ${out_arg}_new_fw_grad = ${out_arg}_new_fw_grad_opt.value();
TORCH_INTERNAL_ASSERT(${out_arg}.size() == ${out_arg}_new_fw_grad.size());
for (auto i=0; i<${out_arg}.size(); ++i) {
if (${out_arg}_new_fw_grad[i].defined() && ${out_arg}[i].defined()) {
// The hardcoded 0 here will need to be updated once we support multiple levels.
${out_arg}[i]._set_fw_grad(${out_arg}_new_fw_grad[i], /* level */ 0, /* is_inplace_op */ ${is_inplace});
}
}
}
"""
)
FW_DERIVATIVE_TEMPLATE = CodeTemplate(
"""\
${fw_grad_opt_definition}
if (${requires_fw_grad}) {
${unpacked_arguments}
${out_arg}_new_fw_grad_opt = ${formula};
}
"""
)
FW_DERIVATIVE_FORBID_TEMPLATE = CodeTemplate(
"""\
TORCH_CHECK_NOT_IMPLEMENTED(!(${cond}), "Trying to use forward AD with ${name} that does not support it ${msg}");
"""
)
FW_DERIVATIVE_FORBID_LIST_TEMPLATE = CodeTemplate(
"""\
for (const auto& _t: ${arg}) {
TORCH_CHECK_NOT_IMPLEMENTED(!(${cond}), "Trying to use forward AD with ${name} that does not support it ${msg}");
}
"""
)
def gen_variable_type(
out: str,
native_yaml_path: str,
tags_yaml_path: str,
fns_with_diff_infos: List[NativeFunctionWithDifferentiabilityInfo],
template_path: str,
used_keys: Set[str],
) -> None:
"""VariableType.h and VariableType.cpp body
This is the at::Type subclass for differentiable tensors. The
implementation of each function dispatches to the base tensor type to
compute the output. The grad_fn is attached to differentiable functions.
"""
fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
fm.write(
"VariableType.h",
lambda: {
"generated_comment": "@" f"generated from {template_path}/VariableType.h"
},
)
# helper that generates a TORCH_LIBRARY_IMPL macro for each
# dispatch key that appears in derivatives.yaml
def wrapper_registrations(used_keys: Set[str]) -> str:
library_impl_macro_list: List[str] = []
for key in used_keys:
dispatch_key = key
if key == "Default":
dispatch_key = "Autograd"
library_impl_macro = (
f"TORCH_LIBRARY_IMPL(aten, {dispatch_key}, m) "
+ "{\n"
+ "${"
+ f"wrapper_registrations_{key}"
+ "}\n}"
)
library_impl_macro_list += [library_impl_macro]
return "\n\n".join(library_impl_macro_list)
# Generate a new template from VariableType.cpp which replaces ${wrapper_registrations}
# with per key TORCH_LIBRARY_IMPL macros for each key that appears in derivatives.yaml
fm1 = FileManager(
install_dir=out + "/templates", template_dir=template_path, dry_run=False
)
fm1.write(
"VariableType.cpp",
lambda: {
"type_derived_method_definitions": "\n\n".join(
[
"${" + f"type_derived_method_definitions_{key}" + "}"
for key in used_keys
]
),
"wrapper_registrations": wrapper_registrations(used_keys),
},
)
# Generate final VariableType_*.cpp files from the generated template
fm2 = FileManager(install_dir=out, template_dir=out + "/templates", dry_run=False)
sharded_keys = set(
[f"type_derived_method_definitions_{key}" for key in used_keys]
+ [f"wrapper_registrations_{key}" for key in used_keys]
)
# NOTE: see Note [Sharded File] at the top of the VariableType.cpp
# template regarding sharding of the generated files.
fm2.write_sharded(
"VariableType.cpp",
[fn for fn in fns_with_diff_infos if use_derived(fn)],
key_fn=lambda fn: cpp.name(fn.func.func),
base_env={
"generated_comment": "@" f"generated from {template_path}/VariableType.cpp",
},
env_callable=gen_variable_type_func,
num_shards=5,
sharded_keys=sharded_keys,
)
@with_native_function_and
def gen_wrapper_registration(f: NativeFunction, key: str = "Default") -> str:
return WRAPPER_REGISTRATION.substitute(
unqual_operator_name_with_overload=f.func.name,
type_wrapper_name=type_wrapper_name(f, key),
class_type="VariableType",
)
def gen_variable_type_func(
fn: NativeFunctionWithDifferentiabilityInfo,
) -> Dict[str, List[str]]:
f = fn.func
result = dict()
with native_function_manager(f):
name = cpp.name(f.func)
formals = gen_formals(f)
if (
fn.info is None
and not str(f.func.name.name) in RESET_GRAD_ACCUMULATOR
and not get_base_name(f) in DONT_REQUIRE_DERIVATIVE
and len(gen_differentiable_outputs(fn)) > 0
and not cpp.name(f.func) in DONT_ENFORCE_SAME_TENSOR_IMPL_OR_STORAGE
and not type_wrapper_name(f) in DONT_ENFORCE_STORAGE_IMPL_USE_COUNT
and not type_wrapper_name(f) in DONT_ENFORCE_TENSOR_IMPL_USE_COUNT
):
# NOTE: [ Registering AutogradNotImplemented boxed kernel ]
#
# When there is no derivatives.yaml entry, we register a generic boxed
# NotImplemented kernel to set grad_fn to be NotImplemented, so that forward
# proceeds as usual but an error is properly produced on backward.
# TODO: it would be nice to not have these special cases
#
# There are several cases where we still let codegen handle it:
# 1) ops that need to reset the grad accumulator (we let codegen handle this
# case because the list is (currently) only accessible in Python).
# 2) User explicitly specifies DONT_REQUIRE_DERIVATIVE. This basically makes
# autograd a fallthrough with NDEBUG checks. This can be useful when all
# outputs are integral.
# 3) When there are no differentiable outputs. This is similar to (2).
# 4) There are certain ops where we skip certain NDEBUG checks. This is similar
# to (1).
type_definition = ""
wrapper_registration = AUTOGRAD_NOT_IMPLEMENTED_REGISTRATION.substitute(
unqual_operator_name_with_overload=f.func.name
)
result["type_derived_method_definitions_Default"] = [type_definition]
result["wrapper_registrations_Default"] = [wrapper_registration]
else:
if not fn.info:
key = "Default"
type_definition = METHOD_DEFINITION.substitute(
return_type=cpp.returns_type(f.func.returns).cpp_type(),
type_wrapper_name=type_wrapper_name(f, key),
type_definition_body=emit_body(fn, key),
formals=formals,
)
wrapper_registration = gen_wrapper_registration(f, key)
result[f"type_derived_method_definitions_{key}"] = [type_definition]
result[f"wrapper_registrations_{key}"] = [wrapper_registration]
else:
for key, _ in fn.info.items():
type_definition = METHOD_DEFINITION.substitute(
return_type=cpp.returns_type(f.func.returns).cpp_type(),
type_wrapper_name=type_wrapper_name(f, key),
type_definition_body=emit_body(fn, key),
formals=formals,
)
wrapper_registration = gen_wrapper_registration(f, key)
result[f"type_derived_method_definitions_{key}"] = [type_definition]
result[f"wrapper_registrations_{key}"] = [wrapper_registration]
# See Note [Manual Backend kernels]
assert (name in MANUAL_BACKEND) == f.manual_kernel_registration
# If you want to register a kernel to Autograd, you must make the op abstract.
# In other words, this op must have a dispatch section in native_functions.yaml.
if name in MANUAL_AUTOGRAD_AND_TRACER or (
fn.info and any(info.has_derivatives for info in fn.info.values())
):
msg = (
f"There's a formula for {name}(or its functional variant) in derivatives.yaml. "
f"It's required to add a dispatch section for it with explicit supported backends e.g CPU/CUDA "
f"or CompositeExplicitAutograd in native_functions.yaml. Please see "
f"https://github.com/pytorch/pytorch/tree/master/aten/src/ATen/native#choosing-the-right-dispatch-keyword "
f"for instructions to choose the right dispatch keyword."
)
assert f.is_abstract, msg
return result
@with_native_function_with_differentiability_info_and_key
def emit_body(
fn: NativeFunctionWithDifferentiabilityInfo, key: str = "Default"
) -> List[str]:
assert dispatch_strategy(fn) == "use_derived"
f = fn.func
info = fn.info[key] if fn.info else None
fw_derivatives = fn.fw_derivatives.get(key, []) if fn.fw_derivatives else []
name = cpp.name(f.func)
inplace = f.func.kind() == SchemaKind.inplace
is_out_fn = f.func.kind() == SchemaKind.out
returns_void = len(f.func.returns) == 0
base_name = get_base_name(f)
view_info = get_view_info(f)
def gen_differentiable_input(
arg: Union[Argument, SelfArgument, TensorOptionsArguments]
) -> Optional[DifferentiableInput]:
if isinstance(arg, TensorOptionsArguments):
return None
a: Argument = arg.argument if isinstance(arg, SelfArgument) else arg
# TODO: `cpp_type` is only to keep it byte-for-byte compatible with the old codegen, should remove.
# NB: This is not a clone of cpp.argument() - TensorOptionsArguments / faithful / binds are
# not handled properly as they are irrelevant for this codegen.
cpp_type = cpp.argument_type(a, binds=a.name).cpp_type()
if not is_differentiable(a.name, a.type, info):
return None
return DifferentiableInput(
name=a.name,
type=a.type,
cpp_type=cpp_type,
)
@with_native_function
def gen_differentiable_inputs(f: NativeFunction) -> List[DifferentiableInput]:
return list(mapMaybe(gen_differentiable_input, f.func.arguments.non_out))
def find_args_with_derivatives(
differentiable_inputs: List[DifferentiableInput],
) -> List[DifferentiableInput]:
"""Find arguments that have derivative definitions"""
if info is None or not info.has_derivatives:
return differentiable_inputs
        names = {name for d in info.derivatives for name in d.var_names}
differentiable = [arg for arg in differentiable_inputs if arg.name in names]
if len(differentiable) != len(names):
            missing = names - {arg.name for arg in differentiable}
raise RuntimeError(
f"Missing arguments for derivatives: {missing} in {info.name}"
)
return differentiable
differentiable_inputs = gen_differentiable_inputs(f)
args_with_derivatives = find_args_with_derivatives(differentiable_inputs)
differentiable_outputs = gen_differentiable_outputs(fn, key)
undifferentiable = (base_name in DONT_REQUIRE_DERIVATIVE) or (
name in DONT_REQUIRE_DERIVATIVE
)
requires_derivative = (
(not undifferentiable)
and (len(differentiable_inputs) > 0)
and (len(differentiable_outputs) > 0)
)
if (
info is not None
and info.has_derivatives
and not requires_derivative
# out= ops are allowed to have zero returns which cause requires_derivative to be False
# we shouldn't error out though (out= ops for autograd just redispatch)
and len(f.func.returns) > 0
):
raise RuntimeError(
f"ERROR: derivative ignored for {name} -- specified an autograd function without derivative"
)
def emit_save_inputs() -> List[str]:
setup: List[str] = []
if info is None or not info.has_derivatives:
return setup
has_tensorlist_arg = any(
is_tensor_list_type(arg.type) for arg in args_with_derivatives
)
# We don't want to save tensors if we know that they will never be used
# when computing the derivative, so we add guards to those statements
def guard_for(arg: SavedAttribute) -> Optional[str]:
assert info is not None
# It's hard to determine the edge offset if we have TensorLists
if has_tensorlist_arg:
return None
            # Empirical evaluation of the cases where we insert those guards in
            # backward shows that they are somewhat useless. E.g. there's no need
# to guard on some values captured from forward, because they had to
# require_grad if the backward function even gets executed. I don't
# have any good ideas for detecting those cases, so I simply disabled the
# checks.
if "backward" in info.name:
return None
# If there's a single derivative we could compute, we already have
# a requires_grad check that is sufficient
if len(args_with_derivatives) <= 1:
return None
# We really only care about trimming down the amount of tensors we save
if arg.nctype.type != BaseCType(tensorT):
return None
# We want to emit simple guards, so we only allow that if checking one
# input is enough to determine whether we need that value
used_in = [d for d in info.derivatives if arg in d.saved_inputs]
assert len(used_in) > 0
if len(used_in) != 1:
return None
derivative = used_in[0]
if len(derivative.var_names) != 1:
return None
derivative_var_name = derivative.var_names[0]
# Figure out the offset of the edge that uses this variable
for edge_off, a in enumerate(args_with_derivatives):
if a.name == derivative_var_name:
break
else:
raise AssertionError()
return f"grad_fn->should_compute_output({edge_off})"
setup.extend(save_variables(info.all_saved_inputs, False, guard_for))
for arg in args_with_derivatives:
if is_tensor_list_type(arg.type):
setup.append(f"grad_fn->{arg.name}_size_ = {arg.name}.size();")
return setup
def setup_derivative(differentiable_inputs: List[DifferentiableInput]) -> List[str]:
body: List[str] = []
if is_out_fn:
# For out functions, ensure that no input or output requires grad
body.append(DECLARE_GRAD_FN.substitute(op="Node"))
body.append(
SETUP_NONE_REQUIRES_GRAD.substitute(
base_name=base_name,
args_to_check=[arg.name for arg in differentiable_inputs],
)
)
body.append(
SETUP_NONE_REQUIRES_GRAD.substitute(
base_name=base_name,
args_to_check=[arg.name for arg in differentiable_outputs],
)
)
return body
op = info.op if info is not None and info.has_derivatives else "NotImplemented"
setup = []
setup.extend(
ASSIGN_GRAD_FN.substitute(
op=op,
op_ctor=""
if info is not None and info.has_derivatives
else f'"{cpp.name(f.func)}"',
args_with_derivatives=[arg.name for arg in args_with_derivatives],
).split("\n")
)
setup.extend(emit_save_inputs())
body.extend(
emit_check_no_requires_grad(differentiable_inputs, args_with_derivatives)
)
body.append(DECLARE_GRAD_FN.substitute(op=op))
body.append(SETUP_DERIVATIVE.substitute(setup=setup))
return body
def emit_check_if_in_complex_autograd_allowlist() -> List[str]:
body: List[str] = []
if base_name in GRADIENT_IMPLEMENTED_FOR_COMPLEX:
return body
for arg in differentiable_outputs:
name = arg.name
# TODO: should be `arg.type.is_tensor_like()`?
if arg.cpp_type in [
"at::Tensor",
"at::TensorList",
"const c10::List<c10::optional<at::Tensor>> &",
]:
body.append(f'throw_error_for_complex_autograd({name}, "{base_name}");')
return body
def emit_check_no_requires_grad(
tensor_args: List[DifferentiableInput],
args_with_derivatives: List[DifferentiableInput],
) -> List[str]:
"""Checks that arguments without derivatives don't require grad"""
body: List[str] = []
for arg in tensor_args:
if arg in args_with_derivatives:
continue
arg_name = arg.name
if info and arg_name in info.non_differentiable_arg_names:
continue
if arg_name == "output":
# Double-backwards definitions sometimes take in 'input' and
# 'output', but only define the derivative for input.
continue
body.append(f'check_no_requires_grad({arg_name}, "{arg_name}", "{name}");')
return body
def emit_original_self_definition() -> List[str]:
body: List[str] = []
if inplace:
body.append("c10::optional<at::Tensor> original_self;")
all_forward_grad_cond = []
for derivative in fw_derivatives:
if derivative.required_original_self_value:
all_forward_grad_cond.append(
get_any_has_forward_grad_name(derivative.var_names)
)
if all_forward_grad_cond:
body.append(f'if ({" || ".join(all_forward_grad_cond)}) {{')
body.append(" original_self = self.clone();")
body.append("}")
return body
def save_variables(
saved_variables: Sequence[SavedAttribute],
is_output: bool,
guard_for: Callable[[SavedAttribute], Optional[str]] = lambda name: None,
) -> Sequence[str]:
# assign the saved variables to the generated grad_fn
stmts: List[str] = []
for arg in saved_variables:
name = (
arg.nctype.name.name
if isinstance(arg.nctype.name, SpecialArgName)
else arg.nctype.name
)
type = arg.nctype.type
expr = arg.expr
stmts_prepend = None
if (
type == BaseCType(tensorT)
or type == OptionalCType(BaseCType(tensorT))
or type == MutRefCType(OptionalCType(BaseCType(tensorT)))
or (is_output and type == BaseCType(scalarT))
):
var = name
name += "_"
if var == "self" and inplace:
stmts_prepend = (
"if (!original_self.has_value()) original_self = self.clone()"
)
var = "original_self.value()"
assert not is_output
if inplace and is_output:
var = "self"
is_inplace_view = f"{var}.is_view()"
expr = f"SavedVariable({var}, {str(is_output).lower()}, {is_inplace_view})"
else:
expr = f"SavedVariable({var}, {str(is_output).lower()})"
elif type == BaseCType(tensorListT) or type == ListCType(
OptionalCType(BaseCType(tensorT))
):
expr = f"make_saved_variable_list({name})"
name += "_"
elif type == BaseCType(intArrayRefT):
expr = expr + ".vec()"
elif type == BaseCType(symIntArrayRefT):
expr = expr + ".vec()"
elif type == BaseCType(stringT):
expr = f"std::string({expr})"
elif type == OptionalCType(BaseCType(stringT)):
expr = f"{expr}.has_value() ? c10::optional<std::string>(std::string({expr}.value())) : c10::nullopt"
guard = guard_for(arg)
if guard is None:
if stmts_prepend:
stmts.append(f"{stmts_prepend};")
stmts.append(f"grad_fn->{name} = {expr};")
else:
stmts.append(f"if ({guard}) {{")
if stmts_prepend:
stmts.append(f" {stmts_prepend};")
stmts.append(f" grad_fn->{name} = {expr};")
stmts.append("}")
return stmts
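    # Illustrative output of save_variables for a pair of plain Tensor inputs
    # (argument names here are hypothetical):
    #   grad_fn->self_ = SavedVariable(self, false);
    #   grad_fn->other_ = SavedVariable(other, false);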
# Generates a Dispatcher::redispatch() call into the dispatcher. We do this mainly for performance reasons:
# - Pre-compute the full DispatchKeySet. This saves the dispatcher from having to read from TLS.
# - redispatch() avoids a redundant call to RecordFunction, which was already called right before
# we entered this autograd kernel.
def emit_dispatch_call(
f: NativeFunction, input_base: str, unpacked_args: Sequence[str]
) -> str:
"""Dispatch call via function in a namespace or method on Tensor."""
dispatcher_sig = DispatcherSignature.from_schema(f.func)
dispatcher_exprs = dispatcher_sig.exprs()
# code-generated autograd kernels plumb and recompute dispatch keys directly through the kernel for performance.
# Ops also always have a function variant of the redispatch API.
# See Note [Plumbing Keys Through The Dispatcher] for details.
dispatch_key_set = "ks & c10::after_autograd_keyset"
call = CALL_REDISPATCH.substitute(
api_name=cpp.name(
f.func,
faithful_name_for_out_overloads=True,
),
unpacked_args=[dispatch_key_set] + list(unpacked_args),
)
return call
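    # Roughly, the generated call prepends the precomputed key set to the
    # unpacked arguments; the exact shape comes from the CALL_REDISPATCH
    # template (op and argument names here are hypothetical):
    #   <api_name>(ks & c10::after_autograd_keyset, self_, other_)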
def wrap_output(
f: NativeFunction, unpacked_bindings: List[Binding], var: str
) -> str:
call = ""
rhs_value: Optional[str] = None
if not any(r.type.is_tensor_like() for r in f.func.returns):
rhs_value = var
else:
rhs_value = f"std::move({var})"
assert rhs_value is not None
call += ASSIGN_RETURN_VALUE.substitute(
return_values=tie_return_values(f), rhs_value=rhs_value
)
return call
def check_tensorimpl_and_storage(
call: str, unpacked_bindings: List[Binding]
) -> str:
# See NOTE [ TensorImpl and Storage Pointer Sanity Checks ]
stmts_before_call: List[str] = []
stmts_after_call: List[str] = []
if cpp.name(f.func) in DONT_ENFORCE_SAME_TENSOR_IMPL_OR_STORAGE:
return call
# Check properties of inputs (enforce (1))
for unpacked_binding in unpacked_bindings:
arg = unpacked_binding.name
noref_cpp_type = unpacked_binding.nctype.type.remove_const_ref()
if noref_cpp_type == BaseCType(tensorListT):
stmts_before_call += [
SAVE_TENSORLIST_STORAGE.substitute(tensorlist_name=arg),
SAVE_TENSORLIST_IMPL.substitute(tensorlist_name=arg),
]
stmts_after_call += [
ENFORCE_SAME_TENSORLIST_STORAGE.substitute(tensorlist_name=arg),
ENFORCE_SAME_TENSORLIST_IMPL.substitute(tensorlist_name=arg),
]
elif noref_cpp_type == ListCType(OptionalCType(BaseCType(tensorT))):
stmts_before_call += [
SAVE_OPTIONALTENSORLIST_STORAGE.substitute(tensorlist_name=arg),
SAVE_OPTIONALTENSORLIST_IMPL.substitute(tensorlist_name=arg),
]
stmts_after_call += [
ENFORCE_SAME_OPTIONALTENSORLIST_STORAGE.substitute(
tensorlist_name=arg
),
ENFORCE_SAME_OPTIONALTENSORLIST_IMPL.substitute(
tensorlist_name=arg
),
]
elif noref_cpp_type == BaseCType(tensorT):
stmts_before_call += [
SAVE_TENSOR_STORAGE.substitute(tensor_name=arg),
SAVE_TENSOR_IMPL.substitute(tensor_name=arg),
]
stmts_after_call += [
ENFORCE_SAME_TENSOR_STORAGE.substitute(
tensor_name=arg, out_tensor_name=arg
),
ENFORCE_SAME_TENSOR_IMPL.substitute(tensor_name=arg),
]
assert (stmts_before_call and stmts_after_call) or (
not stmts_before_call and not stmts_after_call
)
# Check properties of outputs (enforce (2), (3))
        if f.func.kind() not in (SchemaKind.inplace, SchemaKind.out):
base_name = f.func.name.name.base # TODO: should be str(f.func.name.name)?
aliased_arg_name = ALL_VIEW_FUNCTIONS.get(base_name, None)
if aliased_arg_name is not None:
aliased_arg_name = unpacked_name(aliased_arg_name)
for i, (ret, ret_name) in enumerate(
zip(f.func.returns, cpp.return_names(f))
):
noref_cpp_type = cpp.return_type(ret).remove_const_ref()
if noref_cpp_type == BaseCType(tensorT):
if aliased_arg_name is not None:
                        assert (
                            i == 0
                        ), f"Expect non-CompositeImplicitAutograd view function {base_name} to return a single output"
stmts_after_call += [
ENFORCE_SAME_TENSOR_STORAGE.substitute(
tensor_name=aliased_arg_name, out_tensor_name=ret_name
)
]
else:
if (
type_wrapper_name(f)
not in DONT_ENFORCE_STORAGE_IMPL_USE_COUNT
):
stmts_after_call += [
ENFORCE_TENSOR_STORAGE_USE_COUNT_EQUALS_ONE.substitute(
tensor_name=ret_name, fn_name=type_wrapper_name(f)
)
]
if type_wrapper_name(f) not in DONT_ENFORCE_TENSOR_IMPL_USE_COUNT:
stmts_after_call += [
ENFORCE_TENSOR_IMPL_USE_COUNT_LT_OR_EQ_ONE.substitute(
tensor_name=ret_name, fn_name=type_wrapper_name(f)
)
]
# Currently we don't have any functions that return the following types, but
# we should update the checks once we do
elif noref_cpp_type == ListCType(OptionalCType(BaseCType(tensorT))):
raise AssertionError(
f"Please add use_count checks for {noref_cpp_type}"
)
elif noref_cpp_type == BaseCType(tensorListT):
raise AssertionError(
f"Please add use_count checks for {noref_cpp_type}"
)
if stmts_before_call and stmts_after_call:
call = (
RUN_ONLY_IN_DEBUG_MODE.substitute(statements=stmts_before_call)
+ call
+ RUN_ONLY_IN_DEBUG_MODE.substitute(statements=stmts_after_call)
)
return call
def emit_call(f: NativeFunction, unpacked_bindings: List[Binding]) -> str:
# We only care about adding `at::AutoDispatchBelowAutograd` guard for non-variable dispatch
# (which corresponds to 'use_derived' strategy). The purpose of this guard is to make sure
# the baseType operations still dispatch to non-Variable type, even if the arguments passed
# in are now Variables.
# See NOTE [ Treating Variables as non-Variables in type dispatch ] for details.
unpacked_args = [b.name for b in unpacked_bindings]
base_type_call = emit_dispatch_call(f, "self_", unpacked_args)
if get_view_info(f) is not None or modifies_arguments(f):
guard = "at::AutoDispatchBelowAutograd guard;"
else:
guard = "at::AutoDispatchBelowADInplaceOrView guard;"
if not modifies_arguments(f) and not returns_void:
call = DISPATCH_TO_NON_VAR_TYPE_WITH_TMP_RETURN_VALUES.substitute(
base_type_call=base_type_call, tmp_var=TMP_VAR, guard=guard
)
call += wrap_output(f, unpacked_bindings, TMP_VAR)
else:
call = DISPATCH_TO_NON_VAR_TYPE_WITHOUT_RETURN_VALUES.substitute(
base_type_call=base_type_call, guard=guard
)
call = check_tensorimpl_and_storage(call, unpacked_bindings)
return call
def emit_history() -> str:
fn = "rebase" if modifies_arguments(f) and view_info is None else "set"
output_names = [r.name for r in differentiable_outputs]
# TODO: flatten allocates a std::vector, which could be expensive
outs = CodeTemplate("flatten_tensor_args( ${outs} )").substitute(
outs=output_names
)
return SET_HISTORY.substitute(fn=fn, differentiable_outputs=outs)
def emit_save_outputs() -> str:
if is_out_fn:
# out functions don't currently support differentiation
return ""
if info is not None and info.has_derivatives:
stmts = save_variables(info.all_saved_outputs, True)
if len(stmts) == 0:
return ""
return CONDITIONAL.substitute(cond="grad_fn", statements=stmts)
return ""
def emit_any_requires_grad() -> List[str]:
extra_condition = ""
if info and info.output_differentiability_conditions:
assert len(info.output_differentiability_conditions) == 1
extra_condition = f"_any_requires_grad &= ({info.output_differentiability_conditions[0]});"
return [
SETUP_ANY_REQUIRES_GRAD.substitute(
args_with_derivatives=[arg.name for arg in args_with_derivatives],
extra_differentiability_conditions=extra_condition,
)
]
def get_any_has_forward_grad_name(var_names: Tuple[str, ...]) -> str:
if len(var_names) == 1:
return f"_any_has_forward_grad_{var_names[0]}"
else:
return f'_any_has_forward_grad_{"_".join(var_names)}'
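    # e.g. ("result",) -> "_any_has_forward_grad_result" and
    #      ("values", "indices") -> "_any_has_forward_grad_values_indices"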
def emit_any_has_forward_grad() -> List[str]:
content: List[str] = []
for derivative in fw_derivatives:
assert derivative.required_inputs_fw_grad is not None
requires_fw_grad = " || ".join(
[
FW_DERIVATIVE_CHECK_TEMPLATE.substitute(req_inp=inp.name)
for inp in differentiable_inputs
if inp.name in derivative.required_inputs_fw_grad
]
)
if not requires_fw_grad:
# Handle functions like stack
# For these, we don't unpack anything and always call the user function
if not (
len(differentiable_inputs) == 1
and is_tensor_list_type(differentiable_inputs[0].type)
):
                    raise RuntimeError(
                        f'No differentiable input to "{name}" is a differentiable Tensor (as the provided '
                        "forward AD formula does not use any input tangent) even though a forward gradient "
                        "formula has been defined for it. This case should only happen for functions that "
                        "take a single TensorList as input. All other cases are not supported right now."
                    )
requires_fw_grad = "true"
if info and info.output_differentiability_conditions:
assert len(info.output_differentiability_conditions) == 1
requires_fw_grad = f"({info.output_differentiability_conditions[0]}) && ({requires_fw_grad})"
content.append(
f"auto {get_any_has_forward_grad_name(derivative.var_names)} = {requires_fw_grad};\n"
f"(void){get_any_has_forward_grad_name(derivative.var_names)};"
)
return content
def emit_check_inplace() -> List[str]:
if not inplace:
return []
return [
f"check_inplace({arg.name}, _any_requires_grad);"
for arg in differentiable_outputs
]
def emit_fw_derivatives() -> List[str]:
content: List[str] = []
fw_grad_setters: List[str] = []
for derivative in fw_derivatives:
res = derivative.var_names
if f.func.name.name.inplace:
assert (
len(res) == 1
), "Expected number of outputs to be 1 if function is inplace"
# TODO update this when inplace namings are unified
res = ("self",)
assert derivative.required_inputs_fw_grad is not None
unpacked_arguments = ""
for inp in differentiable_inputs:
zeros_fn = (
"zeros"
if inplace and inp.name == "self"
else "_efficientzerotensor"
)
if inp.name in derivative.required_inputs_fw_grad:
unpacked_arguments += (
FW_DERIVATIVE_DEFINED_GRAD_TEMPLATE.substitute(
inp=inp.name, zeros_fn=zeros_fn
)
)
if inp.name in (derivative.required_inputs_primal or []):
unpacked_arguments += (
FW_DERIVATIVE_DEFINED_PRIMAL_TEMPLATE.substitute(inp=inp.name)
)
if derivative.required_original_self_value:
unpacked_arguments += FW_DERIVATIVE_DEFINED_GRAD_TEMPLATE.substitute(
inp="original_self", zeros_fn=zeros_fn
)
unpacked_arguments += FW_DERIVATIVE_DEFINED_PRIMAL_TEMPLATE.substitute(
inp="original_self"
)
elif inplace and derivative.is_reusing_outplace_formula:
# The gradient wasn't already cloned, do it if grad mode is enabled
unpacked_arguments += (
"self_t = GradMode::is_enabled() ? self_t.clone() : self_t;"
)
if inplace:
is_inplace_str = "true"
else:
is_inplace_str = "false"
requires_fw_grad = get_any_has_forward_grad_name(derivative.var_names)
if all(
(isinstance(var_type, BaseType) and var_type.is_tensor_like())
for var_type in derivative.var_types
):
# Is there a way to get from BaseType to BaseCType
if len(derivative.var_types) == 1:
opt_res_grad_type = OptionalCType(BaseCType(tensorT)).cpp_type()
fw_grad_setters.append(
FW_DERIVATIVE_SETTER_TENSOR.substitute(
out_arg=res[0], is_inplace=is_inplace_str
)
)
requires_fw_grad += f" && ({derivative.var_names[0]}.defined())"
else:
tuple_type = TupleCType(
[BaseCType(tensorT)] * len(derivative.var_types)
)
opt_res_grad_type = OptionalCType(tuple_type).cpp_type()
for idx, single_res in enumerate(res):
fw_grad_setters.append(
FW_DERIVATIVE_SETTER_MULTI_OUTPUT.substitute(
idx=idx, all_res="_".join(res), out_arg=single_res
)
)
elif (
isinstance(derivative.var_types[0], ListType)
and derivative.var_types[0].is_tensor_like()
):
assert (
len(derivative.var_types) == 1
), "Expected number of outputs to be 1 if function returns ListType"
opt_res_grad_type = OptionalCType(
VectorCType(BaseCType(tensorT))
).cpp_type()
fw_grad_setters.append(
FW_DERIVATIVE_SETTER_TENSOR_LIST.substitute(
out_arg=res[0], is_inplace=is_inplace_str
)
)
else:
raise RuntimeError("Unsupported output type for forward derivative")
fw_grad_opt_definition = (
f"{opt_res_grad_type} {'_'.join(res)}_new_fw_grad_opt = c10::nullopt;"
)
# View ops create fw_grad that already is a view of the base's fw_grad so just use that
content.append(
FW_DERIVATIVE_TEMPLATE.substitute(
fw_grad_opt_definition=fw_grad_opt_definition,
requires_fw_grad=requires_fw_grad,
formula=derivative.formula,
out_arg="_".join(res),
unpacked_arguments=unpacked_arguments,
)
)
# Set all the grads at the end to avoid: https://github.com/pytorch/pytorch/issues/67367
content.append("\n".join(fw_grad_setters))
return content
def emit_forbid_fw_derivatives(is_out_fn: bool = False) -> str:
def get_msg() -> str:
if is_out_fn:
msg = "because it is an out= function"
else:
msg = (
"because it has not been implemented yet.\\nPlease file an issue "
"to PyTorch at https://github.com/pytorch/pytorch/issues/new?template=feature-request.yml "
"so that we can prioritize its implementation."
)
return msg
res = ""
to_check: List[str] = []
for inp in list(
mapMaybe(
gen_differentiable_input,
f.func.arguments.non_out + list(f.func.arguments.out), # type: ignore[operator]
)
):
if is_tensor_type(inp.type):
to_check.append(
FW_DERIVATIVE_CHECK_TEMPLATE.substitute(req_inp=inp.name)
)
elif is_tensor_list_type(inp.type):
cond = FW_DERIVATIVE_CHECK_TEMPLATE.substitute(req_inp="_t")
res += FW_DERIVATIVE_FORBID_LIST_TEMPLATE.substitute(
arg=inp.name, cond=cond, name=name, msg=get_msg()
)
else:
raise RuntimeError(
f'Unsupported input type for "{name}" when forbidding forward AD usage.'
)
if len(to_check) > 0:
cond = " || ".join(to_check)
res += FW_DERIVATIVE_FORBID_TEMPLATE.substitute(
cond=cond, name=name, msg=get_msg()
)
return res
body: List[str] = []
unpack_args_stats, unpacked_bindings = unpack_args(f)
body.extend(unpack_args_stats)
if requires_derivative:
body.extend(emit_any_requires_grad())
body.extend(emit_any_has_forward_grad())
body.extend(emit_check_inplace())
body.extend(emit_original_self_definition())
body.extend(setup_derivative(differentiable_inputs))
body.append(declare_returned_variables(f))
body.append(emit_call(f, unpacked_bindings))
if requires_derivative:
# set_flags has to appear after version_counter, because rebase_history
# requires that the counter is incremented before it is called
body.append(emit_history())
body.extend(emit_check_if_in_complex_autograd_allowlist())
if is_out_fn:
body.append(emit_forbid_fw_derivatives(is_out_fn=True))
else:
if requires_derivative:
body.extend(emit_fw_derivatives())
if len(fw_derivatives) == 0:
body.append(emit_forbid_fw_derivatives())
else:
assert sum(
len(derivative.var_names) for derivative in fw_derivatives
) == len(differentiable_outputs), (
"Expected the number of forward derivatives implemented to match the "
"number of differentiable outputs. NB: This only applies when at least "
"one forward derivative is implemented. Not implementing any forward "
"derivatives is also okay, and we would require inputs to the op to "
"not have associated tangents in that case."
)
if requires_derivative:
# Save only after the forward AD has been set up
body.append(emit_save_outputs())
if str(f.func.name.name) in RESET_GRAD_ACCUMULATOR:
# `inplace` implies that there is exactly one output named `self`,
# so we can keep the generated code easy. If you need to
# `reset_grad_accumulator` in an operator that's not `inplace`, you can
# remove this assert but the code generation will get more elaborate
assert inplace
body.append("reset_grad_accumulator(self);")
if not returns_void:
body.append(f"return {get_return_value(f)};")
return body
|
pytorch-master
|
tools/autograd/gen_variable_type.py
|
import functools
from typing import Callable
from torchgen.api.autograd import NativeFunctionWithDifferentiabilityInfo as NFWDI
from torchgen.context import native_function_manager
from torchgen.utils import T
# Like tools.api.context.with_native_function, but for
# NativeFunctionWithDifferentiabilityInfo.
def with_native_function_with_differentiability_info(
func: Callable[[NFWDI], T]
) -> Callable[[NFWDI], T]:
@functools.wraps(func)
def wrapper(f: NFWDI) -> T:
with native_function_manager(f.func):
return func(f)
return wrapper
# Like the above but with an additional dispatch key string argument
def with_native_function_with_differentiability_info_and_key(
func: Callable[[NFWDI, str], T]
) -> Callable[[NFWDI, str], T]:
@functools.wraps(func)
def wrapper(f: NFWDI, key: str) -> T:
with native_function_manager(f.func):
return func(f, key)
return wrapper
|
pytorch-master
|
tools/autograd/context.py
|
# Generates ADInplaceOrViewType.h/cpp
#
# NOTE: If any changes are being made to the ADInplaceOrView codegen please also check
# if updates are needed in torch/csrc/autograd/autograd_not_implemented_fallback.cpp
# The fallback is expected to mimic this codegen, so we should keep the two in sync.
from typing import Dict, List, Optional, Sequence, Tuple
from torchgen.api import cpp
from torchgen.api.autograd import (
dispatch_strategy,
gen_differentiable_outputs,
NativeFunctionWithDifferentiabilityInfo,
)
from torchgen.api.types import (
BaseCType,
Binding,
boolT,
CType,
DispatcherSignature,
intArrayRefT,
longT,
OptionalCType,
symIntArrayRefT,
)
from torchgen.code_template import CodeTemplate
from torchgen.context import with_native_function
from torchgen.model import (
NativeFunction,
SchemaKind,
SelfArgument,
TensorOptionsArguments,
Type,
)
from torchgen.utils import FileManager
from .context import with_native_function_with_differentiability_info
from .gen_trace_type import (
get_return_value,
MANUAL_AUTOGRAD,
tie_return_values,
type_wrapper_name,
)
# See NOTE [ Autograd View Variables ] in variable.h for details.
# If you update list VIEW_FUNCTIONS or RETURNS_VIEWS_OF_INPUT,
# you **MUST** also update the public list of view ops accordingly in
# docs/source/tensor_view.rst. Note that not all ATen functions are exposed to the public,
# e.g. alias & sparse_coo_tensor_with_dims_and_tensors.
#
# A map: function name => name of the argument that all outputs are view of
VIEW_FUNCTIONS_WITH_METADATA_CHANGE = [
"view_as_complex",
"view_as_real",
"_conj",
"_neg_view",
]
VIEW_FUNCTIONS = {
"numpy_T": "self",
"alias": "self",
"as_strided": "self",
"diagonal": "self",
"expand": "self",
"permute": "self",
"select": "self",
"slice": "self",
"split": "self",
"split_with_sizes": "self",
"squeeze": "self",
"t": "self",
"transpose": "self",
"unfold": "self",
"unsqueeze": "self",
"flatten": "self",
"view": "self",
"unbind": "self",
"_indices": "self",
"_values": "self",
"indices": "self",
"values": "self",
"crow_indices": "self",
"col_indices": "self",
"ccol_indices": "self",
"row_indices": "self",
    # sparse_coo ctor output should really be views of both indices and values,
    # but we only support making a view of a single variable, and indices is
    # discrete anyway.
# FIXME: clone indices on construction.
"sparse_coo_tensor_with_dims_and_tensors": "values",
"_reshape_alias": "self",
}
for key in VIEW_FUNCTIONS_WITH_METADATA_CHANGE:
VIEW_FUNCTIONS[key] = "self"
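# After this loop the metadata-changing views (e.g. "view_as_complex") are also
# mapped to "self", so they are handled like the other self-views above.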
# Note: some VIEW_FUNCTIONS are just compositions of the view functions above.
# This list contains both the root view functions and any that are purely composed
# of viewing functions, and is used by the JIT to determine when an operator
# may return a view of its inputs; however, they may sometimes return a copy
# (e.g. `contiguous`).
RETURNS_VIEWS_OF_INPUT = set(VIEW_FUNCTIONS.keys()).union(
{
"chunk",
"detach",
"contiguous",
"reshape",
"reshape_as",
"expand_as",
"view_as",
"real",
"imag",
"narrow",
"movedim",
"tensor_split",
"swapdims",
"swapaxes",
"mT",
"mH",
"adjoint",
"matrix_H",
}
)
# These are the functions we consider views for the purposes of validating
# StorageImpl and TensorImpl in gen_variable_type.
# `_unsafe_view` is not included in VIEW_FUNCTIONS above because it is not a
# view for the purposes of the ADInplaceOrView kernel, so we do not want to call
# as_view on its output. See NOTE [Unsafe View] for more info.
ALL_VIEW_FUNCTIONS = {
**VIEW_FUNCTIONS,
"_unsafe_view": "self",
}
ARRAYREF_TO_VEC = CodeTemplate(
"""\
auto ${vec} = ${arg}.vec();
"""
)
OPTIONAL_TO_VAL = CodeTemplate(
"""\
auto ${val} = ${arg}.value_or(${default});
"""
)
CALL_DISPATCH = CodeTemplate(
"""\
at::_ops::${unambiguous_name}::call(${unpacked_args})"""
)
SETUP_REPLAY_VIEW_IF_NOT_SUPPORT_AS_STRIDED_OR_VIEW_WITH_METADATA_CHANGE = CodeTemplate(
"""\
std::function<at::Tensor(const at::Tensor&)> func=nullptr;
if (${is_view_with_metadata_change} || !self.unsafeGetTensorImpl()->support_as_strided()) {
${replay_view_func}
}
"""
)
REPLAY_VIEW_LAMBDA_FUNC = CodeTemplate(
"""\
func = [=](const at::Tensor& ${input_base}) {
return ${replay_view_call};
};
"""
)
METHOD_DEFINITION = CodeTemplate(
"""\
${return_type} ${type_wrapper_name}(${formals}) {
${type_definition_body}
}
"""
)
WRAPPER_REGISTRATION = CodeTemplate(
"""\
m.impl("${unqual_operator_name_with_overload}",
TORCH_FN(${class_type}::${type_wrapper_name})
);
"""
)
AUTOGRAD_NOT_IMPLEMENTED_REGISTRATION = CodeTemplate(
"""\
m.impl("${unqual_operator_name_with_overload}", torch::autograd::autogradNotImplementedFallback());
"""
)
INPLACE_REDISPATCH = CodeTemplate(
"""\
{
at::AutoDispatchBelowADInplaceOrView guard;
at::_ops::${unambiguous_name}::redispatch(${unpacked_args});
}
"""
)
ASSIGN_RETURN_VALUE = CodeTemplate(
"""\
${return_values} = ${rhs_value};
"""
)
VIEW_REDISPATCH = CodeTemplate(
"""\
${assign_return_values} ([&]() {
at::AutoDispatchBelowADInplaceOrView guard;
return at::_ops::${unambiguous_name}::redispatch(${unpacked_args});
})();
"""
)
TMP_VAR = "_tmp"
# FIXME: Ideally these functions should be methods on Type class, but we have a
# comment in codegen/model.py there saying these concepts are not well defined.
# Thus we put a version that is commonly used by autograd codegen here.
def is_tensor_type(t: Type) -> bool:
# TODO: Should handle optional here?
return t.is_tensor_like() and t.is_list_like() is None
def is_tensor_list_type(t: Type) -> bool:
# TODO: Should handle optional here?
return t.is_tensor_like() and t.is_list_like() is not None
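# For example, a schema type `Tensor` satisfies is_tensor_type while `Tensor[]`
# satisfies is_tensor_list_type; optionality is not special-cased (see the TODOs above).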
UNPACK_TENSOR = CodeTemplate(
"""\
auto${ref} ${arg_name}_ = unpack${suffix}(${arg_name}, "${arg_name}", ${arg_pos});"""
)
def unpacked_name(arg_name: str) -> str:
return arg_name + "_"
@with_native_function
def unpack_args(f: NativeFunction) -> Tuple[List[str], List[Binding]]:
body: List[str] = []
unpacked_bindings: List[Binding] = []
bindings = [
r
for a in f.func.schema_order_arguments()
for r in cpp.argument(
a,
method=False,
cpp_no_default_args=set(),
faithful=False,
has_tensor_options=False,
)
]
for i, binding in enumerate(bindings):
assert not isinstance(binding.argument, SelfArgument)
if isinstance(binding.argument, TensorOptionsArguments):
raise RuntimeError("VariableKernel shouldn't take TensorOptions")
is_nullable = binding.argument.type.is_nullable()
if not binding.argument.type.is_tensor_like() or is_nullable:
unpacked_bindings.append(binding)
continue
is_tensor_list = is_tensor_list_type(binding.argument.type)
ref = (not is_nullable) and not is_tensor_list
suffix = "_opt" if is_nullable and not is_tensor_list else ""
body.append(
UNPACK_TENSOR.substitute(
arg_name=binding.name,
arg_pos=i,
suffix=suffix,
ref="&" if ref else "",
)
)
unpacked_bindings.append(
Binding(
name=unpacked_name(binding.name),
nctype=binding.nctype,
argument=binding.argument,
default=binding.default,
)
)
return body, unpacked_bindings
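# Illustratively, for a non-optional Tensor argument `self` at position 0, the
# UNPACK_TENSOR template above expands to roughly:
#   auto& self_ = unpack(self, "self", 0);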
def get_base_name(f: NativeFunction) -> str:
return f.func.name.name.base # TODO: should be str(f.func.name.name)?
def get_view_info(f: NativeFunction) -> Optional[str]:
base_name = get_base_name(f)
view_info = VIEW_FUNCTIONS.get(base_name, None)
if view_info is None and base_name in RETURNS_VIEWS_OF_INPUT:
view_info = "self"
return view_info
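# e.g. get_view_info returns "self" for ops such as "expand" or "reshape", and
# None for ops that are not registered as returning views.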
# For view replay calls, we generate an ordinary Dispatcher::call() instead, because:
# - We want to replay the entire call into the op, including any previously-set dispatch keys (including autograd!).
# - The view replay call also is not part of the hot path.
def emit_view_call(
f: NativeFunction, input_base: str, unpacked_args: Sequence[str]
) -> str:
# View replay functions use the standard Dispatcher::call API.
return CALL_DISPATCH.substitute(
unambiguous_name=f.func.name.unambiguous_name(), unpacked_args=unpacked_args
)
def emit_view_lambda(f: NativeFunction, unpacked_bindings: List[Binding]) -> str:
"""Generate an additional lambda function to recover views in backward when as_strided is not supported.
See Note [View + Inplace update for base tensor] and [View + Inplace update for view tensor] for more details."""
input_base = "input_base"
replay_view_func = ""
updated_unpacked_args: List[str] = []
known_view_arg_simple_types: List[CType] = [
BaseCType(longT),
OptionalCType(BaseCType(longT)),
BaseCType(boolT),
BaseCType(intArrayRefT),
BaseCType(symIntArrayRefT),
]
for unpacked_binding in unpacked_bindings:
arg, arg_type = unpacked_binding.name, unpacked_binding.nctype.type
if arg == "self_":
updated_unpacked_args.append(input_base)
continue
if arg_type not in known_view_arg_simple_types:
known_types_str = ", ".join([str(t) for t in known_view_arg_simple_types])
            raise TypeError(
                f"You are adding an {arg_type} {arg} argument to op {cpp.name(f.func)} in addition to known types: "
                f"{known_types_str}. Please update the list or materialize it so that it can be closed "
                "over by value; also add a test in pytorch/xla/test/test_operations.py where this code "
                "is exercised."
            )
if arg_type == BaseCType(intArrayRefT) or arg_type == BaseCType(
symIntArrayRefT
):
# It's not safe to close over IntArrayRef by value, since this is a
# reference type, so materialize a vector to close over by value
arg_vec = arg + "_vec"
replay_view_func += ARRAYREF_TO_VEC.substitute(arg=arg, vec=arg_vec)
updated_unpacked_args.append(arg_vec)
elif arg_type == OptionalCType(BaseCType(longT)):
# Materialize int64_t? to int64_t
arg_value = arg + "_val"
replay_view_func += OPTIONAL_TO_VAL.substitute(
arg=arg, val=arg_value, default="0"
)
updated_unpacked_args.append(arg_value)
else:
updated_unpacked_args.append(arg)
replay_view_call = emit_view_call(f, input_base, updated_unpacked_args)
replay_view_func += REPLAY_VIEW_LAMBDA_FUNC.substitute(
input_base=input_base, replay_view_call=replay_view_call
)
is_view_with_metadata_change = (
"true" if cpp.name(f.func) in VIEW_FUNCTIONS_WITH_METADATA_CHANGE else "false"
)
return SETUP_REPLAY_VIEW_IF_NOT_SUPPORT_AS_STRIDED_OR_VIEW_WITH_METADATA_CHANGE.substitute(
is_view_with_metadata_change=is_view_with_metadata_change,
replay_view_func=replay_view_func,
)
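# Putting the two templates above together, the emitted replay-view setup is
# roughly (placeholders shown as in the templates):
#   std::function<at::Tensor(const at::Tensor&)> func=nullptr;
#   if (${is_view_with_metadata_change} || !self.unsafeGetTensorImpl()->support_as_strided()) {
#     func = [=](const at::Tensor& input_base) {
#       return at::_ops::${unambiguous_name}::call(${unpacked_args});
#     };
#   }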
def emit_view_body(
fn: NativeFunctionWithDifferentiabilityInfo, var: str
) -> Tuple[str, str]:
# See NOTE [ Autograd View Variables ] in variable.h for details.
f = fn.func
base_name = get_base_name(f)
view_info = get_view_info(f)
call = ""
differentiable_outputs = gen_differentiable_outputs(fn)
differentiable_output_vars = {r.name for r in differentiable_outputs}
if not isinstance(view_info, str):
raise TypeError(
f"The view info should be a string for {base_name}, but it is: {view_info}"
)
if len(differentiable_output_vars) == 0:
# no output is differentiable (.indices() for SparseTensors for example)
rhs_value = (
f"as_view({view_info}, {var}, "
f"/* is_bw_differentiable */ false, /* is_fw_differentiable */ false)"
)
elif len(differentiable_output_vars) == 1:
# Single differentiable output (Tensor or Tensor[])
return_info = differentiable_outputs[0]
# We only support simple Tensor or a TensorList for functions that return views
if not is_tensor_type(return_info.type) and not is_tensor_list_type(
return_info.type
):
raise RuntimeError(
f"{base_name} that return differentiable views can only return Tensor or Tensor[]"
)
# See Note [ View + Inplace detection]
def get_creation_meta_in_mode(original: str) -> str:
creation_meta_with_grad_mode = f"(at::GradMode::is_enabled() ? {original} : CreationMeta::NO_GRAD_MODE)"
return f"InferenceMode::is_enabled() ? CreationMeta::INFERENCE_MODE : {creation_meta_with_grad_mode}"
# Only allow rebasing of the history if we return a single Tensor
# If we are in a no grad block, raise a warning
# See NOTE [ View + Inplace detection ] for more details about this logic
if is_tensor_list_type(return_info.type):
creation_meta = get_creation_meta_in_mode("CreationMeta::MULTI_OUTPUT_NODE")
call += (
f"as_view(/* base */ {view_info}, /* output */ {var}, /* is_bw_differentiable */ true, "
"/* is_fw_differentiable */ true, "
f"/* creation_meta */ {creation_meta});"
)
rhs_value = f"std::move({var})"
else:
_, unpacked_bindings = unpack_args(f)
call += emit_view_lambda(f, unpacked_bindings)
creation_meta = get_creation_meta_in_mode("CreationMeta::DEFAULT")
rhs_value = (
f"as_view(/* base */ {view_info}, /* output */ {var}, /* is_bw_differentiable */ true, "
"/* is_fw_differentiable */ true, "
f"/* view_func */ func, /* creation_meta */ {creation_meta})"
)
else:
# This could be supported but we don't need it at the moment, so keeping things simple.
        raise RuntimeError(
            "Functions that return multiple differentiable outputs "
            "when at least one of them is a view are not supported."
        )
return call, rhs_value
def modifies_arguments(f: NativeFunction) -> bool:
return f.func.kind() in [SchemaKind.inplace, SchemaKind.out]
@with_native_function_with_differentiability_info
def emit_inplace_or_view_body(fn: NativeFunctionWithDifferentiabilityInfo) -> List[str]:
f = fn.func
inplace_view_body: List[str] = []
dispatcher_sig = DispatcherSignature.from_schema(f.func)
dispatcher_exprs = dispatcher_sig.exprs()
# code-generated ADInplaceOrView kernels plumb and recompute dispatch keys directly through the kernel for performance.
# See Note [Plumbing Keys Through The Dispatcher] for details.
dispatch_key_set = "ks & c10::after_ADInplaceOrView_keyset"
redispatch_args = ", ".join([dispatch_key_set] + [a.expr for a in dispatcher_exprs])
# Note that this calls the slow, dispatching variants of manual_cpp_binding ops.
# We could probably work harder to ensure that the fast variants are called instead, but the perf benefit would be minimal.
if modifies_arguments(f): # inplace op
inplace_view_body.append(
INPLACE_REDISPATCH.substitute(
unambiguous_name=f.func.name.unambiguous_name(),
unpacked_args=redispatch_args,
)
)
for r in cpp.return_names(f):
inplace_view_body.append(f"increment_version({r});")
else:
assert get_view_info(f) is not None
inplace_view_body.append(
VIEW_REDISPATCH.substitute(
assign_return_values="auto " + TMP_VAR + " = ",
unambiguous_name=f.func.name.unambiguous_name(),
unpacked_args=redispatch_args,
)
)
call, rhs_value = emit_view_body(fn, TMP_VAR)
inplace_view_body.append(call)
assert rhs_value is not None
inplace_view_body.append(
ASSIGN_RETURN_VALUE.substitute(
return_values=tie_return_values(f), rhs_value=rhs_value
)
)
if f.func.returns:
inplace_view_body.append(f"return {get_return_value(f)};")
return inplace_view_body
@with_native_function
def gen_formals(f: NativeFunction) -> str:
return ", ".join(
# code-generated autograd kernels plumb and recompute dispatch keys directly through the kernel for performance.
# See Note [Plumbing Keys Through The Dispatcher] for details.
["c10::DispatchKeySet ks"]
+ [
f'{cpp.argument_type(a, binds="__placeholder__").cpp_type()} {a.name}'
for a in f.func.schema_order_arguments()
]
)
@with_native_function_with_differentiability_info
def inplace_or_view_method_definition(
fn: NativeFunctionWithDifferentiabilityInfo,
) -> Optional[str]:
f = fn.func
if get_view_info(f) is None and (
# For functions that modify their inputs but don't return them,
# we can't give them autograd support.
# See https://github.com/pytorch/pytorch/issues/53796
not modifies_arguments(f)
or len(f.func.returns) == 0
):
return None
return METHOD_DEFINITION.substitute(
return_type=cpp.returns_type(f.func.returns).cpp_type(),
type_wrapper_name=type_wrapper_name(f),
formals=gen_formals(f),
type_definition_body=emit_inplace_or_view_body(fn),
)
@with_native_function_with_differentiability_info
def inplace_or_view_method_registration(
fn: NativeFunctionWithDifferentiabilityInfo,
) -> Optional[str]:
f = fn.func
if get_view_info(f) is None and (
not modifies_arguments(f) or len(f.func.returns) == 0
):
return None
return WRAPPER_REGISTRATION.substitute(
unqual_operator_name_with_overload=f.func.name,
type_wrapper_name=type_wrapper_name(f),
class_type="ADInplaceOrView",
)
def use_derived(fn: NativeFunctionWithDifferentiabilityInfo) -> bool:
f = fn.func
name = cpp.name(f.func)
return name not in MANUAL_AUTOGRAD and dispatch_strategy(fn) == "use_derived"
def gen_inplace_or_view_type_env(
fn: NativeFunctionWithDifferentiabilityInfo,
) -> Dict[str, List[str]]:
definition = inplace_or_view_method_definition(fn)
registration = inplace_or_view_method_registration(fn)
return {
"ops_headers": (
[f"#include <ATen/ops/{fn.func.root_name}_ops.h>"]
if definition is not None
else []
),
"inplace_or_view_method_definitions": [definition]
if definition is not None
else [],
"inplace_or_view_wrapper_registrations": [registration]
if registration is not None
else [],
}
def gen_inplace_or_view_type(
out: str,
native_yaml_path: str,
tags_yaml_path: str,
fns_with_infos: List[NativeFunctionWithDifferentiabilityInfo],
template_path: str,
) -> None:
# NOTE: see Note [Sharded File] at the top of the VariableType.cpp
# template regarding sharding of the generated files.
num_shards = 2
fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
fm.write_sharded(
"ADInplaceOrViewType.cpp",
[fn for fn in fns_with_infos if use_derived(fn)],
key_fn=lambda fn: fn.func.root_name,
base_env={
"generated_comment": f"@generated from {template_path}/ADInplaceOrViewType.cpp",
},
env_callable=gen_inplace_or_view_type_env,
        num_shards=num_shards,
sharded_keys={
"ops_headers",
"inplace_or_view_method_definitions",
"inplace_or_view_wrapper_registrations",
},
)
|
pytorch-master
|
tools/autograd/gen_inplace_or_view_type.py
|
import itertools
from typing import Dict, List, Sequence, Union
from torchgen.api import cpp
from torchgen.api.types import DispatcherSignature
from torchgen.code_template import CodeTemplate
from torchgen.context import with_native_function
from torchgen.model import Argument, NativeFunction, SchemaKind, TensorOptionsArguments
from torchgen.utils import FileManager
# Note [Manual Backend kernels]
# For these ops, we want to manually register to dispatch key Backend and
# skip codegen-ed registration to all keys before Backend.
# For codegen this means:
# - op set below must match ops with manual_kernel_registration=True in native_functions.yaml
# where we skip codegen backend kernels
# - all ops below are part of MANUAL_AUTOGRAD to skip codegen Autograd kernel registration
# - all ops below are part of MANUAL_TRACER to skip codegen Tracer kernel registration
# Note: we still register to dispatch key Profiler for these ops, keeping it untouched for now.
# You can find the manual registration in torch/csrc/autograd/VariableTypeManual.cpp
MANUAL_BACKEND = set(
[
"options",
"data",
"set_data",
"is_leaf",
"output_nr",
"_version",
"retain_grad",
"_backward",
"requires_grad_",
]
)
# For these ops we want to skip the codegen-ed registration to both Autograd and Tracer keys.
# You can find the manual registration in torch/csrc/autograd/VariableTypeManual.cpp
MANUAL_AUTOGRAD_AND_TRACER = set(
[
"resize_",
"resize_as_",
"detach",
"detach_",
"copy_",
"_fw_primal",
"_make_dual",
]
)
# Currently MANUAL_AUTOGRAD and MANUAL_TRACER share the same set of ops:
# union(MANUAL_BACKEND, MANUAL_AUTOGRAD_AND_TRACER)
# You can find the manual registration in torch/csrc/autograd/VariableTypeManual.cpp
MANUAL_AUTOGRAD = MANUAL_TRACER = MANUAL_BACKEND | MANUAL_AUTOGRAD_AND_TRACER
# These functions we don't want to record for tracing, because we always want
# to trace their constituent parts. This is a temporary hack in lieu
# of proper scopes, where subsequent compilation passes can ask for the unfolding
# on demand. Only concrete ATen methods can be disabled this way; it will have
# NO EFFECT otherwise.
DONT_RECORD_TRACE = {
"convolution",
"conv1d",
"conv2d",
"conv3d",
"conv_transpose1d",
"conv_transpose2d",
"conv_transpose3d",
"lstm_cell",
"gru_cell",
"rnn_tanh_cell",
"rnn_relu_cell",
# FIXME: figure out a better way when we support sparse tensors in jit
"_coalesced",
}
def should_trace(f: NativeFunction) -> bool:
# Operations involving Storage or Type are not traceable at the moment
if any(
str(arg.type) in {"Storage", "Type", "ConstQuantizerPtr"}
for arg in f.func.schema_order_arguments()
):
return False
# We can't trace functions which don't have any Tensor or TensorList returns
if not any(r.type.is_tensor_like() for r in f.func.returns):
return False
return f.func.name.name.base not in DONT_RECORD_TRACE
SELECT = CodeTemplate(
"""\
if (${cond}) {
${true}
} else {
${false}
}
"""
)
OP_NAME = CodeTemplate(
"""\
op_name = c10::Symbol::fromQualString("aten::${trace_name}");
"""
)
# These functions have their names recorded under a renamed op when traced:
RENAME_TRACE = {
"zero": "zeros_like", # replacing aten::zero_ with aten::zeros_like
"fill": "full_like", # replacing aten::fill_ with aten::full_like
}
def format_trace_op_name(f: NativeFunction) -> str:
# TODO: byte-for-byte compatible with old codegen behavior - should clean up
if (
f.func.kind() in (SchemaKind.functional, SchemaKind.out)
or f.func.name.name.dunder_method
):
# special case for *_out functions: the in-place and out-of-place ops
# are overloaded with the same name in the JIT
trace_name = str(f.func.name.name)
trace_name = RENAME_TRACE.get(trace_name, trace_name)
return OP_NAME.substitute(trace_name=trace_name)
# otherwise, this is an in-place op and we need to emit both in- and
# out-of-place versions
outplace_trace_name = f.func.name.name.base
inplace_trace_name = cpp.name(f.func)
outplace_trace_name = RENAME_TRACE.get(outplace_trace_name, outplace_trace_name)
inplace_trace_name = RENAME_TRACE.get(inplace_trace_name, inplace_trace_name)
return SELECT.substitute(
cond="tracer_state->force_outplace",
true=OP_NAME.substitute(trace_name=outplace_trace_name),
false=OP_NAME.substitute(trace_name=inplace_trace_name),
)
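# For an in-place op such as `zero_`, the SELECT above picks between the renamed
# outplace trace name ("zeros_like", via RENAME_TRACE) and the in-place name
# ("zero_"), depending on tracer_state->force_outplace.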
ADD_TRACE_INPUT = CodeTemplate("""jit::tracer::addInputs(node, "${name}", ${input});""")
def format_trace_inputs(f: NativeFunction) -> str:
def dispatch_trace_input(
arg: Union[Argument, TensorOptionsArguments]
) -> Sequence[str]:
if isinstance(arg, TensorOptionsArguments):
name = "options"
return [
ADD_TRACE_INPUT.substitute(
name=name, input="optTypeMetaToScalarType(options.dtype_opt())"
),
ADD_TRACE_INPUT.substitute(name=name, input="options.layout()"),
ADD_TRACE_INPUT.substitute(name=name, input="options.device()"),
ADD_TRACE_INPUT.substitute(name=name, input="options.pinned_memory()"),
]
else:
name = arg.name
if str(arg.type) == "Tensor?[]":
return [f'jit::tracer::addInputs(node, "{name}", {name});']
else:
return [ADD_TRACE_INPUT.substitute(name=name, input=name)]
args: List[Union[Argument, TensorOptionsArguments]] = list(
f.func.schema_order_arguments()
)
if f.func.is_out_fn():
# *_out functions take the result as a separate argument, but we don't want to
# trace that argument directly. Instead, we trace its TensorOptions.
# So first, we need to remove the out argument from the list of arguments to trace.
# TODO: byte-for-byte compatible with old codegen behavior - it's incorrect to assume
# there is only one output argument.
args = args[:-1]
trace_inputs = itertools.chain.from_iterable(
dispatch_trace_input(arg) for arg in args
)
if f.func.is_out_fn():
# for *_out functions, handle the result argument differently for inplace/outplace.
        # For inplace: just add the input to the end to conform to the JIT schema
name = f.func.arguments.out[0].name # TODO: old codegen behavior - should fix
inplace = ADD_TRACE_INPUT.substitute(name=name, input=name)
# for outplace: do nothing, except if the function is a factory.
# Factories are a bit special because their out-of-place overloads
# take an extra TensorOptions argument, which is missing in the _out function
has_tensor_return = any(r.type.is_tensor_like() for r in f.func.returns)
has_tensor_input_arg = any(
a.type.is_tensor_like() for a in f.func.arguments.flat_non_out
)
is_factory_method = f.category_override == "factory" or (
has_tensor_return and not has_tensor_input_arg
)
# HACK: preserve old codegen behavior - the old codegen set the `is_factory_method`
# flag for the whole family of ops with the same basename if any of them is a
        # factory method. For most cases the whole family of ops are indeed all factory
        # methods - 'normal' is the only exception. So we handle it specially here to avoid
# cloning the old logic.
if f.func.name.name.base == "normal":
is_factory_method = True
if is_factory_method:
outplace = [
ADD_TRACE_INPUT.substitute(
name="out",
input="optTypeMetaToScalarType(out.options().dtype_opt())",
),
ADD_TRACE_INPUT.substitute(name="out", input="out.options().layout()"),
ADD_TRACE_INPUT.substitute(name="out", input="out.options().device()"),
ADD_TRACE_INPUT.substitute(
name="out", input="out.options().pinned_memory()"
),
]
else:
outplace = []
trace_inputs = itertools.chain(
trace_inputs,
[
SELECT.substitute(
cond="tracer_state->force_outplace",
true="\n".join(outplace),
false=inplace,
)
],
)
return "\n".join(trace_inputs)
# `torch.jit.trace` has an undocumented keyword argument `_force_outplace`,
# which forces jit to replace functions with outplace variants (for
# example `aten::add_` becomes `aten::add`).
#
# This replacement is implemented in-place with minimal modifications to the
# argument stack (as it assumes that the outplace call has the same arguments
# as the inplace version).
#
# However, no such substitutions are available for the `aten::fill_`
# and `aten::zero_` operators, as we never implemented `aten::fill`
# and `aten::zero`. So the jit tracing hack replaces `aten::zero_` with
# `aten::zeros_like` and `aten::fill_` with `aten::full_like`.
#
# But as they can potentially have different arguments, we also have
# to hack into the stack and add the missing ones.
#
# A possible alternative would be:
#
# - Add `aten::fill` and `aten::zero`
#
# - Or keep `aten::zeros_like` arguments aligned with `aten::zero_`
# arguments (inside of the `native_functions.yaml`)
RENAME_TRACE_ADD_ARGS = {
"fill": """\
jit::tracer::addInputs(node, "options", c10::optional<ScalarType>());
jit::tracer::addInputs(node, "options", layout_or_default(c10::nullopt));
jit::tracer::addInputs(node, "options", device_or_default(c10::nullopt));
jit::tracer::addInputs(node, "options", pinned_memory_or_default(c10::nullopt));
c10::optional<MemoryFormat> memory_format = c10::MemoryFormat::Preserve;
jit::tracer::addInputs(node, "memory_format", memory_format);
""",
"zero": """\
jit::tracer::addInputs(node, "options", c10::optional<ScalarType>());
jit::tracer::addInputs(node, "options", layout_or_default(c10::nullopt));
jit::tracer::addInputs(node, "options", device_or_default(c10::nullopt));
jit::tracer::addInputs(node, "options", pinned_memory_or_default(c10::nullopt));
c10::optional<MemoryFormat> memory_format = c10::MemoryFormat::Preserve;
jit::tracer::addInputs(node, "memory_format", memory_format);
""",
}
INPLACE_GUARD = CodeTemplate(
"""\
jit::tracer::ensureUniqueIfOutOfPlaced("${name}", ${mutable_input});
"""
)
PRE_RECORD_TRACE = CodeTemplate(
"""\
torch::jit::Node* node = nullptr;
std::shared_ptr<jit::tracer::TracingState> tracer_state;
if (jit::tracer::isTracing()) {
tracer_state = jit::tracer::getTracingState();
at::Symbol op_name;
${set_op_name}
node = tracer_state->createNode(op_name, /*num_outputs=*/0);
jit::tracer::recordSourceLocation(node);
${add_trace_inputs}
tracer_state->insertNode(node);
${inplace_guard}
jit::tracer::setTracingState(nullptr);
}
"""
)
def format_prerecord_trace(f: NativeFunction) -> str:
if not should_trace(f):
return ""
# TODO: clean up old codegen behavior
is_inplace = (
f.func.kind() in (SchemaKind.inplace, SchemaKind.out)
and not f.func.name.name.dunder_method
)
add_args = (
RENAME_TRACE_ADD_ARGS.get(f.func.name.name.base, "") if is_inplace else ""
)
additional_inputs = (
SELECT.substitute(
cond="tracer_state->force_outplace",
true=add_args,
false="",
)
if add_args
else ""
)
return PRE_RECORD_TRACE.substitute(
set_op_name=format_trace_op_name(f),
add_trace_inputs=format_trace_inputs(f) + additional_inputs,
inplace_guard=INPLACE_GUARD.substitute(
name=cpp.name(f.func),
mutable_input=f.func.arguments.out[0].name
if f.func.arguments.out
else "self",
)
if is_inplace
else "",
)
POST_RECORD_TRACE = CodeTemplate(
"""\
if (tracer_state) {
jit::tracer::setTracingState(std::move(tracer_state));
${add_trace_outputs}
}
"""
)
def format_postrecord_trace(f: NativeFunction) -> str:
if not should_trace(f):
return ""
# For outplacing ops, *_out overloads require special handling to move the
# output *argument* to a return value
if f.func.is_out_fn():
output_names_outplace = [arg.name for arg in f.func.arguments.out]
output_names_inplace = cpp.return_names(f)
# Code size optimization: the common case is that the return value is
# the same for both variants
if output_names_outplace == output_names_inplace:
outputs = [
f"jit::tracer::addOutput(node, {n});" for n in output_names_outplace
]
return POST_RECORD_TRACE.substitute(add_trace_outputs=outputs)
selection = SELECT.substitute(
cond="force_outplace",
true="\n".join(
f"jit::tracer::addOutput(node, {n});" for n in output_names_outplace
),
false="\n".join(
f"jit::tracer::addOutput(node, {n});" for n in output_names_inplace
),
)
return POST_RECORD_TRACE.substitute(add_trace_outputs=selection)
else:
output_names = cpp.return_names(f)
outputs = [f"jit::tracer::addOutput(node, {n});" for n in output_names]
return POST_RECORD_TRACE.substitute(add_trace_outputs=outputs)
def declare_returned_variables(f: NativeFunction) -> str:
modifies_arguments = f.func.kind() in (SchemaKind.inplace, SchemaKind.out)
if modifies_arguments:
return ""
if len(f.func.returns) == 1:
return ""
types = map(cpp.return_type, f.func.returns)
names = cpp.return_names(f)
return "\n".join(f"{type.cpp_type()} {name};" for type, name in zip(types, names))
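# Illustrative output for an op with two Tensor returns named `values` and
# `indices` (hypothetical names; single-return and inplace/out ops emit nothing):
#   at::Tensor values;
#   at::Tensor indices;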
def tie_return_values(f: NativeFunction) -> str:
if len(f.func.returns) == 1:
return f'auto {f.func.returns[0].name or "result"}'
names = cpp.return_names(f)
return f'std::tie({", ".join(names)})'
def get_return_value(f: NativeFunction) -> str:
names = cpp.return_names(f)
if len(f.func.returns) == 1:
return names[0]
if f.func.kind() == SchemaKind.out:
return f'std::forward_as_tuple({", ".join(names)})'
else:
moved = ", ".join(f"std::move({name})" for name in names)
return f"std::make_tuple({moved})"
TRACE_DISPATCH = CodeTemplate(
"""\
${assign_return_values}at::_ops::${unambiguous_name}::redispatch(${unpacked_args});"""
)
def emit_trace_body(f: NativeFunction) -> List[str]:
trace_body: List[str] = []
trace_body.append(format_prerecord_trace(f))
trace_body.append(declare_returned_variables(f))
dispatcher_sig = DispatcherSignature.from_schema(f.func)
dispatcher_exprs = dispatcher_sig.exprs()
# code-generated tracing kernels plumb and recompute dispatch keys directly through the kernel for performance.
# See Note [Plumbing Keys Through The Dispatcher] for details.
dispatch_key_set = "ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::Tracer)"
redispatch_args = ", ".join([dispatch_key_set] + [a.expr for a in dispatcher_exprs])
assign_return_values = (
f"{tie_return_values(f)} = "
if f.func.kind() in [SchemaKind.functional, SchemaKind.mutable]
and f.func.returns
else ""
)
# Note that this calls the slow, dispatching variants of manual_cpp_binding ops.
# We could probably work harder to ensure that the fast variants are called instead, but the perf benefit would be minimal.
trace_body.append(
TRACE_DISPATCH.substitute(
assign_return_values=assign_return_values,
unambiguous_name=f.func.name.unambiguous_name(),
unpacked_args=redispatch_args,
)
)
trace_body.append(format_postrecord_trace(f))
if f.func.returns:
trace_body.append(f"return {get_return_value(f)};")
return trace_body
METHOD_DEFINITION = CodeTemplate(
"""\
${return_type} ${type_wrapper_name}(${formals}) {
${type_definition_body}
}
"""
)
def type_wrapper_name(f: NativeFunction, key: str = "Default") -> str:
if f.func.name.overload_name:
name = f"{cpp.name(f.func)}_{f.func.name.overload_name}"
else:
name = cpp.name(f.func)
# The key argument is only used in gen_variable_type where we need fns per autograd dispatch key.
# In gen_trace_type and gen_inplace_view_type where only one fn per native_fn must be generated,
# the key argument should not be passed.
# We do not append key if it is Default so that generated functions from
# before per-dispatch-key derivatives were added retain the same names.
if key != "Default":
name = name + f"_{key}"
return name
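# Illustrative naming behaviour (operator names are hypothetical): an op "add"
# with overload "Tensor" becomes "add_Tensor"; passing key="AutogradCUDA" from
# gen_variable_type would yield "add_Tensor_AutogradCUDA", while the "Default"
# key leaves the name unchanged.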
@with_native_function
def method_definition(f: NativeFunction) -> str:
assert cpp.name(f.func) not in MANUAL_TRACER
formals = ", ".join(
# code-generated tracing kernels plumb and recompute dispatch keys directly through the kernel for performance.
# See Note [Plumbing Keys Through The Dispatcher] for details.
["c10::DispatchKeySet ks"]
+ [
f'{cpp.argument_type(a, binds="__placeholder__").cpp_type()} {a.name}'
for a in f.func.schema_order_arguments()
]
)
return METHOD_DEFINITION.substitute(
return_type=cpp.returns_type(f.func.returns).cpp_type(),
type_wrapper_name=type_wrapper_name(f),
formals=formals,
type_definition_body=emit_trace_body(f),
)
WRAPPER_REGISTRATION = CodeTemplate(
"""\
m.impl("${name}",
TORCH_FN(${class_type}::${type_wrapper_name})
);
"""
)
@with_native_function
def method_registration(f: NativeFunction) -> str:
assert cpp.name(f.func) not in MANUAL_TRACER
return WRAPPER_REGISTRATION.substitute(
name=f.func.name,
type_wrapper_name=type_wrapper_name(f),
class_type="TraceType",
)
def gen_trace_type_func(fn: NativeFunction) -> Dict[str, List[str]]:
return {
"ops_headers": [f"#include <ATen/ops/{fn.root_name}_ops.h>"],
"trace_method_definitions": [method_definition(fn)],
"trace_wrapper_registrations": [method_registration(fn)],
}
def gen_trace_type(
out: str, native_functions: List[NativeFunction], template_path: str
) -> None:
# NOTE: see Note [Sharded File] at the top of the VariableType.cpp
# template regarding sharding of the generated files.
fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
fm.write_sharded(
"TraceType.cpp",
[fn for fn in native_functions if cpp.name(fn.func) not in MANUAL_TRACER],
key_fn=lambda fn: fn.root_name,
base_env={
"generated_comment": f"@generated from {template_path}/TraceType.cpp",
},
env_callable=gen_trace_type_func,
num_shards=5,
sharded_keys={
"ops_headers",
"trace_method_definitions",
"trace_wrapper_registrations",
},
)
|
pytorch-master
|
tools/autograd/gen_trace_type.py
|
"""
For procedural tests needed for __torch_function__, we use this function
to export method names and signatures as needed by the tests in
test/test_overrides.py.
python -m tools.autograd.gen_annotated_fn_args \
aten/src/ATen/native/native_functions.yaml \
aten/src/ATen/native/tags.yaml \
$OUTPUT_DIR \
tools/autograd
Where $OUTPUT_DIR is where you would like the files to be
generated. In the full build system, OUTPUT_DIR is
torch/testing/_internal/generated
"""
import argparse
import os
import textwrap
from collections import defaultdict
from typing import Any, Dict, List
import torchgen.api.python as python
from torchgen.context import with_native_function
from torchgen.gen import parse_native_yaml
from torchgen.model import BaseOperatorName, NativeFunction
from torchgen.utils import FileManager
from .gen_python_functions import (
is_py_fft_function,
is_py_linalg_function,
is_py_nn_function,
is_py_special_function,
is_py_torch_function,
is_py_variable_method,
should_generate_py_binding,
)
def gen_annotated(
native_yaml_path: str, tags_yaml_path: str, out: str, autograd_dir: str
) -> None:
native_functions = parse_native_yaml(
native_yaml_path, tags_yaml_path
).native_functions
mappings = (
(is_py_torch_function, "torch._C._VariableFunctions"),
(is_py_nn_function, "torch._C._nn"),
(is_py_linalg_function, "torch._C._linalg"),
(is_py_special_function, "torch._C._special"),
(is_py_fft_function, "torch._C._fft"),
(is_py_variable_method, "torch.Tensor"),
)
annotated_args: List[str] = []
for pred, namespace in mappings:
groups: Dict[BaseOperatorName, List[NativeFunction]] = defaultdict(list)
for f in native_functions:
if not should_generate_py_binding(f) or not pred(f):
continue
groups[f.func.name.name].append(f)
for group in groups.values():
for f in group:
annotated_args.append(f"{namespace}.{gen_annotated_args(f)}")
template_path = os.path.join(autograd_dir, "templates")
fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
fm.write_with_template(
"annotated_fn_args.py",
"annotated_fn_args.py.in",
lambda: {
"annotated_args": textwrap.indent("\n".join(annotated_args), " "),
},
)
@with_native_function
def gen_annotated_args(f: NativeFunction) -> str:
out_args: List[Dict[str, Any]] = []
for arg in f.func.arguments.flat_positional:
if arg.default is not None:
continue
out_arg: Dict[str, Any] = {}
out_arg["name"] = arg.name
out_arg["simple_type"] = python.argument_type_str(arg.type, simple_type=True)
size = python.argument_type_size(arg.type)
if size:
out_arg["size"] = size
out_args.append(out_arg)
return f"{f.func.name.name}: {repr(out_args)},"
def main() -> None:
parser = argparse.ArgumentParser(description="Generate annotated_fn_args script")
parser.add_argument(
"native_functions", metavar="NATIVE", help="path to native_functions.yaml"
)
parser.add_argument("tags", metavar="TAGS", help="path to tags.yaml")
parser.add_argument("out", metavar="OUT", help="path to output directory")
parser.add_argument(
"autograd", metavar="AUTOGRAD", help="path to template directory"
)
args = parser.parse_args()
gen_annotated(args.native_functions, args.tags, args.out, args.autograd)
if __name__ == "__main__":
main()
|
pytorch-master
|
tools/autograd/gen_annotated_fn_args.py
|
import lldb # type: ignore[import]
# load into lldb instance with:
# command script import tools/lldb/deploy_debugger.py
target = lldb.debugger.GetSelectedTarget()
bp = target.BreakpointCreateByRegex("__deploy_register_code")
bp.SetScriptCallbackBody(
"""\
process = frame.thread.GetProcess()
target = process.target
symbol_addr = frame.module.FindSymbol("__deploy_module_info").GetStartAddress()
info_addr = symbol_addr.GetLoadAddress(target)
e = lldb.SBError()
ptr_size = 8
str_addr = process.ReadPointerFromMemory(info_addr, e)
file_addr = process.ReadPointerFromMemory(info_addr + ptr_size, e)
file_size = process.ReadPointerFromMemory(info_addr + 2*ptr_size, e)
load_bias = process.ReadPointerFromMemory(info_addr + 3*ptr_size, e)
name = process.ReadCStringFromMemory(str_addr, 512, e)
r = process.ReadMemory(file_addr, file_size, e)
from tempfile import NamedTemporaryFile
from pathlib import Path
stem = Path(name).stem
with NamedTemporaryFile(prefix=stem, suffix='.so', delete=False) as tf:
tf.write(r)
print("torch_deploy registering debug inforation for ", tf.name)
cmd1 = f"target modules add {tf.name}"
# print(cmd1)
lldb.debugger.HandleCommand(cmd1)
cmd2 = f"target modules load -f {tf.name} -s {hex(load_bias)}"
# print(cmd2)
lldb.debugger.HandleCommand(cmd2)
return False
"""
)
|
pytorch-master
|
tools/lldb/deploy_debugger.py
|
#!/usr/bin/env python3
import argparse
import os
import sys
sys.path.append(
os.path.realpath(
os.path.join(
__file__, os.path.pardir, os.path.pardir, os.path.pardir, "torch", "utils"
)
)
)
from hipify import hipify_python # type: ignore[import]
parser = argparse.ArgumentParser(
description="Top-level script for HIPifying, filling in most common parameters"
)
parser.add_argument(
"--out-of-place-only",
action="store_true",
help="Whether to only run hipify out-of-place on source files",
)
parser.add_argument(
"--project-directory",
type=str,
default="",
help="The root of the project.",
required=False,
)
parser.add_argument(
"--output-directory",
type=str,
default="",
help="The directory to store the hipified project",
required=False,
)
parser.add_argument(
"--extra-include-dir",
type=str,
default=[],
nargs="+",
help="The list of extra directories in caffe2 to hipify",
required=False,
)
args = parser.parse_args()
amd_build_dir = os.path.dirname(os.path.realpath(__file__))
proj_dir = os.path.join(os.path.dirname(os.path.dirname(amd_build_dir)))
if args.project_directory:
proj_dir = args.project_directory
out_dir = proj_dir
if args.output_directory:
out_dir = args.output_directory
includes = [
"caffe2/operators/*",
"caffe2/sgd/*",
"caffe2/image/*",
"caffe2/transforms/*",
"caffe2/video/*",
"caffe2/distributed/*",
"caffe2/queue/*",
"caffe2/contrib/aten/*",
"binaries/*",
"caffe2/**/*_test*",
"caffe2/core/*",
"caffe2/db/*",
"caffe2/utils/*",
"caffe2/contrib/gloo/*",
"caffe2/contrib/nccl/*",
"c10/cuda/*",
"c10/cuda/test/CMakeLists.txt",
"modules/*",
# PyTorch paths
# Keep this synchronized with is_pytorch_file in hipify_python.py
"aten/src/ATen/cuda/*",
"aten/src/ATen/native/cuda/*",
"aten/src/ATen/native/cudnn/*",
"aten/src/ATen/native/quantized/cudnn/*",
"aten/src/ATen/native/nested/cuda/*",
"aten/src/ATen/native/sparse/cuda/*",
"aten/src/ATen/native/quantized/cuda/*",
"aten/src/ATen/native/transformers/cuda/*",
"aten/src/THC/*",
"aten/src/ATen/test/*",
# CMakeLists.txt isn't processed by default, but there are a few
# we do want to handle, so explicitly specify them
"aten/src/THC/CMakeLists.txt",
"torch/*",
"tools/autograd/templates/python_variable_methods.cpp",
]
includes = [os.path.join(proj_dir, include) for include in includes]
for new_dir in args.extra_include_dir:
abs_new_dir = os.path.join(proj_dir, new_dir)
if os.path.exists(abs_new_dir):
abs_new_dir = os.path.join(abs_new_dir, "**/*")
includes.append(abs_new_dir)
ignores = [
"caffe2/operators/depthwise_3x3_conv_op_cudnn.cu",
"caffe2/operators/pool_op_cudnn.cu",
"*/hip/*",
# These files are compatible with both cuda and hip
"aten/src/ATen/core/*",
# Correct path to generate HIPConfig.h:
# CUDAConfig.h.in -> (amd_build) HIPConfig.h.in -> (cmake) HIPConfig.h
"aten/src/ATen/cuda/CUDAConfig.h",
"torch/csrc/jit/codegen/cuda/codegen.cpp",
"torch/csrc/jit/codegen/cuda/runtime/block_reduction.cu",
"torch/csrc/jit/codegen/cuda/runtime/broadcast.cu",
"torch/csrc/jit/codegen/cuda/runtime/grid_reduction.cu",
"torch/csrc/jit/codegen/fuser/cuda/resource_strings.h",
"torch/csrc/jit/tensorexpr/ir_printer.cpp",
# generated files we shouldn't frob
"torch/lib/tmp_install/*",
"torch/include/*",
]
ignores = [os.path.join(proj_dir, ignore) for ignore in ignores]
# Check if the compiler is hip-clang.
def is_hip_clang() -> bool:
try:
hip_path = os.getenv("HIP_PATH", "/opt/rocm/hip")
with open(hip_path + "/lib/.hipInfo") as f:
return "HIP_COMPILER=clang" in f.read()
except IOError:
return False
# TODO Remove once gloo submodule is recent enough to contain upstream fix.
if is_hip_clang():
gloo_cmake_file = "third_party/gloo/cmake/Hip.cmake"
do_write = False
if os.path.exists(gloo_cmake_file):
with open(gloo_cmake_file, "r") as sources:
lines = sources.readlines()
newlines = [line.replace(" hip_hcc ", " amdhip64 ") for line in lines]
if lines == newlines:
print("%s skipped" % gloo_cmake_file)
else:
with open(gloo_cmake_file, "w") as sources:
for line in newlines:
sources.write(line)
print("%s updated" % gloo_cmake_file)
gloo_cmake_file = "third_party/gloo/cmake/Modules/Findrccl.cmake"
if os.path.exists(gloo_cmake_file):
do_write = False
with open(gloo_cmake_file, "r") as sources:
lines = sources.readlines()
newlines = [line.replace("RCCL_LIBRARY", "RCCL_LIB_PATH") for line in lines]
if lines == newlines:
print("%s skipped" % gloo_cmake_file)
else:
with open(gloo_cmake_file, "w") as sources:
for line in newlines:
sources.write(line)
print("%s updated" % gloo_cmake_file)
# TODO Remove once gloo submodule is recent enough to contain upstream fix.
if is_hip_clang():
gloo_cmake_file = "third_party/gloo/cmake/Dependencies.cmake"
do_write = False
if os.path.exists(gloo_cmake_file):
with open(gloo_cmake_file, "r") as sources:
lines = sources.readlines()
newlines = [line.replace("HIP_HCC_FLAGS", "HIP_CLANG_FLAGS") for line in lines]
if lines == newlines:
print("%s skipped" % gloo_cmake_file)
else:
with open(gloo_cmake_file, "w") as sources:
for line in newlines:
sources.write(line)
print("%s updated" % gloo_cmake_file)
hipify_python.hipify(
project_directory=proj_dir,
output_directory=out_dir,
includes=includes,
ignores=ignores,
out_of_place_only=args.out_of_place_only,
hip_clang_launch=is_hip_clang(),
)
|
pytorch-master
|
tools/amd_build/build_amd.py
|
import argparse
import os
import pathlib
import sys
from typing import Any, cast, Optional
import yaml
try:
# use faster C loader if available
from yaml import CSafeLoader as YamlLoader
except ImportError:
from yaml import SafeLoader as YamlLoader # type: ignore[misc]
NATIVE_FUNCTIONS_PATH = "aten/src/ATen/native/native_functions.yaml"
TAGS_PATH = "aten/src/ATen/native/tags.yaml"
def generate_code(
gen_dir: pathlib.Path,
native_functions_path: Optional[str] = None,
tags_path: Optional[str] = None,
install_dir: Optional[str] = None,
subset: Optional[str] = None,
disable_autograd: bool = False,
force_schema_registration: bool = False,
operator_selector: Any = None,
) -> None:
from torchgen.selective_build.selector import SelectiveBuilder
from tools.autograd.gen_annotated_fn_args import gen_annotated
from tools.autograd.gen_autograd import gen_autograd, gen_autograd_python
# Build ATen based Variable classes
if install_dir is None:
install_dir = os.fspath(gen_dir / "torch/csrc")
python_install_dir = os.fspath(gen_dir / "torch/testing/_internal/generated")
else:
python_install_dir = install_dir
autograd_gen_dir = os.path.join(install_dir, "autograd", "generated")
for d in (autograd_gen_dir, python_install_dir):
os.makedirs(d, exist_ok=True)
autograd_dir = os.fspath(pathlib.Path(__file__).parent.parent / "autograd")
if subset == "pybindings" or not subset:
gen_autograd_python(
native_functions_path or NATIVE_FUNCTIONS_PATH,
tags_path or TAGS_PATH,
autograd_gen_dir,
autograd_dir,
)
if operator_selector is None:
operator_selector = SelectiveBuilder.get_nop_selector()
if subset == "libtorch" or not subset:
gen_autograd(
native_functions_path or NATIVE_FUNCTIONS_PATH,
tags_path or TAGS_PATH,
autograd_gen_dir,
autograd_dir,
disable_autograd=disable_autograd,
operator_selector=operator_selector,
)
if subset == "python" or not subset:
gen_annotated(
native_functions_path or NATIVE_FUNCTIONS_PATH,
tags_path or TAGS_PATH,
python_install_dir,
autograd_dir,
)
def get_selector_from_legacy_operator_selection_list(
selected_op_list_path: str,
) -> Any:
with open(selected_op_list_path, "r") as f:
# strip out the overload part
# It's only for legacy config - do NOT copy this code!
selected_op_list = {
opname.split(".", 1)[0] for opname in yaml.load(f, Loader=YamlLoader)
}
# Internal build doesn't use this flag any more. Only used by OSS
# build now. Every operator should be considered a root operator
# (hence generating unboxing code for it, which is consistent with
# the current behaviour), and also be considered as used for
# training, since OSS doesn't support training on mobile for now.
#
is_root_operator = True
is_used_for_training = True
from torchgen.selective_build.selector import SelectiveBuilder
selector = SelectiveBuilder.from_legacy_op_registration_allow_list(
selected_op_list,
is_root_operator,
is_used_for_training,
)
return selector
def get_selector(
selected_op_list_path: Optional[str],
operators_yaml_path: Optional[str],
) -> Any:
# cwrap depends on pyyaml, so we can't import it earlier
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0, root)
from torchgen.selective_build.selector import SelectiveBuilder
assert not (
selected_op_list_path is not None and operators_yaml_path is not None
), (
"Expected at most one of selected_op_list_path and "
+ "operators_yaml_path to be set."
)
if selected_op_list_path is None and operators_yaml_path is None:
return SelectiveBuilder.get_nop_selector()
elif selected_op_list_path is not None:
return get_selector_from_legacy_operator_selection_list(selected_op_list_path)
else:
return SelectiveBuilder.from_yaml_path(cast(str, operators_yaml_path))
def main() -> None:
parser = argparse.ArgumentParser(description="Autogenerate code")
parser.add_argument("--native-functions-path")
parser.add_argument("--tags-path")
parser.add_argument(
"--gen-dir",
type=pathlib.Path,
default=pathlib.Path("."),
help="Root directory where to install files. Defaults to the current working directory.",
)
parser.add_argument(
"--install_dir",
help=(
"Deprecated. Use --gen-dir instead. The semantics are different, do not change "
"blindly."
),
)
parser.add_argument(
"--subset",
help='Subset of source files to generate. Can be "libtorch" or "pybindings". Generates both when omitted.',
)
parser.add_argument(
"--disable-autograd",
default=False,
action="store_true",
help="It can skip generating autograd related code when the flag is set",
)
parser.add_argument(
"--selected-op-list-path",
help="Path to the YAML file that contains the list of operators to include for custom build.",
)
parser.add_argument(
"--operators_yaml_path",
help="Path to the model YAML file that contains the list of operators to include for custom build.",
)
parser.add_argument(
"--force_schema_registration",
action="store_true",
help="force it to generate schema-only registrations for ops that are not"
"listed on --selected-op-list",
)
parser.add_argument(
"--gen_lazy_ts_backend",
action="store_true",
help="Enable generation of the torch::lazy TorchScript backend",
)
parser.add_argument(
"--per_operator_headers",
action="store_true",
help="Build lazy tensor ts backend with per-operator ATen headers, must match how ATen was built",
)
options = parser.parse_args()
generate_code(
options.gen_dir,
options.native_functions_path,
options.tags_path,
options.install_dir,
options.subset,
options.disable_autograd,
options.force_schema_registration,
# options.selected_op_list
operator_selector=get_selector(
options.selected_op_list_path, options.operators_yaml_path
),
)
if options.gen_lazy_ts_backend:
aten_path = os.path.dirname(os.path.dirname(options.native_functions_path))
ts_backend_yaml = os.path.join(aten_path, "native/ts_native_functions.yaml")
ts_native_functions = "torch/csrc/lazy/ts_backend/ts_native_functions.cpp"
ts_node_base = "torch/csrc/lazy/ts_backend/ts_node.h"
install_dir = options.install_dir or os.fspath(options.gen_dir / "torch/csrc")
lazy_install_dir = os.path.join(install_dir, "lazy/generated")
os.makedirs(lazy_install_dir, exist_ok=True)
assert os.path.isfile(
ts_backend_yaml
), f"Unable to access ts_backend_yaml: {ts_backend_yaml}"
assert os.path.isfile(
ts_native_functions
), f"Unable to access {ts_native_functions}"
from torchgen.dest.lazy_ir import GenTSLazyIR
from torchgen.gen_lazy_tensor import run_gen_lazy_tensor
run_gen_lazy_tensor(
aten_path=aten_path,
source_yaml=ts_backend_yaml,
backend_name="TorchScript",
output_dir=lazy_install_dir,
dry_run=False,
impl_path=ts_native_functions,
node_base="TsNode",
node_base_hdr=ts_node_base,
build_in_tree=True,
lazy_ir_generator=GenTSLazyIR,
per_operator_headers=options.per_operator_headers,
gen_forced_fallback_code=True,
)
if __name__ == "__main__":
main()
|
pytorch-master
|
tools/setup_helpers/generate_code.py
|
# Little stub file to get BUILD.bazel to play along
import os.path
import sys
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0, root)
import tools.jit.gen_unboxing
tools.jit.gen_unboxing.main(sys.argv[1:])
|
pytorch-master
|
tools/setup_helpers/gen_unboxing.py
|
import os
import platform
import struct
import sys
from itertools import chain
from typing import cast, Iterable, List, Optional
IS_WINDOWS = platform.system() == "Windows"
IS_DARWIN = platform.system() == "Darwin"
IS_LINUX = platform.system() == "Linux"
IS_CONDA = (
"conda" in sys.version
or "Continuum" in sys.version
or any([x.startswith("CONDA") for x in os.environ])
)
CONDA_DIR = os.path.join(os.path.dirname(sys.executable), "..")
IS_64BIT = struct.calcsize("P") == 8
BUILD_DIR = "build"
def check_env_flag(name: str, default: str = "") -> bool:
return os.getenv(name, default).upper() in ["ON", "1", "YES", "TRUE", "Y"]
def check_negative_env_flag(name: str, default: str = "") -> bool:
return os.getenv(name, default).upper() in ["OFF", "0", "NO", "FALSE", "N"]
def gather_paths(env_vars: Iterable[str]) -> List[str]:
return list(chain(*(os.getenv(v, "").split(os.pathsep) for v in env_vars)))
def lib_paths_from_base(base_path: str) -> List[str]:
return [os.path.join(base_path, s) for s in ["lib/x64", "lib", "lib64"]]
# We promised that CXXFLAGS should also be affected by CFLAGS
if "CFLAGS" in os.environ and "CXXFLAGS" not in os.environ:
os.environ["CXXFLAGS"] = os.environ["CFLAGS"]
class BuildType(object):
"""Checks build type. The build type will be given in :attr:`cmake_build_type_env`. If :attr:`cmake_build_type_env`
is ``None``, then the build type will be inferred from ``CMakeCache.txt``. If ``CMakeCache.txt`` does not exist,
os.environ['CMAKE_BUILD_TYPE'] will be used.
Args:
cmake_build_type_env (str): The value of os.environ['CMAKE_BUILD_TYPE']. If None, the actual build type will be
inferred.
"""
def __init__(self, cmake_build_type_env: Optional[str] = None) -> None:
if cmake_build_type_env is not None:
self.build_type_string = cmake_build_type_env
return
cmake_cache_txt = os.path.join(BUILD_DIR, "CMakeCache.txt")
if os.path.isfile(cmake_cache_txt):
# Found CMakeCache.txt. Use the build type specified in it.
from .cmake_utils import get_cmake_cache_variables_from_file
with open(cmake_cache_txt) as f:
cmake_cache_vars = get_cmake_cache_variables_from_file(f)
# Normally it is anti-pattern to determine build type from CMAKE_BUILD_TYPE because it is not used for
# multi-configuration build tools, such as Visual Studio and XCode. But since we always communicate with
# CMake using CMAKE_BUILD_TYPE from our Python scripts, this is OK here.
self.build_type_string = cast(str, cmake_cache_vars["CMAKE_BUILD_TYPE"])
else:
self.build_type_string = os.environ.get("CMAKE_BUILD_TYPE", "Release")
def is_debug(self) -> bool:
"Checks Debug build."
return self.build_type_string == "Debug"
def is_rel_with_deb_info(self) -> bool:
"Checks RelWithDebInfo build."
return self.build_type_string == "RelWithDebInfo"
def is_release(self) -> bool:
"Checks Release build."
return self.build_type_string == "Release"
# hotpatch environment variable 'CMAKE_BUILD_TYPE'. 'CMAKE_BUILD_TYPE' always prevails over DEBUG or REL_WITH_DEB_INFO.
if "CMAKE_BUILD_TYPE" not in os.environ:
if check_env_flag("DEBUG"):
os.environ["CMAKE_BUILD_TYPE"] = "Debug"
elif check_env_flag("REL_WITH_DEB_INFO"):
os.environ["CMAKE_BUILD_TYPE"] = "RelWithDebInfo"
else:
os.environ["CMAKE_BUILD_TYPE"] = "Release"
build_type = BuildType()
|
pytorch-master
|
tools/setup_helpers/env.py
|
# Little stub file to get BUILD.bazel to play along
import os.path
import sys
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0, root)
import torchgen.gen
torchgen.gen.main()
|
pytorch-master
|
tools/setup_helpers/gen.py
|
"""
This code was refactored out of cmake.py to avoid a circular import with env.py,
which calls get_cmake_cache_variables_from_file.
"""
import re
from typing import Dict, IO, Optional, Union
CMakeValue = Optional[Union[bool, str]]
def convert_cmake_value_to_python_value(
cmake_value: str, cmake_type: str
) -> CMakeValue:
r"""Convert a CMake value in a string form to a Python value.
Args:
cmake_value (string): The CMake value in a string form (e.g., "ON", "OFF", "1").
cmake_type (string): The CMake type of :attr:`cmake_value`.
Returns:
A Python value corresponding to :attr:`cmake_value` with type :attr:`cmake_type`.
"""
cmake_type = cmake_type.upper()
up_val = cmake_value.upper()
if cmake_type == "BOOL":
# https://gitlab.kitware.com/cmake/community/wikis/doc/cmake/VariablesListsStrings#boolean-values-in-cmake
return not (
up_val in ("FALSE", "OFF", "N", "NO", "0", "", "NOTFOUND")
or up_val.endswith("-NOTFOUND")
)
elif cmake_type == "FILEPATH":
if up_val.endswith("-NOTFOUND"):
return None
else:
return cmake_value
else: # Directly return the cmake_value.
return cmake_value
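# A few illustrative conversions (inputs are made up):
#   convert_cmake_value_to_python_value("ON", "BOOL")               -> True
#   convert_cmake_value_to_python_value("lib-NOTFOUND", "FILEPATH") -> None
#   convert_cmake_value_to_python_value("Release", "STRING")        -> "Release"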
def get_cmake_cache_variables_from_file(
cmake_cache_file: IO[str],
) -> Dict[str, CMakeValue]:
r"""Gets values in CMakeCache.txt into a dictionary.
Args:
cmake_cache_file: A CMakeCache.txt file object.
Returns:
dict: A ``dict`` containing the value of cached CMake variables.
"""
results = dict()
for i, line in enumerate(cmake_cache_file, 1):
line = line.strip()
if not line or line.startswith(("#", "//")):
# Blank or comment line, skip
continue
        # Almost any character can be part of the variable name and value. As a practical matter, we assume the type,
        # if present, looks like a valid C variable name. It should match the following kinds of strings:
#
# USE_CUDA:BOOL=ON
# "USE_CUDA":BOOL=ON
# USE_CUDA=ON
# USE_CUDA:=ON
# Intel(R) MKL-DNN_SOURCE_DIR:STATIC=/path/to/pytorch/third_party/ideep/mkl-dnn
# "OpenMP_COMPILE_RESULT_CXX_openmp:experimental":INTERNAL=FALSE
matched = re.match(
r'("?)(.+?)\1(?::\s*([a-zA-Z_-][a-zA-Z0-9_-]*)?)?\s*=\s*(.*)', line
)
if matched is None: # Illegal line
raise ValueError(
"Unexpected line {} in {}: {}".format(i, repr(cmake_cache_file), line)
)
_, variable, type_, value = matched.groups()
if type_ is None:
type_ = ""
if type_.upper() in ("INTERNAL", "STATIC"):
# CMake internal variable, do not touch
continue
results[variable] = convert_cmake_value_to_python_value(value, type_)
return results
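# A minimal, self-contained sketch (not part of the upstream module): feeding a
# made-up CMakeCache.txt through the parser above. The cache entries below are
# hypothetical and only illustrate the BOOL conversion and the INTERNAL skip.
if __name__ == "__main__":
    import io

    sample = io.StringIO(
        "# This is the CMakeCache file.\n"
        "USE_CUDA:BOOL=ON\n"
        "CMAKE_INSTALL_PREFIX:PATH=/usr/local\n"
        "EP_BASE:INTERNAL=ignored\n"
    )
    # Prints {'USE_CUDA': True, 'CMAKE_INSTALL_PREFIX': '/usr/local'}
    print(get_cmake_cache_variables_from_file(sample))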
|
pytorch-master
|
tools/setup_helpers/cmake_utils.py
|
"Manages CMake."
import multiprocessing
import os
import platform
import sys
import sysconfig
from distutils.version import LooseVersion
from subprocess import CalledProcessError, check_call, check_output
from typing import Any, cast, Dict, List, Optional
from . import which
from .cmake_utils import CMakeValue, get_cmake_cache_variables_from_file
from .env import BUILD_DIR, check_negative_env_flag, IS_64BIT, IS_DARWIN, IS_WINDOWS
from .numpy_ import NUMPY_INCLUDE_DIR, USE_NUMPY
def _mkdir_p(d: str) -> None:
try:
os.makedirs(d, exist_ok=True)
except OSError as e:
raise RuntimeError(
f"Failed to create folder {os.path.abspath(d)}: {e.strerror}"
) from e
# Ninja
# Use ninja if it is on the PATH. Previous version of PyTorch required the
# ninja python package, but we no longer use it, so we do not have to import it
USE_NINJA = not check_negative_env_flag("USE_NINJA") and which("ninja") is not None
class CMake:
"Manages cmake."
def __init__(self, build_dir: str = BUILD_DIR) -> None:
self._cmake_command = CMake._get_cmake_command()
self.build_dir = build_dir
@property
def _cmake_cache_file(self) -> str:
r"""Returns the path to CMakeCache.txt.
Returns:
string: The path to CMakeCache.txt.
"""
return os.path.join(self.build_dir, "CMakeCache.txt")
@staticmethod
def _get_cmake_command() -> str:
"Returns cmake command."
cmake_command = "cmake"
if IS_WINDOWS:
return cmake_command
cmake3_version = CMake._get_version(which("cmake3"))
cmake_version = CMake._get_version(which("cmake"))
_cmake_min_version = LooseVersion("3.13.0")
if all(
(
ver is None or ver < _cmake_min_version
for ver in [cmake_version, cmake3_version]
)
):
raise RuntimeError("no cmake or cmake3 with version >= 3.13.0 found")
if cmake3_version is None:
cmake_command = "cmake"
elif cmake_version is None:
cmake_command = "cmake3"
else:
if cmake3_version >= cmake_version:
cmake_command = "cmake3"
else:
cmake_command = "cmake"
return cmake_command
@staticmethod
def _get_version(cmd: Optional[str]) -> Any:
"Returns cmake version."
if cmd is None:
return None
for line in check_output([cmd, "--version"]).decode("utf-8").split("\n"):
if "version" in line:
return LooseVersion(line.strip().split(" ")[2])
raise RuntimeError("no version found")
def run(self, args: List[str], env: Dict[str, str]) -> None:
"Executes cmake with arguments and an environment."
command = [self._cmake_command] + args
print(" ".join(command))
try:
check_call(command, cwd=self.build_dir, env=env)
except (CalledProcessError, KeyboardInterrupt) as e:
# This error indicates that there was a problem with cmake, the
# Python backtrace adds no signal here so skip over it by catching
# the error and exiting manually
sys.exit(1)
@staticmethod
def defines(args: List[str], **kwargs: CMakeValue) -> None:
"Adds definitions to a cmake argument list."
for key, value in sorted(kwargs.items()):
if value is not None:
args.append("-D{}={}".format(key, value))
def get_cmake_cache_variables(self) -> Dict[str, CMakeValue]:
r"""Gets values in CMakeCache.txt into a dictionary.
Returns:
dict: A ``dict`` containing the value of cached CMake variables.
"""
with open(self._cmake_cache_file) as f:
return get_cmake_cache_variables_from_file(f)
def generate(
self,
version: Optional[str],
cmake_python_library: Optional[str],
build_python: bool,
build_test: bool,
my_env: Dict[str, str],
rerun: bool,
) -> None:
"Runs cmake to generate native build files."
if rerun and os.path.isfile(self._cmake_cache_file):
os.remove(self._cmake_cache_file)
ninja_build_file = os.path.join(self.build_dir, "build.ninja")
if os.path.exists(self._cmake_cache_file) and not (
USE_NINJA and not os.path.exists(ninja_build_file)
):
# Everything's in place. Do not rerun.
return
args = []
if USE_NINJA:
# Avoid conflicts in '-G' and the `CMAKE_GENERATOR`
os.environ["CMAKE_GENERATOR"] = "Ninja"
args.append("-GNinja")
elif IS_WINDOWS:
generator = os.getenv("CMAKE_GENERATOR", "Visual Studio 15 2017")
supported = ["Visual Studio 15 2017", "Visual Studio 16 2019"]
if generator not in supported:
print("Unsupported `CMAKE_GENERATOR`: " + generator)
print("Please set it to one of the following values: ")
print("\n".join(supported))
sys.exit(1)
args.append("-G" + generator)
toolset_dict = {}
toolset_version = os.getenv("CMAKE_GENERATOR_TOOLSET_VERSION")
if toolset_version is not None:
toolset_dict["version"] = toolset_version
curr_toolset = os.getenv("VCToolsVersion")
if curr_toolset is None:
print(
"When you specify `CMAKE_GENERATOR_TOOLSET_VERSION`, you must also "
"activate the vs environment of this version. Please read the notes "
"in the build steps carefully."
)
sys.exit(1)
if IS_64BIT:
if platform.machine() == "ARM64":
args.append("-A ARM64")
else:
args.append("-Ax64")
toolset_dict["host"] = "x64"
if toolset_dict:
toolset_expr = ",".join(
["{}={}".format(k, v) for k, v in toolset_dict.items()]
)
args.append("-T" + toolset_expr)
base_dir = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
)
install_dir = os.path.join(base_dir, "torch")
_mkdir_p(install_dir)
_mkdir_p(self.build_dir)
# Store build options that are directly stored in environment variables
build_options: Dict[str, CMakeValue] = {}
# Build options that do not start with "BUILD_", "USE_", or "CMAKE_" and are directly controlled by env vars.
# This is a dict that maps environment variables to the corresponding variable name in CMake.
additional_options = {
# Key: environment variable name. Value: Corresponding variable name to be passed to CMake. If you are
# adding a new build option to this block: Consider making these two names identical and adding this option
# in the block below.
"_GLIBCXX_USE_CXX11_ABI": "GLIBCXX_USE_CXX11_ABI",
"CUDNN_LIB_DIR": "CUDNN_LIBRARY",
"USE_CUDA_STATIC_LINK": "CAFFE2_STATIC_LINK_CUDA",
}
additional_options.update(
{
# Build options that have the same environment variable name and CMake variable name and that do not start
# with "BUILD_", "USE_", or "CMAKE_". If you are adding a new build option, also make sure you add it to
# CMakeLists.txt.
var: var
for var in (
"BLAS",
"WITH_BLAS",
"BUILDING_WITH_TORCH_LIBS",
"CUDA_HOST_COMILER",
"CUDA_NVCC_EXECUTABLE",
"CUDA_SEPARABLE_COMPILATION",
"CUDNN_LIBRARY",
"CUDNN_INCLUDE_DIR",
"CUDNN_ROOT",
"EXPERIMENTAL_SINGLE_THREAD_POOL",
"INSTALL_TEST",
"JAVA_HOME",
"INTEL_MKL_DIR",
"INTEL_OMP_DIR",
"MKL_THREADING",
"MKLDNN_CPU_RUNTIME",
"MSVC_Z7_OVERRIDE",
"CAFFE2_USE_MSVC_STATIC_RUNTIME",
"Numa_INCLUDE_DIR",
"Numa_LIBRARIES",
"ONNX_ML",
"ONNX_NAMESPACE",
"ATEN_THREADING",
"WERROR",
"OPENSSL_ROOT_DIR",
"STATIC_DISPATCH_BACKEND",
"SELECTED_OP_LIST",
)
}
)
# Aliases which are lower priority than their canonical option
low_priority_aliases = {
"CUDA_HOST_COMPILER": "CMAKE_CUDA_HOST_COMPILER",
"CUDAHOSTCXX": "CUDA_HOST_COMPILER",
"CMAKE_CUDA_HOST_COMPILER": "CUDA_HOST_COMPILER",
"CMAKE_CUDA_COMPILER": "CUDA_NVCC_EXECUTABLE",
"CUDACXX": "CUDA_NVCC_EXECUTABLE",
}
for var, val in my_env.items():
# We currently pass over all environment variables that start with "BUILD_", "USE_", and "CMAKE_". This is
# because we currently have no reliable way to get the list of all build options we have specified in
# CMakeLists.txt. (`cmake -L` won't print dependent options when the dependency condition is not met.) We
# will possibly change this in the future by parsing CMakeLists.txt ourselves (then additional_options would
# also not be needed to be specified here).
true_var = additional_options.get(var)
if true_var is not None:
build_options[true_var] = val
elif var.startswith(("BUILD_", "USE_", "CMAKE_")) or var.endswith(
("EXITCODE", "EXITCODE__TRYRUN_OUTPUT")
):
build_options[var] = val
if var in low_priority_aliases:
key = low_priority_aliases[var]
if key not in build_options:
build_options[key] = val
# The default value cannot be easily obtained in CMakeLists.txt. We set it here.
py_lib_path = sysconfig.get_path("purelib")
cmake_prefix_path = build_options.get("CMAKE_PREFIX_PATH", None)
if cmake_prefix_path:
build_options["CMAKE_PREFIX_PATH"] = (
py_lib_path + ";" + cast(str, cmake_prefix_path)
)
else:
build_options["CMAKE_PREFIX_PATH"] = py_lib_path
# Some options must be post-processed. Ideally, this list will be shrunk to only one or two options in the
# future, as CMake can detect many of these libraries pretty comfortably. We have them here for now before CMake
        # integration is completed. They appear here, not in the CMake.defines call below, because they start with either
# "BUILD_" or "USE_" and must be overwritten here.
build_options.update(
{
# Note: Do not add new build options to this dict if it is directly read from environment variable -- you
# only need to add one in `CMakeLists.txt`. All build options that start with "BUILD_", "USE_", or "CMAKE_"
# are automatically passed to CMake; For other options you can add to additional_options above.
"BUILD_PYTHON": build_python,
"BUILD_TEST": build_test,
# Most library detection should go to CMake script, except this one, which Python can do a much better job
# due to NumPy's inherent Pythonic nature.
"USE_NUMPY": USE_NUMPY,
}
)
# Options starting with CMAKE_
cmake__options = {
"CMAKE_INSTALL_PREFIX": install_dir,
}
# We set some CMAKE_* options in our Python build code instead of relying on the user's direct settings. Emit an
# error if the user also attempts to set these CMAKE options directly.
specified_cmake__options = set(build_options).intersection(cmake__options)
if len(specified_cmake__options) > 0:
print(
", ".join(specified_cmake__options)
+ " should not be specified in the environment variable. They are directly set by PyTorch build script."
)
sys.exit(1)
build_options.update(cmake__options)
CMake.defines(
args,
PYTHON_EXECUTABLE=sys.executable,
PYTHON_LIBRARY=cmake_python_library,
PYTHON_INCLUDE_DIR=sysconfig.get_path("include"),
TORCH_BUILD_VERSION=version,
NUMPY_INCLUDE_DIR=NUMPY_INCLUDE_DIR,
**build_options,
)
expected_wrapper = "/usr/local/opt/ccache/libexec"
if IS_DARWIN and os.path.exists(expected_wrapper):
if "CMAKE_C_COMPILER" not in build_options and "CC" not in os.environ:
CMake.defines(args, CMAKE_C_COMPILER="{}/gcc".format(expected_wrapper))
if "CMAKE_CXX_COMPILER" not in build_options and "CXX" not in os.environ:
CMake.defines(
args, CMAKE_CXX_COMPILER="{}/g++".format(expected_wrapper)
)
for env_var_name in my_env:
if env_var_name.startswith("gh"):
# github env vars use utf-8, on windows, non-ascii code may
# cause problem, so encode first
try:
my_env[env_var_name] = str(my_env[env_var_name].encode("utf-8"))
                except UnicodeEncodeError as e:
shex = ":".join(
"{:02x}".format(ord(c)) for c in my_env[env_var_name]
)
print(
"Invalid ENV[{}] = {}".format(env_var_name, shex),
file=sys.stderr,
)
print(e, file=sys.stderr)
# According to the CMake manual, we should pass the arguments first,
# and put the directory as the last element. Otherwise, these flags
# may not be passed correctly.
# Reference:
# 1. https://cmake.org/cmake/help/latest/manual/cmake.1.html#synopsis
# 2. https://stackoverflow.com/a/27169347
args.append(base_dir)
self.run(args, env=my_env)
def build(self, my_env: Dict[str, str]) -> None:
"Runs cmake to build binaries."
from .env import build_type
build_args = [
"--build",
".",
"--target",
"install",
"--config",
build_type.build_type_string,
]
# Determine the parallelism according to the following
# priorities:
# 1) MAX_JOBS environment variable
# 2) If using the Ninja build system, delegate decision to it.
# 3) Otherwise, fall back to the number of processors.
# Allow the user to set parallelism explicitly. If unset,
# we'll try to figure it out.
max_jobs = os.getenv("MAX_JOBS")
if max_jobs is not None or not USE_NINJA:
# Ninja is capable of figuring out the parallelism on its
# own: only specify it explicitly if we are not using
# Ninja.
# This lists the number of processors available on the
# machine. This may be an overestimate of the usable
# processors if CPU scheduling affinity limits it
# further. In the future, we should check for that with
# os.sched_getaffinity(0) on platforms that support it.
max_jobs = max_jobs or str(multiprocessing.cpu_count())
# This ``if-else'' clause would be unnecessary when cmake
# 3.12 becomes minimum, which provides a '-j' option:
# build_args += ['-j', max_jobs] would be sufficient by
# then. Until then, we use "--" to pass parameters to the
# underlying build system.
build_args += ["--"]
if IS_WINDOWS and not USE_NINJA:
# We are likely using msbuild here
build_args += ["/p:CL_MPCount={}".format(max_jobs)]
else:
build_args += ["-j", max_jobs]
self.run(build_args, my_env)
|
pytorch-master
|
tools/setup_helpers/cmake.py
|
# Ideally, there would be a way in Bazel to parse version.txt
# and use the version numbers from there as substitutions for
# an expand_template action. Since there isn't, this silly script exists.
import argparse
import os
from typing import cast, Dict, Tuple
Version = Tuple[int, int, int]
def parse_version(version: str) -> Version:
"""
Parses a version string into (major, minor, patch) version numbers.
Args:
version: Full version number string, possibly including revision / commit hash.
Returns:
An int 3-tuple of (major, minor, patch) version numbers.
"""
# Extract version number part (i.e. toss any revision / hash parts).
version_number_str = version
for i in range(len(version)):
c = version[i]
if not (c.isdigit() or c == "."):
version_number_str = version[:i]
break
return cast(Version, tuple([int(n) for n in version_number_str.split(".")]))
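# For example, parse_version("1.12.1") -> (1, 12, 1); a hypothetical string such
# as "1.13.0a0+gitabcdef" is truncated at the first non-numeric character and
# also yields (1, 13, 0).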
def apply_replacements(replacements: Dict[str, str], text: str) -> str:
"""
Applies the given replacements within the text.
Args:
replacements (dict): Mapping of str -> str replacements.
text (str): Text in which to make replacements.
Returns:
Text with replacements applied, if any.
"""
for (before, after) in replacements.items():
text = text.replace(before, after)
return text
def main(args: argparse.Namespace) -> None:
with open(args.version_path) as f:
version = f.read().strip()
(major, minor, patch) = parse_version(version)
replacements = {
"@TORCH_VERSION_MAJOR@": str(major),
"@TORCH_VERSION_MINOR@": str(minor),
"@TORCH_VERSION_PATCH@": str(patch),
}
# Create the output dir if it doesn't exist.
os.makedirs(os.path.dirname(args.output_path), exist_ok=True)
with open(args.template_path) as input:
with open(args.output_path, "w") as output:
for line in input.readlines():
output.write(apply_replacements(replacements, line))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate version.h from version.h.in template",
)
parser.add_argument(
"--template-path",
required=True,
help="Path to the template (i.e. version.h.in)",
)
parser.add_argument(
"--version-path",
required=True,
help="Path to the file specifying the version",
)
parser.add_argument(
"--output-path",
required=True,
help="Output path for expanded template (i.e. version.h)",
)
args = parser.parse_args()
main(args)
|
pytorch-master
|
tools/setup_helpers/gen_version_header.py
|
import os
import sys
from typing import Optional
def which(thefile: str) -> Optional[str]:
path = os.environ.get("PATH", os.defpath).split(os.pathsep)
for d in path:
fname = os.path.join(d, thefile)
fnames = [fname]
if sys.platform == "win32":
exts = os.environ.get("PATHEXT", "").split(os.pathsep)
fnames += [fname + ext for ext in exts]
for name in fnames:
if os.access(name, os.F_OK | os.X_OK) and not os.path.isdir(name):
return name
return None
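# Example (path is hypothetical): which("cmake") returns something like
# "/usr/bin/cmake" when an executable named "cmake" is on PATH, and None
# otherwise.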
|
pytorch-master
|
tools/setup_helpers/__init__.py
|
"""NumPy helper.
Note: If you plan to add a library detection script like this one, consider it twice. Most library detection should go
to CMake script. This one is an exception, because Python code can do a much better job due to NumPy's inherent Pythonic
nature.
"""
from .env import check_negative_env_flag
# Set USE_NUMPY to what the user wants, because even if we fail here, cmake
# will check for the presence of NumPy again (`cmake/Dependencies.cmake`).
USE_NUMPY = not check_negative_env_flag("USE_NUMPY")
NUMPY_INCLUDE_DIR = None
if USE_NUMPY:
try:
import numpy as np
except ImportError:
pass
else:
        # To reach here, the user must not have disabled the NumPy build and
        # the NumPy library must be present on the system.
NUMPY_INCLUDE_DIR = np.get_include()
|
pytorch-master
|
tools/setup_helpers/numpy_.py
|
from .cwrap_common import set_declaration_defaults, sort_by_number_of_args
from .module_loader import import_module
|
pytorch-master
|
tools/shared/__init__.py
|
# This code should be shared between cwrap and ATen preprocessing.
# For now it lives in one place, but it is currently copied out of cwrap.
import copy
from typing import Any, Dict, Iterable, List, Union
Arg = Dict[str, Any]
def parse_arguments(args: List[Union[str, Arg]]) -> List[Arg]:
new_args = []
for arg in args:
# Simple arg declaration of form "<type> <name>"
if isinstance(arg, str):
t, _, name = arg.partition(" ")
new_args.append({"type": t, "name": name})
elif isinstance(arg, dict):
if "arg" in arg:
arg["type"], _, arg["name"] = arg["arg"].partition(" ")
del arg["arg"]
new_args.append(arg)
else:
raise AssertionError()
return new_args
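# Illustration with made-up declarations:
#   parse_arguments(["Tensor self", {"arg": "int64_t dim", "default": 0}])
# yields
#   [{"type": "Tensor", "name": "self"}, {"type": "int64_t", "name": "dim", "default": 0}]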
Declaration = Dict[str, Any]
def set_declaration_defaults(declaration: Declaration) -> None:
if "schema_string" not in declaration:
# This happens for legacy TH bindings like
# _thnn_conv_depthwise2d_backward
declaration["schema_string"] = ""
declaration.setdefault("arguments", [])
declaration.setdefault("return", "void")
if "cname" not in declaration:
declaration["cname"] = declaration["name"]
if "backends" not in declaration:
declaration["backends"] = ["CPU", "CUDA"]
assert "api_name" not in declaration
declaration["api_name"] = declaration["name"]
# NB: keep this in sync with gen_autograd.py
if declaration.get("overload_name"):
declaration["type_wrapper_name"] = "{}_{}".format(
declaration["name"], declaration["overload_name"]
)
else:
declaration["type_wrapper_name"] = declaration["name"]
# TODO: Uggggh, parsing the schema string here, really???
declaration["operator_name_with_overload"] = declaration["schema_string"].split(
"("
)[0]
if declaration["schema_string"]:
declaration["unqual_schema_string"] = declaration["schema_string"].split("::")[
1
]
declaration["unqual_operator_name_with_overload"] = declaration[
"operator_name_with_overload"
].split("::")[1]
else:
declaration["unqual_schema_string"] = ""
declaration["unqual_operator_name_with_overload"] = ""
# Simulate multiple dispatch, even if it's not necessary
if "options" not in declaration:
declaration["options"] = [
{
"arguments": copy.deepcopy(declaration["arguments"]),
"schema_order_arguments": copy.deepcopy(
declaration["schema_order_arguments"]
),
}
]
del declaration["arguments"]
del declaration["schema_order_arguments"]
# Parse arguments (some of them can be strings)
for option in declaration["options"]:
option["arguments"] = parse_arguments(option["arguments"])
option["schema_order_arguments"] = parse_arguments(
option["schema_order_arguments"]
)
# Propagate defaults from declaration to options
for option in declaration["options"]:
for k, v in declaration.items():
            # TODO(zach): why does cwrap not propagate 'name'? I need it
            # propagated for ATen
if k != "options":
option.setdefault(k, v)
# TODO(zach): added option to remove keyword handling for C++ which cannot
# support it.
Option = Dict[str, Any]
def filter_unique_options(
options: Iterable[Option],
allow_kwarg: bool,
type_to_signature: Dict[str, str],
remove_self: bool,
) -> List[Option]:
def exclude_arg(arg: Arg) -> bool:
return arg["type"] == "CONSTANT" # type: ignore[no-any-return]
def exclude_arg_with_self_check(arg: Arg) -> bool:
return exclude_arg(arg) or (remove_self and arg["name"] == "self")
def signature(option: Option, num_kwarg_only: int) -> str:
if num_kwarg_only == 0:
kwarg_only_count = None
else:
kwarg_only_count = -num_kwarg_only
arg_signature = "#".join(
type_to_signature.get(arg["type"], arg["type"])
for arg in option["arguments"][:kwarg_only_count]
if not exclude_arg_with_self_check(arg)
)
if kwarg_only_count is None:
return arg_signature
kwarg_only_signature = "#".join(
arg["name"] + "#" + arg["type"]
for arg in option["arguments"][kwarg_only_count:]
if not exclude_arg(arg)
)
return arg_signature + "#-#" + kwarg_only_signature
seen_signatures = set()
unique = []
for option in options:
        # only check num_kwarg_only == 0 if allow_kwarg is False
limit = len(option["arguments"]) if allow_kwarg else 0
for num_kwarg_only in range(0, limit + 1):
sig = signature(option, num_kwarg_only)
if sig not in seen_signatures:
if num_kwarg_only > 0:
for arg in option["arguments"][-num_kwarg_only:]:
arg["kwarg_only"] = True
unique.append(option)
seen_signatures.add(sig)
break
return unique
def sort_by_number_of_args(declaration: Declaration, reverse: bool = True) -> None:
def num_args(option: Option) -> int:
return len(option["arguments"])
declaration["options"].sort(key=num_args, reverse=reverse)
class Function(object):
def __init__(self, name: str) -> None:
self.name = name
self.arguments: List["Argument"] = []
def add_argument(self, arg: "Argument") -> None:
assert isinstance(arg, Argument)
self.arguments.append(arg)
def __repr__(self) -> str:
return self.name + "(" + ", ".join(a.__repr__() for a in self.arguments) + ")"
class Argument(object):
def __init__(self, _type: str, name: str, is_optional: bool):
self.type = _type
self.name = name
self.is_optional = is_optional
def __repr__(self) -> str:
return self.type + " " + self.name
def parse_header(path: str) -> List[Function]:
with open(path, "r") as f:
lines: Iterable[Any] = f.read().split("\n")
    # Remove empty lines and preprocessor directives
lines = filter(lambda l: l and not l.startswith("#"), lines)
# Remove line comments
lines = (l.partition("//") for l in lines)
# Select line and comment part
lines = ((l[0].strip(), l[2].strip()) for l in lines)
# Remove trailing special signs
lines = ((l[0].rstrip(");").rstrip(","), l[1]) for l in lines)
# Split arguments
lines = ((l[0].split(","), l[1]) for l in lines)
# Flatten lines
new_lines = []
for l, c in lines:
for split in l:
new_lines.append((split, c))
lines = new_lines
del new_lines
# Remove unnecessary whitespace
lines = ((l[0].strip(), l[1]) for l in lines)
# Remove empty lines
lines = filter(lambda l: l[0], lines)
generic_functions = []
for l, c in lines:
if l.startswith("TH_API void THNN_"):
fn_name = l[len("TH_API void THNN_") :]
if fn_name[0] == "(" and fn_name[-2] == ")":
fn_name = fn_name[1:-2]
else:
fn_name = fn_name[:-1]
generic_functions.append(Function(fn_name))
elif l.startswith("TORCH_CUDA_CPP_API void THNN_"):
fn_name = l[len("TORCH_CUDA_CPP_API void THNN_") :]
if fn_name[0] == "(" and fn_name[-2] == ")":
fn_name = fn_name[1:-2]
else:
fn_name = fn_name[:-1]
generic_functions.append(Function(fn_name))
elif l.startswith("TORCH_CUDA_CU_API void THNN_"):
fn_name = l[len("TORCH_CUDA_CU_API void THNN_") :]
if fn_name[0] == "(" and fn_name[-2] == ")":
fn_name = fn_name[1:-2]
else:
fn_name = fn_name[:-1]
generic_functions.append(Function(fn_name))
elif l:
t, name = l.split()
if "*" in name:
t = t + "*"
name = name[1:]
generic_functions[-1].add_argument(Argument(t, name, "[OPTIONAL]" in c))
return generic_functions
|
pytorch-master
|
tools/shared/cwrap_common.py
|
from importlib.abc import Loader
from types import ModuleType
from typing import cast
def import_module(name: str, path: str) -> ModuleType:
import importlib.util
spec = importlib.util.spec_from_file_location(name, path)
assert spec is not None
module = importlib.util.module_from_spec(spec)
cast(Loader, spec.loader).exec_module(module)
return module
|
pytorch-master
|
tools/shared/module_loader.py
|
#!/usr/bin/env python3
import argparse
import fnmatch
import pathlib
import subprocess
import textwrap
from typing import Any, Dict, List
import yaml
REPO_ROOT = pathlib.Path(__file__).parent.parent.parent
CONFIG_YML = REPO_ROOT / ".circleci" / "config.yml"
WORKFLOWS_DIR = REPO_ROOT / ".github" / "workflows"
WORKFLOWS_TO_CHECK = [
"binary_builds",
"build",
"master_build",
# These are formatted slightly differently, skip them
# "scheduled-ci",
# "debuggable-scheduled-ci",
# "slow-gradcheck-scheduled-ci",
# "promote",
]
def add_job(
workflows: Dict[str, Any],
workflow_name: str,
type: str,
job: Dict[str, Any],
past_jobs: Dict[str, Any],
) -> None:
"""
    Add job 'job' under 'type' and 'workflow_name' to 'workflows' in place. Also
    add any dependencies (they must already be in 'past_jobs').
"""
if workflow_name not in workflows:
workflows[workflow_name] = {"when": "always", "jobs": []}
requires = job.get("requires", None)
if requires is not None:
for requirement in requires:
dependency = past_jobs[requirement]
add_job(
workflows,
dependency["workflow_name"],
dependency["type"],
dependency["job"],
past_jobs,
)
workflows[workflow_name]["jobs"].append({type: job})
def get_filtered_circleci_config(
workflows: Dict[str, Any], relevant_jobs: List[str]
) -> Dict[str, Any]:
"""
Given an existing CircleCI config, remove every job that's not listed in
'relevant_jobs'
"""
new_workflows: Dict[str, Any] = {}
past_jobs: Dict[str, Any] = {}
for workflow_name, workflow in workflows.items():
if workflow_name not in WORKFLOWS_TO_CHECK:
# Don't care about this workflow, skip it entirely
continue
for job_dict in workflow["jobs"]:
for type, job in job_dict.items():
if "name" not in job:
# Job doesn't have a name so it can't be handled
print("Skipping", type)
else:
if job["name"] in relevant_jobs:
# Found a job that was specified at the CLI, add it to
# the new result
add_job(new_workflows, workflow_name, type, job, past_jobs)
# Record the job in case it's needed as a dependency later
past_jobs[job["name"]] = {
"workflow_name": workflow_name,
"type": type,
"job": job,
}
return new_workflows
def commit_ci(files: List[str], message: str) -> None:
# Check that there are no other modified files than the ones edited by this
# tool
stdout = subprocess.run(
["git", "status", "--porcelain"], stdout=subprocess.PIPE
).stdout.decode()
for line in stdout.split("\n"):
if line == "":
continue
if line[0] != " ":
raise RuntimeError(
f"Refusing to commit while other changes are already staged: {line}"
)
# Make the commit
subprocess.run(["git", "add"] + files)
subprocess.run(["git", "commit", "-m", message])
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="make .circleci/config.yml only have a specific set of jobs and delete GitHub actions"
)
parser.add_argument("--job", action="append", help="job name", default=[])
parser.add_argument(
"--filter-gha", help="keep only these github actions (glob match)", default=""
)
parser.add_argument(
"--make-commit",
action="store_true",
help="add change to git with to a do-not-merge commit",
)
args = parser.parse_args()
touched_files = [CONFIG_YML]
with open(CONFIG_YML, "r") as f:
config_yml = yaml.safe_load(f.read())
config_yml["workflows"] = get_filtered_circleci_config(
config_yml["workflows"], args.job
)
with open(CONFIG_YML, "w") as f:
yaml.dump(config_yml, f)
if args.filter_gha:
for relative_file in WORKFLOWS_DIR.iterdir():
path = REPO_ROOT.joinpath(relative_file)
if not fnmatch.fnmatch(path.name, args.filter_gha):
touched_files.append(path)
path.resolve().unlink()
if args.make_commit:
jobs_str = "\n".join([f" * {job}" for job in args.job])
message = textwrap.dedent(
f"""
[skip ci][do not merge] Edit config.yml to filter specific jobs
Filter CircleCI to only run:
{jobs_str}
See [Run Specific CI Jobs](https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md#run-specific-ci-jobs) for details.
"""
).strip()
commit_ci([str(f.relative_to(REPO_ROOT)) for f in touched_files], message)
|
pytorch-master
|
tools/testing/explicit_ci_jobs.py
|
pytorch-master
|
tools/testing/__init__.py
|
|
import os
import subprocess
from typing import Dict, List, Tuple
from tools.stats.import_test_stats import get_disabled_tests, get_slow_tests
def calculate_shards(
num_shards: int, tests: List[str], job_times: Dict[str, float]
) -> List[Tuple[float, List[str]]]:
filtered_job_times: Dict[str, float] = dict()
unknown_jobs: List[str] = []
for test in tests:
if test in job_times:
filtered_job_times[test] = job_times[test]
else:
unknown_jobs.append(test)
# The following attempts to implement a partition approximation greedy algorithm
# See more at https://en.wikipedia.org/wiki/Greedy_number_partitioning
sorted_jobs = sorted(
filtered_job_times, key=lambda j: filtered_job_times[j], reverse=True
)
sharded_jobs: List[Tuple[float, List[str]]] = [(0.0, []) for _ in range(num_shards)]
for job in sorted_jobs:
min_shard_index = sorted(range(num_shards), key=lambda i: sharded_jobs[i][0])[0]
curr_shard_time, curr_shard_jobs = sharded_jobs[min_shard_index]
curr_shard_jobs.append(job)
sharded_jobs[min_shard_index] = (
curr_shard_time + filtered_job_times[job],
curr_shard_jobs,
)
# Round robin the unknown jobs starting with the smallest shard
index = sorted(range(num_shards), key=lambda i: sharded_jobs[i][0])[0]
for job in unknown_jobs:
sharded_jobs[index][1].append(job)
index = (index + 1) % num_shards
return sharded_jobs
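# Worked example for calculate_shards (timings are hypothetical): with
# num_shards=2, tests=["a", "b", "c", "d"] and job_times={"a": 10.0, "b": 6.0,
# "c": 5.0}, the greedy pass produces [(10.0, ["a"]), (11.0, ["b", "c"])]; the
# unknown test "d" is then round-robined onto the currently smallest shard,
# giving [(10.0, ["a", "d"]), (11.0, ["b", "c"])].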
def _query_changed_test_files() -> List[str]:
default_branch = f"origin/{os.environ.get('GIT_DEFAULT_BRANCH', 'master')}"
cmd = ["git", "diff", "--name-only", default_branch, "HEAD"]
proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if proc.returncode != 0:
raise RuntimeError("Unable to get changed files")
lines = proc.stdout.decode().strip().split("\n")
lines = [line.strip() for line in lines]
return lines
def get_reordered_tests(tests: List[str]) -> List[str]:
"""Get the reordered test filename list based on github PR history or git changed file."""
prioritized_tests: List[str] = []
if len(prioritized_tests) == 0:
try:
changed_files = _query_changed_test_files()
except Exception:
# If unable to get changed files from git, quit without doing any sorting
return tests
prefix = f"test{os.path.sep}"
prioritized_tests = [
f for f in changed_files if f.startswith(prefix) and f.endswith(".py")
]
prioritized_tests = [f[len(prefix) :] for f in prioritized_tests]
prioritized_tests = [f[: -len(".py")] for f in prioritized_tests]
print("Prioritized test from test file changes.")
bring_to_front = []
the_rest = []
for test in tests:
if test in prioritized_tests:
bring_to_front.append(test)
else:
the_rest.append(test)
if len(tests) == len(bring_to_front) + len(the_rest):
print(
f"reordering tests for PR:\n"
f"prioritized: {bring_to_front}\nthe rest: {the_rest}\n"
)
return bring_to_front + the_rest
else:
print(
f"Something went wrong in CI reordering, expecting total of {len(tests)}:\n"
f"but found prioritized: {len(bring_to_front)}\nthe rest: {len(the_rest)}\n"
)
return tests
def get_test_case_configs(dirpath: str) -> None:
get_slow_tests(dirpath=dirpath)
get_disabled_tests(dirpath=dirpath)
|
pytorch-master
|
tools/testing/test_selections.py
|
import modulefinder
import os
import pathlib
import sys
import warnings
from typing import Any, Dict, List, Set
REPO_ROOT = pathlib.Path(__file__).resolve().parent.parent.parent
# These tests are slow enough that it's worth calculating whether the patch
# touched any related files first. This list was manually generated, but for every
# run with --determine-from, we use another generated list based on this one and the
# previous test stats.
TARGET_DET_LIST = [
# test_autograd.py is not slow, so it does not belong here. But
# note that if you try to add it back it will run into
# https://bugs.python.org/issue40350 because it imports files
# under test/autograd/.
"test_binary_ufuncs",
"test_cpp_extensions_aot_ninja",
"test_cpp_extensions_aot_no_ninja",
"test_cpp_extensions_jit",
"test_cpp_extensions_open_device_registration",
"test_cuda",
"test_cuda_primary_ctx",
"test_dataloader",
"test_determination",
"test_futures",
"test_jit",
"test_jit_legacy",
"test_jit_profiling",
"test_linalg",
"test_multiprocessing",
"test_nn",
"test_numpy_interop",
"test_optim",
"test_overrides",
"test_pruning_op",
"test_quantization",
"test_reductions",
"test_serialization",
"test_shape_ops",
"test_sort_and_select",
"test_tensorboard",
"test_testing",
"test_torch",
"test_utils",
"test_view_ops",
]
_DEP_MODULES_CACHE: Dict[str, Set[str]] = {}
def should_run_test(
target_det_list: List[str], test: str, touched_files: List[str], options: Any
) -> bool:
test = parse_test_module(test)
# Some tests are faster to execute than to determine.
if test not in target_det_list:
if options.verbose:
print_to_stderr(f"Running {test} without determination")
return True
# HACK: "no_ninja" is not a real module
if test.endswith("_no_ninja"):
test = test[: (-1 * len("_no_ninja"))]
if test.endswith("_ninja"):
test = test[: (-1 * len("_ninja"))]
dep_modules = get_dep_modules(test)
for touched_file in touched_files:
file_type = test_impact_of_file(touched_file)
if file_type == "NONE":
continue
elif file_type == "CI":
# Force all tests to run if any change is made to the CI
# configurations.
log_test_reason(file_type, touched_file, test, options)
return True
elif file_type == "UNKNOWN":
# Assume uncategorized source files can affect every test.
log_test_reason(file_type, touched_file, test, options)
return True
elif file_type in ["TORCH", "CAFFE2", "TEST"]:
parts = os.path.splitext(touched_file)[0].split(os.sep)
touched_module = ".".join(parts)
# test/ path does not have a "test." namespace
if touched_module.startswith("test."):
touched_module = touched_module.split("test.")[1]
if touched_module in dep_modules or touched_module == test.replace(
"/", "."
):
log_test_reason(file_type, touched_file, test, options)
return True
    # If nothing has determined that the test should run, don't run it.
if options.verbose:
print_to_stderr(f"Determination is skipping {test}")
return False
def test_impact_of_file(filename: str) -> str:
"""Determine what class of impact this file has on test runs.
Possible values:
TORCH - torch python code
CAFFE2 - caffe2 python code
TEST - torch test code
UNKNOWN - may affect all tests
NONE - known to have no effect on test outcome
CI - CI configuration files
"""
parts = filename.split(os.sep)
if parts[0] in [".jenkins", ".circleci"]:
return "CI"
if parts[0] in ["docs", "scripts", "CODEOWNERS", "README.md"]:
return "NONE"
elif parts[0] == "torch":
if parts[-1].endswith(".py") or parts[-1].endswith(".pyi"):
return "TORCH"
elif parts[0] == "caffe2":
if parts[-1].endswith(".py") or parts[-1].endswith(".pyi"):
return "CAFFE2"
elif parts[0] == "test":
if parts[-1].endswith(".py") or parts[-1].endswith(".pyi"):
return "TEST"
return "UNKNOWN"
def log_test_reason(file_type: str, filename: str, test: str, options: Any) -> None:
if options.verbose:
print_to_stderr(
"Determination found {} file {} -- running {}".format(
file_type,
filename,
test,
)
)
def get_dep_modules(test: str) -> Set[str]:
# Cache results in case of repetition
if test in _DEP_MODULES_CACHE:
return _DEP_MODULES_CACHE[test]
test_location = REPO_ROOT / "test" / f"{test}.py"
# HACK: some platforms default to ascii, so we can't just run_script :(
finder = modulefinder.ModuleFinder(
# Ideally exclude all third party modules, to speed up calculation.
excludes=[
"scipy",
"numpy",
"numba",
"multiprocessing",
"sklearn",
"setuptools",
"hypothesis",
"llvmlite",
"joblib",
"email",
"importlib",
"unittest",
"urllib",
"json",
"collections",
# Modules below are excluded because they are hitting https://bugs.python.org/issue40350
# Trigger AttributeError: 'NoneType' object has no attribute 'is_package'
"mpl_toolkits",
"google",
"onnx",
# Triggers RecursionError
"mypy",
],
)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
finder.run_script(str(test_location))
dep_modules = set(finder.modules.keys())
_DEP_MODULES_CACHE[test] = dep_modules
return dep_modules
def parse_test_module(test: str) -> str:
return test.split(".")[0]
def print_to_stderr(message: str) -> None:
print(message, file=sys.stderr)
|
pytorch-master
|
tools/testing/modulefinder_determinator.py
|
# Generates RegisterCodegenUnboxedKernels.cpp, UnboxingFunctions.h and UnboxingFunctions.cpp.
import argparse
import os
import pathlib
import sys
from dataclasses import dataclass
from typing import List, Sequence, Union
import yaml
from torchgen.api import cpp, unboxing
from torchgen.api.translate import translate
from torchgen.api.types import CppSignatureGroup
from torchgen.api.unboxing import convert_arguments
from torchgen.context import method_with_native_function
from torchgen.gen import cpp_string, get_custom_build_selector, parse_native_yaml
from torchgen.model import Argument, NativeFunction, NativeFunctionsGroup, Variant
from torchgen.selective_build.selector import SelectiveBuilder
from torchgen.utils import FileManager, make_file_manager, mapMaybe, Target
from typing_extensions import Literal
# Generates UnboxingFunctions.h & UnboxingFunctions.cpp.
@dataclass(frozen=True)
class ComputeUnboxingFunctions:
target: Union[Literal[Target.DECLARATION], Literal[Target.DEFINITION]]
selector: SelectiveBuilder
@method_with_native_function
def __call__(self, f: NativeFunction) -> str:
if not self.selector.is_root_operator(f"aten::{f.func.name}"):
return ""
if self.target is Target.DECLARATION:
# Note [The ATen Codegen Unboxing API]
# Similar to the ATen Operators API, ATen Codegen Unboxing API lives in the at::unboxing namespace, and
# will be used by codegen unboxing wrappers (CodegenUnboxingWrappers.cpp).
# The Wrappers will be registered into torch::jit::OperatorRegistry using RegisterOperators API.
#
# Important characteristics about the Codegen Unboxing API:
# (1) It follows the OperatorRegistry API.
# This is kind of necessary to avoid overhead.
# For example: if it followed the C++ API, then all of the faithful C++ factory functions
# would need to wrap their arguments into TensorOptions only to unwrap them again.
# (2) Under the hood it calls C++ API.
return f"""
// aten::{f.func}
TORCH_API void {f.func.name.unambiguous_name()}(Stack & stack);
"""
else:
sig_group = CppSignatureGroup.from_native_function(
f, method=(Variant.method in f.variants)
)
sig = sig_group.most_faithful_signature()
# parse arguments into C++ code
binding_list, code_list = convert_arguments(f)
# for each C++ argument, generate the conversion code
code_connector = "\n\t"
arg_connector = ", "
# function call and push back to stack
prefix = "self_base." if sig.method else "at::"
translated_args = translate(
binding_list, sig.arguments(), method=sig.method
)
args_str = f"{arg_connector.join(e.expr for e in translated_args)}"
if len(f.func.returns) == 0:
ret_str = ""
push_str = ""
else:
ret_str = "auto result_ = "
push_str = """
pack(stack, std::move(result_));
"""
return f"""
// aten::{f.func}
TORCH_API void {f.func.name.unambiguous_name()}(Stack & stack) {{
{code_connector.join(code_list)}
drop(stack, {len(binding_list)});
{ret_str}{prefix}{sig.name()}({args_str});
{push_str}
}}
"""
# Generates RegisterCodegenUnboxedKernels.cpp.
@dataclass(frozen=True)
class ComputeCodegenUnboxedKernels:
selector: SelectiveBuilder
@method_with_native_function
def __call__(self, f: NativeFunction) -> str:
if not self.selector.is_root_operator(f"aten::{f.func.name}"):
return ""
        # We unconditionally generate function wrappers.
sig_group = CppSignatureGroup.from_native_function(f, method=False)
sig = sig_group.most_faithful_signature()
# escape double quote in schema, get rid of extra double quotes
schema = cpp_string(str(sig.func))[1:-1]
# arguments
args = sig.arguments()
connector = ",\n\t\t"
args_code = []
for arg in args:
            # Since we use the faithful C++ API (method=False), we should not see SelfArgument/TensorOptionsArgument
assert isinstance(arg.argument, Argument)
if not arg.argument.default:
arg_cpp = "c10::IValue(c10::nullopt)"
else:
                # The unboxing code uses the faithful C++ API to avoid the overhead
                # of wrapping/unwrapping TensorOptions.
                # However, we would like to include default args for schema parsing.
                # Default args only show up in the non-faithful C++ API.
arg_default = cpp.default_expr(arg.argument.default, arg.argument.type)
if arg_default.startswith("{"):
arg_cpp = f"c10::IntArrayRef({arg_default})"
else:
arg_cpp = f"c10::IValue({arg_default})"
args_code.append(
f"""c10::Argument("{arg.name}", nullptr, c10::nullopt, {arg_cpp})"""
)
returns = f.func.returns
returns_code = []
for ret in returns:
returns_code.append(f"""c10::Argument("{ret.name if ret.name else ""}")""")
return f"""
// aten::{schema}
OperatorGenerator(
"aten::{f.func.name.name}",
"{f.func.name.overload_name}",
{{
{connector.join(args_code)}
}},
{{
{connector.join(returns_code)}
}},
[](Stack & stack) {{
RECORD_FUNCTION("{sig.name()}", std::vector<c10::IValue>());
at::unboxing::{unboxing.name(f)}(stack);
}},
aliasAnalysisFromSchema()
),
"""
def gen_unboxing(
*,
native_functions: Sequence[NativeFunction],
cpu_fm: FileManager,
selector: SelectiveBuilder,
) -> None:
def key_func(fn: Union[NativeFunction, NativeFunctionsGroup]) -> str:
return fn.root_name
selected_op_num: int = len(selector.operators)
# a best practice threshold of operators to enable sharding
sharding_threshold: int = 100
cpu_fm.write_sharded(
"UnboxingFunctions.cpp",
native_functions,
key_fn=key_func,
env_callable=lambda fn: {
"definitions": [ComputeUnboxingFunctions(Target.DEFINITION, selector)(fn)]
},
num_shards=1 if selected_op_num < sharding_threshold else 5,
sharded_keys={"definitions"},
)
cpu_fm.write(
"UnboxingFunctions.h",
lambda: {
"declarations": list(
mapMaybe(
ComputeUnboxingFunctions(Target.DECLARATION, selector),
native_functions,
)
),
},
)
cpu_fm.write_sharded(
"RegisterCodegenUnboxedKernels.cpp",
native_functions,
key_fn=key_func,
env_callable=lambda fn: {
"unboxed_ops": [ComputeCodegenUnboxedKernels(selector)(fn)]
},
num_shards=1 if selected_op_num < sharding_threshold else 10,
sharded_keys={"unboxed_ops"},
)
def main(args: List[str]) -> None:
parser = argparse.ArgumentParser(description="Generate unboxing source files")
parser.add_argument(
"-s",
"--source-path",
help="path to source directory for ATen",
default="aten/src/ATen",
)
parser.add_argument(
"-d", "--install_dir", help="output directory", default="build/aten/src/ATen"
)
parser.add_argument(
"-o",
"--output-dependencies",
help="output a list of dependencies into the given file and exit",
)
parser.add_argument(
"--dry-run",
action="store_true",
help="run without writing any files (still updates outputs)",
)
parser.add_argument(
"--op_selection_yaml_path",
help="Provide a path to the operator selection (for custom build) YAML "
"that contains the information about the set of selected operators "
"and their categories (training, ...). Each operator is either a "
"full operator name with overload or just a bare operator name. "
"The operator names also contain the namespace prefix (e.g. aten::)",
)
parser.add_argument(
"--op_registration_allowlist",
nargs="*",
help="filter op registrations by the allowlist (if set); "
"each item is `namespace`::`operator name` without overload name; "
"e.g.: aten::empty aten::conv2d ...",
)
parser.add_argument(
"--TEST_ONLY_op_registration_allowlist_yaml_path",
help="Provide a path to the operator selection (for custom build) YAML "
"which contains a list of operators. It is to serve testing purpose and "
"each item is `namespace`::`operator name` without overload name; "
"e.g.: aten::empty aten::conv2d ...",
)
options = parser.parse_args(args)
if options.op_registration_allowlist:
op_registration_allowlist = options.op_registration_allowlist
elif options.TEST_ONLY_op_registration_allowlist_yaml_path:
with open(options.TEST_ONLY_op_registration_allowlist_yaml_path, "r") as f:
op_registration_allowlist = yaml.safe_load(f)
else:
op_registration_allowlist = None
selector = get_custom_build_selector(
op_registration_allowlist,
options.op_selection_yaml_path,
)
native_yaml_path = os.path.join(options.source_path, "native/native_functions.yaml")
tags_yaml_path = os.path.join(options.source_path, "native/tags.yaml")
parsed_yaml = parse_native_yaml(native_yaml_path, tags_yaml_path)
native_functions, backend_indices = (
parsed_yaml.native_functions,
parsed_yaml.backend_indices,
)
cpu_fm = make_file_manager(options=options)
gen_unboxing(native_functions=native_functions, cpu_fm=cpu_fm, selector=selector)
if options.output_dependencies:
depfile_path = pathlib.Path(options.output_dependencies).resolve()
depfile_name = depfile_path.name
depfile_stem = depfile_path.stem
path = depfile_path.parent / depfile_name
cpu_fm.write_outputs(depfile_stem, str(path))
if __name__ == "__main__":
main(sys.argv[1:])
|
pytorch-master
|
tools/jit/gen_unboxing.py
|
pytorch-master
|
tools/jit/__init__.py
|
|
pytorch-master
|
tools/jit/test/__init__.py
|
|
import tempfile
import unittest
from unittest.mock import NonCallableMock, patch
import tools.jit.gen_unboxing as gen_unboxing
@patch("tools.jit.gen_unboxing.get_custom_build_selector")
@patch("tools.jit.gen_unboxing.parse_native_yaml")
@patch("tools.jit.gen_unboxing.make_file_manager")
@patch("tools.jit.gen_unboxing.gen_unboxing")
class TestGenUnboxing(unittest.TestCase):
def test_get_custom_build_selector_with_allowlist(
self,
mock_gen_unboxing: NonCallableMock,
mock_make_file_manager: NonCallableMock,
mock_parse_native_yaml: NonCallableMock,
mock_get_custom_build_selector: NonCallableMock,
) -> None:
args = ["--op_registration_allowlist=op1", "--op_selection_yaml_path=path2"]
gen_unboxing.main(args)
mock_get_custom_build_selector.assert_called_once_with(["op1"], "path2")
def test_get_custom_build_selector_with_allowlist_yaml(
self,
mock_gen_unboxing: NonCallableMock,
mock_make_file_manager: NonCallableMock,
mock_parse_native_yaml: NonCallableMock,
mock_get_custom_build_selector: NonCallableMock,
) -> None:
temp_file = tempfile.NamedTemporaryFile()
temp_file.write(b"- aten::add.Tensor")
temp_file.seek(0)
args = [
f"--TEST_ONLY_op_registration_allowlist_yaml_path={temp_file.name}",
"--op_selection_yaml_path=path2",
]
gen_unboxing.main(args)
mock_get_custom_build_selector.assert_called_once_with(
["aten::add.Tensor"], "path2"
)
temp_file.close()
def test_get_custom_build_selector_with_both_allowlist_and_yaml(
self,
mock_gen_unboxing: NonCallableMock,
mock_make_file_manager: NonCallableMock,
mock_parse_native_yaml: NonCallableMock,
mock_get_custom_build_selector: NonCallableMock,
) -> None:
temp_file = tempfile.NamedTemporaryFile()
temp_file.write(b"- aten::add.Tensor")
temp_file.seek(0)
args = [
"--op_registration_allowlist=op1",
"--TEST_ONLY_op_registration_allowlist_yaml_path={temp_file.name}",
"--op_selection_yaml_path=path2",
]
gen_unboxing.main(args)
mock_get_custom_build_selector.assert_called_once_with(["op1"], "path2")
temp_file.close()
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
tools/jit/test/test_gen_unboxing.py
|
pytorch-master
|
tools/lite_interpreter/__init__.py
|
|
#!/usr/bin/env python3
import argparse
import os
from typing import Set
import yaml
from torchgen.code_template import CodeTemplate
from torchgen.selective_build.selector import SelectiveBuilder
# Use the fast C YAML loader if it is available, falling back to the pure-Python one
try:
from yaml import CSafeLoader as Loader
except ImportError:
from yaml import SafeLoader as Loader # type: ignore[misc]
if_condition_template_str = """if (kernel_tag_sv.compare("$kernel_tag_name") == 0) {
return $dtype_checks;
}"""
if_condition_template = CodeTemplate(if_condition_template_str)
selected_kernel_dtypes_h_template_str = """
#include <c10/core/ScalarType.h>
#include <c10/util/string_view.h>
#include <c10/macros/Macros.h>
namespace at {
inline constexpr bool should_include_kernel_dtype(
const char *kernel_tag_str,
at::ScalarType scalar_type
) {
c10::string_view kernel_tag_sv C10_UNUSED = c10::string_view(kernel_tag_str);
$body
return false;
}
}
"""
selected_kernel_dtypes_h_template = CodeTemplate(selected_kernel_dtypes_h_template_str)
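# Illustrative sketch of a substituted header body when only some kernel dtypes are
# selected (the kernel tag and dtypes below are hypothetical):
#
#   if (kernel_tag_sv.compare("add_kernel") == 0) {
#     return scalar_type == at::ScalarType::Float || scalar_type == at::ScalarType::Int;
#   }
#
# When everything is included, $body degenerates to just "return true;".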
selected_mobile_ops_preamble = """#pragma once
/**
* Generated by gen_selected_mobile_ops_header.py
*/
"""
def extract_root_operators(selective_builder: SelectiveBuilder) -> Set[str]:
ops = []
for (op_name, op) in selective_builder.operators.items():
if op.is_root_operator:
ops.append(op_name)
return set(ops)
def get_selected_kernel_dtypes_code(
selective_builder: SelectiveBuilder,
) -> str:
# See https://www.internalfb.com/intern/paste/P153411698/ for an example of the
# generated code in case all kernel dtypes are selected and in case some kernel
# dtypes are selected (i.e. both cases).
#
body = "return true;"
if (
selective_builder.include_all_operators is False
and selective_builder.include_all_non_op_selectives is False
):
body_parts = []
for kernel_tag, dtypes in selective_builder.kernel_metadata.items():
conditions = list(
map(lambda x: "scalar_type == at::ScalarType::" + x, dtypes)
)
body_parts.append(
if_condition_template.substitute(
kernel_tag_name=kernel_tag,
dtype_checks=" || ".join(conditions),
),
)
body = " else ".join(body_parts)
header_contents = selected_kernel_dtypes_h_template.substitute(body=body)
return header_contents
# Write the file selected_mobile_ops.h with optionally:
# 1. The selected root operators
# 2. The selected kernel dtypes
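# Illustrative sketch of what the emitted header contains in a selective build
# (operator, class, and feature names below are hypothetical):
#
#   #define TORCH_OPERATOR_WHITELIST aten::add.Tensor;aten::mul.Tensor;
#   #define TORCH_CUSTOM_CLASS_ALLOWLIST __torch__.torch.classes.xnnpack.LinearOpContext;
#   #define TORCH_BUILD_FEATURE_ALLOWLIST some_feature;
#
# followed by the should_include_kernel_dtype() helper generated above.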
def write_selected_mobile_ops(
output_file_path: str,
selective_builder: SelectiveBuilder,
) -> None:
root_ops = extract_root_operators(selective_builder)
custom_classes = selective_builder.custom_classes
build_features = selective_builder.build_features
with open(output_file_path, "wb") as out_file:
body_parts = [selected_mobile_ops_preamble]
# This condition checks if we are in selective build.
        # If these lists are not defined, the corresponding selective build macros trivially report that the item in question was selected.
if not selective_builder.include_all_operators:
body_parts.append(
"#define TORCH_OPERATOR_WHITELIST "
+ (";".join(sorted(root_ops)))
+ ";\n\n"
)
# This condition checks if we are in tracing based selective build
if selective_builder.include_all_non_op_selectives is False:
body_parts.append(
"#define TORCH_CUSTOM_CLASS_ALLOWLIST "
+ (";".join(sorted(custom_classes)))
+ ";\n\n"
)
body_parts.append(
"#define TORCH_BUILD_FEATURE_ALLOWLIST "
+ (";".join(sorted(build_features)))
+ ";\n\n"
)
body_parts.append(get_selected_kernel_dtypes_code(selective_builder))
header_contents = "".join(body_parts)
out_file.write(header_contents.encode("utf-8"))
# root_ops: a set of selected root operators for selective build
# Write the file selected_mobile_ops.h with optionally:
# 1. The selected root operators from root_ops
# 2. All kernel dtypes
def write_selected_mobile_ops_with_all_dtypes(
output_file_path: str,
root_ops: Set[str],
) -> None:
with open(output_file_path, "wb") as out_file:
body_parts = [selected_mobile_ops_preamble]
body_parts.append(
"#define TORCH_OPERATOR_WHITELIST " + (";".join(sorted(root_ops))) + ";\n\n"
)
selective_builder = SelectiveBuilder.get_nop_selector()
body_parts.append(get_selected_kernel_dtypes_code(selective_builder))
header_contents = "".join(body_parts)
out_file.write(header_contents.encode("utf-8"))
def main() -> None:
parser = argparse.ArgumentParser(
description="Generate selected_mobile_ops.h for selective build."
)
parser.add_argument(
"-p",
"--yaml_file_path",
type=str,
required=True,
help="Path to the yaml" " file with a list of operators used by the model.",
)
parser.add_argument(
"-o",
"--output_file_path",
type=str,
required=True,
help="Path to destination"
"folder where selected_mobile_ops.h will be written.",
)
parsed_args = parser.parse_args()
model_file_name = parsed_args.yaml_file_path
print("Loading yaml file: ", model_file_name)
loaded_model = {}
with open(model_file_name, "rb") as model_file:
loaded_model = yaml.load(model_file, Loader=Loader)
root_operators_set = set(loaded_model)
print("Writing header file selected_mobile_ops.h: ", parsed_args.output_file_path)
write_selected_mobile_ops_with_all_dtypes(
os.path.join(parsed_args.output_file_path, "selected_mobile_ops.h"),
root_operators_set,
)
if __name__ == "__main__":
main()
|
pytorch-master
|
tools/lite_interpreter/gen_selected_mobile_ops_header.py
|
pytorch-master
|
tools/pyi/__init__.py
|
|
import argparse
import collections
from pprint import pformat
from typing import Dict, List, Sequence
from torchgen.api.python import (
PythonSignatureGroup,
PythonSignatureNativeFunctionPair,
returns_named_tuple_pyi,
)
from torchgen.gen import parse_native_yaml
from torchgen.model import Variant
from torchgen.utils import FileManager
from tools.autograd.gen_python_functions import (
group_overloads,
load_signatures,
should_generate_py_binding,
)
"""
This module implements generation of type stubs for PyTorch,
enabling use of autocomplete in IDEs like PyCharm, which otherwise
don't understand C extension modules.
At the moment, this module only handles type stubs for torch and
torch.Tensor. It should eventually be expanded to cover all functions
which are autogenerated.
Here's our general strategy:
- We start off with a hand-written __init__.pyi.in file. This
file contains type definitions for everything we cannot automatically
generate, including pure Python definitions directly in __init__.py
(the latter case should be pretty rare).
- We go through automatically bound functions based on the
type information recorded in native_functions.yaml and
generate type hints for them (generate_type_hints)
There are a number of type hints which we've special-cased;
read gen_pyi for the gory details.
"""
def get_py_torch_functions(
python_funcs: Sequence[PythonSignatureNativeFunctionPair],
method: bool = False,
) -> Sequence[PythonSignatureGroup]:
"""
Get declarations (grouped by name) which should be generated
as either functions in the "torch" module or methods on Tensor.
"""
def should_bind_function(python_func: PythonSignatureNativeFunctionPair) -> bool:
return (
should_generate_py_binding(python_func.function)
and not python_func.function.python_module
and Variant.function in python_func.function.variants
)
def should_bind_method(python_func: PythonSignatureNativeFunctionPair) -> bool:
return (
should_generate_py_binding(python_func.function)
and not python_func.function.python_module
and Variant.method in python_func.function.variants
)
should_bind = should_bind_method if method else should_bind_function
return group_overloads([f for f in python_funcs if should_bind(f)])
# TODO: Consider defining some aliases for our Union[...] types, to make
# the stubs easier to read.
DEVICE_PARAM = "device: Union[_device, str, None]=None"
FACTORY_PARAMS = (
f"dtype: Optional[_dtype]=None, {DEVICE_PARAM}, requires_grad: _bool=False"
)
# this could be more precise w.r.t list contents etc. How to do Ellipsis?
INDICES = "indices: Union[None, _int, slice, Tensor, List, Tuple]"
blocklist = [
"__init_subclass__",
"__new__",
"__subclasshook__",
"cdist",
"device",
"grad",
"requires_grad",
"range",
# defined in functional
"einsum",
# reduction argument; these bindings don't make sense
"binary_cross_entropy_with_logits",
"ctc_loss",
"cosine_embedding_loss",
"hinge_embedding_loss",
"kl_div",
"margin_ranking_loss",
"triplet_margin_loss",
# Somehow, these are defined in both _C and in functional. Ick!
"broadcast_tensors",
# Manually define named tensor type stubs in __init__.pyi.in
"align_tensors",
"meshgrid",
"cartesian_prod",
"block_diag",
"norm",
"chain_matmul",
"stft",
"tensordot",
"split",
"unique_consecutive",
"atleast_1d",
"atleast_2d",
"atleast_3d",
# These are handled specially by python_arg_parser.cpp
"add",
"add_",
"add_out",
"sub",
"sub_",
"sub_out",
"mul",
"mul_",
"mul_out",
"div",
"div_",
"div_out",
"true_divide",
"true_divide_",
"true_divide_out",
"floor_divide",
"floor_divide_",
"floor_divide_out",
]
binary_ops = (
"add",
"sub",
"mul",
"div",
"pow",
"lshift",
"rshift",
"mod",
"truediv",
"matmul",
"floordiv",
"radd",
"rsub",
"rmul",
"rtruediv",
"rfloordiv",
"rpow", # reverse arithmetic
"and",
"or",
"xor",
"rand",
"ror",
"rxor", # logic
"iadd",
"iand",
"idiv",
"ilshift",
"imul",
"ior",
"irshift",
"isub",
"ixor",
"ifloordiv",
"imod", # inplace ops
)
symmetric_comparison_ops = ("eq", "ne")
asymmetric_comparison_ops = ("ge", "gt", "lt", "le")
comparison_ops = symmetric_comparison_ops + asymmetric_comparison_ops
unary_ops = ("neg", "abs", "invert")
to_py_type_ops = ("bool", "float", "complex", "long", "index", "int", "nonzero")
all_ops = binary_ops + comparison_ops + unary_ops + to_py_type_ops
def sig_for_ops(opname: str) -> List[str]:
"""sig_for_ops(opname : str) -> List[str]
Returns signatures for operator special functions (__add__ etc.)"""
# we have to do this by hand, because they are hand-bound in Python
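    # For example, following the branches below:
    #   sig_for_ops("__add__") -> ["def __add__(self, other: Any) -> Tensor: ..."]
    #   sig_for_ops("__eq__")  -> ["def __eq__(self, other: Any) -> Tensor: ... # type: ignore[override]"]
    #   sig_for_ops("__int__") -> ["def __int__(self) -> builtins.int: ..."]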
assert opname.endswith("__") and opname.startswith("__"), "Unexpected op {}".format(
opname
)
name = opname[2:-2]
if name in binary_ops:
return ["def {}(self, other: Any) -> Tensor: ...".format(opname)]
elif name in comparison_ops:
sig = "def {}(self, other: Any) -> Tensor: ...".format(opname)
if name in symmetric_comparison_ops:
# unsafe override https://github.com/python/mypy/issues/5704
sig += " # type: ignore[override]"
return [sig]
elif name in unary_ops:
return ["def {}(self) -> Tensor: ...".format(opname)]
elif name in to_py_type_ops:
if name in {"bool", "float", "complex"}:
tname = name
elif name == "nonzero":
tname = "bool"
else:
tname = "int"
if tname in {"float", "int", "bool", "complex"}:
tname = "builtins." + tname
return ["def {}(self) -> {}: ...".format(opname, tname)]
else:
raise Exception("unknown op", opname)
def generate_type_hints(sig_group: PythonSignatureGroup) -> List[str]:
type_hints: List[str] = []
# Some deprecated ops that are on the blocklist are still included in pyi
if sig_group.signature.name in blocklist and not sig_group.signature.deprecated:
return type_hints
# deprecated signatures have separate entries for their functional and out variants
# (as opposed to the native ops, which fuse the two into a single signature).
# generate the functional variant here, if an out variant exists.
if sig_group.signature.deprecated and sig_group.outplace is not None:
type_hint = sig_group.signature.signature_str_pyi(skip_outputs=True)
type_hints.append(type_hint)
# PythonSignatureGroups that have both a functional + out variant get a single signature, with an optional out argument
# Generates the out variant if one exists. Otherwise, generate the functional variant
type_hint = sig_group.signature.signature_str_pyi(
skip_outputs=sig_group.outplace is None
)
type_hints.append(type_hint)
# Some operators also additionally have a vararg variant of their signature
type_hint_vararg = sig_group.signature.signature_str_pyi_vararg(
skip_outputs=sig_group.outplace is None
)
if type_hint_vararg:
type_hints.append(type_hint_vararg)
return type_hints
def gen_nn_functional(fm: FileManager) -> None:
# Functions imported into `torch.nn.functional` from `torch`, perhaps being filtered
# through an `_add_docstr` call
imports = [
"conv1d",
"conv2d",
"conv3d",
"conv_transpose1d",
"conv_transpose2d",
"conv_transpose3d",
"conv_tbc",
"avg_pool1d",
"relu_",
"selu_",
"celu_",
"rrelu_",
"pixel_shuffle",
"pixel_unshuffle",
"channel_shuffle",
"native_channel_shuffle",
"pdist",
"cosine_similarity",
]
# Functions generated by `torch._jit_internal.boolean_dispatch`
dispatches = [
"fractional_max_pool2d",
"fractional_max_pool3d",
"max_pool1d",
"max_pool2d",
"max_pool3d",
"adaptive_max_pool1d",
"adaptive_max_pool2d",
"adaptive_max_pool3d",
]
# Functions directly imported from `torch._C`
from_c = [
"avg_pool2d",
"avg_pool3d",
"hardtanh_",
"elu_",
"leaky_relu_",
"logsigmoid",
"softplus",
"softshrink",
"one_hot",
]
import_code = ["from .. import {0} as {0}".format(_) for _ in imports]
# TODO make these types more precise
dispatch_code = ["{}: Callable".format(_) for _ in (dispatches + from_c)]
fm.write_with_template(
"torch/nn/functional.pyi",
"torch/nn/functional.pyi.in",
lambda: {
"imported_hints": import_code,
"dispatched_hints": dispatch_code,
},
)
# functional.pyi already contains the definitions for those functions
    # so we don't export them to it
from_c.extend(["hardtanh", "leaky_relu", "hardsigmoid"])
dispatch_code = ["{}: Callable".format(_) for _ in (dispatches + from_c)]
fm.write_with_template(
"torch/_C/_nn.pyi",
"torch/_C/_nn.pyi.in",
lambda: {
"imported_hints": import_code,
"dispatched_hints": dispatch_code,
},
)
def gen_pyi(
native_yaml_path: str,
tags_yaml_path: str,
deprecated_yaml_path: str,
fm: FileManager,
) -> None:
"""gen_pyi()
This function generates a pyi file for torch.
"""
# Some of this logic overlaps with generate_python_signature in
# tools/autograd/gen_python_functions.py; however, this
# function is all about generating mypy type signatures, whereas
    # the other function generates a custom format for argument
    # checking. If you are updating this, consider whether your change
# also needs to update the other file.
# Dictionary for NamedTuple definitions
namedtuples: Dict[str, str] = {}
# Generate type signatures for top-level functions
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
unsorted_function_hints: Dict[str, List[str]] = collections.defaultdict(list)
for n, n1, n2 in [
("csr", "crow", "col"),
("csc", "ccol", "row"),
("bsr", "crow", "col"),
("bsc", "ccol", "row"),
]:
unsorted_function_hints.update(
{
f"sparse_{n}_tensor": [
f"def sparse_{n}_tensor({n1}_indices: Union[Tensor, List],"
f"{n2}_indices: Union[Tensor, List],"
" values: Union[Tensor, List], size: Optional[_size]=None,"
" *, dtype: Optional[_dtype]=None,"
" device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ..."
],
f"_sparse_{n}_tensor_unsafe": [
f"def _sparse_{n}_tensor_unsafe({n1}_indices: Union[Tensor, List],"
f"{n2}_indices: Union[Tensor, List],"
" values: Union[Tensor, List], size: List[int],"
" dtype: Optional[_dtype] = None, device: Optional[_device] = None,"
" requires_grad: bool = False) -> Tensor: ..."
],
}
)
unsorted_function_hints.update(
{
"set_flush_denormal": ["def set_flush_denormal(mode: _bool) -> _bool: ..."],
"get_default_dtype": ["def get_default_dtype() -> _dtype: ..."],
"asarray": [
"def asarray(obj: Any, *, dtype: Optional[_dtype]=None, "
"device: Union[_device, str, None]=None, copy: Optional[_bool]=None, "
"requires_grad: _bool=False) -> Tensor: ..."
],
"from_numpy": ["def from_numpy(ndarray) -> Tensor: ..."],
"frombuffer": [
"def frombuffer(buffer: Any, *, dtype: _dtype, count: int=-1, "
"offset: int=0, device: Union[_device, str, None]=None, "
"requires_grad: _bool=False) -> Tensor: ..."
],
"numel": ["def numel(self: Tensor) -> _int: ..."],
"as_tensor": [
"def as_tensor(data: Any, dtype: _dtype=None, device: Optional[_device]=None) -> Tensor: ..."
],
"get_num_threads": ["def get_num_threads() -> _int: ..."],
"set_num_threads": ["def set_num_threads(num: _int) -> None: ..."],
"init_num_threads": ["def init_num_threads() -> None: ..."],
"get_num_interop_threads": ["def get_num_interop_threads() -> _int: ..."],
"set_num_interop_threads": [
"def set_num_interop_threads(num: _int) -> None: ..."
],
# These functions are explicitly disabled by
# SKIP_PYTHON_BINDINGS because they are hand bound.
# Correspondingly, we must hand-write their signatures.
"tensor": [
"def tensor(data: Any, {}) -> Tensor: ...".format(FACTORY_PARAMS)
],
"sparse_coo_tensor": [
"def sparse_coo_tensor(indices: Tensor, values: Union[Tensor,List],"
" size: Optional[_size]=None, *, dtype: Optional[_dtype]=None,"
" device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ..."
],
"_sparse_coo_tensor_unsafe": [
"def _sparse_coo_tensor_unsafe(indices: Tensor, values: Tensor, size: List[int],"
" dtype: Optional[_dtype] = None, device: Optional[_device] = None,"
" requires_grad: bool = False) -> Tensor: ..."
],
"sparse_compressed_tensor": [
"def sparse_compressed_tensor(compressed_indices: Union[Tensor, List],"
"plain_indices: Union[Tensor, List],"
" values: Union[Tensor, List], size: Optional[_size]=None,"
" *, dtype: Optional[_dtype]=None, layout: Optional[_layout] = None,"
" device: Union[_device, str, None]=None, requires_grad:_bool=False) -> Tensor: ..."
],
"_sparse_compressed_tensor_unsafe": [
"def _sparse_compressed_tensor_unsafe(comp_indices: Union[Tensor, List],"
"plain_indices: Union[Tensor, List],"
" values: Union[Tensor, List], size: List[int],"
" dtype: Optional[_dtype] = None, layout: Optional[_layout] = None,"
" device: Optional[_device] = None,"
" requires_grad: bool = False) -> Tensor: ..."
],
"_is_functional_tensor": [
"def _is_functional_tensor(t: Tensor) -> _bool: ..."
],
"_from_functional_tensor": [
"def _from_functional_tensor(t: Tensor) -> Tensor: ..."
],
"_to_functional_tensor": [
"def _to_functional_tensor(t: Tensor) -> Tensor: ..."
],
"range": [
"def range(start: Number, end: Number,"
" step: Number=1, *, out: Optional[Tensor]=None, {}) -> Tensor: ...".format(
FACTORY_PARAMS
)
],
"arange": [
"def arange(start: Number, end: Number, step: Number, *,"
" out: Optional[Tensor]=None, {}) -> Tensor: ...".format(
FACTORY_PARAMS
),
"def arange(start: Number, end: Number, *, out: Optional[Tensor]=None, {}) -> Tensor: ...".format(
FACTORY_PARAMS
),
"def arange(end: Number, *, out: Optional[Tensor]=None, {}) -> Tensor: ...".format(
FACTORY_PARAMS
),
],
"linspace": [
"def linspace(start: Number, end: Number, steps: Optional[_int]=None, *,"
" out: Optional[Tensor]=None, {}) -> Tensor: ...".format(FACTORY_PARAMS)
],
"logspace": [
"def logspace(start: Number, end: Number, steps: Optional[_int]=None, base: _float=10.0, *,"
" out: Optional[Tensor]=None, {}) -> Tensor: ...".format(FACTORY_PARAMS)
],
"randint": [
"def randint(low: _int, high: _int, size: _size, *,"
" generator: Optional[Generator]=None, {}) -> Tensor: ...".format(
FACTORY_PARAMS
),
"def randint(high: _int, size: _size, *,"
" generator: Optional[Generator]=None, {}) -> Tensor: ...".format(
FACTORY_PARAMS
),
],
"full": [
"def full(size: _size, fill_value: Number, *,"
" out: Optional[Tensor]=None,"
" layout: _layout=strided, {}) -> Tensor: ...".format(FACTORY_PARAMS),
"def full(size: _size, fill_value: Number, *,"
" names: List[Union[str, None]],"
" layout: _layout=strided, {}) -> Tensor: ...".format(FACTORY_PARAMS),
],
"is_grad_enabled": ["def is_grad_enabled() -> _bool: ..."],
"is_inference_mode_enabled": [
"def is_inference_mode_enabled() -> _bool: ..."
],
"nonzero": [
"def nonzero(input: Tensor, *, as_tuple: Literal[False]=False, out: Optional[Tensor]=None) -> Tensor: ...",
"def nonzero(input: Tensor, *, as_tuple: Literal[True]) -> Tuple[Tensor, ...]: ...",
],
"binary_cross_entropy_with_logits": [
"def binary_cross_entropy_with_logits(input: Tensor, target: Tensor, "
"weight: Optional[Tensor] = None, size_average: Optional[bool] = None, "
"reduce: Optional[bool] = None, reduction: str = ..., "
"pos_weight: Optional[Tensor] = None) -> Tensor: ..."
],
"cosine_embedding_loss": [
"def cosine_embedding_loss(input1: Tensor, input2: Tensor, "
"target: Tensor, margin: float = ..., size_average: Optional[bool] = ..., "
"reduce: Optional[bool] = ..., reduction: str = ...) -> Tensor: ..."
],
"ctc_loss": [
"def ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor,"
" blank: int = ..., reduction: str = ..., zero_infinity: bool = ...) -> Tensor: ..."
],
"hinge_embedding_loss": [
"def hinge_embedding_loss(input: Tensor, target: Tensor, margin: float = ...,"
" size_average: Optional[bool] = ..., reduce: Optional[bool] = ..., "
"reduction: str = ...) -> Tensor: ..."
],
"kl_div": [
"def kl_div(input: Tensor, target: Tensor, size_average: Optional[bool] = ..., "
"reduce: Optional[bool] = ..., reduction: str = ..., log_target: bool = ...) -> Tensor: ..."
],
"margin_ranking_loss": [
"def margin_ranking_loss(input1: Tensor, input2: Tensor, target: Tensor,"
" margin: float = ..., size_average: Optional[bool] = ..., "
" reduce: Optional[bool] = ..., reduction: str = ...) -> Tensor: ..."
],
"triplet_margin_loss": [
"def triplet_margin_loss(anchor: Tensor, positive: Tensor, negative: Tensor, "
"margin: float = ..., p: float = ..., eps: float = ..., swap: bool = ..., "
"size_average: Optional[bool] = ..., "
"reduce: Optional[bool] = ..., reduction: str = ...) -> Tensor: ..."
],
"dsmm": ["def dsmm(input: Tensor, mat2: Tensor) -> Tensor: ..."],
"hsmm": ["def hsmm(input: Tensor, mat2: Tensor) -> Tensor: ..."],
"saddmm": [
"def saddmm(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Number=1, "
"alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ..."
],
"spmm": ["def spmm(input: Tensor, mat2: Tensor) -> Tensor: ..."],
"div": [
"def div(input: Union[Tensor, Number], other: Union[Tensor, Number], *, "
"rounding_mode: Optional[str] = None, out: Optional[Tensor]=None) -> Tensor: ..."
],
}
)
for binop in ["mul", "true_divide", "floor_divide"]:
unsorted_function_hints[binop].append(
"def {}(input: Union[Tensor, Number],"
" other: Union[Tensor, Number],"
" *, out: Optional[Tensor]=None) -> Tensor: ...".format(binop)
)
for binop in ["add", "sub"]:
unsorted_function_hints[binop].append(
"def {}(input: Union[Tensor, Number],"
" other: Union[Tensor, Number],"
" *, alpha: Optional[Number]=1, out: Optional[Tensor]=None) -> Tensor: ...".format(
binop
)
)
native_functions = parse_native_yaml(
native_yaml_path, tags_yaml_path
).native_functions
native_functions = list(filter(should_generate_py_binding, native_functions))
function_signatures = load_signatures(
native_functions, deprecated_yaml_path, method=False, pyi=True
)
sig_groups = get_py_torch_functions(function_signatures)
for group in sorted(sig_groups, key=lambda g: g.signature.name):
name = group.signature.name
unsorted_function_hints[name] += generate_type_hints(group)
named_tuple = returns_named_tuple_pyi(group.signature)
if named_tuple is not None and not group.signature.deprecated:
# deprecated namedtuples are currently not included for torch functions
tuple_name, tuple_def = named_tuple
if tuple_name in namedtuples:
assert namedtuples[tuple_name] == tuple_def
else:
namedtuples[tuple_name] = tuple_def
function_hints = []
for name, hints in sorted(unsorted_function_hints.items()):
if len(hints) > 1:
hints = ["@overload\n" + h for h in hints]
function_hints += hints
# Generate type signatures for Tensor methods
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
unsorted_tensor_method_hints: Dict[str, List[str]] = collections.defaultdict(list)
unsorted_tensor_method_hints.update(
{
"size": [
"def size(self) -> Size: ...",
"def size(self, dim: _int) -> _int: ...",
],
"stride": [
"def stride(self) -> Tuple[_int]: ...",
"def stride(self, _int) -> _int: ...",
],
"new_ones": [
"def new_ones(self, size: _size, {}) -> Tensor: ...".format(
FACTORY_PARAMS
)
],
"new_tensor": [
"def new_tensor(self, data: Any, {}) -> Tensor: ...".format(
FACTORY_PARAMS
)
],
            # new and __init__ have the same signatures and differ only in return type
# Adapted from legacy_tensor_ctor and legacy_tensor_new
"new": [
"def new(self, *args: Any, {}) ->Tensor: ...".format(DEVICE_PARAM),
"def new(self, storage: Storage) -> Tensor: ...",
"def new(self, other: Tensor) -> Tensor: ...",
"def new(self, size: _size, *, {}) -> Tensor: ...".format(DEVICE_PARAM),
],
"__init__": [
"def __init__(self, *args: Any, {}) -> None: ...".format(DEVICE_PARAM),
"def __init__(self, storage: Storage) -> None: ...",
"def __init__(self, other: Tensor) -> None: ...",
"def __init__(self, size: _size, *, {}) -> None: ...".format(
DEVICE_PARAM
),
],
"as_subclass": ["def as_subclass(self, cls: Tensor) -> Tensor: ..."],
"_make_subclass": [
"def _make_subclass(cls, data: Tensor, require_grad: _bool = False, dispatch_strides: _bool=False,"
" dispatch_device: _bool=False, device_for_backend_keys: Optional[_device] = None) -> Tensor: ..."
],
"__getitem__": ["def __getitem__(self, {}) -> Tensor: ...".format(INDICES)],
"__setitem__": [
"def __setitem__(self, {}, val: Union[Tensor, Number])"
" -> None: ...".format(INDICES)
],
"tolist": ["def tolist(self) -> List: ..."],
"requires_grad_": [
"def requires_grad_(self, mode: _bool=True) -> Tensor: ..."
],
"element_size": ["def element_size(self) -> _int: ..."],
"data_ptr": ["def data_ptr(self) -> _int: ..."],
"dim": ["def dim(self) -> _int: ..."],
"nonzero": [
"def nonzero(self, *, as_tuple: Literal[False]=False) -> Tensor: ...",
"def nonzero(self, *, as_tuple: Literal[True]) -> Tuple[Tensor, ...]: ...",
],
"numel": ["def numel(self) -> _int: ..."],
"ndimension": ["def ndimension(self) -> _int: ..."],
"nelement": ["def nelement(self) -> _int: ..."],
"cuda": [
"def cuda(self, device: Optional[Union[_device, _int, str]]=None, non_blocking: _bool=False) -> Tensor: ..."
],
"numpy": ["def numpy(self, *, force: _bool=False) -> Any: ..."],
"apply_": ["def apply_(self, callable: Callable) -> Tensor: ..."],
"map_": [
"def map_(self, tensor: Tensor, callable: Callable) -> Tensor: ..."
],
"map2_": [
"def map2_(self, x: Tensor, y: Tensor, callable: Callable) -> Tensor: ..."
],
"storage": ["def _storage(self) -> Storage: ..."],
"storage_type": ["def storage_type(self) -> Storage: ..."],
"type": [
"def type(self, dtype: None=None, non_blocking: _bool=False) -> str: ...",
"def type(self, dtype: Union[str, _dtype], non_blocking: _bool=False) -> Tensor: ...",
],
"get_device": ["def get_device(self) -> _int: ..."],
"contiguous": [
"def contiguous(self, memory_format=torch.contiguous_format) -> Tensor: ..."
],
"has_names": ["def has_names(self) -> _bool: ..."],
"is_contiguous": [
"def is_contiguous(self, memory_format=torch.contiguous_format) -> _bool: ..."
],
"_is_view": ["def _is_view(self) -> _bool: ..."],
"is_cuda": ["is_cuda: _bool"],
"is_leaf": ["is_leaf: _bool"],
"is_nested": ["is_nested: _bool"],
"is_sparse": ["is_sparse: _bool"],
"is_sparse_csr": ["is_sparse_csr: _bool"],
"is_quantized": ["is_quantized: _bool"],
"is_meta": ["is_meta: _bool"],
"is_mps": ["is_mps: _bool"],
"is_ort": ["is_ort: _bool"],
"is_mkldnn": ["is_mkldnn: _bool"],
"is_vulkan": ["is_vulkan: _bool"],
"is_ipu": ["is_ipu: _bool"],
"storage_offset": ["def storage_offset(self) -> _int: ..."],
"to": [
"def to(self, dtype: _dtype, non_blocking: _bool=False, copy: _bool=False) -> Tensor: ...",
"def to(self, device: Optional[Union[_device, str]]=None, dtype: Optional[_dtype]=None, "
"non_blocking: _bool=False, copy: _bool=False) -> Tensor: ...",
"def to(self, other: Tensor, non_blocking: _bool=False, copy: _bool=False) -> Tensor: ...",
],
"item": ["def item(self) -> Number: ..."],
"copy_": [
"def copy_(self, src: Tensor, non_blocking: _bool=False) -> Tensor: ..."
],
"set_": [
"def set_(self, storage: Union[Storage, TypedStorage], offset: _int, size: _size, stride: _size) -> Tensor: ...",
"def set_(self, storage: Union[Storage, TypedStorage]) -> Tensor: ...",
],
"split": [
"def split(self, split_size: _int, dim: _int=0) -> Sequence[Tensor]: ...",
"def split(self, split_size: Tuple[_int, ...], dim: _int=0) -> Sequence[Tensor]: ...",
],
"div": [
"def div(self, other: Union[Tensor, Number], *, rounding_mode: Optional[str] = None) -> Tensor: ..."
],
"div_": [
"def div_(self, other: Union[Tensor, Number], *, rounding_mode: Optional[str] = None) -> Tensor: ..."
],
}
)
for binop in ["mul", "true_divide", "floor_divide"]:
for inplace in [False, True]:
out_suffix = ", *, out: Optional[Tensor]=None"
if inplace:
binop += "_"
out_suffix = ""
unsorted_tensor_method_hints[binop].append(
"def {}(self, other: Union[Tensor, Number]{})"
" -> Tensor: ...".format(binop, out_suffix)
)
for binop in ["add", "sub"]:
for inplace in [False, True]:
out_suffix = ", out: Optional[Tensor]=None"
if inplace:
binop += "_"
out_suffix = ""
unsorted_tensor_method_hints[binop].append(
"def {}(self, other: Union[Tensor, Number], "
"*, alpha: Optional[Number]=1{})"
" -> Tensor: ...".format(binop, out_suffix)
)
simple_conversions = [
"byte",
"char",
"cpu",
"double",
"float",
"half",
"int",
"long",
"short",
"bool",
"bfloat16",
]
for name in simple_conversions:
unsorted_tensor_method_hints[name].append(
"def {}(self) -> Tensor: ...".format(name)
)
# pyi tensor methods don't currently include deprecated signatures for some reason
# TODO: we should probably add them in
tensor_method_signatures = load_signatures(
native_functions,
deprecated_yaml_path,
method=True,
skip_deprecated=True,
pyi=True,
)
tensor_method_sig_groups = get_py_torch_functions(
tensor_method_signatures, method=True
)
for group in sorted(tensor_method_sig_groups, key=lambda g: g.signature.name):
name = group.signature.name
unsorted_tensor_method_hints[name] += generate_type_hints(group)
named_tuple = returns_named_tuple_pyi(group.signature)
if named_tuple is not None and not group.signature.deprecated:
# deprecated namedtuples are currently not included for torch functions
tuple_name, tuple_def = named_tuple
if tuple_name in namedtuples:
assert namedtuples[tuple_name] == tuple_def
else:
namedtuples[tuple_name] = tuple_def
for op in all_ops:
name = "__{}__".format(op)
unsorted_tensor_method_hints[name] += sig_for_ops(name)
tensor_method_hints = []
for name, hints in sorted(unsorted_tensor_method_hints.items()):
if len(hints) > 1:
hints = ["@overload\n" + h for h in hints]
tensor_method_hints += hints
# TODO: Missing type hints for nn
# Generate namedtuple definitions
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
namedtuple_defs = [
"{} = {}".format(name, defn) for name, defn in namedtuples.items()
]
# Generate type signatures for legacy classes
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
legacy_storage_base_hints = ["class StorageBase(object): ..."]
legacy_class_hints = []
for c in (
"DoubleTensor",
"FloatTensor",
"LongTensor",
"IntTensor",
"ShortTensor",
"HalfTensor",
"CharTensor",
"ByteTensor",
"BoolTensor",
):
legacy_class_hints.append("class {}(Tensor): ...".format(c))
# Generate type signatures for dtype classes
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# TODO: don't explicitly list dtypes here; get it from canonical
# source
dtype_class_hints = [
"{}: dtype = ...".format(n)
for n in [
"float32",
"float",
"float64",
"double",
"float16",
"bfloat16",
"half",
"uint8",
"int8",
"int16",
"short",
"int32",
"int",
"int64",
"long",
"complex32",
"complex64",
"cfloat",
"complex128",
"cdouble",
"quint8",
"qint8",
"qint32",
"bool",
"quint4x2",
"quint2x4",
]
]
# Generate __all__ directive
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Include only the functions that contain hints, to prevent undefined
    # symbols from being included in the `__all__` directive.
hinted_function_names = [
name for name, hint in unsorted_function_hints.items() if hint
]
all_symbols = sorted(list(namedtuples.keys()) + hinted_function_names)
all_directive = pformat(all_symbols, width=100, compact=True).split("\n")
all_directive[0] = "__all__ = {}".format(all_directive[0])
# Write out the stub
# ~~~~~~~~~~~~~~~~~~
env = {
"namedtuple_defs": namedtuple_defs,
"function_hints": function_hints,
"tensor_method_hints": tensor_method_hints,
"legacy_class_hints": legacy_class_hints,
"legacy_storage_base_hints": legacy_storage_base_hints,
"dtype_class_hints": dtype_class_hints,
"all_directive": all_directive,
}
fm.write_with_template(
"torch/_C/__init__.pyi",
"torch/_C/__init__.pyi.in",
lambda: {
"generated_comment": "@" + "generated from torch/_C/__init__.pyi.in",
**env,
},
)
fm.write_with_template(
"torch/_C/_VariableFunctions.pyi",
"torch/_C/_VariableFunctions.pyi.in",
lambda: {
"generated_comment": "@"
+ "generated from torch/_C/_VariableFunctions.pyi.in",
**env,
},
)
fm.write_with_template(
"torch/_VF.pyi",
"torch/_C/_VariableFunctions.pyi.in",
lambda: {
"generated_comment": "@"
+ "generated from torch/_C/_VariableFunctions.pyi.in",
**env,
},
)
fm.write_with_template(
"torch/return_types.pyi",
"torch/_C/return_types.pyi.in",
lambda: {
"generated_comment": "@" + "generated from torch/_C/return_types.pyi",
**env,
},
)
gen_nn_functional(fm)
def main() -> None:
parser = argparse.ArgumentParser(description="Generate type stubs for PyTorch")
parser.add_argument(
"--native-functions-path",
metavar="NATIVE",
default="aten/src/ATen/native/native_functions.yaml",
help="path to native_functions.yaml",
)
parser.add_argument(
"--tags-path",
metavar="TAGS",
default="aten/src/ATen/native/tags.yaml",
help="path to tags.yaml",
)
parser.add_argument(
"--deprecated-functions-path",
metavar="DEPRECATED",
default="tools/autograd/deprecated.yaml",
help="path to deprecated.yaml",
)
parser.add_argument(
"--out", metavar="OUT", default=".", help="path to output directory"
)
args = parser.parse_args()
fm = FileManager(install_dir=args.out, template_dir=".", dry_run=False)
gen_pyi(
args.native_functions_path, args.tags_path, args.deprecated_functions_path, fm
)
if __name__ == "__main__":
main()
|
pytorch-master
|
tools/pyi/gen_pyi.py
|
import setuptools # type: ignore[import]
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="coverage-plugins",
version="0.0.1",
author="PyTorch Team",
author_email="packages@pytorch.org",
description="plug-in to coverage for PyTorch JIT",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/pytorch/pytorch",
project_urls={
"Bug Tracker": "https://github.com/pytorch/pytorch/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
python_requires=">=3.6",
)
|
pytorch-master
|
tools/coverage_plugins_package/setup.py
|
"""
This coverage plug-in attempts to cover JIT'd functions and methods that were previously missed in code coverage. Any
function and method that was passed through/decorated with torch.jit.script or torch.jit.script_method should now be
marked covered when coverage is run with this plug-in.
DISCLAIMER: note that this will mark the entire JIT'd function/method as covered without seeking proof that the
compiled code has been executed. This means that even if the code chunk is merely compiled and not run, it will get
marked as covered.
"""
from inspect import (
getsourcefile,
getsourcelines,
isclass,
iscode,
isfunction,
ismethod,
ismodule,
)
from time import time
from typing import Any
from coverage import CoverageData, CoveragePlugin # type: ignore[import]
# All coverage stats resulting from this plug-in will be in a separate .coverage file that should be merged later with
# `coverage combine`. The convention seems to be .coverage.dotted.suffix based on the following link:
# https://coverage.readthedocs.io/en/coverage-5.5/cmd.html#combining-data-files-coverage-combine
cov_data = CoverageData(basename=f".coverage.jit.{time()}")
def is_not_builtin_class(obj: Any) -> bool:
return isclass(obj) and not type(obj).__module__ == "builtins"
class JitPlugin(CoveragePlugin): # type: ignore[misc, no-any-unimported]
"""
dynamic_context is an overridden function that gives us access to every frame run during the coverage process. We
look for when the function being run is `should_drop`, as all functions that get passed into `should_drop` will be
compiled and thus should be marked as covered.
"""
def dynamic_context(self, frame: Any) -> None:
if frame.f_code.co_name == "should_drop":
obj = frame.f_locals["fn"]
# The many conditions in the if statement below are based on the accepted arguments to getsourcefile. Based
# on its documentation (https://docs.python.org/3/library/inspect.html#inspect.getsourcefile), the argument
# must be a module, class, method, function, traceback, frame, or code object AND it cannot be a built-in
# module, class, or function.
# Currently, we DO NOT include tracebacks or frames as they should not be JIT'd, and we have not checked for
# built-in modules or functions as those do not seem to be JIT'd either.
if (
is_not_builtin_class(obj)
or ismodule(obj)
or ismethod(obj)
or isfunction(obj)
or iscode(obj)
):
filename = getsourcefile(obj)
# We don't want to report for filename = None
if filename:
# TODO: Because torch.jit._IgnoreContextManager relies on Python's `exec` method
                    # which doesn't generate source code lines, getsourcelines(obj) fails. For now,
# we just ignore the exception until we figure out a better way to
# implement torch.jit._IgnoreContextManager.
try:
sourcelines, starting_lineno = getsourcelines(obj)
except OSError:
pass
else:
line_data = {
filename: range(
starting_lineno, starting_lineno + len(sourcelines)
)
}
cov_data.add_lines(line_data)
super().dynamic_context(frame)
def coverage_init(reg: Any, options: Any) -> None:
reg.add_dynamic_context(JitPlugin())
|
pytorch-master
|
tools/coverage_plugins_package/src/coverage_plugins/jit_plugin.py
|
pytorch-master
|
tools/coverage_plugins_package/src/coverage_plugins/__init__.py
|
|
#!/usr/bin/env python3
import argparse
import json
import sys
from typing import Any, Dict, List, Optional
import yaml
from gen_op_registration_allowlist import (
canonical_name,
gen_transitive_closure,
load_op_dep_graph,
)
from torchgen.selective_build.operator import (
merge_operator_dicts,
SelectiveBuildOperator,
)
from torchgen.selective_build.selector import merge_kernel_metadata
# Generate YAML file containing the operators used for a specific PyTorch model.
# ------------------------------------------------------------------------------
#
# This binary is responsible for generating the model_operators.yaml file for
# each model from a pt_operator_library() BUCK macro invocation.
#
# Output YAML file format:
# ------------------------
#
# <BEGIN FILE CONTENTS>
# include_all_non_op_selectives: False
# include_all_operators: False
# debug_info:
# - model1@v100
# - model2@v50
# operators:
# aten::add:
# is_root_operator: Yes
# is_used_for_training: Yes
# include_all_overloads: No
# debug_info:
# - model1@v100
# - model2@v50
# aten::add.int:
# is_root_operator: No
# is_used_for_training: No
# include_all_overloads: Yes
# kernel_metadata:
# add_kernel:
# - Int8
# - UInt32
# sub_kernel:
# - Int16
# - Float
# <END FILE CONTENTS>
#
# There are a few main inputs to this application
# -----------------------------------------------
#
# 1. Inference Root Operators (--root_ops): Root operators (called directly
# from TorchScript) used by inference use-cases.
#
# 2. Training Root Operators (--training_root_ops): Root operators used
# by training use-cases. Currently, this list is the list of all operators
# used by training, and not just the root operators. All Training ops are
# also considered for inference, so these are merged into inference ops.
#
# 3. Operator Dependency Graph (--dep_graph_yaml_path): A path to the
# operator dependency graph used to determine which operators depend on
# which other operators for correct functioning. This is used for
# generating the transitive closure of all the operators used by the
# model based on the root operators when static selective build is used.
# For tracing based selective build, we don't need to perform this
#    transitive closure.
#
# 4. Model Metadata (--model_name, --model_versions, --model_assets,
# --model_backends): Self-descriptive. These are used to tell this
# script which model operator lists to fetch from the Unified Model
# Build Metadata YAML file.
#
# 5. Unified Model YAML file (--models_yaml_path): A path to the Unified
# model YAML operator list file. This yaml file contains (for each
# model/version/asset/backend) the set of used root and traced
# operators. This is used to extract the actual set of operators
# needed to be included in the build.
#
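# A hypothetical invocation of this script (flag values and formats are made up for
# illustration; in practice the pt_operator_library() BUCK macro supplies them):
#
#   <this script> --rule_name my_rule --model_name my_model --model_versions 100,101 \
#       --model_assets asset_a --models_yaml_path all_mobile_models.yaml \
#       --dep_graph_yaml_path pytorch_op_deps.yaml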
def canonical_opnames(opnames: List[str]) -> List[str]:
return [canonical_name(opname) for opname in opnames]
def make_filter_from_options(
model_name: str,
model_versions: List[str],
model_assets: Optional[List[str]],
model_backends: Optional[List[str]],
):
def is_model_included(model_info):
model = model_info["model"]
if model["name"] != model_name:
return False
if str(model["version"]) not in model_versions:
return False
if model_assets is not None and model["asset"] not in model_assets:
return False
# TODO: Handle backend later
return True
return is_model_included
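# For example (hypothetical metadata), the predicate returned by
# make_filter_from_options("model1", ["100"], None, None) keeps an entry such as
#   {"model": {"name": "model1", "version": 100, "asset": "asset_a"}}
# and rejects entries whose name or version does not match.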
# Returns whether the specified rule is a new or old style pt_operator_library
def is_new_style_rule(model_name: str, model_versions: Optional[List[str]]):
return model_name is not None and model_versions is not None
# Verifies that the specified model_name, and all specified versions and assets,
# appear in at least one model yaml. Throws if verification fails;
# returns None on success.
def verify_all_specified_present(
model_assets: Optional[List[str]],
model_versions: List[str],
selected_models_yaml: List[Dict[str, Any]],
rule_name: str,
model_name: str,
new_style_rule: bool,
):
def find_missing_items(model_items, key, selected_models_yaml):
missing_items = []
if not new_style_rule or not model_items:
return missing_items
for item in model_items:
found = False
for model in selected_models_yaml:
if str(model["model"][key]) == item:
found = True
if not found:
missing_items.append(item)
return missing_items
missing_assets = find_missing_items(model_assets, "asset", selected_models_yaml)
missing_versions = find_missing_items(
model_versions, "version", selected_models_yaml
)
if len(missing_versions) > 0 or len(missing_assets) > 0: # at least one is missing
name_warning = ""
if len(selected_models_yaml) == 0:
name_warning = (
"WARNING: 0 yaml's were found for target rule. This could be because the "
+ "provided model name: {name} is incorrect. Please check that field as well as "
+ "the assets and versions."
).format(name=model_name)
raise RuntimeError(
(
"Error: From the pt_operator_library rule for Rule: {name}, at least one entry for the "
+ "following fields was expected -- Model: {model_name} Expected Assets: {expected_assets}, Expected Versions: "
+ "{expected_versions}. {name_warning} In all_mobile_models.yaml either no assets were on one of the "
+ "specified versions, one of the specified assets was not present on any of the specified "
+ "versions, or both. Assets not found: {missing_assets}, Versions not found: {missing_versions} "
+ "For questions please ask in https://fb.workplace.com/groups/2148543255442743/"
).format(
name=rule_name,
model_name=model_name,
expected_versions=model_versions,
expected_assets=model_assets
if model_assets
else "<All model assets present on specified versions>",
name_warning=name_warning,
missing_versions=missing_versions
if len(missing_versions) > 0
else "<All specified versions had at least one asset>",
missing_assets=missing_assets
if len(missing_assets) > 0
else "<All specified assets are present on at least 1 version>",
)
)
# Uses the selected models configs and then combines them into one dictionary,
# formats them as a string, and places the string into output as a top level debug_info
def create_debug_info_from_selected_models(
output: Dict[str, object],
selected_models: List[dict],
new_style_rule: bool,
):
model_dict = {
"asset_info": {}, # maps asset name -> dict of asset metadata like hashes
"is_new_style_rule": new_style_rule,
}
for model in selected_models:
model_info = model["model"]
asset = model_info["asset"]
hash = model_info["md5_hash"]
asset_info = model_dict["asset_info"].setdefault(asset, {})
asset_info.setdefault("md5_hash", []).append(hash)
# Will later be used in gen_oplist to generate the model/version/asset checking
output["debug_info"] = [json.dumps(model_dict)]
def fill_output(output: Dict[str, object], options: object):
"""Populate the output dict with the information required to serialize
the YAML file used for selective build.
"""
dept_graph = load_op_dep_graph(options.dep_graph_yaml_path)
model_versions = (
options.model_versions.split(",") if options.model_versions is not None else []
)
model_assets = (
options.model_assets.split(",") if options.model_assets is not None else None
)
with open(options.models_yaml_path, "rb") as models_yaml_file:
all_models_yaml = yaml.safe_load(models_yaml_file) or []
model_filter_func = make_filter_from_options(
options.model_name, model_versions, model_assets, options.model_backends
)
selected_models_yaml = list(filter(model_filter_func, all_models_yaml))
verify_all_specified_present(
model_assets=model_assets,
model_versions=model_versions,
selected_models_yaml=selected_models_yaml,
rule_name=options.rule_name,
model_name=options.model_name,
new_style_rule=is_new_style_rule(options.model_name, options.model_versions),
)
create_debug_info_from_selected_models(
output,
selected_models_yaml,
is_new_style_rule(options.model_name, options.model_versions),
)
# initialize variables for static build from the pt_operator_library rule
if options.root_ops is not None:
static_root_ops = set(filter(lambda x: len(x) > 0, options.root_ops.split(",")))
else:
static_root_ops = set()
static_training_root_ops = set(
filter(
lambda x: len(x) > 0,
(options.training_root_ops or "").split(","),
)
)
if len(static_training_root_ops) > 0:
static_root_ops = static_root_ops | static_training_root_ops
# end if
root_ops_unexpand = set()
traced_ops = set()
training_root_ops_unexpand = set()
traced_training_ops = set()
all_kernel_metadata = []
all_custom_classes = set()
all_build_features = set()
# Go through each yaml file and retrieve operator information.
for model_info in selected_models_yaml:
if "traced_operators" not in model_info:
# If this YAML file doesn't specify any traced operators, then it is using
# the static analysis selective build approach of finding transitively
# used operators, and we should update root_ops with the set of root
# operators, all of whose overloads must be included. In addition, these
# root_ops will be further expanded using the transitive closure of
# operator dependencies.
static_root_ops = static_root_ops | set(model_info["root_operators"])
else:
# If this YAML file specifies traced operators, then it is using
# the tracing based selective build approach of finding used
# operators, and we should update root_ops_unexpand with the set of root
# operators whose overloads don't need to be included. In addition, these
# root_ops_unexpand will NOT be further expanded. If the train flag is
# set then the ops will be used for training, so we put them in a separate
# set
if model_info["train"]:
training_root_ops_unexpand = training_root_ops_unexpand | set(
model_info["root_operators"]
)
traced_training_ops = traced_training_ops | set(
model_info["traced_operators"]
)
else:
root_ops_unexpand = root_ops_unexpand | set(
model_info["root_operators"]
)
traced_ops = traced_ops | set(model_info["traced_operators"])
if "kernel_metadata" in model_info:
all_kernel_metadata.append(model_info["kernel_metadata"])
if "custom_classes" in model_info:
all_custom_classes = all_custom_classes | set(model_info["custom_classes"])
if "build_features" in model_info:
all_build_features = all_build_features | set(model_info["build_features"])
    # The following section on transitive closure is relevant to static build only
canonical_root_ops = canonical_opnames(static_root_ops)
# If no canonical_root_ops exist, don't compute the transitive closure
# otherwise, we will include __BASE__ and __ROOT__ ops and mark them as required
# for inference.
if len(canonical_root_ops) > 0:
closure_op_list = gen_transitive_closure(dept_graph, canonical_root_ops)
else:
closure_op_list = set()
canonical_training_root_ops = canonical_opnames(static_training_root_ops)
# If no canonical_training_root_ops exist, don't compute the transitive closure
# otherwise, we will include __BASE__ and __ROOT__ ops and mark them as required
# for training.
if len(canonical_training_root_ops) > 0:
closure_training_op_list = gen_transitive_closure(
dept_graph, canonical_training_root_ops, train=True
)
else:
closure_training_op_list = set()
# bucketed_ops holds sets of operators that correspond to specific semantic buckets. For
# example:
#
# 1. Root Operators not used for training w/o full overload inclusion
# 2. Root Operators not used for training w/ full overload inclusion
# 3. Root Operators used for training w/o full overload inclusion
# 4. Root Operators used for training w/ full overload inclusion
# 5. Non-root Operators not used for training w/o full overload inclusion
# etc...
#
    # Basically, for each of the 3 boolean flags (is_root_operator,
    # is_used_for_training, include_all_overloads) there are 2 options
    # (True/False), giving 8 buckets in total.
#
bucketed_ops = []
# START STATIC BUILD OPS
static_root_ops_bucket = {}
for op_name in static_root_ops:
op = SelectiveBuildOperator.from_yaml_dict(
op_name,
{
"is_root_operator": True,
"is_used_for_training": False,
"include_all_overloads": True,
"debug_info": [options.model_name],
},
)
static_root_ops_bucket[op_name] = op
bucketed_ops.append(static_root_ops_bucket)
closure_ops_bucket = {}
for op_name in closure_op_list:
op = SelectiveBuildOperator.from_yaml_dict(
op_name,
{
"is_root_operator": False,
"is_used_for_training": False,
"include_all_overloads": True,
"debug_info": [options.model_name],
},
)
closure_ops_bucket[op_name] = op
bucketed_ops.append(closure_ops_bucket)
static_training_root_ops_bucket = {}
for op_name in static_training_root_ops:
op = SelectiveBuildOperator.from_yaml_dict(
op_name,
{
"is_root_operator": True,
"is_used_for_training": True,
"include_all_overloads": True,
"debug_info": [options.model_name],
},
)
static_training_root_ops_bucket[op_name] = op
bucketed_ops.append(static_training_root_ops_bucket)
closure_training_ops_bucket = {}
for op_name in closure_training_op_list:
op = SelectiveBuildOperator.from_yaml_dict(
op_name,
{
"is_root_operator": False,
"is_used_for_training": True,
"include_all_overloads": True,
"debug_info": [options.model_name],
},
)
closure_training_ops_bucket[op_name] = op
bucketed_ops.append(closure_training_ops_bucket)
# END STATIC BUILD OPS
# START TRACING BASED BUILD OPS
root_ops_unexpand_bucket = {}
for op_name in root_ops_unexpand:
op = SelectiveBuildOperator.from_yaml_dict(
op_name,
{
"is_root_operator": True,
"is_used_for_training": False,
"include_all_overloads": False,
"debug_info": [options.model_name],
},
)
root_ops_unexpand_bucket[op_name] = op
bucketed_ops.append(root_ops_unexpand_bucket)
traced_ops_bucket = {}
for op_name in traced_ops:
op = SelectiveBuildOperator.from_yaml_dict(
op_name,
{
"is_root_operator": False,
"is_used_for_training": False,
"include_all_overloads": False,
"debug_info": [options.model_name],
},
)
traced_ops_bucket[op_name] = op
bucketed_ops.append(traced_ops_bucket)
training_root_ops_unexpand_bucket = {}
for op_name in training_root_ops_unexpand:
op = SelectiveBuildOperator.from_yaml_dict(
op_name,
{
"is_root_operator": True,
"is_used_for_training": True,
"include_all_overloads": False,
"debug_info": [options.model_name],
},
)
training_root_ops_unexpand_bucket[op_name] = op
bucketed_ops.append(training_root_ops_unexpand_bucket)
traced_training_ops_bucket = {}
for op_name in traced_training_ops:
op = SelectiveBuildOperator.from_yaml_dict(
op_name,
{
"is_root_operator": False,
"is_used_for_training": True,
"include_all_overloads": False,
"debug_info": [options.model_name],
},
)
traced_training_ops_bucket[op_name] = op
bucketed_ops.append(traced_training_ops_bucket)
# END TRACING BASED BUILD OPS
# Merge dictionaries together to remove op duplication
operators: Dict[str, SelectiveBuildOperator] = {}
for ops_dict in bucketed_ops:
operators = merge_operator_dicts(operators, ops_dict)
    # Loop over all operators, and if any of them specifies that
# all overloads need to be included, then set include_all_non_op_selectives
# to True, since it indicates that this operator list came from something
# other than a traced operator list.
include_all_non_op_selectives = False
for (op_name, op_info) in operators.items():
include_all_non_op_selectives = (
include_all_non_op_selectives or op_info.include_all_overloads
)
operators_as_dict = {}
for (k, v) in operators.items():
operators_as_dict[k] = v.to_dict()
output["operators"] = operators_as_dict
output["custom_classes"] = all_custom_classes
output["build_features"] = all_build_features
output["include_all_non_op_selectives"] = include_all_non_op_selectives
if len(all_kernel_metadata) > 0:
kernel_metadata = {}
for kt in all_kernel_metadata:
kernel_metadata = merge_kernel_metadata(kernel_metadata, kt)
output["kernel_metadata"] = kernel_metadata
def get_parser_options(parser: argparse.ArgumentParser) -> argparse.Namespace:
parser.add_argument(
"--root_ops",
help="A comma separated list of root operators used by the model",
required=False,
)
parser.add_argument(
"--training_root_ops",
help="A comma separated list of root operators used for training",
required=False,
)
parser.add_argument(
"--output_path",
help="The location of the output yaml file.",
required=True,
)
parser.add_argument(
"--dep_graph_yaml_path",
type=str,
help="A path to the Operator Dependency Graph YAML file.",
required=True,
)
parser.add_argument(
"--model_name",
type=str,
help="The name of the model that uses the specified root operators.",
required=True,
)
parser.add_argument(
"--model_versions",
type=str,
help="A comma separated list of model versions.",
required=False,
)
parser.add_argument(
"--model_assets",
type=str,
help="A comma separate list of model asset names (if absent, defaults to all assets for this model).",
required=False,
)
parser.add_argument(
"--model_backends",
type=str,
default="CPU",
help="A comma separated list of model backends.",
required=False,
)
parser.add_argument(
"--models_yaml_path",
type=str,
help="The path to where the unified Mobile Model Config YAML resides.",
required=True,
)
parser.add_argument(
"--include_all_operators",
action="store_true",
default=False,
help="Set this flag to request inclusion of all opeators (i.e. build is not selective).",
required=False,
)
parser.add_argument(
"--rule_name",
type=str,
help="The name of pt_operator_library rule resulting in this generation",
required=True,
)
options = parser.parse_args()
return options
def main(argv) -> None:
parser = argparse.ArgumentParser(description="Generate used operators YAML")
options = get_parser_options(parser)
model_dict = {
"model_name": options.model_name,
"asset_info": {},
"is_new_style_rule": False,
}
output = {
"debug_info": [json.dumps(model_dict)],
}
if options.include_all_operators:
output["include_all_operators"] = True
output["operators"] = {}
output["kernel_metadata"] = {}
else:
fill_output(output, options)
with open(options.output_path, "wb") as out_file:
out_file.write(
yaml.safe_dump(
output,
default_flow_style=False,
).encode("utf-8")
)
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
pytorch-master
|
tools/code_analyzer/gen_operators_yaml.py
|
#!/usr/bin/env python3
import argparse
import json
import os
import sys
from functools import reduce
from typing import Any, List, Set
import yaml
from tools.lite_interpreter.gen_selected_mobile_ops_header import (
write_selected_mobile_ops,
)
from torchgen.selective_build.selector import (
combine_selective_builders,
SelectiveBuilder,
)
def extract_all_operators(selective_builder: SelectiveBuilder) -> Set[str]:
ops = []
for (op_name, op) in selective_builder.operators.items():
ops.append(op_name)
return set(ops)
def extract_training_operators(selective_builder: SelectiveBuilder) -> Set[str]:
ops = []
for (op_name, op) in selective_builder.operators.items():
if op.is_used_for_training:
ops.append(op_name)
return set(ops)
def throw_if_any_op_includes_overloads(selective_builder: SelectiveBuilder) -> None:
ops = []
for (op_name, op) in selective_builder.operators.items():
if op.include_all_overloads:
ops.append(op_name)
if ops:
raise Exception(
(
"Operators that include all overloads are "
+ "not allowed since --allow_include_all_overloads "
+ "was specified: {}"
).format(", ".join(ops))
)
def gen_supported_mobile_models(model_dicts: List[Any], output_dir: str) -> None:
supported_mobile_models_source = """/*
* Generated by gen_oplist.py
*/
#include "fb/supported_mobile_models/SupportedMobileModels.h"
struct SupportedMobileModelCheckerRegistry {{
SupportedMobileModelCheckerRegistry() {{
auto& ref = facebook::pytorch::supported_model::SupportedMobileModelChecker::singleton();
ref.set_supported_md5_hashes(std::unordered_set<std::string>{{
{supported_hashes_template}
}});
}}
}};
// This is a global object, initializing which causes the registration to happen.
SupportedMobileModelCheckerRegistry register_model_versions;
"""
# Generate SupportedMobileModelsRegistration.cpp
md5_hashes = set()
for model_dict in model_dicts:
if "debug_info" in model_dict:
debug_info = json.loads(model_dict["debug_info"][0])
if debug_info["is_new_style_rule"]:
for asset, asset_info in debug_info["asset_info"].items():
md5_hashes.update(asset_info["md5_hash"])
supported_hashes = ""
for md5 in md5_hashes:
supported_hashes += '"{}",\n'.format(md5)
with open(
os.path.join(output_dir, "SupportedMobileModelsRegistration.cpp"), "wb"
) as out_file:
source = supported_mobile_models_source.format(
supported_hashes_template=supported_hashes
)
out_file.write(source.encode("utf-8"))
def main(argv: List[Any]) -> None:
"""This binary generates 3 files:
1. selected_mobile_ops.h: Primary operators used by templated selective build and Kernel Function
dtypes captured by tracing
2. selected_operators.yaml: Selected root and non-root operators (either via tracing or static analysis)
"""
parser = argparse.ArgumentParser(description="Generate operator lists")
parser.add_argument(
"--output_dir",
help=(
"The directory to store the output yaml files (selected_mobile_ops.h, "
+ "selected_kernel_dtypes.h, selected_operators.yaml)"
),
required=True,
)
parser.add_argument(
"--model_file_list_path",
help=(
"Path to a file that contains the locations of individual "
+ "model YAML files that contain the set of used operators. This "
+ "file path must have a leading @-symbol, which will be stripped "
+ "out before processing."
),
required=True,
)
parser.add_argument(
"--allow_include_all_overloads",
help=(
"Flag to allow operators that include all overloads. "
+ "If not set, operators registered without using the traced style will"
+ "break the build."
),
action="store_true",
default=False,
required=False,
)
options = parser.parse_args()
if os.path.isfile(options.model_file_list_path):
print("Processing model file: ", options.model_file_list_path)
model_dicts = []
model_dict = yaml.safe_load(open(options.model_file_list_path))
model_dicts.append(model_dict)
else:
print("Processing model directory: ", options.model_file_list_path)
assert options.model_file_list_path[0] == "@"
model_file_list_path = options.model_file_list_path[1:]
model_dicts = []
with open(model_file_list_path) as model_list_file:
model_file_names = model_list_file.read().split()
for model_file_name in model_file_names:
with open(model_file_name, "rb") as model_file:
model_dict = yaml.safe_load(model_file)
model_dicts.append(model_dict)
selective_builders = list(
map(
lambda m: SelectiveBuilder.from_yaml_dict(m),
model_dicts,
)
)
# While we have the model_dicts generate the supported mobile models api
gen_supported_mobile_models(model_dicts, options.output_dir)
# We may have 0 selective builders since there may not be any viable
# pt_operator_library rule marked as a dep for the pt_operator_registry rule.
# This is potentially an error, and we should probably raise an assertion
# failure here. However, this needs to be investigated further.
selective_builder = SelectiveBuilder.from_yaml_dict({})
if len(selective_builders) > 0:
selective_builder = reduce(
combine_selective_builders,
selective_builders,
)
if not options.allow_include_all_overloads:
throw_if_any_op_includes_overloads(selective_builder)
with open(
os.path.join(options.output_dir, "selected_operators.yaml"), "wb"
) as out_file:
out_file.write(
yaml.safe_dump(
selective_builder.to_dict(), default_flow_style=False
).encode("utf-8"),
)
write_selected_mobile_ops(
os.path.join(options.output_dir, "selected_mobile_ops.h"),
selective_builder,
)
if __name__ == "__main__":
main(sys.argv)
|
pytorch-master
|
tools/code_analyzer/gen_oplist.py
|
"""
This util is invoked from cmake to produce the op registration allowlist param
for `ATen/gen.py` for custom mobile build.
For custom build with dynamic dispatch, it takes the op dependency graph of ATen
and the list of root ops, and outputs all transitive dependencies of the root
ops as the allowlist.
For custom build with static dispatch, the op dependency graph will be omitted,
and it will directly output root ops as the allowlist.
"""
import argparse
from collections import defaultdict
from typing import Dict, List, Set
import yaml
DepGraph = Dict[str, Set[str]]
def canonical_name(opname: str) -> str:
# Skip the overload name part as it's not supported by code analyzer yet.
return opname.split(".", 1)[0]
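# Illustrative examples of the canonicalization above:
#   canonical_name("aten::add.Tensor") -> "aten::add"
#   canonical_name("aten::add")        -> "aten::add"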
def load_op_dep_graph(fname: str) -> DepGraph:
with open(fname, "r") as stream:
result = defaultdict(set)
for op in yaml.safe_load(stream):
op_name = canonical_name(op["name"])
for dep in op.get("depends", []):
dep_name = canonical_name(dep["name"])
result[op_name].add(dep_name)
return dict(result)
def load_root_ops(fname: str) -> List[str]:
result = []
with open(fname, "r") as stream:
for op in yaml.safe_load(stream):
result.append(canonical_name(op))
return result
def gen_transitive_closure(
dep_graph: DepGraph,
root_ops: List[str],
train: bool = False,
) -> List[str]:
result = set(root_ops)
queue = root_ops[:]
# The dependency graph might contain a special entry with key = `__BASE__`
# and value = (set of `base` ops to always include in custom build).
queue.append("__BASE__")
# The dependency graph might contain a special entry with key = `__ROOT__`
# and value = (set of ops reachable from C++ functions). Insert the special
# `__ROOT__` key to include ops which can be called from C++ code directly,
# in addition to ops that are called from TorchScript model.
# '__ROOT__' is only needed for full-jit. Keep it only for training.
# TODO: when FL is migrated from full-jit to lite trainer, remove '__ROOT__'
if train:
queue.append("__ROOT__")
while queue:
cur = queue.pop()
for dep in dep_graph.get(cur, []):
if dep not in result:
result.add(dep)
queue.append(dep)
return sorted(result)
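# Minimal sketch of the traversal above, using a hypothetical dependency graph
# with no __BASE__/__ROOT__ entries:
#   gen_transitive_closure(
#       {"aten::add": {"aten::mul"}, "aten::mul": {"aten::empty"}},
#       ["aten::add"],
#   )
#   -> ["aten::add", "aten::empty", "aten::mul"]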
def gen_transitive_closure_str(dep_graph: DepGraph, root_ops: List[str]) -> str:
return " ".join(gen_transitive_closure(dep_graph, root_ops))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Util to produce transitive dependencies for custom build"
)
parser.add_argument(
"--op-dependency",
help="input yaml file of op dependency graph "
"- can be omitted for custom build with static dispatch",
)
parser.add_argument(
"--root-ops",
required=True,
help="input yaml file of root (directly used) operators",
)
args = parser.parse_args()
deps = load_op_dep_graph(args.op_dependency) if args.op_dependency else {}
root_ops = load_root_ops(args.root_ops)
print(gen_transitive_closure_str(deps, root_ops))
|
pytorch-master
|
tools/code_analyzer/gen_op_registration_allowlist.py
|
#!/usr/bin/env python3
import time
from package.oss.cov_json import get_json_report
from package.oss.init import initialization
from package.tool.summarize_jsons import summarize_jsons
from package.util.setting import TestPlatform
from package.util.utils import print_time
def report_coverage() -> None:
start_time = time.time()
(options, test_list, interested_folders) = initialization()
# run cpp tests
get_json_report(test_list, options)
# collect coverage data from json profiles
if options.need_summary:
summarize_jsons(test_list, interested_folders, [""], TestPlatform.OSS)
# print program running time
print_time("Program Total Time: ", start_time)
if __name__ == "__main__":
report_coverage()
|
pytorch-master
|
tools/code_coverage/oss_coverage.py
|
pytorch-master
|
tools/code_coverage/package/__init__.py
|
|
pytorch-master
|
tools/code_coverage/package/util/__init__.py
|
|
import os
from enum import Enum
from typing import Dict, List, Set
# <project folder>
HOME_DIR = os.environ["HOME"]
TOOLS_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), os.path.pardir, os.path.pardir
)
# <profile folder>
PROFILE_DIR = os.path.join(TOOLS_FOLDER, "profile")
JSON_FOLDER_BASE_DIR = os.path.join(PROFILE_DIR, "json")
MERGED_FOLDER_BASE_DIR = os.path.join(PROFILE_DIR, "merged")
SUMMARY_FOLDER_DIR = os.path.join(PROFILE_DIR, "summary")
# <log path>
LOG_DIR = os.path.join(PROFILE_DIR, "log")
# test type, DO NOT change the name, it should be consistent with [buck query --output-attribute] result
class TestType(Enum):
CPP: str = "cxx_test"
PY: str = "python_test"
class Test:
name: str
target_pattern: str
test_set: str # like __aten__
test_type: TestType
def __init__(
self, name: str, target_pattern: str, test_set: str, test_type: TestType
) -> None:
self.name = name
self.target_pattern = target_pattern
self.test_set = test_set
self.test_type = test_type
TestList = List[Test]
TestStatusType = Dict[str, Set[str]]
# option
class Option:
need_build: bool = False
need_run: bool = False
need_merge: bool = False
need_export: bool = False
need_summary: bool = False
need_pytest: bool = False
# test platform
class TestPlatform(Enum):
FBCODE: str = "fbcode"
OSS: str = "oss"
# compiler type
class CompilerType(Enum):
CLANG: str = "clang"
GCC: str = "gcc"
|
pytorch-master
|
tools/code_coverage/package/util/setting.py
|
import argparse
import os
from typing import Any
from .setting import (
JSON_FOLDER_BASE_DIR,
LOG_DIR,
MERGED_FOLDER_BASE_DIR,
Option,
PROFILE_DIR,
SUMMARY_FOLDER_DIR,
)
from .utils import create_folder, get_raw_profiles_folder, remove_file
def remove_files() -> None:
# remove log
remove_file(os.path.join(LOG_DIR, "log.txt"))
def create_folders() -> None:
create_folder(
PROFILE_DIR,
MERGED_FOLDER_BASE_DIR,
JSON_FOLDER_BASE_DIR,
get_raw_profiles_folder(),
SUMMARY_FOLDER_DIR,
LOG_DIR,
)
def add_arguments_utils(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
parser.add_argument("--run", help="run the cpp test binaries", action="store_true")
parser.add_argument(
"--merge",
help="merge raw profiles (only apply to clang coverage)",
action="store_true",
)
parser.add_argument(
"--export", help="generate json report for each file", action="store_true"
)
parser.add_argument(
"--summary",
help="read json report and generate file/line-oriented summary",
action="store_true",
)
parser.add_argument(
"--interest-only",
help="Final report will be only about these folders and its sub-folders; for example: caff2/c10;",
nargs="+",
default=None,
)
parser.add_argument(
"--clean",
help="delete all files generated by coverage tool",
action="store_true",
default=False,
)
return parser
def have_option(have_stage: bool, option: int) -> int:
if have_stage:
return option
else:
return 0
def get_options(args: Any) -> Option:
option: Option = Option()
if args.__contains__("build"):
if args.build:
option.need_build = True
if args.__contains__("run"):
if args.run:
option.need_run = True
if args.__contains__("merge"):
if args.merge:
option.need_merge = True
if args.__contains__("export"):
if args.export:
option.need_export = True
if args.__contains__("summary"):
if args.summary:
option.need_summary = True
    # the user did not specify any stage (e.g. --run), so enable all stages
if not any(vars(option).values()):
option.need_build = True
option.need_run = True
option.need_merge = True
option.need_export = True
option.need_summary = True
option.need_pytest = True
return option
|
pytorch-master
|
tools/code_coverage/package/util/utils_init.py
|
import os
import shutil
import sys
import time
from typing import Any, NoReturn, Optional
from .setting import (
CompilerType,
LOG_DIR,
PROFILE_DIR,
TestList,
TestPlatform,
TestType,
)
def convert_time(seconds: float) -> str:
seconds = int(round(seconds))
seconds = seconds % (24 * 3600)
hour = seconds // 3600
seconds %= 3600
minutes = seconds // 60
seconds %= 60
return "%d:%02d:%02d" % (hour, minutes, seconds)
def print_time(message: str, start_time: float, summary_time: bool = False) -> None:
with open(os.path.join(LOG_DIR, "log.txt"), "a+") as log_file:
end_time = time.time()
print(message, convert_time(end_time - start_time), file=log_file)
if summary_time:
print("\n", file=log_file)
def print_log(*args: Any) -> None:
with open(os.path.join(LOG_DIR, "log.txt"), "a+") as log_file:
print(f"[LOG] {' '.join(args)}", file=log_file)
def print_error(*args: Any) -> None:
with open(os.path.join(LOG_DIR, "log.txt"), "a+") as log_file:
print(f"[ERROR] {' '.join(args)}", file=log_file)
def remove_file(path: str) -> None:
if os.path.exists(path):
os.remove(path)
def remove_folder(path: str) -> None:
shutil.rmtree(path)
def create_folder(*paths: Any) -> None:
for path in paths:
os.makedirs(path, exist_ok=True)
# clean up all the files generated by coverage tool
def clean_up() -> None:
# remove profile folder
remove_folder(PROFILE_DIR)
sys.exit("Clean Up Successfully!")
def convert_to_relative_path(whole_path: str, base_path: str) -> str:
# ("profile/raw", "profile") -> "raw"
if base_path not in whole_path:
raise RuntimeError(base_path + " is not in " + whole_path)
return whole_path[len(base_path) + 1 :]
def replace_extension(filename: str, ext: str) -> str:
return filename[: filename.rfind(".")] + ext
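# Illustrative example of the extension replacement above:
#   replace_extension("test_nn.profraw", ".merged") -> "test_nn.merged"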
# a file is related if it's in one of the test_list folder
def related_to_test_list(file_name: str, test_list: TestList) -> bool:
for test in test_list:
if test.name in file_name:
return True
return False
def get_raw_profiles_folder() -> str:
return os.environ.get("RAW_PROFILES_FOLDER", os.path.join(PROFILE_DIR, "raw"))
def detect_compiler_type(platform: TestPlatform) -> CompilerType:
if platform == TestPlatform.OSS:
from package.oss.utils import detect_compiler_type # type: ignore[misc]
cov_type = detect_compiler_type() # type: ignore[call-arg]
else:
from caffe2.fb.code_coverage.tool.package.fbcode.utils import ( # type: ignore[import]
detect_compiler_type,
)
cov_type = detect_compiler_type()
check_compiler_type(cov_type)
return cov_type
def get_test_name_from_whole_path(path: str) -> str:
# code_coverage_tool/profile/merged/haha.merged -> haha
start = path.rfind("/")
end = path.rfind(".")
assert start >= 0 and end >= 0
return path[start + 1 : end]
def check_compiler_type(cov_type: Optional[CompilerType]) -> None:
if cov_type is not None and cov_type in [CompilerType.GCC, CompilerType.CLANG]:
return
raise Exception(
f"Can't parse compiler type: {cov_type}.",
" Please set environment variable COMPILER_TYPE as CLANG or GCC",
)
def check_platform_type(platform_type: TestPlatform) -> None:
if platform_type in [TestPlatform.OSS, TestPlatform.FBCODE]:
return
raise Exception(
f"Can't parse platform type: {platform_type}.",
" Please set environment variable COMPILER_TYPE as OSS or FBCODE",
)
def check_test_type(test_type: str, target: str) -> None:
if test_type in [TestType.CPP.value, TestType.PY.value]:
return
raise Exception(
f"Can't parse test type: {test_type}.",
f" Please check the type of buck target: {target}",
)
def raise_no_test_found_exception(
cpp_binary_folder: str, python_binary_folder: str
) -> NoReturn:
raise RuntimeError(
f"No cpp and python tests found in folder **{cpp_binary_folder} and **{python_binary_folder}**"
)
|
pytorch-master
|
tools/code_coverage/package/util/utils.py
|
import os
import time
from ..tool import clang_coverage, gcc_coverage
from ..util.setting import TestList, TestPlatform
from ..util.utils import get_raw_profiles_folder, print_time
from .utils import get_oss_binary_file
def clang_run(tests: TestList) -> None:
start_time = time.time()
for test in tests:
# raw_file
raw_file = os.path.join(get_raw_profiles_folder(), test.name + ".profraw")
# binary file
binary_file = get_oss_binary_file(test.name, test.test_type)
clang_coverage.run_target(
binary_file, raw_file, test.test_type, TestPlatform.OSS
)
print_time("running binaries takes time: ", start_time, summary_time=True)
def gcc_run(tests: TestList) -> None:
start_time = time.time()
for test in tests:
# binary file
binary_file = get_oss_binary_file(test.name, test.test_type)
gcc_coverage.run_target(binary_file, test.test_type)
print_time("run binaries takes time: ", start_time, summary_time=True)
|
pytorch-master
|
tools/code_coverage/package/oss/run.py
|
pytorch-master
|
tools/code_coverage/package/oss/__init__.py
|
|
import os
import subprocess
from typing import List, Optional
from ..util.setting import CompilerType, TestType, TOOLS_FOLDER
from ..util.utils import print_error, remove_file
def get_oss_binary_folder(test_type: TestType) -> str:
assert test_type in {TestType.CPP, TestType.PY}
    # TODO: change the way we get the binary file -- the binary may not be in build/bin?
return os.path.join(
get_pytorch_folder(), "build/bin" if test_type == TestType.CPP else "test"
)
def get_oss_shared_library() -> List[str]:
lib_dir = os.path.join(get_pytorch_folder(), "build", "lib")
return [
os.path.join(lib_dir, lib)
for lib in os.listdir(lib_dir)
if lib.endswith(".dylib")
]
def get_oss_binary_file(test_name: str, test_type: TestType) -> str:
assert test_type in {TestType.CPP, TestType.PY}
binary_folder = get_oss_binary_folder(test_type)
binary_file = os.path.join(binary_folder, test_name)
if test_type == TestType.PY:
# add python to the command so we can directly run the script by using binary_file variable
binary_file = "python " + binary_file
return binary_file
def get_llvm_tool_path() -> str:
return os.environ.get(
"LLVM_TOOL_PATH", "/usr/local/opt/llvm/bin"
    )  # default to the llvm path on a dev server; on mac the default may be /usr/local/opt/llvm/bin
def get_pytorch_folder() -> str:
# TOOLS_FOLDER in oss: pytorch/tools/code_coverage
return os.path.abspath(
os.environ.get(
"PYTORCH_FOLDER", os.path.join(TOOLS_FOLDER, os.path.pardir, os.path.pardir)
)
)
def detect_compiler_type() -> Optional[CompilerType]:
# check if user specifies the compiler type
user_specify = os.environ.get("CXX", None)
if user_specify:
if user_specify in ["clang", "clang++"]:
return CompilerType.CLANG
elif user_specify in ["gcc", "g++"]:
return CompilerType.GCC
raise RuntimeError(f"User specified compiler is not valid {user_specify}")
# auto detect
auto_detect_result = subprocess.check_output(
["cc", "-v"], stderr=subprocess.STDOUT
).decode("utf-8")
if "clang" in auto_detect_result:
return CompilerType.CLANG
elif "gcc" in auto_detect_result:
return CompilerType.GCC
raise RuntimeError(f"Auto detected compiler is not valid {auto_detect_result}")
def clean_up_gcda() -> None:
gcda_files = get_gcda_files()
for item in gcda_files:
remove_file(item)
def get_gcda_files() -> List[str]:
folder_has_gcda = os.path.join(get_pytorch_folder(), "build")
if os.path.isdir(folder_has_gcda):
# TODO use glob
# output = glob.glob(f"{folder_has_gcda}/**/*.gcda")
output = subprocess.check_output(["find", folder_has_gcda, "-iname", "*.gcda"])
return output.decode("utf-8").split("\n")
else:
return []
def run_oss_python_test(binary_file: str) -> None:
# python test script
try:
subprocess.check_call(
binary_file, shell=True, cwd=get_oss_binary_folder(TestType.PY)
)
except subprocess.CalledProcessError:
print_error(f"Binary failed to run: {binary_file}")
|
pytorch-master
|
tools/code_coverage/package/oss/utils.py
|
from ..tool import clang_coverage
from ..util.setting import CompilerType, Option, TestList, TestPlatform
from ..util.utils import check_compiler_type
from .init import detect_compiler_type # type: ignore[attr-defined]
from .run import clang_run, gcc_run
def get_json_report(test_list: TestList, options: Option) -> None:
cov_type = detect_compiler_type()
check_compiler_type(cov_type)
if cov_type == CompilerType.CLANG:
# run
if options.need_run:
clang_run(test_list)
# merge && export
if options.need_merge:
clang_coverage.merge(test_list, TestPlatform.OSS)
if options.need_export:
clang_coverage.export(test_list, TestPlatform.OSS)
elif cov_type == CompilerType.GCC:
# run
if options.need_run:
gcc_run(test_list)
|
pytorch-master
|
tools/code_coverage/package/oss/cov_json.py
|
import argparse
import os
from typing import cast, List, Optional, Tuple
from ..util.setting import (
CompilerType,
JSON_FOLDER_BASE_DIR,
LOG_DIR,
Option,
Test,
TestList,
TestType,
)
from ..util.utils import (
clean_up,
create_folder,
print_log,
raise_no_test_found_exception,
remove_file,
remove_folder,
)
from ..util.utils_init import add_arguments_utils, create_folders, get_options
from .utils import (
clean_up_gcda,
detect_compiler_type,
get_llvm_tool_path,
get_oss_binary_folder,
get_pytorch_folder,
)
BLOCKED_PYTHON_TESTS = {
"run_test.py",
"test_dataloader.py",
"test_multiprocessing.py",
"test_multiprocessing_spawn.py",
"test_utils.py",
}
def initialization() -> Tuple[Option, TestList, List[str]]:
# create folder if not exists
create_folders()
# add arguments
parser = argparse.ArgumentParser()
parser = add_arguments_utils(parser)
parser = add_arguments_oss(parser)
# parse arguments
(options, args_interested_folder, args_run_only, arg_clean) = parse_arguments(
parser
)
# clean up
if arg_clean:
clean_up_gcda()
clean_up()
# get test lists
test_list = get_test_list(args_run_only)
# get interested folder -- final report will only over these folders
interested_folders = empty_list_if_none(args_interested_folder)
# print initialization information
print_init_info()
# remove last time's log
remove_file(os.path.join(LOG_DIR, "log.txt"))
return (options, test_list, interested_folders)
def add_arguments_oss(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
parser.add_argument(
"--run-only",
help="only run certain test(s), for example: atest test_nn.py.",
nargs="*",
default=None,
)
return parser
def parse_arguments(
parser: argparse.ArgumentParser,
) -> Tuple[Option, Optional[List[str]], Optional[List[str]], Optional[bool]]:
# parse args
args = parser.parse_args()
# get option
options = get_options(args)
return (options, args.interest_only, args.run_only, args.clean)
def get_test_list_by_type(
run_only: Optional[List[str]], test_type: TestType
) -> TestList:
test_list: TestList = []
binary_folder = get_oss_binary_folder(test_type)
g = os.walk(binary_folder)
for _, _, file_list in g:
for file_name in file_list:
if run_only is not None and file_name not in run_only:
continue
# target pattern in oss is used in printing report -- which tests we have run
test: Test = Test(
name=file_name,
target_pattern=file_name,
test_set="",
test_type=test_type,
)
test_list.append(test)
return test_list
def get_test_list(run_only: Optional[List[str]]) -> TestList:
test_list: TestList = []
# add c++ test list
test_list.extend(get_test_list_by_type(run_only, TestType.CPP))
# add python test list
py_run_only = get_python_run_only(run_only)
test_list.extend(get_test_list_by_type(py_run_only, TestType.PY))
# not find any test to run
if not test_list:
raise_no_test_found_exception(
get_oss_binary_folder(TestType.CPP), get_oss_binary_folder(TestType.PY)
)
return test_list
def empty_list_if_none(arg_interested_folder: Optional[List[str]]) -> List[str]:
if arg_interested_folder is None:
return []
# if this argument is specified, just return itself
return arg_interested_folder
def gcc_export_init() -> None:
remove_folder(JSON_FOLDER_BASE_DIR)
create_folder(JSON_FOLDER_BASE_DIR)
def get_python_run_only(args_run_only: Optional[List[str]]) -> List[str]:
# if user specifies run-only option
if args_run_only:
return args_run_only
# if not specified, use default setting, different for gcc and clang
if detect_compiler_type() == CompilerType.GCC:
return ["run_test.py"]
else:
        # for clang, some tests produce intermediate files that are too large for llvm to merge, so we need to skip them
run_only: List[str] = []
binary_folder = get_oss_binary_folder(TestType.PY)
g = os.walk(binary_folder)
for _, _, file_list in g:
for file_name in file_list:
if file_name in BLOCKED_PYTHON_TESTS or not file_name.endswith(".py"):
continue
run_only.append(file_name)
# only run tests in the first-level folder in test/
break
return run_only
def print_init_info() -> None:
print_log("pytorch folder: ", get_pytorch_folder())
print_log("cpp test binaries folder: ", get_oss_binary_folder(TestType.CPP))
print_log("python test scripts folder: ", get_oss_binary_folder(TestType.PY))
print_log("compiler type: ", cast(CompilerType, detect_compiler_type()).value)
print_log(
"llvm tool folder (only for clang, if you are using gcov please ignore it): ",
get_llvm_tool_path(),
)
|
pytorch-master
|
tools/code_coverage/package/oss/init.py
|
import json
import os
import time
from typing import Any, Dict, List, Set, Tuple
from ..util.setting import (
CompilerType,
JSON_FOLDER_BASE_DIR,
TestList,
TestPlatform,
TestStatusType,
)
from ..util.utils import (
detect_compiler_type,
print_error,
print_time,
related_to_test_list,
)
from .parser.coverage_record import CoverageRecord
from .parser.gcov_coverage_parser import GcovCoverageParser
from .parser.llvm_coverage_parser import LlvmCoverageParser
from .print_report import (
file_oriented_report,
html_oriented_report,
line_oriented_report,
)
# coverage_records: Dict[str, LineInfo] = dict()
covered_lines: Dict[str, Set[int]] = {}
uncovered_lines: Dict[str, Set[int]] = {}
tests_type: TestStatusType = {"success": set(), "partial": set(), "fail": set()}
def transform_file_name(
file_path: str, interested_folders: List[str], platform: TestPlatform
) -> str:
remove_patterns: Set[str] = {".DEFAULT.cpp", ".AVX.cpp", ".AVX2.cpp"}
for pattern in remove_patterns:
file_path = file_path.replace(pattern, "")
    # if the user has specified interested folders
if interested_folders:
for folder in interested_folders:
if folder in file_path:
return file_path[file_path.find(folder) :]
# remove pytorch base folder path
if platform == TestPlatform.OSS:
from package.oss.utils import get_pytorch_folder
        pytorch_folder = get_pytorch_folder()
        assert file_path.startswith(pytorch_folder)
        file_path = file_path[len(pytorch_folder) + 1 :]
return file_path
def is_intrested_file(
file_path: str, interested_folders: List[str], platform: TestPlatform
) -> bool:
ignored_patterns = ["cuda", "aten/gen_aten", "aten/aten_", "build/"]
if any([pattern in file_path for pattern in ignored_patterns]):
return False
    # ignore files that do not belong to pytorch
if platform == TestPlatform.OSS:
from package.oss.utils import get_pytorch_folder
if not file_path.startswith(get_pytorch_folder()):
return False
    # if the user has specified interested folders
if interested_folders:
for folder in interested_folders:
            interested_folder_path = folder if folder.endswith("/") else f"{folder}/"
            if interested_folder_path in file_path:
return True
return False
else:
return True
def get_json_obj(json_file: str) -> Tuple[Any, int]:
"""
    Sometimes at the start of the file llvm/gcov complains "fail to find coverage data";
    we need to skip these lines:
    -- success read: 0 - this json file has the full json coverage information
    -- partial success: 1 - this json file starts with some error prompt, but still has the coverage information
    -- fail to read: 2 - this json file doesn't have any coverage information
"""
read_status = -1
with open(json_file) as f:
lines = f.readlines()
for line in lines:
try:
json_obj = json.loads(line)
except json.JSONDecodeError:
read_status = 1
continue
else:
if read_status == -1:
                    # no JSONDecodeError encountered before this line, so mark as a full success
read_status = 0
return (json_obj, read_status)
return None, 2
def parse_json(json_file: str, platform: TestPlatform) -> List[CoverageRecord]:
print("start parse:", json_file)
json_obj, read_status = get_json_obj(json_file)
if read_status == 0:
tests_type["success"].add(json_file)
elif read_status == 1:
tests_type["partial"].add(json_file)
else:
tests_type["fail"].add(json_file)
raise RuntimeError(
"Fail to do code coverage! Fail to load json file: ", json_file
)
cov_type = detect_compiler_type(platform)
coverage_records: List[CoverageRecord] = []
if cov_type == CompilerType.CLANG:
coverage_records = LlvmCoverageParser(json_obj).parse("fbcode")
# print(coverage_records)
elif cov_type == CompilerType.GCC:
coverage_records = GcovCoverageParser(json_obj).parse()
return coverage_records
def parse_jsons(
test_list: TestList, interested_folders: List[str], platform: TestPlatform
) -> None:
g = os.walk(JSON_FOLDER_BASE_DIR)
for path, _, file_list in g:
for file_name in file_list:
if file_name.endswith(".json"):
                # if the compiler is clang, we only analyze related jsons; when the compiler is gcc, we analyze all jsons
cov_type = detect_compiler_type(platform)
if cov_type == CompilerType.CLANG and not related_to_test_list(
file_name, test_list
):
continue
json_file = os.path.join(path, file_name)
try:
coverage_records = parse_json(json_file, platform)
except RuntimeError:
print_error("Fail to load json file: ", json_file)
continue
# collect information from each target's export file and merge them together:
update_coverage(coverage_records, interested_folders, platform)
def update_coverage(
coverage_records: List[CoverageRecord],
interested_folders: List[str],
platform: TestPlatform,
) -> None:
for item in coverage_records:
# extract information for the record
record = item.to_dict()
file_path = record["filepath"]
if not is_intrested_file(file_path, interested_folders, platform):
continue
covered_range = record["covered_lines"]
uncovered_range = record["uncovered_lines"]
# transform file name: remote/13223/caffe2/aten -> caffe2/aten
file_path = transform_file_name(file_path, interested_folders, platform)
# if file not exists, add it into dictionary
if file_path not in covered_lines:
covered_lines[file_path] = set()
if file_path not in uncovered_lines:
uncovered_lines[file_path] = set()
# update this file's covered and uncovered lines
if covered_range is not None:
covered_lines[file_path].update(covered_range)
if uncovered_range is not None:
uncovered_lines[file_path].update(uncovered_range)
def update_set() -> None:
for file_name in covered_lines:
# difference_update
uncovered_lines[file_name].difference_update(covered_lines[file_name])
def summarize_jsons(
test_list: TestList,
interested_folders: List[str],
coverage_only: List[str],
platform: TestPlatform,
) -> None:
start_time = time.time()
if detect_compiler_type(platform) == CompilerType.GCC:
html_oriented_report()
else:
parse_jsons(test_list, interested_folders, platform)
update_set()
line_oriented_report(
test_list,
tests_type,
interested_folders,
coverage_only,
covered_lines,
uncovered_lines,
)
file_oriented_report(
test_list,
tests_type,
interested_folders,
coverage_only,
covered_lines,
uncovered_lines,
)
print_time("summary jsons take time: ", start_time)
|
pytorch-master
|
tools/code_coverage/package/tool/summarize_jsons.py
|
pytorch-master
|
tools/code_coverage/package/tool/__init__.py
|
|
import os
import subprocess
import time
from typing import Dict
# gcc is only used in oss
from ..oss.utils import get_gcda_files, run_oss_python_test
from ..util.setting import JSON_FOLDER_BASE_DIR, TestType
from ..util.utils import print_log, print_time
from .utils import run_cpp_test
def update_gzip_dict(gzip_dict: Dict[str, int], file_name: str) -> str:
file_name = file_name.lower()
gzip_dict[file_name] = gzip_dict.get(file_name, 0) + 1
num = gzip_dict[file_name]
return str(num) + "_" + file_name
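# Illustrative behaviour of the de-duplication above (the file name is made up):
#   gzip_dict = {}
#   update_gzip_dict(gzip_dict, "Utils.cpp.gcov.json.gz")  # -> "1_utils.cpp.gcov.json.gz"
#   update_gzip_dict(gzip_dict, "utils.cpp.gcov.json.gz")  # -> "2_utils.cpp.gcov.json.gz"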
def run_target(binary_file: str, test_type: TestType) -> None:
print_log("start run", test_type.value, "test: ", binary_file)
start_time = time.time()
assert test_type in {TestType.CPP, TestType.PY}
if test_type == TestType.CPP:
run_cpp_test(binary_file)
else:
run_oss_python_test(binary_file)
print_time(" time: ", start_time)
def export() -> None:
start_time = time.time()
# collect .gcda files
gcda_files = get_gcda_files()
    # files like utils.cpp may have the same name in different folders
gzip_dict: Dict[str, int] = {}
for gcda_item in gcda_files:
# generate json.gz
subprocess.check_call(["gcov", "-i", gcda_item])
# cp json.gz to profile/json folder
gz_file_name = os.path.basename(gcda_item) + ".gcov.json.gz"
new_file_path = os.path.join(
JSON_FOLDER_BASE_DIR, update_gzip_dict(gzip_dict, gz_file_name)
)
os.rename(gz_file_name, new_file_path)
# unzip json.gz to json
subprocess.check_output(["gzip", "-d", new_file_path])
print_time("export take time: ", start_time, summary_time=True)
|
pytorch-master
|
tools/code_coverage/package/tool/gcc_coverage.py
|
import os
import subprocess
import time
from typing import List
from ..util.setting import (
JSON_FOLDER_BASE_DIR,
MERGED_FOLDER_BASE_DIR,
TestList,
TestPlatform,
TestType,
)
from ..util.utils import (
check_platform_type,
convert_to_relative_path,
create_folder,
get_raw_profiles_folder,
get_test_name_from_whole_path,
print_log,
print_time,
related_to_test_list,
replace_extension,
)
from .utils import get_tool_path_by_platform, run_cpp_test
def create_corresponding_folder(
cur_path: str, prefix_cur_path: str, dir_list: List[str], new_base_folder: str
) -> None:
for dir_name in dir_list:
relative_path = convert_to_relative_path(
cur_path, prefix_cur_path
) # get folder name like 'aten'
new_folder_path = os.path.join(new_base_folder, relative_path, dir_name)
create_folder(new_folder_path)
def run_target(
binary_file: str, raw_file: str, test_type: TestType, platform_type: TestPlatform
) -> None:
print_log("start run: ", binary_file)
# set environment variable -- raw profile output path of the binary run
os.environ["LLVM_PROFILE_FILE"] = raw_file
# run binary
if test_type == TestType.PY and platform_type == TestPlatform.OSS:
from ..oss.utils import run_oss_python_test
run_oss_python_test(binary_file)
else:
run_cpp_test(binary_file)
def merge_target(raw_file: str, merged_file: str, platform_type: TestPlatform) -> None:
print_log("start to merge target: ", raw_file)
# run command
llvm_tool_path = get_tool_path_by_platform(platform_type)
subprocess.check_call(
[
f"{llvm_tool_path}/llvm-profdata",
"merge",
"-sparse",
raw_file,
"-o",
merged_file,
]
)
def export_target(
merged_file: str,
json_file: str,
binary_file: str,
shared_library_list: List[str],
platform_type: TestPlatform,
) -> None:
if binary_file is None:
raise Exception(f"{merged_file} doesn't have corresponding binary!")
print_log("start to export: ", merged_file)
# run export
cmd_shared_library = (
""
if not shared_library_list
else f" -object {' -object '.join(shared_library_list)}"
)
# if binary_file = "", then no need to add it (python test)
cmd_binary = "" if not binary_file else f" -object {binary_file} "
llvm_tool_path = get_tool_path_by_platform(platform_type)
cmd = f"{llvm_tool_path}/llvm-cov export {cmd_binary} {cmd_shared_library} -instr-profile={merged_file} > {json_file}"
os.system(cmd)
def merge(test_list: TestList, platform_type: TestPlatform) -> None:
print("start merge")
start_time = time.time()
# find all raw profile under raw_folder and sub-folders
raw_folder_path = get_raw_profiles_folder()
g = os.walk(raw_folder_path)
for path, dir_list, file_list in g:
# if there is a folder raw/aten/, create corresponding merged folder profile/merged/aten/ if not exists yet
create_corresponding_folder(
path, raw_folder_path, dir_list, MERGED_FOLDER_BASE_DIR
)
# check if we can find raw profile under this path's folder
for file_name in file_list:
if file_name.endswith(".profraw"):
if not related_to_test_list(file_name, test_list):
continue
print(f"start merge {file_name}")
raw_file = os.path.join(path, file_name)
merged_file_name = replace_extension(file_name, ".merged")
merged_file = os.path.join(
MERGED_FOLDER_BASE_DIR,
convert_to_relative_path(path, raw_folder_path),
merged_file_name,
)
merge_target(raw_file, merged_file, platform_type)
print_time("merge take time: ", start_time, summary_time=True)
def export(test_list: TestList, platform_type: TestPlatform) -> None:
print("start export")
start_time = time.time()
# find all merged profile under merged_folder and sub-folders
g = os.walk(MERGED_FOLDER_BASE_DIR)
for path, dir_list, file_list in g:
# create corresponding merged folder in [json folder] if not exists yet
create_corresponding_folder(
path, MERGED_FOLDER_BASE_DIR, dir_list, JSON_FOLDER_BASE_DIR
)
# check if we can find merged profile under this path's folder
for file_name in file_list:
if file_name.endswith(".merged"):
if not related_to_test_list(file_name, test_list):
continue
print(f"start export {file_name}")
# merged file
merged_file = os.path.join(path, file_name)
# json file
json_file_name = replace_extension(file_name, ".json")
json_file = os.path.join(
JSON_FOLDER_BASE_DIR,
convert_to_relative_path(path, MERGED_FOLDER_BASE_DIR),
json_file_name,
)
check_platform_type(platform_type)
# binary file and shared library
binary_file = ""
shared_library_list = []
if platform_type == TestPlatform.FBCODE:
from caffe2.fb.code_coverage.tool.package.fbcode.utils import ( # type: ignore[import]
get_fbcode_binary_folder,
)
binary_file = os.path.join(
get_fbcode_binary_folder(path),
get_test_name_from_whole_path(merged_file),
)
elif platform_type == TestPlatform.OSS:
from ..oss.utils import get_oss_binary_file, get_oss_shared_library
test_name = get_test_name_from_whole_path(merged_file)
# if it is python test, no need to provide binary, shared library is enough
binary_file = (
""
if test_name.endswith(".py")
else get_oss_binary_file(test_name, TestType.CPP)
)
shared_library_list = get_oss_shared_library()
export_target(
merged_file,
json_file,
binary_file,
shared_library_list,
platform_type,
)
print_time("export take time: ", start_time, summary_time=True)
|
pytorch-master
|
tools/code_coverage/package/tool/clang_coverage.py
|
import subprocess
from ..util.setting import TestPlatform
from ..util.utils import print_error
def run_cpp_test(binary_file: str) -> None:
# cpp test binary
try:
subprocess.check_call(binary_file)
except subprocess.CalledProcessError:
print_error(f"Binary failed to run: {binary_file}")
def get_tool_path_by_platform(platform: TestPlatform) -> str:
if platform == TestPlatform.FBCODE:
from caffe2.fb.code_coverage.tool.package.fbcode.utils import ( # type: ignore[import]
get_llvm_tool_path,
)
return get_llvm_tool_path() # type: ignore[no-any-return]
else:
from ..oss.utils import get_llvm_tool_path # type: ignore[no-redef]
return get_llvm_tool_path() # type: ignore[no-any-return]
|
pytorch-master
|
tools/code_coverage/package/tool/utils.py
|
import os
import subprocess
from typing import Dict, IO, List, Set, Tuple
from ..oss.utils import get_pytorch_folder
from ..util.setting import SUMMARY_FOLDER_DIR, TestList, TestStatusType
CoverageItem = Tuple[str, float, int, int]
def key_by_percentage(x: CoverageItem) -> float:
return x[1]
def key_by_name(x: CoverageItem) -> str:
return x[0]
def is_intrested_file(file_path: str, interested_folders: List[str]) -> bool:
if "cuda" in file_path:
return False
if "aten/gen_aten" in file_path or "aten/aten_" in file_path:
return False
for folder in interested_folders:
if folder in file_path:
return True
return False
def is_this_type_of_tests(target_name: str, test_set_by_type: Set[str]) -> bool:
# tests are divided into three types: success / partial success / fail to collect coverage
for test in test_set_by_type:
if target_name in test:
return True
return False
def print_test_by_type(
tests: TestList, test_set_by_type: Set[str], type_name: str, summary_file: IO[str]
) -> None:
print("Tests " + type_name + " to collect coverage:", file=summary_file)
for test in tests:
if is_this_type_of_tests(test.name, test_set_by_type):
print(test.target_pattern, file=summary_file)
print(file=summary_file)
def print_test_condition(
tests: TestList,
tests_type: TestStatusType,
interested_folders: List[str],
coverage_only: List[str],
summary_file: IO[str],
summary_type: str,
) -> None:
print_test_by_type(tests, tests_type["success"], "fully success", summary_file)
print_test_by_type(tests, tests_type["partial"], "partially success", summary_file)
print_test_by_type(tests, tests_type["fail"], "failed", summary_file)
print(
"\n\nCoverage Collected Over Interested Folders:\n",
interested_folders,
file=summary_file,
)
print(
"\n\nCoverage Compilation Flags Only Apply To: \n",
coverage_only,
file=summary_file,
)
print(
"\n\n---------------------------------- "
+ summary_type
+ " ----------------------------------",
file=summary_file,
)
def line_oriented_report(
tests: TestList,
tests_type: TestStatusType,
interested_folders: List[str],
coverage_only: List[str],
covered_lines: Dict[str, Set[int]],
uncovered_lines: Dict[str, Set[int]],
) -> None:
with open(os.path.join(SUMMARY_FOLDER_DIR, "line_summary"), "w+") as report_file:
print_test_condition(
tests,
tests_type,
interested_folders,
coverage_only,
report_file,
"LINE SUMMARY",
)
for file_name in covered_lines:
covered = covered_lines[file_name]
uncovered = uncovered_lines[file_name]
print(
f"{file_name}\n covered lines: {sorted(covered)}\n unconvered lines:{sorted(uncovered)}",
file=report_file,
)
def print_file_summary(
covered_summary: int, total_summary: int, summary_file: IO[str]
) -> float:
# print summary first
try:
coverage_percentage = 100.0 * covered_summary / total_summary
except ZeroDivisionError:
coverage_percentage = 0
print(
f"SUMMARY\ncovered: {covered_summary}\nuncovered: {total_summary}\npercentage: {coverage_percentage:.2f}%\n\n",
file=summary_file,
)
if coverage_percentage == 0:
print("Coverage is 0, Please check if json profiles are valid")
return coverage_percentage
def print_file_oriented_report(
tests_type: TestStatusType,
coverage: List[CoverageItem],
covered_summary: int,
total_summary: int,
summary_file: IO[str],
tests: TestList,
interested_folders: List[str],
coverage_only: List[str],
) -> None:
coverage_percentage = print_file_summary(
covered_summary, total_summary, summary_file
)
    # print test condition (interested folders / tests that are successful or failed)
print_test_condition(
tests,
tests_type,
interested_folders,
coverage_only,
summary_file,
"FILE SUMMARY",
)
# print each file's information
for item in coverage:
print(
item[0].ljust(75),
(str(item[1]) + "%").rjust(10),
str(item[2]).rjust(10),
str(item[3]).rjust(10),
file=summary_file,
)
print(f"summary percentage:{coverage_percentage:.2f}%")
def file_oriented_report(
tests: TestList,
tests_type: TestStatusType,
interested_folders: List[str],
coverage_only: List[str],
covered_lines: Dict[str, Set[int]],
uncovered_lines: Dict[str, Set[int]],
) -> None:
with open(os.path.join(SUMMARY_FOLDER_DIR, "file_summary"), "w+") as summary_file:
covered_summary = 0
total_summary = 0
coverage = []
for file_name in covered_lines:
# get coverage number for this file
covered_count = len(covered_lines[file_name])
total_count = covered_count + len(uncovered_lines[file_name])
try:
percentage = round(covered_count / total_count * 100, 2)
except ZeroDivisionError:
percentage = 0
# store information in a list to be sorted
coverage.append((file_name, percentage, covered_count, total_count))
# update summary
covered_summary = covered_summary + covered_count
total_summary = total_summary + total_count
# sort
coverage.sort(key=key_by_name)
coverage.sort(key=key_by_percentage)
# print
print_file_oriented_report(
tests_type,
coverage,
covered_summary,
total_summary,
summary_file,
tests,
interested_folders,
coverage_only,
)
def get_html_ignored_pattern() -> List[str]:
return ["/usr/*", "*anaconda3/*", "*third_party/*"]
def html_oriented_report() -> None:
# use lcov to generate the coverage report
build_folder = os.path.join(get_pytorch_folder(), "build")
coverage_info_file = os.path.join(SUMMARY_FOLDER_DIR, "coverage.info")
    # generate the coverage report -- coverage.info in the summary folder
subprocess.check_call(
[
"lcov",
"--capture",
"--directory",
build_folder,
"--output-file",
coverage_info_file,
]
)
# remove files that are unrelated
cmd_array = (
["lcov", "--remove", coverage_info_file]
+ get_html_ignored_pattern()
+ ["--output-file", coverage_info_file]
)
subprocess.check_call(
# ["lcov", "--remove", coverage_info_file, "--output-file", coverage_info_file]
cmd_array
)
# generate beautiful html page
subprocess.check_call(
[
"genhtml",
coverage_info_file,
"--output-directory",
os.path.join(SUMMARY_FOLDER_DIR, "html_report"),
]
)
|
pytorch-master
|
tools/code_coverage/package/tool/print_report.py
|
from typing import List, NamedTuple, Optional, Tuple
class LlvmCoverageSegment(NamedTuple):
line: int
col: int
segment_count: int
has_count: int
is_region_entry: int
is_gap_entry: Optional[int]
@property
def has_coverage(self) -> bool:
return self.segment_count > 0
@property
def is_executable(self) -> bool:
return self.has_count > 0
def get_coverage(
self, prev_segment: "LlvmCoverageSegment"
) -> Tuple[List[int], List[int]]:
# Code adapted from testpilot.testinfra.runners.gtestcoveragerunner.py
if not prev_segment.is_executable:
return [], []
        # The previous segment's range ends at self.line if col == 1 (this
        # segment starts at the beginning of the line), and at self.line + 1 if
        # col > 1 (the previous segment still touches part of this line).
end_of_segment = self.line if self.col == 1 else self.line + 1
lines_range = list(range(prev_segment.line, end_of_segment))
return (lines_range, []) if prev_segment.has_coverage else ([], lines_range)
def parse_segments(raw_segments: List[List[int]]) -> List[LlvmCoverageSegment]:
"""
    Creates LlvmCoverageSegments from a list of lists in llvm-cov export json.
    Each segment is represented by a 5- or 6-element array.
    """
    ret: List[LlvmCoverageSegment] = []
    for raw_segment in raw_segments:
        assert len(raw_segment) in (5, 6), (
            "list is not compatible with llvm-cov export: "
            "expected to have 5 or 6 elements"
        )
if len(raw_segment) == 5:
ret.append(
LlvmCoverageSegment(
raw_segment[0],
raw_segment[1],
raw_segment[2],
raw_segment[3],
raw_segment[4],
None,
)
)
else:
ret.append(LlvmCoverageSegment(*raw_segment))
return ret
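# Hypothetical usage sketch (not part of the original module): the raw segment
# arrays below are illustrative only, mirroring the 5-element layout
# [line, col, segment_count, has_count, is_region_entry] parsed above.
if __name__ == "__main__":
    example_raw = [[1, 1, 3, 1, 1], [4, 1, 0, 1, 0], [6, 1, 0, 0, 0]]
    covered: List[int] = []
    uncovered: List[int] = []
    prev = LlvmCoverageSegment(1, 0, 0, 0, 0, None)
    for seg in parse_segments(example_raw):
        got_covered, got_uncovered = seg.get_coverage(prev)
        covered.extend(got_covered)
        uncovered.extend(got_uncovered)
        prev = seg
    # With the sample above, lines 1-3 end up covered and lines 4-5 uncovered.
    print(sorted(covered), sorted(uncovered))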
|
pytorch-master
|
tools/code_coverage/package/tool/parser/llvm_coverage_segment.py
|
from typing import Any, Dict, List, Set, Tuple
from .coverage_record import CoverageRecord
from .llvm_coverage_segment import LlvmCoverageSegment, parse_segments
class LlvmCoverageParser:
"""
Accepts a parsed json produced by llvm-cov export -- typically,
representing a single C++ test and produces a list
of CoverageRecord(s).
"""
def __init__(self, llvm_coverage: Dict[str, Any]) -> None:
self._llvm_coverage = llvm_coverage
@staticmethod
def _skip_coverage(path: str) -> bool:
"""
Returns True if file path should not be processed.
This is repo-specific and only makes sense for the current state of
ovrsource.
"""
if "/third-party/" in path:
return True
return False
@staticmethod
def _collect_coverage(
segments: List[LlvmCoverageSegment],
) -> Tuple[List[int], List[int]]:
"""
Stateful parsing of coverage segments.
"""
covered_lines: Set[int] = set()
uncovered_lines: Set[int] = set()
prev_segment = LlvmCoverageSegment(1, 0, 0, 0, 0, None)
for segment in segments:
covered_range, uncovered_range = segment.get_coverage(prev_segment)
covered_lines.update(covered_range)
uncovered_lines.update(uncovered_range)
prev_segment = segment
uncovered_lines.difference_update(covered_lines)
return sorted(covered_lines), sorted(uncovered_lines)
def parse(self, repo_name: str) -> List[CoverageRecord]:
# The JSON format is described in the LLVM source code
# https://github.com/llvm-mirror/llvm/blob/master/tools/llvm-cov/CoverageExporterJson.cpp
records: List[CoverageRecord] = []
for export_unit in self._llvm_coverage["data"]:
for file_info in export_unit["files"]:
filepath = file_info["filename"]
                if filepath is None:
                    continue
                if self._skip_coverage(filepath):
                    continue
segments = file_info["segments"]
covered_lines, uncovered_lines = self._collect_coverage(
parse_segments(segments)
)
records.append(CoverageRecord(filepath, covered_lines, uncovered_lines))
return records
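# Hypothetical usage sketch (not part of the original module): `fake_export`
# imitates the shape of `llvm-cov export` JSON just closely enough for the
# parser above; real exports carry many more fields.
if __name__ == "__main__":
    fake_export: Dict[str, Any] = {
        "data": [
            {
                "files": [
                    {
                        "filename": "aten/src/ATen/native/Example.cpp",
                        # [line, col, segment_count, has_count, is_region_entry]
                        "segments": [[1, 1, 2, 1, 1], [3, 1, 0, 1, 0], [5, 1, 0, 0, 0]],
                    }
                ]
            }
        ]
    }
    # Prints a record with covered lines [1, 2] and uncovered lines [3, 4].
    for record in LlvmCoverageParser(fake_export).parse("pytorch"):
        print(record.to_dict())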
|
pytorch-master
|
tools/code_coverage/package/tool/parser/llvm_coverage_parser.py
|
from typing import Any, Dict, List, Set
from .coverage_record import CoverageRecord
class GcovCoverageParser:
"""
Accepts a parsed json produced by gcov --json-format -- typically,
representing a single C++ test and produces a list
of CoverageRecord(s).
"""
def __init__(self, llvm_coverage: Dict[str, Any]) -> None:
self._llvm_coverage = llvm_coverage
@staticmethod
def _skip_coverage(path: str) -> bool:
"""
Returns True if file path should not be processed.
This is repo-specific and only makes sense for the current state of
ovrsource.
"""
if "third-party" in path:
return True
return False
def parse(self) -> List[CoverageRecord]:
# The JSON format is described in the gcov source code
# https://gcc.gnu.org/onlinedocs/gcc/Invoking-Gcov.html
records: List[CoverageRecord] = []
for file_info in self._llvm_coverage["files"]:
filepath = file_info["file"]
if self._skip_coverage(filepath):
continue
# parse json file
covered_lines: Set[int] = set()
uncovered_lines: Set[int] = set()
for line in file_info["lines"]:
line_number = line["line_number"]
count = line["count"]
if count == 0:
uncovered_lines.update([line_number])
else:
covered_lines.update([line_number])
records.append(
CoverageRecord(filepath, sorted(covered_lines), sorted(uncovered_lines))
)
return records
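# Hypothetical usage sketch (not part of the original module): `fake_gcov`
# mimics a tiny slice of `gcov --json-format` output.
if __name__ == "__main__":
    fake_gcov: Dict[str, Any] = {
        "files": [
            {
                "file": "c10/util/Example.cpp",
                "lines": [
                    {"line_number": 10, "count": 4},
                    {"line_number": 11, "count": 0},
                ],
            }
        ]
    }
    # Prints a record with covered line 10 and uncovered line 11.
    for record in GcovCoverageParser(fake_gcov).parse():
        print(record.to_dict())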
|
pytorch-master
|
tools/code_coverage/package/tool/parser/gcov_coverage_parser.py
|
pytorch-master
|
tools/code_coverage/package/tool/parser/__init__.py
|
|
import typing as t
class CoverageRecord(t.NamedTuple):
filepath: str
covered_lines: t.List[int]
uncovered_lines: t.Optional[t.List[int]] = None
def to_dict(self) -> t.Dict[str, t.Any]:
return {
"filepath": self.filepath,
"covered_lines": self.covered_lines,
"uncovered_lines": self.uncovered_lines,
}
|
pytorch-master
|
tools/code_coverage/package/tool/parser/coverage_record.py
|
#!/usr/bin/env python3
import datetime
import json
import signal
import time
from typing import Any, Dict, List
import psutil # type: ignore[import]
import pynvml # type: ignore[import]
def get_processes_running_python_tests() -> List[Any]:
python_processes = []
for process in psutil.process_iter():
try:
if "python" in process.name() and process.cmdline():
python_processes.append(process)
except (psutil.NoSuchProcess, psutil.AccessDenied):
# access denied or the process died
pass
return python_processes
def get_per_process_cpu_info() -> List[Dict[str, Any]]:
processes = get_processes_running_python_tests()
per_process_info = []
for p in processes:
info = {
"pid": p.pid,
"cmd": " ".join(p.cmdline()),
"cpu_percent": p.cpu_percent(),
"rss_memory": p.memory_info().rss,
"uss_memory": p.memory_full_info().uss,
}
if "pss" in p.memory_full_info():
# only availiable in linux
info["pss_memory"] = p.memory_full_info().pss
per_process_info.append(info)
return per_process_info
def get_per_process_gpu_info(handle: Any) -> List[Dict[str, Any]]:
processes = pynvml.nvmlDeviceGetComputeRunningProcesses(handle)
per_process_info = []
for p in processes:
info = {"pid": p.pid, "gpu_memory": p.usedGpuMemory}
per_process_info.append(info)
return per_process_info
if __name__ == "__main__":
handle = None
try:
pynvml.nvmlInit()
handle = pynvml.nvmlDeviceGetHandleByIndex(0)
except pynvml.NVMLError:
        # no pynvml available, probably because this is not a CUDA machine
pass
kill_now = False
def exit_gracefully(*args: Any) -> None:
global kill_now
kill_now = True
signal.signal(signal.SIGTERM, exit_gracefully)
while not kill_now:
try:
stats = {
"time": datetime.datetime.utcnow().isoformat("T") + "Z",
"total_cpu_percent": psutil.cpu_percent(),
"per_process_cpu_info": get_per_process_cpu_info(),
}
if handle is not None:
stats["per_process_gpu_info"] = get_per_process_gpu_info(handle)
stats["total_gpu_utilizaiton"] = pynvml.nvmlDeviceGetUtilizationRates(
handle
).gpu
except Exception as e:
stats = {
"time": datetime.datetime.utcnow().isoformat("T") + "Z",
"error": str(e),
}
finally:
print(json.dumps(stats))
time.sleep(1)
|
pytorch-master
|
tools/stats/monitor.py
|
#!/usr/bin/env python3
import datetime
import json
import os
import pathlib
import re
from typing import Any, Callable, cast, Dict, List, Optional
from urllib.request import urlopen
def get_disabled_issues() -> List[str]:
pr_body = os.getenv("PR_BODY", "")
commit_messages = os.getenv("COMMIT_MESSAGES", "")
# The below regex is meant to match all *case-insensitive* keywords that
# GitHub has delineated would link PRs to issues, more details here:
# https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue.
# E.g., "Close #62851", "fixES #62851" and "RESOLVED #62851" would all match, but not
# "closes #62851" --> extra space, "fixing #62851" --> not a keyword, nor "fix 62851" --> no #
regex = "(?i)(Close(d|s)?|Resolve(d|s)?|Fix(ed|es)?) (#|https://github.com/pytorch/pytorch/issues/)([0-9]+)"
issue_numbers = [x[5] for x in re.findall(regex, pr_body + commit_messages)]
print("Ignoring disabled issues: ", issue_numbers)
return issue_numbers
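# Illustrative example (hypothetical, not part of the original module): with
#   PR_BODY="Fixes #62851"
#   COMMIT_MESSAGES="Resolves https://github.com/pytorch/pytorch/issues/123"
# get_disabled_issues() returns ["62851", "123"], while phrases such as
# "fixing #62851" or "fix 62851" are not picked up by the regex above.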
IGNORE_DISABLED_ISSUES: List[str] = get_disabled_issues()
SLOW_TESTS_FILE = ".pytorch-slow-tests.json"
DISABLED_TESTS_FILE = ".pytorch-disabled-tests.json"
FILE_CACHE_LIFESPAN_SECONDS = datetime.timedelta(hours=3).seconds
def fetch_and_cache(
dirpath: str,
name: str,
url: str,
process_fn: Callable[[Dict[str, Any]], Dict[str, Any]],
) -> Dict[str, Any]:
"""
    This fetch-and-cache utility allows sharing results between different processes.
"""
path = os.path.join(dirpath, name)
print(f"Downloading {url} to {path}")
def is_cached_file_valid() -> bool:
# Check if the file is new enough (see: FILE_CACHE_LIFESPAN_SECONDS). A real check
# could make a HEAD request and check/store the file's ETag
fname = pathlib.Path(path)
now = datetime.datetime.now()
mtime = datetime.datetime.fromtimestamp(fname.stat().st_mtime)
diff = now - mtime
return diff.total_seconds() < FILE_CACHE_LIFESPAN_SECONDS
if os.path.exists(path) and is_cached_file_valid():
        # Another test process already downloaded the file, so don't re-do it
with open(path, "r") as f:
return cast(Dict[str, Any], json.load(f))
for _ in range(3):
try:
contents = urlopen(url, timeout=5).read().decode("utf-8")
processed_contents = process_fn(json.loads(contents))
with open(path, "w") as f:
f.write(json.dumps(processed_contents))
return processed_contents
except Exception as e:
print(f"Could not download {url} because: {e}.")
print(f"All retries exhausted, downloading {url} failed.")
return {}
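# Hypothetical usage sketch (not part of the original module): cache a JSON
# endpoint on disk for up to FILE_CACHE_LIFESPAN_SECONDS, passing an identity
# process_fn when no post-processing is needed.
#
#   stats = fetch_and_cache(
#       dirpath="/tmp",
#       name=SLOW_TESTS_FILE,
#       url="https://raw.githubusercontent.com/pytorch/test-infra/generated-stats/stats/slow-tests.json",
#       process_fn=lambda x: x,
#   )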
def get_slow_tests(
dirpath: str, filename: str = SLOW_TESTS_FILE
) -> Optional[Dict[str, float]]:
url = "https://raw.githubusercontent.com/pytorch/test-infra/generated-stats/stats/slow-tests.json"
try:
return fetch_and_cache(dirpath, filename, url, lambda x: x)
except Exception:
print("Couldn't download slow test set, leaving all tests enabled...")
return {}
def get_test_times(dirpath: str, filename: str) -> Dict[str, Dict[str, float]]:
url = "https://raw.githubusercontent.com/pytorch/test-infra/generated-stats/stats/test-times.json"
build_environment = os.environ.get("BUILD_ENVIRONMENT")
if build_environment is None:
test_times = fetch_and_cache(dirpath, filename, url, lambda x: x)
raise RuntimeError(
f"BUILD_ENVIRONMENT is not defined, available keys are {test_times.keys()}"
)
def process_response(the_response: Dict[str, Any]) -> Any:
if build_environment not in the_response:
raise RuntimeError(
f"{build_environment} not found, available envs are: {the_response.keys()}"
)
return the_response[build_environment]
try:
return fetch_and_cache(dirpath, filename, url, process_response)
except Exception:
print("Couldn't download test times...")
return {}
def get_disabled_tests(
dirpath: str, filename: str = DISABLED_TESTS_FILE
) -> Optional[Dict[str, Any]]:
def process_disabled_test(the_response: Dict[str, Any]) -> Dict[str, Any]:
disabled_test_from_issues = dict()
for item in the_response["items"]:
title = item["title"]
key = "DISABLED "
issue_url = item["html_url"]
issue_number = issue_url.split("/")[-1]
if title.startswith(key) and issue_number not in IGNORE_DISABLED_ISSUES:
test_name = title[len(key) :].strip()
body = item["body"]
platforms_to_skip = []
key = "platforms:"
# When the issue has no body, it is assumed that all platforms should skip the test
if body is not None:
for line in body.splitlines():
line = line.lower()
if line.startswith(key):
pattern = re.compile(r"^\s+|\s*,\s*|\s+$")
platforms_to_skip.extend(
[x for x in pattern.split(line[len(key) :]) if x]
)
disabled_test_from_issues[test_name] = (
item["html_url"],
platforms_to_skip,
)
return disabled_test_from_issues
try:
url = "https://raw.githubusercontent.com/pytorch/test-infra/generated-stats/stats/disabled-tests.json"
return fetch_and_cache(dirpath, filename, url, process_disabled_test)
except Exception:
print("Couldn't download test skip set, leaving all tests enabled...")
return {}
|
pytorch-master
|
tools/stats/import_test_stats.py
|
import gzip
import io
import json
import os
import zipfile
from pathlib import Path
from typing import Any, Dict, List
import boto3 # type: ignore[import]
import requests
import rockset # type: ignore[import]
PYTORCH_REPO = "https://api.github.com/repos/pytorch/pytorch"
S3_RESOURCE = boto3.resource("s3")
def _get_request_headers() -> Dict[str, str]:
return {
"Accept": "application/vnd.github.v3+json",
"Authorization": "token " + os.environ["GITHUB_TOKEN"],
}
def _get_artifact_urls(prefix: str, workflow_run_id: int) -> Dict[Path, str]:
"""Get all workflow artifacts with 'test-report' in the name."""
response = requests.get(
f"{PYTORCH_REPO}/actions/runs/{workflow_run_id}/artifacts?per_page=100",
)
artifacts = response.json()["artifacts"]
while "next" in response.links.keys():
response = requests.get(
response.links["next"]["url"], headers=_get_request_headers()
)
artifacts.extend(response.json()["artifacts"])
artifact_urls = {}
for artifact in artifacts:
if artifact["name"].startswith(prefix):
artifact_urls[Path(artifact["name"])] = artifact["archive_download_url"]
return artifact_urls
def _download_artifact(
artifact_name: Path, artifact_url: str, workflow_run_attempt: int
) -> Path:
# [Artifact run attempt]
# All artifacts on a workflow share a single namespace. However, we can
# re-run a workflow and produce a new set of artifacts. To avoid name
    # collisions, we add `-runattempt<run #>-` somewhere in the artifact name.
#
# This code parses out the run attempt number from the artifact name. If it
# doesn't match the one specified on the command line, skip it.
atoms = str(artifact_name).split("-")
for atom in atoms:
if atom.startswith("runattempt"):
found_run_attempt = int(atom[len("runattempt") :])
if workflow_run_attempt != found_run_attempt:
print(
f"Skipping {artifact_name} as it is an invalid run attempt. "
f"Expected {workflow_run_attempt}, found {found_run_attempt}."
)
print(f"Downloading {artifact_name}")
response = requests.get(artifact_url, headers=_get_request_headers())
with open(artifact_name, "wb") as f:
f.write(response.content)
return artifact_name
def download_s3_artifacts(
prefix: str, workflow_run_id: int, workflow_run_attempt: int
) -> List[Path]:
bucket = S3_RESOURCE.Bucket("gha-artifacts")
objs = bucket.objects.filter(
Prefix=f"pytorch/pytorch/{workflow_run_id}/{workflow_run_attempt}/artifact/{prefix}"
)
found_one = False
paths = []
for obj in objs:
found_one = True
p = Path(Path(obj.key).name)
print(f"Downloading {p}")
with open(p, "wb") as f:
f.write(obj.get()["Body"].read())
paths.append(p)
if not found_one:
print(
"::warning title=s3 artifacts not found::"
"Didn't find any test reports in s3, there might be a bug!"
)
return paths
def download_gha_artifacts(
prefix: str, workflow_run_id: int, workflow_run_attempt: int
) -> List[Path]:
artifact_urls = _get_artifact_urls(prefix, workflow_run_id)
paths = []
for name, url in artifact_urls.items():
paths.append(_download_artifact(Path(name), url, workflow_run_attempt))
return paths
def upload_to_rockset(collection: str, docs: List[Any]) -> None:
print(f"Writing {len(docs)} documents to Rockset")
client = rockset.Client(
api_server="api.rs2.usw2.rockset.com", api_key=os.environ["ROCKSET_API_KEY"]
)
client.Collection.retrieve(collection).add_docs(docs)
print("Done!")
def upload_to_s3(
workflow_run_id: int,
workflow_run_attempt: int,
collection: str,
docs: List[Dict[str, Any]],
) -> None:
print(f"Writing {len(docs)} documents to S3")
body = io.StringIO()
for doc in docs:
json.dump(doc, body)
body.write("\n")
S3_RESOURCE.Object(
"ossci-raw-job-status",
f"{collection}/{workflow_run_id}/{workflow_run_attempt}",
).put(
Body=gzip.compress(body.getvalue().encode()),
ContentEncoding="gzip",
ContentType="application/json",
)
print("Done!")
def unzip(p: Path) -> None:
"""Unzip the provided zipfile to a similarly-named directory.
Returns None if `p` is not a zipfile.
Looks like: /tmp/test-reports.zip -> /tmp/unzipped-test-reports/
"""
assert p.is_file()
unzipped_dir = p.with_name("unzipped-" + p.stem)
print(f"Extracting {p} to {unzipped_dir}")
with zipfile.ZipFile(p, "r") as zip:
zip.extractall(unzipped_dir)
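# Hypothetical usage sketch (not part of the original module): build a throwaway
# archive in a temporary directory and unzip it next to itself.
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        archive = Path(tmp) / "test-reports.zip"
        with zipfile.ZipFile(archive, "w") as zf:
            zf.writestr("report.xml", "<testsuite/>")
        unzip(archive)
        print(list((Path(tmp) / "unzipped-test-reports").iterdir()))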
|
pytorch-master
|
tools/stats/upload_stats_lib.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import bz2
import datetime
import json
import math
import os
import re
import statistics
import subprocess
import time
from collections import defaultdict
from pathlib import Path
from typing import (
Any,
cast,
DefaultDict,
Dict,
Iterable,
Iterator,
List,
Optional,
Set,
Tuple,
)
from xml.dom import minidom
from typing_extensions import TypedDict
from tools.stats.s3_stat_parser import (
Commit,
get_S3_object_from_bucket,
get_test_stats_summaries_for_job,
HAVE_BOTO3,
newify_case,
Report,
ReportMetaMeta,
Status,
Version1Report,
Version2Case,
Version2Report,
VersionedReport,
)
from tools.stats.scribe import send_to_scribe
SimplerSuite = Dict[str, Version2Case]
SimplerFile = Dict[str, SimplerSuite]
SimplerReport = Dict[str, SimplerFile]
class Stat(TypedDict):
center: float
spread: Optional[float]
class CaseDiff(TypedDict):
margin: str
name: str
was: Optional[Tuple[Stat, Status]]
now: Optional[Version2Case]
class SuiteDiff(TypedDict):
margin: str
name: str
was: Optional[Stat]
now: Optional[float]
cases: List[CaseDiff]
# TODO: consolidate this with the get_cases function from
# tools/stats/test_history.py
# Here we translate to a three-layer format (file -> suite -> case)
# rather than a two-layer format (suite -> case) because as mentioned in
# a comment in the body of this function, if we consolidate suites that
# share a name, there will be test case name collisions, and once we
# have those, there's no clean way to deal with it in the diffing logic.
# It's not great to have to add a dummy empty string for the filename
# for version 1 reports, but it's better than either losing cases that
# share a name (for version 2 reports) or using a list of cases rather
# than a dict.
def simplify(report: Report) -> SimplerReport:
if "format_version" not in report: # version 1 implicitly
v1report = cast(Version1Report, report)
return {
# we just don't have test filename information sadly, so we
# just make one fake filename that is the empty string
"": {
suite_name: {
# This clobbers some cases that have duplicate names
# because in version 1, we would merge together all
# the suites with a given name (even if they came
# from different files), so there were actually
# situations in which two cases in the same suite
# shared a name (because they actually originally
# came from two suites that were then merged). It
# would probably be better to warn about the cases
# that we're silently discarding here, but since
# we're only uploading in the new format (where
# everything is also keyed by filename) going
# forward, it shouldn't matter too much.
case["name"]: newify_case(case)
for case in suite["cases"]
}
for suite_name, suite in v1report["suites"].items()
}
}
else:
v_report = cast(VersionedReport, report)
version = v_report["format_version"]
if version == 2:
v2report = cast(Version2Report, v_report)
return {
filename: {
suite_name: suite["cases"]
for suite_name, suite in file_data["suites"].items()
}
for filename, file_data in v2report["files"].items()
}
else:
raise RuntimeError(f"Unknown format version: {version}")
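# Illustrative shape only (not part of the original module): a version 1 report
#   {"suites": {"TestFoo": {"total_seconds": 1.0,
#                           "cases": [{"name": "test_bar", "seconds": 1.0,
#                                      "errored": False, "failed": False, "skipped": True}]}}}
# simplifies to the three-layer form
#   {"": {"TestFoo": {"test_bar": {"seconds": 1.0, "status": "skipped"}}}}
# with the empty string standing in for the unknown filename.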
def plural(n: int) -> str:
return "" if n == 1 else "s"
def get_base_commit(sha1: str) -> str:
default_branch = os.environ.get("GIT_DEFAULT_BRANCH")
# capture None and "" cases
if not default_branch:
default_branch = "master"
default_remote = f"origin/{default_branch}"
return subprocess.check_output(
["git", "merge-base", sha1, default_remote],
encoding="ascii",
).strip()
def display_stat(
x: Stat,
format: Tuple[Tuple[int, int], Tuple[int, int]],
) -> str:
spread_len = format[1][0] + 1 + format[1][1]
spread = x["spread"]
if spread is not None:
spread_str = f" ± {spread:{spread_len}.{format[1][1]}f}s"
else:
spread_str = " " * (3 + spread_len + 1)
mean_len = format[0][0] + 1 + format[0][1]
return f'{x["center"]:{mean_len}.{format[0][1]}f}s{spread_str}'
def list_stat(l: List[float]) -> Stat:
return {
"center": statistics.mean(l),
"spread": statistics.stdev(l) if len(l) > 1 else None,
}
def zero_stat() -> Stat:
return {"center": 0, "spread": None}
def recenter(was: Stat, now: float) -> Stat:
return {"center": now - was["center"], "spread": was["spread"]}
def sum_normals(stats: Iterable[Stat]) -> Stat:
"""
Returns a stat corresponding to the sum of the given stats.
Assumes that the center and spread for each of the given stats are
mean and stdev, respectively.
"""
l = list(stats)
spread: Optional[float]
if any(stat["spread"] is not None for stat in l):
spread = math.sqrt(sum((stat["spread"] or 0) ** 2 for stat in l))
else:
spread = None
return {
"center": sum(stat["center"] for stat in l),
"spread": spread,
}
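# Worked example (illustrative, not part of the original module):
#   sum_normals([{"center": 1.0, "spread": 0.3}, {"center": 2.0, "spread": 0.4}])
# yields {"center": 3.0, "spread": 0.5}, since 0.5 == sqrt(0.3**2 + 0.4**2).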
def format_seconds(seconds: List[float]) -> str:
if len(seconds) > 0:
x = list_stat(seconds)
return f"total time {display_stat(x, ((5, 2), (4, 2)))}".strip()
return ""
def show_ancestors(num_commits: int) -> str:
return f" | : ({num_commits} commit{plural(num_commits)})"
def unlines(lines: List[str]) -> str:
return "".join(f"{line}\n" for line in lines)
def matching_test_times(
*,
base_reports: Dict[Commit, List[SimplerReport]],
filename: str,
suite_name: str,
case_name: str,
status: Status,
) -> List[float]:
times: List[float] = []
for reports in base_reports.values():
for report in reports:
file_data = report.get(filename)
if file_data:
suite = file_data.get(suite_name)
if suite:
case = suite.get(case_name)
if case:
t = case["seconds"]
s = case["status"]
if s == status:
times.append(t)
return times
def analyze(
*,
head_report: SimplerReport,
base_reports: Dict[Commit, List[SimplerReport]],
) -> List[SuiteDiff]:
nonempty_shas = [sha for sha, reports in base_reports.items() if reports]
# most recent main ancestor with at least one S3 report,
# or empty list if there are none (will show all tests as added)
base_report = base_reports[nonempty_shas[0]] if nonempty_shas else []
# find all relevant suites (those in either base or head or both)
all_reports = [head_report] + base_report
all_suites: Set[Tuple[str, str]] = {
(filename, suite_name)
for r in all_reports
for filename, file_data in r.items()
for suite_name in file_data.keys()
}
removed_suites: List[SuiteDiff] = []
modified_suites: List[SuiteDiff] = []
added_suites: List[SuiteDiff] = []
for filename, suite_name in sorted(all_suites):
case_diffs: List[CaseDiff] = []
head_suite = head_report.get(filename, {}).get(suite_name)
base_cases: Dict[str, Status] = dict(
sorted(
set.intersection(
*[
{
(n, case["status"])
for n, case in report.get(filename, {})
.get(suite_name, {})
.items()
}
for report in base_report
]
or [set()]
)
)
)
case_stats: Dict[str, Stat] = {}
if head_suite:
now = sum(case["seconds"] for case in head_suite.values())
if any(
filename in report and suite_name in report[filename]
for report in base_report
):
removed_cases: List[CaseDiff] = []
for case_name, case_status in base_cases.items():
case_stats[case_name] = list_stat(
matching_test_times(
base_reports=base_reports,
filename=filename,
suite_name=suite_name,
case_name=case_name,
status=case_status,
)
)
if case_name not in head_suite:
removed_cases.append(
{
"margin": "-",
"name": case_name,
"was": (case_stats[case_name], case_status),
"now": None,
}
)
modified_cases: List[CaseDiff] = []
added_cases: List[CaseDiff] = []
for head_case_name in sorted(head_suite):
head_case = head_suite[head_case_name]
if head_case_name in base_cases:
stat = case_stats[head_case_name]
base_status = base_cases[head_case_name]
if head_case["status"] != base_status:
modified_cases.append(
{
"margin": "!",
"name": head_case_name,
"was": (stat, base_status),
"now": head_case,
}
)
else:
added_cases.append(
{
"margin": "+",
"name": head_case_name,
"was": None,
"now": head_case,
}
)
# there might be a bug calculating this stdev, not sure
was = sum_normals(case_stats.values())
case_diffs = removed_cases + modified_cases + added_cases
if case_diffs:
modified_suites.append(
{
"margin": " ",
"name": suite_name,
"was": was,
"now": now,
"cases": case_diffs,
}
)
else:
for head_case_name in sorted(head_suite):
head_case = head_suite[head_case_name]
case_diffs.append(
{
"margin": " ",
"name": head_case_name,
"was": None,
"now": head_case,
}
)
added_suites.append(
{
"margin": "+",
"name": suite_name,
"was": None,
"now": now,
"cases": case_diffs,
}
)
else:
for case_name, case_status in base_cases.items():
case_stats[case_name] = list_stat(
matching_test_times(
base_reports=base_reports,
filename=filename,
suite_name=suite_name,
case_name=case_name,
status=case_status,
)
)
case_diffs.append(
{
"margin": " ",
"name": case_name,
"was": (case_stats[case_name], case_status),
"now": None,
}
)
removed_suites.append(
{
"margin": "-",
"name": suite_name,
# there might be a bug calculating this stdev, not sure
"was": sum_normals(case_stats.values()),
"now": None,
"cases": case_diffs,
}
)
return removed_suites + modified_suites + added_suites
def case_diff_lines(diff: CaseDiff) -> List[str]:
lines = [f'def {diff["name"]}: ...']
case_fmt = ((3, 3), (2, 3))
was = diff["was"]
if was:
was_line = f" # was {display_stat(was[0], case_fmt)}"
was_status = was[1]
if was_status:
was_line += f" ({was_status})"
lines.append(was_line)
now = diff["now"]
if now:
now_stat: Stat = {"center": now["seconds"], "spread": None}
now_line = f" # now {display_stat(now_stat, case_fmt)}"
now_status = now["status"]
if now_status:
now_line += f" ({now_status})"
lines.append(now_line)
return [""] + [f'{diff["margin"]} {l}' for l in lines]
def display_suite_diff(diff: SuiteDiff) -> str:
lines = [f'class {diff["name"]}:']
suite_fmt = ((4, 2), (3, 2))
was = diff["was"]
if was:
lines.append(f" # was {display_stat(was, suite_fmt)}")
now = diff["now"]
if now is not None:
now_stat: Stat = {"center": now, "spread": None}
lines.append(f" # now {display_stat(now_stat, suite_fmt)}")
for case_diff in diff["cases"]:
lines.extend([f" {l}" for l in case_diff_lines(case_diff)])
return unlines([""] + [f'{diff["margin"]} {l}'.rstrip() for l in lines] + [""])
def anomalies(diffs: List[SuiteDiff]) -> str:
return "".join(map(display_suite_diff, diffs))
def graph(
*,
head_sha: Commit,
head_seconds: float,
base_seconds: Dict[Commit, List[float]],
on_master: bool,
ancestry_path: int = 0,
other_ancestors: int = 0,
) -> str:
lines = [
"Commit graph (base is most recent master ancestor with at least one S3 report):",
"",
" : (master)",
" |",
]
head_time_str = f" {format_seconds([head_seconds])}"
if on_master:
lines.append(f" * {head_sha[:10]} (HEAD) {head_time_str}")
else:
lines.append(f" | * {head_sha[:10]} (HEAD) {head_time_str}")
if ancestry_path > 0:
lines += [
" | |",
show_ancestors(ancestry_path),
]
if other_ancestors > 0:
lines += [
" |/|",
show_ancestors(other_ancestors),
" |",
]
else:
lines.append(" |/")
is_first = True
for sha, seconds in base_seconds.items():
num_runs = len(seconds)
prefix = str(num_runs).rjust(3)
base = "(base)" if is_first and num_runs > 0 else " "
if num_runs > 0:
is_first = False
t = format_seconds(seconds)
p = plural(num_runs)
if t:
p = f"{p}, ".ljust(3)
lines.append(f" * {sha[:10]} {base} {prefix} report{p}{t}")
lines.extend([" |", " :"])
return unlines(lines)
def case_delta(case: CaseDiff) -> Stat:
was = case["was"]
now = case["now"]
return recenter(
was[0] if was else zero_stat(),
now["seconds"] if now else 0,
)
def display_final_stat(stat: Stat) -> str:
center = stat["center"]
spread = stat["spread"]
displayed = display_stat(
{"center": abs(center), "spread": spread},
((4, 2), (3, 2)),
)
if center < 0:
sign = "-"
elif center > 0:
sign = "+"
else:
sign = " "
return f"{sign}{displayed}".rstrip()
def summary_line(message: str, d: DefaultDict[str, List[CaseDiff]]) -> str:
all_cases = [c for cs in d.values() for c in cs]
tests = len(all_cases)
suites = len(d)
sp = f"{plural(suites)})".ljust(2)
tp = f"{plural(tests)},".ljust(2)
# there might be a bug calculating this stdev, not sure
stat = sum_normals(case_delta(c) for c in all_cases)
return "".join(
[
f"{message} (across {suites:>4} suite{sp}",
f"{tests:>6} test{tp}",
f" totaling {display_final_stat(stat)}",
]
)
def summary(analysis: List[SuiteDiff]) -> str:
removed_tests: DefaultDict[str, List[CaseDiff]] = defaultdict(list)
modified_tests: DefaultDict[str, List[CaseDiff]] = defaultdict(list)
added_tests: DefaultDict[str, List[CaseDiff]] = defaultdict(list)
for diff in analysis:
# the use of 'margin' here is not the most elegant
name = diff["name"]
margin = diff["margin"]
cases = diff["cases"]
if margin == "-":
removed_tests[name] += cases
elif margin == "+":
added_tests[name] += cases
else:
removed = list(filter(lambda c: c["margin"] == "-", cases))
added = list(filter(lambda c: c["margin"] == "+", cases))
modified = list(filter(lambda c: c["margin"] == "!", cases))
if removed:
removed_tests[name] += removed
if added:
added_tests[name] += added
if modified:
modified_tests[name] += modified
return unlines(
[
summary_line("Removed ", removed_tests),
summary_line("Modified", modified_tests),
summary_line("Added ", added_tests),
]
)
def regression_info(
*,
head_sha: Commit,
head_report: Report,
base_reports: Dict[Commit, List[Report]],
job_name: str,
on_master: bool,
ancestry_path: int,
other_ancestors: int,
) -> str:
"""
Return a human-readable report describing any test time regressions.
The head_sha and head_report args give info about the current commit
and its test times. Since Python dicts maintain insertion order
(guaranteed as part of the language spec since 3.7), the
base_reports argument must list the head's several most recent
main commits, from newest to oldest (so the merge-base is
list(base_reports)[0]).
"""
simpler_head = simplify(head_report)
simpler_base: Dict[Commit, List[SimplerReport]] = {}
for commit, reports in base_reports.items():
simpler_base[commit] = [simplify(r) for r in reports]
analysis = analyze(
head_report=simpler_head,
base_reports=simpler_base,
)
return "\n".join(
[
unlines(
[
"----- Historic stats comparison result ------",
"",
f" job: {job_name}",
f" commit: {head_sha}",
]
),
# don't print anomalies, because sometimes due to sharding, the
# output from this would be very long and obscure better signal
# anomalies(analysis),
graph(
head_sha=head_sha,
head_seconds=head_report["total_seconds"],
base_seconds={
c: [r["total_seconds"] for r in rs]
for c, rs in base_reports.items()
},
on_master=on_master,
ancestry_path=ancestry_path,
other_ancestors=other_ancestors,
),
summary(analysis),
]
)
class TestCase:
def __init__(self, dom: Any) -> None:
self.class_name = str(dom.attributes["classname"].value)
self.name = str(dom.attributes["name"].value)
self.time = float(dom.attributes["time"].value)
error_elements = dom.getElementsByTagName("error")
# DISCLAIMER: unexpected successes and expected failures are currently not reported in assemble_s3_object
self.expected_failure = False
self.skipped = False
self.errored = False
self.unexpected_success = False
if len(error_elements) > 0:
# We are only expecting 1 element here
error_element = error_elements[0]
self.unexpected_success = (
error_element.hasAttribute("type")
and error_element.attributes["type"].value == "UnexpectedSuccess"
)
self.errored = not self.unexpected_success
skipped_elements = dom.getElementsByTagName("skipped")
if len(skipped_elements) > 0:
# We are only expecting 1 element here
skipped_element = skipped_elements[0]
self.expected_failure = (
skipped_element.hasAttribute("type")
and skipped_element.attributes["type"].value == "XFAIL"
)
self.skipped = not self.expected_failure
self.failed = len(dom.getElementsByTagName("failure")) > 0
def __repr__(self) -> str:
return self.__str__()
def __str__(self) -> str:
return (
f"[TestCase name: {self.name} | class_name: {self.class_name} | time: {self.time} | "
f"expected_failure: {self.expected_failure} | skipped: {self.skipped} | errored: {self.errored} | "
f"unexpected_success: {self.unexpected_success} | failed: {self.failed}]\n"
)
class TestSuite:
def __init__(self, name: str) -> None:
self.name = name
self.test_cases: Dict[str, TestCase] = dict()
self.failed_count = 0
self.skipped_count = 0
self.errored_count = 0
self.total_time = 0.0
# The below are currently not included in test reports
self.unexpected_success_count = 0
self.expected_failure_count = 0
def __repr__(self) -> str:
rc = (
f"{self.name} run_time: {self.total_time:.2f} tests: {len(self.test_cases)}"
)
if self.skipped_count > 0:
rc += f" skipped: {self.skipped_count}"
return f"TestSuite({rc})"
def append(self, test_case: TestCase) -> None:
self.test_cases[test_case.name] = test_case
self.total_time += test_case.time
self.failed_count += 1 if test_case.failed else 0
self.skipped_count += 1 if test_case.skipped else 0
self.errored_count += 1 if test_case.errored else 0
self.unexpected_success_count += 1 if test_case.unexpected_success else 0
self.expected_failure_count += 1 if test_case.expected_failure else 0
def update(self, test_case: TestCase) -> None:
name = test_case.name
assert (
name in self.test_cases
), f"Error: attempting to replace nonexistent test case {name}"
# Note that time for unexpected successes and expected failures are reported as 0s
self.test_cases[name].time += test_case.time
self.test_cases[name].failed |= test_case.failed
self.test_cases[name].errored |= test_case.errored
self.test_cases[name].skipped |= test_case.skipped
self.test_cases[name].unexpected_success |= test_case.unexpected_success
self.test_cases[name].expected_failure |= test_case.expected_failure
# Tests that spawn duplicates (usually only twice) intentionally
MULTITESTS = [
"test_cpp_extensions_aot",
"distributed/test_distributed_spawn",
"distributed\\test_distributed_spawn", # for windows
"distributed/test_c10d_gloo",
"distributed\\test_c10d_gloo", # for windows
"cpp", # The caffe2 cpp tests spawn duplicate test cases as well.
]
class TestFile:
def __init__(self, name: str) -> None:
self.name = name
self.total_time = 0.0
self.test_suites: Dict[str, TestSuite] = dict()
def append(self, test_case: TestCase) -> None:
suite_name = test_case.class_name
if suite_name not in self.test_suites:
self.test_suites[suite_name] = TestSuite(suite_name)
if test_case.name in self.test_suites[suite_name].test_cases:
if self.name in MULTITESTS:
self.test_suites[suite_name].update(test_case)
self.total_time += test_case.time
else:
self.test_suites[suite_name].append(test_case)
self.total_time += test_case.time
def parse_report(path: str) -> Iterator[TestCase]:
try:
dom = minidom.parse(path)
except Exception as e:
print(f"Error occurred when parsing {path}: {e}")
return
for test_case in dom.getElementsByTagName("testcase"):
yield TestCase(test_case)
def get_recursive_files(folder: str, extension: str) -> Iterable[str]:
"""
    Get a recursive list of files with the given extension.
    Use it instead of glob(os.path.join(folder, '**', f'*{extension}'))
    when folder/file names can start with `.`, which makes them hidden on Unix platforms.
"""
assert extension.startswith(".")
for root, _, files in os.walk(folder):
for fname in files:
if os.path.splitext(fname)[1] == extension:
yield os.path.join(root, fname)
def parse_reports(folder: str) -> Dict[str, TestFile]:
tests_by_file = dict()
for report in get_recursive_files(folder, ".xml"):
report_path = Path(report)
# basename of the directory of test-report is the test filename
test_filename = re.sub(r"\.", "/", report_path.parent.name)
if test_filename not in tests_by_file:
tests_by_file[test_filename] = TestFile(test_filename)
for test_case in parse_report(report):
tests_by_file[test_filename].append(test_case)
return tests_by_file
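# Illustrative example (not part of the original module): a report stored at
#   <folder>/distributed.test_c10d_gloo/junit.xml
# is attributed to the test file "distributed/test_c10d_gloo", since dots in the
# report's parent directory name are translated back into path separators.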
def build_info() -> ReportMetaMeta:
return {
"build_pr": os.environ.get("PR_NUMBER", os.environ.get("CIRCLE_PR_NUMBER", "")),
"build_tag": os.environ.get("TAG", os.environ.get("CIRCLE_TAG", "")),
"build_sha1": os.environ.get("SHA1", os.environ.get("CIRCLE_SHA1", "")),
"build_base_commit": get_base_commit(
os.environ.get("SHA1", os.environ.get("CIRCLE_SHA1", "HEAD"))
),
"build_branch": os.environ.get("BRANCH", os.environ.get("CIRCLE_BRANCH", "")),
"build_job": os.environ.get(
"BUILD_ENVIRONMENT", os.environ.get("CIRCLE_JOB", "")
),
"build_workflow_id": os.environ.get(
"WORKFLOW_ID", os.environ.get("CIRCLE_WORKFLOW_ID", "")
),
"build_start_time_epoch": str(
int(os.path.getmtime(os.path.realpath(__file__)))
),
}
def build_message(
test_file: TestFile,
test_suite: TestSuite,
test_case: TestCase,
meta_info: ReportMetaMeta,
) -> Dict[str, Dict[str, Any]]:
return {
"normal": {
**meta_info,
"test_filename": test_file.name,
"test_suite_name": test_suite.name,
"test_case_name": test_case.name,
},
"int": {
"time": int(time.time()),
"test_total_count": 1,
"test_total_time": int(test_case.time * 1000),
"test_failed_count": 1 if test_case.failed > 0 else 0,
"test_skipped_count": 1 if test_case.skipped > 0 else 0,
"test_errored_count": 1 if test_case.errored > 0 else 0,
},
}
def send_report_to_scribe(reports: Dict[str, TestFile]) -> None:
meta_info = build_info()
logs = json.dumps(
[
{
"category": "perfpipe_pytorch_test_times",
"message": json.dumps(
build_message(test_file, test_suite, test_case, meta_info)
),
"line_escape": False,
}
for test_file in reports.values()
for test_suite in test_file.test_suites.values()
for test_case in test_suite.test_cases.values()
]
)
    # no need to print the send result, as exceptions will be captured and printed later.
send_to_scribe(logs)
def assemble_s3_object(
reports: Dict[str, TestFile],
*,
total_seconds: float,
) -> Version2Report:
return {
**build_info(), # type: ignore[misc]
"total_seconds": total_seconds,
"format_version": 2,
"files": {
name: {
"total_seconds": test_file.total_time,
"suites": {
name: {
"total_seconds": suite.total_time,
"cases": {
name: {
"seconds": case.time,
"status": "errored"
if case.errored
else "failed"
if case.failed
else "skipped"
if case.skipped
else None,
}
for name, case in suite.test_cases.items()
},
}
for name, suite in test_file.test_suites.items()
},
}
for name, test_file in reports.items()
},
}
def send_report_to_s3(head_report: Version2Report) -> None:
job = os.getenv("BUILD_ENVIRONMENT", os.environ.get("CIRCLE_JOB"))
sha1 = os.environ.get("SHA1", os.environ.get("CIRCLE_SHA1", ""))
now = datetime.datetime.utcnow().isoformat()
# SHARD_NUMBER and TEST_CONFIG are specific to GHA, as these details would be included in CIRCLE_JOB already
shard = os.environ.get("SHARD_NUMBER", "")
test_config = os.environ.get("TEST_CONFIG")
job_report_dirname = (
f'{job}{f"-{test_config}" if test_config is not None else ""}{shard}'
)
key = f"test_time/{sha1}/{job_report_dirname}/{now}Z.json.bz2" # Z meaning UTC
obj = get_S3_object_from_bucket("ossci-metrics", key)
# use bz2 because the results are smaller than gzip, and the
# compression time penalty we pay is only about half a second for
# input files of a few megabytes in size like these JSON files, and
# because for some reason zlib doesn't seem to play nice with the
# gunzip command whereas Python's bz2 does work with bzip2
obj.put(Body=bz2.compress(json.dumps(head_report).encode()))
def print_regressions(head_report: Report, *, num_prev_commits: int) -> None:
sha1 = os.environ.get("SHA1", os.environ.get("CIRCLE_SHA1", "HEAD"))
base = get_base_commit(sha1)
count_spec = f"{base}..{sha1}"
intermediate_commits = int(
subprocess.check_output(
["git", "rev-list", "--count", count_spec], encoding="ascii"
)
)
ancestry_path = int(
subprocess.check_output(
["git", "rev-list", "--ancestry-path", "--count", count_spec],
encoding="ascii",
)
)
# if current commit is already on main, we need to exclude it from
# this history; otherwise we include the merge-base
commits = subprocess.check_output(
["git", "rev-list", f"--max-count={num_prev_commits+1}", base],
encoding="ascii",
).splitlines()
on_master = False
if base == sha1:
on_master = True
commits = commits[1:]
else:
commits = commits[:-1]
job = os.environ.get("BUILD_ENVIRONMENT", "")
objects: Dict[Commit, List[Report]] = defaultdict(list)
for commit in commits:
objects[commit]
summaries = get_test_stats_summaries_for_job(sha=commit, job_prefix=job)
for _, summary in summaries.items():
objects[commit].extend(summary)
print()
print(
regression_info(
head_sha=sha1,
head_report=head_report,
base_reports=objects,
job_name=job,
on_master=on_master,
ancestry_path=ancestry_path - 1,
other_ancestors=intermediate_commits - ancestry_path,
),
end="",
)
def positive_integer(value: str) -> int:
parsed = int(value)
if parsed < 1:
raise argparse.ArgumentTypeError(f"{value} is not a natural number")
return parsed
def positive_float(value: str) -> float:
parsed = float(value)
if parsed <= 0.0:
raise argparse.ArgumentTypeError(f"{value} is not a positive rational number")
return parsed
def reports_has_no_tests(reports: Dict[str, TestFile]) -> bool:
for test_file in reports.values():
for test_suite in test_file.test_suites.values():
if len(test_suite.test_cases) > 0:
return False
return True
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser(
"Print statistics from test XML output.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--longest-of-class",
type=positive_integer,
default=3,
metavar="N",
help="how many longest tests to show for each class",
)
parser.add_argument(
"--class-print-threshold",
type=positive_float,
default=1.0,
metavar="N",
help="Minimal total time to warrant class report",
)
parser.add_argument(
"--longest-of-run",
type=positive_integer,
default=10,
metavar="N",
help="how many longest tests to show from the entire run",
)
if HAVE_BOTO3:
parser.add_argument(
"--upload-to-s3",
action="store_true",
help="upload test time to S3 bucket",
)
parser.add_argument(
"--compare-with-s3",
action="store_true",
help="download test times for base commits and compare",
)
parser.add_argument(
"--num-prev-commits",
type=positive_integer,
default=10,
metavar="N",
help="how many previous commits to compare test times with",
)
parser.add_argument(
"--use-json",
metavar="FILE.json",
help="compare S3 with JSON file, instead of the test report folder",
)
parser.add_argument(
"folder",
help="test report folder",
)
args = parser.parse_args()
reports_by_file = parse_reports(args.folder)
if reports_has_no_tests(reports_by_file):
print(f"No tests in reports found in {args.folder}")
sys.exit(0)
try:
send_report_to_scribe(reports_by_file)
except Exception as e:
print(f"ERROR ENCOUNTERED WHEN UPLOADING TO SCRIBE: {e}")
total_time = 0.0
for filename, test_filename in reports_by_file.items():
for suite_name, test_suite in test_filename.test_suites.items():
total_time += test_suite.total_time
obj = assemble_s3_object(reports_by_file, total_seconds=total_time)
if args.upload_to_s3:
try:
send_report_to_s3(obj)
except Exception as e:
print(f"ERROR ENCOUNTERED WHEN UPLOADING TO S3: {e}")
if args.compare_with_s3:
head_json = obj
if args.use_json:
head_json = json.loads(Path(args.use_json).read_text())
try:
print_regressions(head_json, num_prev_commits=args.num_prev_commits)
except Exception as e:
print(f"ERROR ENCOUNTERED WHEN COMPARING AGAINST S3: {e}")
|
pytorch-master
|
tools/stats/print_test_stats.py
|
import base64
import bz2
import json
import os
from typing import Any
_lambda_client = None
def sprint(*args: Any) -> None:
print("[scribe]", *args)
def aws_lambda() -> Any:
global _lambda_client
# lazy import so that we don't need to introduce extra dependencies
import boto3 # type: ignore[import]
if _lambda_client is None:
_lambda_client = boto3.client("lambda")
return _lambda_client
def invoke_lambda(name: str, payload: Any) -> Any:
res = aws_lambda().invoke(FunctionName=name, Payload=json.dumps(payload).encode())
payload = str(res["Payload"].read().decode())
if res.get("FunctionError"):
raise Exception(payload)
return payload
def send_to_scribe(logs: str) -> str:
access_token = os.environ.get("SCRIBE_GRAPHQL_ACCESS_TOKEN", "")
# boto3 can be used when the runner has IAM roles setup
# currently it's used as a fallback when SCRIBE_GRAPHQL_ACCESS_TOKEN is empty
if access_token == "":
return _send_to_scribe_via_boto3(logs)
return _send_to_scribe_via_http(access_token, logs)
def _send_to_scribe_via_boto3(logs: str) -> str:
sprint("Scribe access token not provided, sending report via boto3...")
event = {"base64_bz2_logs": base64.b64encode(bz2.compress(logs.encode())).decode()}
return str(invoke_lambda("gh-ci-scribe-proxy", event))
def _send_to_scribe_via_http(access_token: str, logs: str) -> str:
# lazy import so that we don't need to introduce extra dependencies
import requests # type: ignore[import]
sprint("Scribe access token provided, sending report via http...")
r = requests.post(
"https://graph.facebook.com/scribe_logs",
data={"access_token": access_token, "logs": logs},
)
r.raise_for_status()
return str(r.text)
|
pytorch-master
|
tools/stats/scribe.py
|
pytorch-master
|
tools/stats/__init__.py
|
|
import argparse
import json
import os
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Any, Dict, List
from tools.stats.upload_stats_lib import (
download_gha_artifacts,
download_s3_artifacts,
unzip,
upload_to_rockset,
)
def get_sccache_stats(
workflow_run_id: int, workflow_run_attempt: int
) -> List[Dict[str, Any]]:
with TemporaryDirectory() as temp_dir:
print("Using temporary directory:", temp_dir)
os.chdir(temp_dir)
# Download and extract all the reports (both GHA and S3)
download_s3_artifacts("sccache-stats", workflow_run_id, workflow_run_attempt)
artifact_paths = download_gha_artifacts(
"sccache-stats", workflow_run_id, workflow_run_attempt
)
for path in artifact_paths:
unzip(path)
stats_jsons = []
for json_file in Path(".").glob("**/*.json"):
with open(json_file) as f:
stats_jsons.append(json.load(f))
return stats_jsons
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Upload test stats to Rockset")
parser.add_argument(
"--workflow-run-id",
type=int,
required=True,
help="id of the workflow to get artifacts from",
)
parser.add_argument(
"--workflow-run-attempt",
type=int,
required=True,
help="which retry of the workflow this is",
)
args = parser.parse_args()
stats = get_sccache_stats(args.workflow_run_id, args.workflow_run_attempt)
upload_to_rockset("sccache_stats", stats)
|
pytorch-master
|
tools/stats/upload_sccache_stats.py
|
import pathlib
import sys
REPO_ROOT = pathlib.Path(__file__).resolve().parent.parent.parent
sys.path.append(str(REPO_ROOT))
from tools.stats.import_test_stats import get_test_times
TEST_TIMES_FILE = ".pytorch-test-times.json"
def main() -> None:
print(f"Exporting test times from test-infra to {TEST_TIMES_FILE}")
get_test_times(str(REPO_ROOT), filename=TEST_TIMES_FILE)
if __name__ == "__main__":
main()
|
pytorch-master
|
tools/stats/export_test_times.py
|
import bz2
import json
import logging
import subprocess
from collections import defaultdict
from datetime import datetime, timedelta
from typing import Any, cast, Dict, List, Optional, Tuple, Union
from typing_extensions import Literal, TypedDict
try:
import boto3 # type: ignore[import]
import botocore # type: ignore[import]
HAVE_BOTO3 = True
except ImportError:
HAVE_BOTO3 = False
logger = logging.getLogger(__name__)
OSSCI_METRICS_BUCKET = "ossci-metrics"
Commit = str  # 40-character SHA-1 hex string
Status = Optional[Literal["errored", "failed", "skipped"]]
class CaseMeta(TypedDict):
seconds: float
class Version1Case(CaseMeta):
name: str
errored: bool
failed: bool
skipped: bool
class Version1Suite(TypedDict):
total_seconds: float
cases: List[Version1Case]
class ReportMetaMeta(TypedDict):
build_pr: str
build_tag: str
build_sha1: Commit
build_base_commit: Commit
build_branch: str
build_job: str
build_workflow_id: str
build_start_time_epoch: str
class ReportMeta(ReportMetaMeta):
total_seconds: float
class Version1Report(ReportMeta):
suites: Dict[str, Version1Suite]
class Version2Case(CaseMeta):
status: Status
class Version2Suite(TypedDict):
total_seconds: float
cases: Dict[str, Version2Case]
class Version2File(TypedDict):
total_seconds: float
suites: Dict[str, Version2Suite]
class VersionedReport(ReportMeta):
format_version: int
# report: Version2Report implies report['format_version'] == 2
class Version2Report(VersionedReport):
files: Dict[str, Version2File]
Report = Union[Version1Report, VersionedReport]
if HAVE_BOTO3:
S3_RESOURCE_READ_ONLY = boto3.resource(
"s3", config=botocore.config.Config(signature_version=botocore.UNSIGNED)
)
S3_RESOURCE = boto3.resource("s3")
def get_S3_bucket_readonly(bucket_name: str) -> Any:
return S3_RESOURCE_READ_ONLY.Bucket(bucket_name)
def get_S3_object_from_bucket(bucket_name: str, object: str) -> Any:
return S3_RESOURCE.Object(bucket_name, object)
def case_status(case: Version1Case) -> Status:
for k in {"errored", "failed", "skipped"}:
if case[k]: # type: ignore[literal-required]
return cast(Status, k)
return None
def newify_case(case: Version1Case) -> Version2Case:
return {
"seconds": case["seconds"],
"status": case_status(case),
}
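# Illustrative example (not part of the original module):
#   newify_case({"name": "test_add", "seconds": 0.5,
#                "errored": False, "failed": True, "skipped": False})
# returns {"seconds": 0.5, "status": "failed"}.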
def get_cases(
*,
data: Report,
filename: Optional[str],
suite_name: Optional[str],
test_name: Optional[str],
) -> List[Version2Case]:
cases: List[Version2Case] = []
if "format_version" not in data: # version 1 implicitly
v1report = cast(Version1Report, data)
suites = v1report["suites"]
for sname, v1suite in suites.items():
if not suite_name or sname == suite_name:
for v1case in v1suite["cases"]:
if not test_name or v1case["name"] == test_name:
cases.append(newify_case(v1case))
else:
v_report = cast(VersionedReport, data)
version = v_report["format_version"]
if version == 2:
v2report = cast(Version2Report, v_report)
for fname, v2file in v2report["files"].items():
if fname == filename or not filename:
for sname, v2suite in v2file["suites"].items():
if sname == suite_name or not suite_name:
for cname, v2case in v2suite["cases"].items():
if not test_name or cname == test_name:
cases.append(v2case)
else:
raise RuntimeError(f"Unknown format version: {version}")
return cases
def _parse_master_summaries(summaries: Any, jobs: List[str]) -> Dict[str, List[Report]]:
summary_dict = defaultdict(list)
for summary in summaries:
# master summary format: "test_time/{sha}/{job}/file"
summary_job = summary.key.split("/")[2]
if summary_job in jobs or len(jobs) == 0:
binary = summary.get()["Body"].read()
string = bz2.decompress(binary).decode("utf-8")
summary_dict[summary_job].append(json.loads(string))
return summary_dict
def _parse_pr_summaries(
summaries: Any, job_prefix: str
) -> Dict[str, List[Tuple[Report, str]]]:
summary_dict = defaultdict(list)
for summary in summaries:
# PR summary format: "pr_test_time/{pr}/{sha}/{job}/file"
summary_job = summary.key.split("/")[3]
summary_timestamp = summary.key.split("/")[4][: len("YYYY-MM-ddTHH:mm:ss")]
if not job_prefix or len(job_prefix) == 0 or summary_job.startswith(job_prefix):
binary = summary.get()["Body"].read()
string = bz2.decompress(binary).decode("utf-8")
summary_dict[summary_job].append((json.loads(string), summary_timestamp))
return summary_dict
# Collect and decompress S3 test stats summaries into JSON.
# data stored on S3 buckets are pathed by {sha}/{job} so we also allow
# optional jobs filter
def get_test_stats_summaries(
*, sha: str, jobs: Optional[List[str]] = None
) -> Dict[str, List[Report]]:
bucket = get_S3_bucket_readonly(OSSCI_METRICS_BUCKET)
summaries = bucket.objects.filter(Prefix=f"test_time/{sha}")
return _parse_master_summaries(summaries, jobs=list(jobs or []))
def get_test_stats_summaries_for_job(
*, sha: str, job_prefix: str
) -> Dict[str, List[Report]]:
bucket = get_S3_bucket_readonly(OSSCI_METRICS_BUCKET)
summaries = bucket.objects.filter(Prefix=f"test_time/{sha}/{job_prefix}")
return _parse_master_summaries(summaries, jobs=list())
def get_test_stats_summaries_for_pr(
*, pr: str, job_prefix: str
) -> Dict[str, List[Tuple[Report, str]]]:
bucket = get_S3_bucket_readonly(OSSCI_METRICS_BUCKET)
summaries = bucket.objects.filter(Prefix=f"pr_test_time/{pr}/")
return _parse_pr_summaries(summaries, job_prefix=job_prefix)
# This function returns a list of S3 test time reports. This function can run into errors if HAVE_BOTO3 = False
# or the S3 bucket is somehow unavailable. Even though this function goes through ten commits' reports to find a
# non-empty report, it is still conceivable (though highly unlikely) for this function to return no reports.
def get_previous_reports_for_branch(
branch: str, ci_job_prefix: str = ""
) -> List[Report]:
commit_date_ts = subprocess.check_output(
["git", "show", "-s", "--format=%ct", "HEAD"], encoding="ascii"
).strip()
commit_date = datetime.fromtimestamp(int(commit_date_ts))
    # We go a day before this current commit to avoid pulling incomplete reports
day_before_commit = str(commit_date - timedelta(days=1)).split(" ")[0]
# something like git rev-list --before="2021-03-04" --max-count=10 --remotes="*origin/nightly"
commits = subprocess.check_output(
[
"git",
"rev-list",
f"--before={day_before_commit}",
"--max-count=10",
f"--remotes=*{branch}",
],
encoding="ascii",
).splitlines()
reports: List[Report] = []
commit_index = 0
while len(reports) == 0 and commit_index < len(commits):
commit = commits[commit_index]
logger.info(f"Grabbing reports from commit: {commit}")
summaries = get_test_stats_summaries_for_job(
sha=commit, job_prefix=ci_job_prefix
)
for job_name, summary in summaries.items():
reports.append(summary[0])
if len(summary) > 1:
logger.warning(
f"WARNING: Multiple summary objects found for {commit}/{job_name}"
)
commit_index += 1
return reports
|
pytorch-master
|
tools/stats/s3_stat_parser.py
|
#!/usr/bin/env python3
import argparse
import subprocess
import sys
from datetime import datetime, timezone
from signal import SIG_DFL, signal, SIGPIPE
from typing import Dict, Iterator, List, Optional, Set, Tuple
from tools.stats.s3_stat_parser import get_cases, get_test_stats_summaries, Report
def get_git_commit_history(*, path: str, ref: str) -> List[Tuple[str, datetime]]:
rc = subprocess.check_output(
["git", "-C", path, "log", "--pretty=format:%H %ct", ref],
).decode("latin-1")
return [
(x[0], datetime.fromtimestamp(int(x[1]), tz=timezone.utc))
for x in [line.split(" ") for line in rc.split("\n")]
]
def make_column(
*,
data: Optional[Report],
filename: Optional[str],
suite_name: Optional[str],
test_name: str,
digits: int,
) -> Tuple[str, int]:
decimals = 3
num_length = digits + 1 + decimals
if data:
cases = get_cases(
data=data, filename=filename, suite_name=suite_name, test_name=test_name
)
if cases:
case = cases[0]
status = case["status"]
omitted = len(cases) - 1
if status:
return f"{status.rjust(num_length)} ", omitted
else:
return f'{case["seconds"]:{num_length}.{decimals}f}s', omitted
else:
return f'{"absent".rjust(num_length)} ', 0
else:
return " " * (num_length + 1), 0
def make_columns(
*,
jobs: List[str],
jsons: Dict[str, Report],
omitted: Dict[str, int],
filename: Optional[str],
suite_name: Optional[str],
test_name: str,
digits: int,
) -> str:
columns = []
total_omitted = 0
total_suites = 0
for job in jobs:
data = jsons.get(job)
column, omitted_suites = make_column(
data=data,
filename=filename,
suite_name=suite_name,
test_name=test_name,
digits=digits,
)
columns.append(column)
total_suites += omitted_suites
if job in omitted:
total_omitted += omitted[job]
if total_omitted > 0:
columns.append(f"({total_omitted} job re-runs omitted)")
if total_suites > 0:
columns.append(f"({total_suites} matching suites omitted)")
return " ".join(columns)
def make_lines(
*,
jobs: Set[str],
jsons: Dict[str, List[Report]],
filename: Optional[str],
suite_name: Optional[str],
test_name: str,
) -> List[str]:
lines = []
for job, reports in jsons.items():
for data in reports:
cases = get_cases(
data=data,
filename=filename,
suite_name=suite_name,
test_name=test_name,
)
if cases:
case = cases[0]
status = case["status"]
line = f'{job} {case["seconds"]}s{f" {status}" if status else ""}'
if len(cases) > 1:
line += f" ({len(cases) - 1} matching suites omitted)"
lines.append(line)
elif job in jobs:
lines.append(f"{job} (test not found)")
if lines:
return lines
else:
return ["(no reports in S3)"]
def history_lines(
*,
commits: List[Tuple[str, datetime]],
jobs: Optional[List[str]],
filename: Optional[str],
suite_name: Optional[str],
test_name: str,
delta: int,
sha_length: int,
mode: str,
digits: int,
) -> Iterator[str]:
prev_time = datetime.now(tz=timezone.utc)
for sha, time in commits:
if (prev_time - time).total_seconds() < delta * 3600:
continue
prev_time = time
if jobs is None:
summaries = get_test_stats_summaries(sha=sha)
else:
summaries = get_test_stats_summaries(sha=sha, jobs=jobs)
if mode == "columns":
assert jobs is not None
# we assume that get_test_stats_summaries here doesn't
# return empty lists
omitted = {job: len(l) - 1 for job, l in summaries.items() if len(l) > 1}
lines = [
make_columns(
jobs=jobs,
jsons={job: l[0] for job, l in summaries.items()},
omitted=omitted,
filename=filename,
suite_name=suite_name,
test_name=test_name,
digits=digits,
)
]
else:
assert mode == "multiline"
lines = make_lines(
jobs=set(jobs or []),
jsons=summaries,
filename=filename,
suite_name=suite_name,
test_name=test_name,
)
for line in lines:
yield f"{time:%Y-%m-%d %H:%M:%S}Z {sha[:sha_length]} {line}".rstrip()
class HelpFormatter(
argparse.ArgumentDefaultsHelpFormatter,
argparse.RawDescriptionHelpFormatter,
):
pass
def description() -> str:
return r"""
Display the history of a test.
Each line of (non-error) output starts with the timestamp and SHA1 hash
of the commit it refers to, in this format:
YYYY-MM-DD hh:mm:ss 0123456789abcdef0123456789abcdef01234567
In multiline mode, each line next includes the name of a CircleCI job,
followed by the time of the specified test in that job at that commit.
Example:
$ tools/stats/test_history.py --mode=multiline --ref=86a961af879 --sha-length=8 \
--test=test_composite_compliance_dot_cpu_float32 \
--job linux-xenial-py3.7-gcc5.4-test-default1 --job linux-xenial-py3.7-gcc7-test-default1
2022-02-18 15:47:37Z 86a961af linux-xenial-py3.7-gcc5.4-test-default1 0.001s
2022-02-18 15:47:37Z 86a961af linux-xenial-py3.7-gcc7-test-default1 0.001s
2022-02-18 15:12:34Z f5e201e4 linux-xenial-py3.7-gcc5.4-test-default1 0.001s
2022-02-18 15:12:34Z f5e201e4 linux-xenial-py3.7-gcc7-test-default1 0.001s
2022-02-18 13:14:56Z 1c0df265 linux-xenial-py3.7-gcc5.4-test-default1 0.001s
2022-02-18 13:14:56Z 1c0df265 linux-xenial-py3.7-gcc7-test-default1 0.001s
2022-02-18 13:14:56Z e73eaffd (no reports in S3)
2022-02-18 06:29:12Z 710f12f5 linux-xenial-py3.7-gcc5.4-test-default1 0.001s
Another multiline example, this time with the --all flag:
$ tools/stats/test_history.py --mode=multiline --all --ref=86a961af879 --delta=12 --sha-length=8 \
--test=test_composite_compliance_dot_cuda_float32
2022-02-18 03:49:46Z 69389fb5 linux-bionic-cuda10.2-py3.9-gcc7-test-default1 0.001s skipped
2022-02-18 03:49:46Z 69389fb5 linux-bionic-cuda10.2-py3.9-gcc7-test-slow1 0.001s skipped
2022-02-18 03:49:46Z 69389fb5 linux-xenial-cuda11.3-py3.7-gcc7-test-default1 0.001s skipped
2022-02-18 03:49:46Z 69389fb5 periodic-linux-bionic-cuda11.5-py3.7-gcc7-test-default1 0.001s skipped
2022-02-18 03:49:46Z 69389fb5 periodic-linux-xenial-cuda10.2-py3-gcc7-slow-gradcheck-test-default1 0.001s skipped
2022-02-18 03:49:46Z 69389fb5 periodic-linux-xenial-cuda11.1-py3.7-gcc7-debug-test-default1 0.001s skipped
In columns mode, the name of the job isn't printed, but the order of the
columns is guaranteed to match the order of the jobs passed on the
command line. Example:
$ tools/stats/test_history.py --mode=columns --ref=86a961af879 --sha-length=8 \
--test=test_composite_compliance_dot_cpu_float32 \
--job linux-xenial-py3.7-gcc5.4-test-default1 --job linux-xenial-py3.7-gcc7-test-default1
2022-02-18 15:47:37Z 86a961af 0.001s 0.001s
2022-02-18 15:12:34Z f5e201e4 0.001s 0.001s
2022-02-18 13:14:56Z 1c0df265 0.001s 0.001s
2022-02-18 13:14:56Z e73eaffd
2022-02-18 06:29:12Z 710f12f5 0.001s 0.001s
2022-02-18 05:20:30Z 51b04f27 0.001s 0.001s
2022-02-18 03:49:46Z 69389fb5 0.001s 0.001s
2022-02-18 00:19:12Z 056b6260 0.001s 0.001s
2022-02-17 23:58:32Z 39fb7714 0.001s 0.001s
Minor note: in columns mode, a blank cell means that no report was found
in S3, while the word "absent" means that a report was found but the
indicated test was not found in that report.
"""
def parse_args(raw: List[str]) -> argparse.Namespace:
parser = argparse.ArgumentParser(
__file__,
description=description(),
formatter_class=HelpFormatter,
)
parser.add_argument(
"--mode",
choices=["columns", "multiline"],
help="output format",
default="columns",
)
parser.add_argument(
"--pytorch",
help="path to local PyTorch clone",
default=".",
)
parser.add_argument(
"--ref",
help="starting point (most recent Git ref) to display history for",
default="master",
)
parser.add_argument(
"--delta",
type=int,
help="minimum number of hours between commits",
default=0,
)
parser.add_argument(
"--sha-length",
type=int,
help="length of the prefix of the SHA1 hash to show",
default=40,
)
parser.add_argument(
"--digits",
type=int,
help="(columns) number of digits to display before the decimal point",
default=4,
)
parser.add_argument(
"--all",
action="store_true",
help="(multiline) ignore listed jobs, show all jobs for each commit",
)
parser.add_argument(
"--file",
help="name of the file containing the test",
)
parser.add_argument(
"--suite",
help="name of the suite containing the test",
)
parser.add_argument("--test", help="name of the test", required=True)
parser.add_argument(
"--job",
help="names of jobs to display columns for, in order",
action="append",
default=[],
)
args = parser.parse_args(raw)
args.jobs = None if args.all else args.job
    # We don't allow an implicit or empty jobs list unless "--all" is specified.
if args.jobs == []:
parser.error("No jobs specified.")
return args
def run(raw: List[str]) -> Iterator[str]:
args = parse_args(raw)
commits = get_git_commit_history(path=args.pytorch, ref=args.ref)
return history_lines(
commits=commits,
jobs=args.jobs,
filename=args.file,
suite_name=args.suite,
test_name=args.test,
delta=args.delta,
mode=args.mode,
sha_length=args.sha_length,
digits=args.digits,
)
def main() -> None:
for line in run(sys.argv[1:]):
print(line, flush=True)
if __name__ == "__main__":
signal(SIGPIPE, SIG_DFL) # https://stackoverflow.com/a/30091579
try:
main()
except KeyboardInterrupt:
pass
|
pytorch-master
|
tools/stats/test_history.py
|
import argparse
import os
import sys
import xml.etree.ElementTree as ET
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Any, Dict, List, Tuple
from tools.stats.upload_stats_lib import (
download_gha_artifacts,
download_s3_artifacts,
unzip,
upload_to_s3,
)
def get_job_id(report: Path) -> int:
# [Job id in artifacts]
# Retrieve the job id from the report path. In our GHA workflows, we append
# the job id to the end of the report name, so `report` looks like:
# unzipped-test-reports-foo_5596745227/test/test-reports/foo/TEST-foo.xml
# and we want to get `5596745227` out of it.
return int(report.parts[0].rpartition("_")[2])
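# Worked example of the extraction above (the report path is illustrative):
#
#   >>> get_job_id(Path("unzipped-test-reports-foo_5596745227/test/test-reports/foo/TEST-foo.xml"))
#   5596745227
#
# report.parts[0] is the top-level folder and rpartition("_")[2] keeps only the
# text after the last underscore, i.e. the numeric job id.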
def parse_xml_report(
tag: str,
report: Path,
workflow_id: int,
workflow_run_attempt: int,
) -> List[Dict[str, Any]]:
"""Convert a test report xml file into a JSON-serializable list of test cases."""
print(f"Parsing {tag}s for test report: {report}")
job_id = get_job_id(report)
print(f"Found job id: {job_id}")
root = ET.parse(report)
test_cases = []
for test_case in root.iter(tag):
case = process_xml_element(test_case)
case["workflow_id"] = workflow_id
case["workflow_run_attempt"] = workflow_run_attempt
case["job_id"] = job_id
# [invoking file]
# The name of the file that the test is located in is not necessarily
# the same as the name of the file that invoked the test.
# For example, `test_jit.py` calls into multiple other test files (e.g.
# jit/test_dce.py). For sharding/test selection purposes, we want to
# record the file that invoked the test.
#
# To do this, we leverage an implementation detail of how we write out
# tests (https://bit.ly/3ajEV1M), which is that reports are created
# under a folder with the same name as the invoking file.
case["invoking_file"] = report.parent.name
test_cases.append(case)
return test_cases
def process_xml_element(element: ET.Element) -> Dict[str, Any]:
"""Convert a test suite element into a JSON-serializable dict."""
ret: Dict[str, Any] = {}
# Convert attributes directly into dict elements.
# e.g.
# <testcase name="test_foo" classname="test_bar"></testcase>
# becomes:
# {"name": "test_foo", "classname": "test_bar"}
ret.update(element.attrib)
# The XML format encodes all values as strings. Convert to ints/floats if
# possible to make aggregation possible in Rockset.
for k, v in ret.items():
try:
ret[k] = int(v)
except ValueError:
pass
try:
ret[k] = float(v)
except ValueError:
pass
# Convert inner and outer text into special dict elements.
# e.g.
# <testcase>my_inner_text</testcase> my_tail
# becomes:
# {"text": "my_inner_text", "tail": " my_tail"}
if element.text and element.text.strip():
ret["text"] = element.text
if element.tail and element.tail.strip():
ret["tail"] = element.tail
# Convert child elements recursively, placing them at a key:
# e.g.
# <testcase>
# <foo>hello</foo>
# <foo>world</foo>
# <bar>another</bar>
# </testcase>
# becomes
# {
# "foo": [{"text": "hello"}, {"text": "world"}],
# "bar": {"text": "another"}
# }
for child in element:
if child.tag not in ret:
ret[child.tag] = process_xml_element(child)
else:
# If there are multiple tags with the same name, they should be
# coalesced into a list.
if not isinstance(ret[child.tag], list):
ret[child.tag] = [ret[child.tag]]
ret[child.tag].append(process_xml_element(child))
return ret
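# Putting the conversion rules above together, a hedged end-to-end example
# (the XML snippet is illustrative only):
#
#   element = ET.fromstring(
#       '<testcase name="test_foo" time="0.5"><skipped>reason</skipped></testcase>'
#   )
#   process_xml_element(element)
#   # -> {"name": "test_foo", "time": 0.5, "skipped": {"text": "reason"}}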
def get_pytest_parallel_times() -> Dict[Any, Any]:
pytest_parallel_times = {}
for report in Path(".").glob("**/python-pytest/**/*.xml"):
invoking_file = report.parent.name
root = ET.parse(report)
assert len(list(root.iter("testsuite"))) == 1
for test_suite in root.iter("testsuite"):
pytest_parallel_times[
(invoking_file, get_job_id(report))
            ] = float(test_suite.attrib["time"])  # XML attributes are strings; keep "time" numeric
return pytest_parallel_times
def get_tests(
workflow_run_id: int, workflow_run_attempt: int
) -> Tuple[List[Dict[str, Any]], Dict[Any, Any]]:
with TemporaryDirectory() as temp_dir:
print("Using temporary directory:", temp_dir)
os.chdir(temp_dir)
# Download and extract all the reports (both GHA and S3)
s3_paths = download_s3_artifacts(
"test-report", workflow_run_id, workflow_run_attempt
)
for path in s3_paths:
unzip(path)
artifact_paths = download_gha_artifacts(
"test-report", workflow_run_id, workflow_run_attempt
)
for path in artifact_paths:
unzip(path)
# Parse the reports and transform them to JSON
test_cases = []
for xml_report in Path(".").glob("**/*.xml"):
test_cases.extend(
parse_xml_report(
"testcase",
xml_report,
workflow_run_id,
workflow_run_attempt,
)
)
pytest_parallel_times = get_pytest_parallel_times()
return test_cases, pytest_parallel_times
def get_invoking_file_times(
test_case_summaries: List[Dict[str, Any]], pytest_parallel_times: Dict[Any, Any]
) -> List[Dict[str, Any]]:
def get_key(summary: Dict[str, Any]) -> Any:
return (
summary["invoking_file"],
summary["job_id"],
)
def init_value(summary: Dict[str, Any]) -> Any:
return {
"job_id": summary["job_id"],
"workflow_id": summary["workflow_id"],
"workflow_run_attempt": summary["workflow_run_attempt"],
"invoking_file": summary["invoking_file"],
"time": 0.0,
}
ret = {}
for summary in test_case_summaries:
key = get_key(summary)
if key not in ret:
ret[key] = init_value(summary)
ret[key]["time"] += summary["time"]
for key, val in ret.items():
        # When a file runs in parallel under pytest, summing the individual test
        # times does not give the wall-clock time spent on the file, which would
        # make sharding incorrect. In that case, use the time reported by the
        # testsuite instead.
if key in pytest_parallel_times:
val["time"] = pytest_parallel_times[key]
return list(ret.values())
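# Sketch of the override above with hypothetical numbers: if the per-test times
# for ("test_a", job_id=1) sum to 12.3s but pytest_parallel_times records 4.5s
# for that key (because the file ran under pytest parallelism), the summary row
# reports time = 4.5s rather than 12.3s.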
def summarize_test_cases(test_cases: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Group test cases by classname, file, and job_id. We perform the aggregation
manually instead of using the `test-suite` XML tag because xmlrunner does
not produce reliable output for it.
"""
def get_key(test_case: Dict[str, Any]) -> Any:
return (
test_case.get("file"),
test_case.get("classname"),
test_case["job_id"],
test_case["workflow_id"],
test_case["workflow_run_attempt"],
# [see: invoking file]
test_case["invoking_file"],
)
def init_value(test_case: Dict[str, Any]) -> Dict[str, Any]:
return {
"file": test_case.get("file"),
"classname": test_case.get("classname"),
"job_id": test_case["job_id"],
"workflow_id": test_case["workflow_id"],
"workflow_run_attempt": test_case["workflow_run_attempt"],
# [see: invoking file]
"invoking_file": test_case["invoking_file"],
"tests": 0,
"failures": 0,
"errors": 0,
"skipped": 0,
"successes": 0,
"time": 0.0,
}
ret = {}
for test_case in test_cases:
key = get_key(test_case)
if key not in ret:
ret[key] = init_value(test_case)
ret[key]["tests"] += 1
if "failure" in test_case:
ret[key]["failures"] += 1
elif "error" in test_case:
ret[key]["errors"] += 1
elif "skipped" in test_case:
ret[key]["skipped"] += 1
else:
ret[key]["successes"] += 1
ret[key]["time"] += test_case["time"]
return list(ret.values())
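# Hedged aggregation example: two cases that share the grouping key collapse
# into a single summary row (all field values below are hypothetical):
#
#   cases = [
#       {"file": "test_a.py", "classname": "TestA", "job_id": 1, "workflow_id": 2,
#        "workflow_run_attempt": 1, "invoking_file": "test_a", "time": 0.1},
#       {"file": "test_a.py", "classname": "TestA", "job_id": 1, "workflow_id": 2,
#        "workflow_run_attempt": 1, "invoking_file": "test_a", "time": 0.2, "skipped": {}},
#   ]
#   summarize_test_cases(cases)
#   # -> one row with tests=2, successes=1, skipped=1, failures=0, errors=0, time≈0.3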
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Upload test stats to Rockset")
parser.add_argument(
"--workflow-run-id",
type=int,
required=True,
help="id of the workflow to get artifacts from",
)
parser.add_argument(
"--workflow-run-attempt",
type=int,
required=True,
help="which retry of the workflow this is",
)
parser.add_argument(
"--head-branch",
required=True,
help="Head branch of the workflow",
)
args = parser.parse_args()
test_cases, pytest_parallel_times = get_tests(
args.workflow_run_id, args.workflow_run_attempt
)
# Flush stdout so that any errors in rockset upload show up last in the logs.
sys.stdout.flush()
# For PRs, only upload a summary of test_runs. This helps lower the
# volume of writes we do to Rockset.
test_case_summary = summarize_test_cases(test_cases)
invoking_file_times = get_invoking_file_times(
test_case_summary, pytest_parallel_times
)
upload_to_s3(
args.workflow_run_id,
args.workflow_run_attempt,
"test_run_summary",
test_case_summary,
)
upload_to_s3(
args.workflow_run_id,
args.workflow_run_attempt,
"invoking_file_times",
invoking_file_times,
)
if args.head_branch == "master":
        # For master jobs, upload everything.
upload_to_s3(
args.workflow_run_id, args.workflow_run_attempt, "test_run", test_cases
)
|
pytorch-master
|
tools/stats/upload_test_stats.py
|
# -*- coding: utf-8 -*-
# Owner(s): ["module: mps"]
import sys
import math
import random
import unittest
import warnings
import subprocess
import tempfile
import os
import pprint
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
from collections import defaultdict
from torch._six import inf
from torch.nn import Parameter
from torch.testing._internal.common_utils import \
(gradcheck, gradgradcheck, run_tests, TestCase, download_file,
TEST_WITH_UBSAN)
from torch.testing import make_tensor
from torch.testing._comparison import TensorLikePair
from torch.testing._internal.common_dtype import get_all_dtypes
import torch.backends.mps
from torch.distributions import Uniform, Exponential
from functools import partial, reduce  # reduce is used by the TestAvgPool helpers below
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests
from torch.testing._internal.common_nn import NNTestCase
import numpy as np
import torch
# Same logic as test_cuda.py
if not torch.backends.mps.is_available():
print('MPS not available, skipping tests', file=sys.stderr)
TestCase = object # noqa: F811
NNTestCase = object # noqa: F811
class MPSReluTest(TestCase):
def _npRelu(self, np_features):
return np.maximum(np_features, np.zeros(np_features.shape)).astype(np_features.dtype)
def testNpRelu(self):
torch.testing.assert_allclose(
np.array([[0., 0.7, 0.0, 0.3, 0.0], [0.1, 0.0, 0.5, 0.0, 0.9]]),
self._npRelu(
np.array([[-0.9, 0.7, -0.5, 0.3, -0.1], [0.1, -0.3, 0.5, -0.7,
0.9]])))
def _testRelu(self, np_features, device):
np_relu = self._npRelu(np_features)
# Convert the numpy array to a PyTorch Tensor,
# and move the Tensor to the CPU/GPU based on the "device" parameter
py_tensor = torch.from_numpy(np_features).to(device)
py_relu = torch.nn.ReLU(inplace=False)(py_tensor)
py_relu_cpu = py_relu.to("cpu")
torch.testing.assert_allclose(np_relu, py_relu_cpu)
def _testReluInPlace(self, np_features, device):
np_relu = self._npRelu(np_features)
# Convert the numpy array to a PyTorch Tensor,
# and move the Tensor to the CPU/GPU based on the "device" parameter
py_tensor = torch.from_numpy(np_features).to(device)
py_relu = torch.nn.ReLU(inplace=True)(py_tensor)
py_relu_cpu = py_relu.to("cpu")
torch.testing.assert_allclose(np_relu, py_relu_cpu)
        # In-place ReLU modifies the input tensor, so it should now match the ReLU output
torch.testing.assert_allclose(np_relu, py_tensor.to("cpu"))
def testNumbersCPU(self):
for t in [np.int32]:
# Force execution on CPU even if a GPU kernel is available for the type.
self._testRelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
device="cpu")
self._testReluInPlace(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
device="cpu")
def testNumbersGPU(self):
for t in [np.float16, np.float32]:
self._testRelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
device="mps")
self._testReluInPlace(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
device="mps")
class MatmulTest(TestCase):
def _helper(self, shape_tensor_1, shape_tensor_2, expand_tensor_1_shape=None, expand_tensor_2_shape=None):
if expand_tensor_1_shape:
tensor1_mps = torch.randn(shape_tensor_1, device="mps").expand(expand_tensor_1_shape)
else:
tensor1_mps = torch.randn(shape_tensor_1, device="mps")
if expand_tensor_2_shape:
tensor2_mps = torch.randn(shape_tensor_2, device="mps").expand(expand_tensor_2_shape)
else:
tensor2_mps = torch.randn(shape_tensor_2, device="mps")
tensor1_cpu = tensor1_mps.to("cpu")
tensor2_cpu = tensor2_mps.to("cpu")
matmul_cpu = torch.matmul(tensor1_cpu, tensor2_cpu)
matmul_mps = torch.matmul(tensor1_mps, tensor2_mps)
self.assertEqual(matmul_cpu, matmul_mps.to("cpu"))
def test_vector_x_vector(self):
# uses `dot`
self._helper(3, 3)
def test_matrix_x_vector(self):
# uses `addmv`
self._helper((3, 4), 4)
def test_batched_matrix_x_broadcasted_vector(self):
self._helper((10, 3, 4), 4)
def test_batched_matrix_x_batched_matrix(self):
# uses `bmm.out`
self._helper((10, 3, 4), (10, 4, 5))
def test_batched_matrix_x_broadcasted_matrix(self):
self._helper((10, 3, 4), (4, 5))
class MPSLeakyReluTest(TestCase):
def _npLeakyRelu(self, np_features, negative_slope=0.1):
return np.maximum(np_features, negative_slope * np_features).astype(np_features.dtype)
def testNpLeakyRelu(self):
torch.testing.assert_allclose(
np.array([[-0.09, 0.7, -0.05, 0.3, -0.01],
[0.1, -0.03, 0.5, -0.07, 0.9]]),
self._npLeakyRelu(
np.array([[-0.9, 0.7, -0.5, 0.3, -0.1], [0.1, -0.3, 0.5, -0.7,
0.9]]),
negative_slope=0.1))
def _testLeakyRelu(self, np_features, negative_slope, device):
cpu_x = torch.from_numpy(np_features).requires_grad_()
mps_x = torch.from_numpy(np_features).to('mps').requires_grad_()
relu_op = torch.nn.LeakyReLU(negative_slope)
cpu_leaky_relu = relu_op(cpu_x)
mps_leaky_relu = relu_op(mps_x)
torch.testing.assert_allclose(cpu_leaky_relu, mps_leaky_relu.to('cpu'))
# test backward pass
cpu_grad = torch.ones_like(cpu_leaky_relu)
mps_grad = cpu_grad.to('mps')
cpu_leaky_relu.backward(gradient=cpu_grad)
mps_leaky_relu.backward(gradient=mps_grad)
torch.testing.assert_allclose(cpu_x.grad, mps_x.grad.to('cpu'))
def testNumbersCPU(self):
for t in [np.float32]:
self._testLeakyRelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
negative_slope=0.2,
device="cpu")
class TestAvgPool(TestCase):
def _sum_pool2d(self, x, kernel_size):
windows = torch.nn.functional.unfold(x, kernel_size=kernel_size, stride=kernel_size)
return torch.sum(windows, dim=1)
def _sum_pool3d(self, x, kernel_size):
        # Because unfold does not support a 3D sliding window, we split the tensor into chunks along depth and sum them
h = kernel_size[0]
splited_x = [t.sum(0) for t in x.split(h) if t.size(0) == h]
# sum_pool2d assumes tensor in (1, 1, n, m) view, so unsqueeze two times
splited_x = [self._sum_pool2d(t.unsqueeze(0).unsqueeze(0), kernel_size[1:]) for t in splited_x]
joined_x = torch.cat(splited_x)
return joined_x.view(1, joined_x.numel())
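    # Illustrative walk-through of the trick above (shapes are hypothetical):
    # for a (4, 6, 6) input and kernel_size=(2, 3, 3), x.split(2) yields two
    # (2, 6, 6) chunks; each chunk is summed over depth to (6, 6), pooled with
    # the 2D helper, and the results are concatenated and flattened.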
def _avg_pool2d(self, x, kernel_size):
size = reduce((lambda x, y: x * y), kernel_size)
return self._sum_pool2d(x, kernel_size) / size
def _avg_pool3d(self, x, kernel_size):
size = reduce((lambda x, y: x * y), kernel_size)
return self._sum_pool3d(x, kernel_size) / size
def test_avg_pool2d_with_zero_divisor(self):
self.assertRaisesRegex(RuntimeError, "divisor must be not zero",
lambda: F.avg_pool2d(torch.zeros(3, 3, 3), (2, 2), divisor_override=0))
def test_doubletensor_avg_pool2d_with_divisor(self):
n, m = 3, 3
input = torch.rand(1, 1, n, m)
for i in range(1, n + 1):
for j in range(1, m + 1):
for divisor in [1, 7, i * j]:
actual = F.avg_pool2d(input[0], (i, j), divisor_override=divisor)
actual = actual.view(1, actual.numel())
expected = self._sum_pool2d(input, (i, j)) / divisor
self.assertEqual(actual, expected, rtol=0, atol=1e-5)
def test_avg_pool2d_ceil_mode(self):
# Regression test for gh-36977
x = 10 * torch.randn((1, 16, 4, 4))
y = torch.nn.functional.avg_pool2d(
x, ceil_mode=True, count_include_pad=True, kernel_size=(1, 2),
padding=(0, 1), stride=2)
self.assertTrue(not torch.isnan(y).any())
y = torch.nn.functional.avg_pool2d(
x.to('mps'), ceil_mode=True, count_include_pad=True, kernel_size=(1, 2),
padding=(0, 1), stride=2)
self.assertTrue(not torch.isnan(y).any())
class TestMPS(TestCase):
def test_exp(self, device="mps", dtype=torch.float):
for v in (2, -2) + ((1j, 1 + 1j) if dtype.is_complex else ()):
b = torch.arange(18, device="cpu") / 3 * math.pi
a = torch.tensor(v, dtype=dtype, device="cpu") * b
a = a.to(dtype).to("mps")
self.compare_with_numpy(torch.exp, np.exp, a)
def test_exp1(self, device="mps", dtype=torch.float):
input = torch.tensor([-0.1, 3.0, -0.9]).to('mps')
output = torch.exp(input).to('cpu')
def _testLeakyRelu(self, np_features, negative_slope, device):
cpu_x = torch.from_numpy(np_features).requires_grad_()
mps_x = torch.from_numpy(np_features).to('mps').requires_grad_()
relu_op = torch.nn.LeakyReLU(negative_slope)
cpu_leaky_relu = relu_op(cpu_x)
mps_leaky_relu = relu_op(mps_x)
torch.testing.assert_allclose(cpu_leaky_relu, mps_leaky_relu.to('cpu'))
# test backward pass
cpu_grad = torch.ones_like(cpu_leaky_relu)
mps_grad = cpu_grad.to('mps')
cpu_leaky_relu.backward(gradient=cpu_grad)
mps_leaky_relu.backward(gradient=mps_grad)
torch.testing.assert_allclose(cpu_x.grad, mps_x.grad.to('cpu'))
def testNumbersGPU(self):
for t in [np.float32]:
self._testLeakyRelu(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
negative_slope=0.1,
device="mps")
def test_fill(self):
def helper(val, shape):
tensor = torch.zeros(shape, device='mps')
tensor_mps = tensor.fill_(val)
tensor_mps = torch.tanh(tensor_mps)
tensor_0 = torch.zeros(shape, device='cpu')
tensor_cpu = tensor_0.fill_(val)
tensor_cpu = torch.tanh(tensor_cpu)
self.assertEqual(tensor_mps, tensor_cpu)
helper(0, [1024])
helper(0.2, [2, 3])
def test_mm(self):
B = torch.ones(5, 6).to("mps")
C = torch.ones(6, 5).to("mps")
D = torch.mm(B, C).cpu()
torch.testing.assert_allclose(D, torch.full((5, 5), 6.0))
def test_addmm(self):
A = torch.ones(5, 5).to("mps")
B = torch.ones(5, 6).to("mps")
C = torch.ones(6, 5).to("mps")
D = torch.addmm(A, B, C).to("cpu")
torch.testing.assert_allclose(D, torch.full((5, 5), 7.0))
def test_bmm(self):
batch1_cpu = torch.randn(10, 3, 4)
batch2_cpu = torch.randn(10, 4, 5)
batch1_mps = batch1_cpu.detach().clone().to("mps")
batch2_mps = batch2_cpu.detach().clone().to("mps")
output_cpu = torch.bmm(batch1_cpu, batch2_cpu)
output_mps = torch.bmm(batch1_mps, batch2_mps)
self.assertEqual(output_cpu, output_mps)
self.assertEqual(output_cpu.size(), output_mps.size())
def test_addbmm(self):
M_cpu = torch.randn(3, 5)
batch1_cpu = torch.randn(10, 3, 4)
batch2_cpu = torch.randn(10, 4, 5)
M_mps = M_cpu.detach().clone().to("mps")
batch1_mps = batch1_cpu.detach().clone().to("mps")
batch2_mps = batch2_cpu.detach().clone().to("mps")
output_cpu = torch.addbmm(M_cpu, batch1_cpu, batch2_cpu)
output_mps = torch.addbmm(M_mps, batch1_mps, batch2_mps)
self.assertEqual(output_cpu, output_mps)
self.assertEqual(output_cpu.size(), output_mps.size())
def test_baddbmm(self):
def helper(input_shape, batch1_shape, batch2_shape):
M_cpu = torch.randn(input_shape)
batch1_cpu = torch.randn(batch1_shape)
batch2_cpu = torch.randn(batch2_shape)
alpha = 1.2
beta = 0.8
M_mps = M_cpu.detach().clone().to("mps")
batch1_mps = batch1_cpu.detach().clone().to("mps")
batch2_mps = batch2_cpu.detach().clone().to("mps")
output_cpu = torch.baddbmm(M_cpu, batch1_cpu, batch2_cpu, beta=beta, alpha=alpha)
output_mps = torch.baddbmm(M_mps, batch1_mps, batch2_mps, beta=beta, alpha=alpha)
self.assertEqual(output_cpu, output_mps)
self.assertEqual(output_cpu.size(), output_mps.size())
helper(input_shape=(3, 5), batch1_shape=(10, 3, 4), batch2_shape=(10, 4, 5))
helper(input_shape=(10, 3, 5), batch1_shape=(10, 3, 4), batch2_shape=(10, 4, 5))
helper(input_shape=(1, 77, 77), batch1_shape=(8, 77, 64), batch2_shape=(8, 64, 77))
def test_local_scalar_dense_mps(self):
x_cpu = torch.randn(1)
y_mps = x_cpu.to("mps")
torch.testing.assert_allclose(x_cpu.item(), y_mps.item())
def _linear_helper(self, in_features, out_features, shape, bias=True, backward_pass=False):
cpu_linear = torch.nn.Linear(in_features=in_features, out_features=out_features, device="cpu", bias=bias)
mps_linear = torch.nn.Linear(in_features=in_features, out_features=out_features, device="mps", bias=bias)
# Use the same weights and bias as the ones from the cpu
mps_linear.weight.data = cpu_linear.weight.data.detach().clone().to("mps")
if bias:
mps_linear.bias.data = cpu_linear.bias.data.detach().clone().to("mps")
linear_mps_input = torch.randn(shape).to('mps')
linear_cpu_input = linear_mps_input.detach().clone().to('cpu')
if backward_pass:
linear_mps_input = linear_mps_input.requires_grad_()
linear_cpu_input = linear_cpu_input.requires_grad_()
linear_cpu_output = cpu_linear(linear_cpu_input)
linear_mps_output = mps_linear(linear_mps_input)
self.assertEqual(linear_cpu_output, linear_mps_output.to('cpu'))
self.assertEqual(linear_cpu_output.size(), linear_mps_output.size())
if backward_pass:
cpu_grad = torch.ones_like(linear_cpu_output)
grad = cpu_grad.to('mps')
linear_cpu_output.backward(gradient=cpu_grad)
linear_mps_output.backward(gradient=grad)
self.assertEqual(linear_cpu_input.grad.size(), linear_mps_input.grad.size())
self.assertEqual(linear_cpu_input.grad, linear_mps_input.grad.to("cpu"), atol=8e-04, rtol=10.4e-05)
self.assertEqual(cpu_linear.weight.grad.size(), mps_linear.weight.grad.size())
self.assertEqual(cpu_linear.weight.grad, mps_linear.weight.grad.to("cpu"), atol=8e-04, rtol=10.4e-05)
if bias:
self.assertEqual(cpu_linear.bias.grad.size(), mps_linear.bias.grad.size())
self.assertEqual(cpu_linear.bias.grad, mps_linear.bias.grad.to("cpu"), atol=8e-04, rtol=10.4e-05)
def test_linear1D(self):
self._linear_helper(in_features=2, out_features=3, shape=([2]), bias=True, backward_pass=False)
def test_linear1D_backward(self):
self._linear_helper(in_features=2, out_features=3, shape=([2]), bias=True, backward_pass=True)
def test_linear2D(self):
self._linear_helper(in_features=2, out_features=3, shape=((4, 2)), bias=True, backward_pass=False)
def test_linear2D_backward(self):
self._linear_helper(in_features=2, out_features=3, shape=((4, 2)), bias=True, backward_pass=True)
def test_linear2D_no_bias(self):
self._linear_helper(in_features=2, out_features=3, shape=((4, 2)), bias=False, backward_pass=False)
def test_linear2D_no_bias_backward(self):
self._linear_helper(in_features=2, out_features=3, shape=((4, 2)), bias=False, backward_pass=True)
def test_linear3D(self):
self._linear_helper(in_features=2, out_features=3, shape=((4, 5, 2)), bias=True, backward_pass=False)
def test_linear3D_backward(self):
self._linear_helper(in_features=2, out_features=3, shape=((4, 5, 2)), bias=True, backward_pass=True)
def test_linear3D_no_bias(self):
        self._linear_helper(in_features=2, out_features=3, shape=((4, 5, 2)), bias=False, backward_pass=False)
    def test_linear3D_no_bias_backward(self):
        self._linear_helper(in_features=2, out_features=3, shape=((4, 5, 2)), bias=False, backward_pass=True)
def test_uniform(self):
low = torch.zeros(5, 5, requires_grad=True)
high = (torch.ones(5, 5) * 3).requires_grad_()
low_1d = torch.zeros(1, requires_grad=True)
high_1d = (torch.ones(1) * 3).requires_grad_()
self.assertEqual(Uniform(low, high).sample().size(), (5, 5))
self.assertEqual(Uniform(low, high).sample((7,)).size(), (7, 5, 5))
self.assertEqual(Uniform(low_1d, high_1d).sample().size(), (1,))
self.assertEqual(Uniform(low_1d, high_1d).sample((1,)).size(), (1, 1))
self.assertEqual(Uniform(0.0, 1.0).sample((1,)).size(), (1,))
# Check log_prob computation when value outside range
uniform = Uniform(low_1d, high_1d, validate_args=False)
above_high = torch.tensor([4.0])
below_low = torch.tensor([-1.0])
self.assertEqual(uniform.log_prob(above_high).item(), -inf)
self.assertEqual(uniform.log_prob(below_low).item(), -inf)
# check cdf computation when value outside range
self.assertEqual(uniform.cdf(below_low).item(), 0)
self.assertEqual(uniform.cdf(above_high).item(), 1)
state = torch.get_rng_state()
rand = low.new(low.size()).uniform_()
torch.set_rng_state(state)
u = Uniform(low, high).rsample()
u.backward(torch.ones_like(u))
self.assertEqual(low.grad, 1 - rand)
self.assertEqual(high.grad, rand)
low.grad.zero_()
high.grad.zero_()
# Test forward maxpool2d
def test_max_pool2d(self):
def helper(shape, ks, padding=0, dilation=1, ceil_mode=False, return_indices=False, test_ties=False):
cpu_x = None
if(test_ties):
cpu_x = torch.ones(shape, device='cpu', dtype=torch.float, requires_grad=True)
else:
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
pool = torch.nn.MaxPool2d(kernel_size=ks, padding=padding, dilation=dilation,
ceil_mode=ceil_mode, return_indices=return_indices)
if(return_indices is False):
y = pool(x)
ref_y = pool(cpu_x)
cpu_grad = torch.ones_like(ref_y)
grad = cpu_grad.to('mps')
y.backward(gradient=grad)
ref_y.backward(gradient=cpu_grad)
self.assertEqual(y, ref_y)
self.assertEqual(x.grad, cpu_x.grad)
else:
y, idx = pool(x)
ref_y, ref_idx = pool(cpu_x)
cpu_grad = torch.ones_like(ref_y)
grad = cpu_grad.to('mps')
y.backward(gradient=grad)
ref_y.backward(gradient=cpu_grad)
self.assertEqual(y, ref_y)
self.assertEqual(idx, ref_idx)
self.assertEqual(x.grad, cpu_x.grad)
# Test with no batch dimension
helper((8, 4, 4), ks=2)
helper((2, 8, 4, 4), ks=2)
helper((1, 1000, 32, 32), ks=4)
helper((1, 1000, 1, 4), ks=(1, 4)) # test for max_pool1d
# Test padding
helper((1, 1000, 32, 32), ks=4, padding=1)
helper((1, 1000, 1, 4), ks=(1, 4), padding=(0, 1)) # test for max_pool1d
# Test dilation
helper((1, 1000, 32, 32), ks=4, dilation=2)
helper((1, 1000, 1, 4), ks=(1, 4), padding=(0, 2)) # test for max_pool1d
# Test ceil mode
helper((1, 1000, 32, 32), ks=4, ceil_mode=True)
helper((1, 1000, 1, 4), ks=(1, 4), ceil_mode=True) # test for max_pool1d
# Test return indices
for test_ties in [False, True]:
# Test with no batch dimension
helper((8, 4, 4), ks=2, return_indices=True, test_ties=test_ties)
helper((2, 8, 4, 4), ks=2, return_indices=True, test_ties=test_ties)
helper((1, 1000, 32, 32), ks=4, return_indices=True, test_ties=test_ties)
helper((1, 1000, 1, 4), ks=(1, 4), return_indices=True, test_ties=test_ties) # test for max_pool1d
# Test padding
helper((1, 1000, 32, 32), ks=4, padding=1, return_indices=True, test_ties=test_ties)
helper((1, 1000, 1, 4), ks=(1, 4), padding=(0, 1),
return_indices=True, test_ties=test_ties) # test for max_pool1d
# Test dilation
helper((1, 1000, 32, 32), ks=4, dilation=2, return_indices=True, test_ties=test_ties)
helper((1, 1000, 1, 4), ks=(1, 4), padding=(0, 2),
return_indices=True, test_ties=test_ties) # test for max_pool1d
# Test ceil mode
helper((1, 1000, 32, 32), ks=4, ceil_mode=True, return_indices=True, test_ties=test_ties)
helper((1, 1000, 1, 4), ks=(1, 4), ceil_mode=True,
return_indices=True, test_ties=test_ties) # test for max_pool1d
def test_adaptive_avg_pool2d_output_size_one(self):
def helper(size, memory_format):
x = torch.randint(1, 10, size, dtype=torch.float, device='mps', requires_grad=True)
if memory_format == 'non_contiguous':
x = x[::2, ::2, ::2, ::2]
else:
x = x.to(memory_format=memory_format)
net = torch.nn.AdaptiveAvgPool2d((1, 1))
out = net(x)
ref_out = x.contiguous().mean((-1, -2)).view((x.size(0), x.size(1), 1, 1))
out.sum().backward() # make sure it doesn't crash
self.assertEqual(out, ref_out)
if memory_format == torch.channels_last:
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
c = out.size(1)
self.assertEqual(out.stride(), [c, 1, c, c])
else:
self.assertTrue(out.is_contiguous())
c = out.size(1)
self.assertEqual(out.stride(), [c, 1, 1, 1])
helper((2, 3, 6, 6), torch.contiguous_format)
def test_masked_fill(self):
device = "mps"
dtype = torch.float32
mask_dtype = torch.bool
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
num_dest = 10
dst = torch.zeros(num_dest, dtype=dtype, device=device)
mask = torch.randint(2, (num_dest,), dtype=mask_dtype, device=device)
val = random.random()
dst2 = torch.zeros(num_dest, dtype=dtype)
mask_cpu = mask.to("cpu")
dst.masked_fill_(mask, val)
for i in range(num_dest):
if mask_cpu[i]:
dst2[i] = val
self.assertEqual(dst.to("cpu"), dst2, atol=0, rtol=0)
# test non-contiguous case
dst = ((torch.randn(num_dest, num_dest, num_dest) * 10).to(dtype)).permute((2, 0, 1))
dst2 = dst.contiguous()
if dtype.is_complex:
mask = dst.abs() > 0
else:
mask = dst > 0
self.assertTrue(not dst.is_contiguous())
self.assertTrue(dst2.is_contiguous())
dst.masked_fill_(mask.to(mask_dtype), val)
dst2.masked_fill_(mask.to(mask_dtype), val)
self.assertEqual(dst, dst2, atol=0, rtol=0)
if mask_dtype == torch.uint8:
self.assertEqual(len(w), 3)
warn = 'masked_fill_ received a mask with dtype torch.uint8,'
for wi in w:
self.assertEqual(str(wi.message)[0:52], str(warn))
else:
self.assertEqual(len(w), 0)
def test_nhwc_operation(self):
def helper(shape, channels_last=False):
import numpy as np
np.random.seed(332)
arr = (256 - 128) * np.random.random_sample(size=shape) + 128
cpu_x = torch.tensor(arr, device='cpu', dtype=torch.float, requires_grad=True)
if(channels_last):
cpu_x = cpu_x.to(memory_format=torch.channels_last)
cpu_x.retain_grad()
x = cpu_x.detach().clone().to('mps').requires_grad_()
# This passes
self.assertEqual(x, cpu_x)
helper((2, 2, 2, 2), True)
# Test forward batch norm
def test_batch_norm(self):
def helper(shape, eps=1, momentum=0.1, wts=False, training=False, channels_last=False,
track_running_stats=True, test_module=False):
import numpy as np
np.random.seed(332)
arr = (256 - 128) * np.random.random_sample(size=shape) + 128
cpu_x = torch.tensor(arr, device='cpu', dtype=torch.float, requires_grad=True)
if(channels_last):
cpu_x = cpu_x.to(memory_format=torch.channels_last)
cpu_x.retain_grad()
x = cpu_x.detach().clone().to('mps').requires_grad_()
mean_shape = [shape[1]]
cpu_running_mean = None
cpu_running_var = None
running_mean = None
running_var = None
if(track_running_stats):
mean_arr = (240 - 140) * np.random.random_sample(size=mean_shape) + 140
cpu_running_mean = torch.tensor(mean_arr, device='cpu', dtype=torch.float)
var_arr = 32 * np.random.random_sample(size=mean_shape)
cpu_running_var = torch.tensor(var_arr, device='cpu', dtype=torch.float)
running_mean = cpu_running_mean.detach().clone().to('mps')
running_var = cpu_running_var.detach().clone().to('mps')
weight = None
cpu_weight = None
bias = None
cpu_bias = None
if(wts):
cpu_weight = torch.randn(mean_shape, device='cpu', dtype=torch.float, requires_grad=True)
weight = cpu_weight.detach().clone().to('mps').requires_grad_()
cpu_bias = torch.randn(mean_shape, device='cpu', dtype=torch.float, requires_grad=True)
bias = cpu_bias.detach().clone().to('mps').requires_grad_()
y = None
ref_y = None
if(not test_module):
y = torch.nn.functional.batch_norm(x, running_mean, running_var,
weight=weight,
bias=bias,
training=training,
momentum=momentum, eps=eps)
ref_y = torch.nn.functional.batch_norm(cpu_x, cpu_running_mean, cpu_running_var,
weight=cpu_weight,
bias=cpu_bias,
training=training,
momentum=momentum, eps=eps)
else:
batchnorm_op = None
mps_batchnorm_op = None
if(len(shape) == 3):
batchnorm_op = torch.nn.BatchNorm1d(shape[1],
eps=eps,
momentum=momentum,
affine=wts,
track_running_stats=track_running_stats,
device='cpu')
mps_batchnorm_op = torch.nn.BatchNorm1d(shape[1],
eps=eps,
momentum=momentum,
affine=wts,
track_running_stats=track_running_stats,
device='mps')
elif(len(shape) == 4):
batchnorm_op = torch.nn.BatchNorm2d(shape[1],
eps=eps,
momentum=momentum,
affine=wts,
track_running_stats=track_running_stats,
device='cpu')
mps_batchnorm_op = torch.nn.BatchNorm2d(shape[1],
eps=eps,
momentum=momentum,
affine=wts,
track_running_stats=track_running_stats,
device='mps')
elif(len(shape) == 5):
batchnorm_op = torch.nn.BatchNorm3d(shape[1],
eps=eps,
momentum=momentum,
affine=wts,
track_running_stats=track_running_stats,
device='cpu')
mps_batchnorm_op = torch.nn.BatchNorm3d(shape[1],
eps=eps,
momentum=momentum,
affine=wts,
track_running_stats=track_running_stats,
device='mps')
if(track_running_stats):
batchnorm_op.running_mean = cpu_running_mean
batchnorm_op.running_var = cpu_running_var
mps_batchnorm_op.running_mean = running_mean
mps_batchnorm_op.running_var = running_var
if(wts):
batchnorm_op.weight = torch.nn.Parameter(cpu_weight)
batchnorm_op.bias = torch.nn.Parameter(cpu_bias)
mps_batchnorm_op.weight = torch.nn.Parameter(weight)
mps_batchnorm_op.bias = torch.nn.Parameter(bias)
ref_y = batchnorm_op(cpu_x)
y = mps_batchnorm_op(x)
self.assertEqual(y, ref_y)
if(not test_module):
self.assertEqual(running_mean, cpu_running_mean)
self.assertEqual(running_var, cpu_running_var)
else:
self.assertEqual(mps_batchnorm_op.running_mean, batchnorm_op.running_mean)
self.assertEqual(mps_batchnorm_op.running_var, batchnorm_op.running_var)
cpu_grad = torch.randn(ref_y.shape)
grad = cpu_grad.to('mps')
ref_y.backward(gradient=cpu_grad)
y.backward(gradient=grad)
self.assertEqual(x.grad, cpu_x.grad)
if(wts):
if(not test_module):
self.assertEqual(weight.grad, cpu_weight.grad)
self.assertEqual(bias.grad, cpu_bias.grad)
else:
self.assertEqual(mps_batchnorm_op.weight.grad, batchnorm_op.weight.grad)
self.assertEqual(mps_batchnorm_op.bias.grad, batchnorm_op.bias.grad)
for shape in [(2, 3, 2, 2), (2, 3, 2, 2, 2), (2, 3, 2)]:
for test_module in [False, True]:
for track_running_stats in [True, False]:
for channels_last in [False]:
if(channels_last and len(shape) != 4):
continue
# Running stats must be tracked in eval mode
if(track_running_stats):
helper(shape, eps=0, momentum=1, channels_last=channels_last,
track_running_stats=track_running_stats, test_module=test_module)
helper(shape, channels_last=channels_last,
track_running_stats=track_running_stats, test_module=test_module)
helper(shape, eps=1e-05, momentum=0.1, wts=False, training=False, channels_last=channels_last,
track_running_stats=track_running_stats, test_module=test_module)
helper(shape, eps=0, momentum=1.0, wts=False, training=False, channels_last=channels_last,
track_running_stats=track_running_stats, test_module=test_module)
helper(shape, eps=1, momentum=1, wts=True, training=False, channels_last=channels_last,
track_running_stats=track_running_stats, test_module=test_module)
helper(shape, eps=3, momentum=0.67, wts=True, training=False, channels_last=channels_last,
track_running_stats=track_running_stats, test_module=test_module)
helper(shape, eps=1e-05, momentum=0.1, wts=False, training=True, channels_last=channels_last,
track_running_stats=track_running_stats, test_module=test_module)
helper(shape, eps=0, momentum=1.0, wts=False, training=True, channels_last=channels_last,
track_running_stats=track_running_stats, test_module=test_module)
helper(shape, eps=1, momentum=1, wts=True, training=True, channels_last=channels_last,
track_running_stats=track_running_stats, test_module=test_module)
helper(shape, eps=3, momentum=0.67, wts=True, training=True, channels_last=channels_last,
track_running_stats=track_running_stats, test_module=test_module)
def test_layer_norm(self):
# TODO: Test non-contiguous
def helper(input_shape, normalized_shape, eps=1e-05, elementwise_affine=True, dtype=torch.float32):
cpu_x = torch.randn(input_shape, device='cpu', dtype=dtype, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
cpu_op = torch.nn.LayerNorm(normalized_shape, eps=eps, elementwise_affine=elementwise_affine, device='cpu', dtype=dtype)
mps_op = torch.nn.LayerNorm(normalized_shape, eps=eps, elementwise_affine=elementwise_affine, device='mps', dtype=dtype)
cpu_wt = torch.randn(normalized_shape, device='cpu', dtype=dtype, requires_grad=True)
wt = cpu_wt.detach().clone().to('mps').requires_grad_()
cpu_bias = torch.randn(normalized_shape, device='cpu', dtype=dtype, requires_grad=True)
bias = cpu_bias.detach().clone().to('mps').requires_grad_()
if(elementwise_affine):
cpu_op.weight = torch.nn.Parameter(cpu_wt)
mps_op.weight = torch.nn.Parameter(wt)
cpu_op.bias = torch.nn.Parameter(cpu_bias)
mps_op.bias = torch.nn.Parameter(bias)
cpu_result = cpu_op(cpu_x)
result = mps_op(x)
cpu_grad = torch.randn(cpu_result.shape)
grad = cpu_grad.to('mps')
cpu_result.backward(cpu_grad)
result.backward(grad)
self.assertEqual(result, cpu_result)
self.assertEqual(x.grad, cpu_x.grad)
if(elementwise_affine):
self.assertEqual(mps_op.weight.grad, cpu_op.weight.grad)
self.assertEqual(mps_op.bias.grad, cpu_op.bias.grad)
for elementwise_affine in [True, False]:
helper((2, 2, 2, 2), (2, 2), elementwise_affine=elementwise_affine)
helper((2, 3, 4, 5), (4, 5), elementwise_affine=elementwise_affine)
helper((2, 3, 4, 5, 6), (4, 5, 6), elementwise_affine=elementwise_affine)
def test_instance_norm(self):
def helper(shape, eps=1, momentum=0.1, wts=False, channels_last=False, track_running_stats=True, test_module=False):
import numpy as np
np.random.seed(332)
arr = (256 - 128) * np.random.random_sample(size=shape) + 128
cpu_x = torch.tensor(arr, device='cpu', dtype=torch.float, requires_grad=True)
if(channels_last):
cpu_x = cpu_x.to(memory_format=torch.channels_last)
cpu_x.retain_grad()
x = cpu_x.detach().clone().to('mps').requires_grad_()
mean_shape = [shape[1]]
cpu_running_mean = None
cpu_running_var = None
running_mean = None
running_var = None
if(track_running_stats):
mean_arr = (240 - 140) * np.random.random_sample(size=mean_shape) + 140
cpu_running_mean = torch.tensor(mean_arr, device='cpu', dtype=torch.float)
var_arr = 32 * np.random.random_sample(size=mean_shape)
cpu_running_var = torch.tensor(var_arr, device='cpu', dtype=torch.float)
running_mean = cpu_running_mean.detach().clone().to('mps')
running_var = cpu_running_var.detach().clone().to('mps')
weight = None
cpu_weight = None
bias = None
cpu_bias = None
if(wts):
cpu_weight = torch.randn(mean_shape, device='cpu', dtype=torch.float, requires_grad=True)
weight = cpu_weight.detach().clone().to('mps').requires_grad_()
cpu_bias = torch.randn(mean_shape, device='cpu', dtype=torch.float, requires_grad=True)
bias = cpu_bias.detach().clone().to('mps').requires_grad_()
y = None
ref_y = None
if(not test_module):
ref_y = torch.nn.functional.instance_norm(cpu_x, cpu_running_mean, cpu_running_var,
weight=cpu_weight,
bias=cpu_bias,
momentum=momentum, eps=eps)
y = torch.nn.functional.instance_norm(x, running_mean, running_var,
weight=weight,
bias=bias,
momentum=momentum, eps=eps)
else:
instancenorm_op = None
mps_instancenorm_op = None
if(len(shape) == 3):
instancenorm_op = torch.nn.InstanceNorm1d(shape[1],
eps=eps,
momentum=momentum,
affine=wts,
track_running_stats=track_running_stats,
device='cpu')
mps_instancenorm_op = torch.nn.InstanceNorm1d(shape[1],
eps=eps,
momentum=momentum,
affine=wts,
track_running_stats=track_running_stats,
device='mps')
elif(len(shape) == 4):
instancenorm_op = torch.nn.InstanceNorm2d(shape[1],
eps=eps,
momentum=momentum,
affine=wts,
track_running_stats=track_running_stats,
device='cpu')
mps_instancenorm_op = torch.nn.InstanceNorm2d(shape[1],
eps=eps,
momentum=momentum,
affine=wts,
track_running_stats=track_running_stats,
device='mps')
elif(len(shape) == 5):
instancenorm_op = torch.nn.InstanceNorm3d(shape[1],
eps=eps,
momentum=momentum,
affine=wts,
track_running_stats=track_running_stats,
device='cpu')
mps_instancenorm_op = torch.nn.InstanceNorm3d(shape[1],
eps=eps,
momentum=momentum,
affine=wts,
track_running_stats=track_running_stats,
device='mps')
if(track_running_stats):
instancenorm_op.running_mean = cpu_running_mean
instancenorm_op.running_var = cpu_running_var
mps_instancenorm_op.running_mean = running_mean
mps_instancenorm_op.running_var = running_var
if(wts):
instancenorm_op.weight = torch.nn.Parameter(cpu_weight)
instancenorm_op.bias = torch.nn.Parameter(cpu_bias)
mps_instancenorm_op.weight = torch.nn.Parameter(weight)
mps_instancenorm_op.bias = torch.nn.Parameter(bias)
ref_y = instancenorm_op(cpu_x)
y = mps_instancenorm_op(x)
self.assertEqual(y, ref_y)
if(not test_module):
self.assertEqual(running_mean, cpu_running_mean)
self.assertEqual(running_var, cpu_running_var)
else:
self.assertEqual(mps_instancenorm_op.running_mean, instancenorm_op.running_mean)
self.assertEqual(mps_instancenorm_op.running_var, instancenorm_op.running_var)
cpu_grad = torch.randn(ref_y.shape)
grad = cpu_grad.to('mps')
ref_y.backward(gradient=cpu_grad)
y.backward(gradient=grad)
self.assertEqual(x.grad, cpu_x.grad)
if(wts):
if(not test_module):
self.assertEqual(weight.grad, cpu_weight.grad)
self.assertEqual(bias.grad, cpu_bias.grad)
else:
self.assertEqual(mps_instancenorm_op.weight.grad, instancenorm_op.weight.grad)
self.assertEqual(mps_instancenorm_op.bias.grad, instancenorm_op.bias.grad)
for shape in [(2, 3, 2, 2), (2, 3, 2, 2, 2), (2, 3, 2)]:
for test_module in [False, True]:
for track_running_stats in [True, False]:
for channels_last in [False]:
if(channels_last and len(shape) != 4):
continue
# Running stats must be tracked in eval mode
if(track_running_stats):
helper(shape, eps=0, momentum=1, channels_last=channels_last,
track_running_stats=track_running_stats, test_module=test_module)
helper(shape, channels_last=channels_last,
track_running_stats=track_running_stats, test_module=test_module)
helper(shape, eps=1e-05, momentum=0.1, wts=False, channels_last=channels_last,
track_running_stats=track_running_stats, test_module=test_module)
helper(shape, eps=0, momentum=1.0, wts=False, channels_last=channels_last,
track_running_stats=track_running_stats, test_module=test_module)
helper(shape, eps=1, momentum=1, wts=True, channels_last=channels_last,
track_running_stats=track_running_stats, test_module=test_module)
helper(shape, eps=3, momentum=0.67, wts=True, channels_last=channels_last,
track_running_stats=track_running_stats, test_module=test_module)
helper(shape, eps=1e-05, momentum=0.1, wts=False, channels_last=channels_last,
track_running_stats=track_running_stats, test_module=test_module)
helper(shape, eps=0, momentum=1.0, wts=False, channels_last=channels_last,
track_running_stats=track_running_stats, test_module=test_module)
helper(shape, eps=1, momentum=1, wts=True, channels_last=channels_last,
track_running_stats=track_running_stats, test_module=test_module)
helper(shape, eps=3, momentum=0.67, wts=True, channels_last=channels_last,
track_running_stats=track_running_stats, test_module=test_module)
# Test conv2d
def test_conv2d_unit(self):
def helper(input_shape, wt_shape,
stride=1, padding=0,
dilation=1, groups=1,
bias_shape=None):
cpu_x = torch.randn(input_shape, device='cpu', dtype=torch.float, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
cpu_wt = torch.randn(wt_shape, device='cpu', dtype=torch.float, requires_grad=True)
wt = cpu_wt.detach().clone().to('mps').requires_grad_()
cpu_bias = None
bias = None
if(bias_shape is not None):
cpu_bias = torch.randn(bias_shape, device='cpu', dtype=torch.float, requires_grad=True)
bias = cpu_bias.detach().clone().to('mps').requires_grad_()
y = torch.nn.functional.conv2d(x, wt, bias=bias, stride=stride,
padding=padding, dilation=dilation, groups=groups)
ref_y = torch.nn.functional.conv2d(cpu_x, cpu_wt, bias=cpu_bias, stride=stride,
padding=padding, dilation=dilation, groups=groups)
cpu_grad = torch.ones_like(ref_y)
grad = cpu_grad.to('mps')
y.backward(gradient=grad)
ref_y.backward(gradient=cpu_grad)
self.assertEqual(y, ref_y, rtol=2.6e-05, atol=2e-04)
self.assertEqual(x.grad, cpu_x.grad, rtol=2.6e-06, atol=2e-05)
self.assertEqual(wt.grad, cpu_wt.grad, atol=8e-04, rtol=10.4e-05)
if(bias_shape is not None):
self.assertEqual(bias.grad, cpu_bias.grad, atol=8e-04, rtol=10.4e-05)
N = 1
C_in = 3
C_out = 64
H = 64
W = 64
kH = 4
kW = 4
stride = 2
padding = 1
helper((N, C_in, H, W), (C_out, C_in, kH, kW), stride=stride, padding=padding)
N = 4
C_in = 16
H = 32
W = 32
C_out = 8
kH = 3
kW = 3
for groups in [1, 2, 4]:
helper((N, C_in, H, W), (C_out, C_in // groups, kH, kW), groups=groups)
helper((N, C_in, H, W), (C_out, C_in // groups, kH, kW), groups=groups)
helper((N, C_in, H, W), (C_out, C_in // groups, kH, kW), bias_shape=(C_out), groups=groups)
helper((N, C_in, H, W), (C_out, C_in // groups, kH, kW), bias_shape=(C_out), groups=groups)
helper((N, C_in * 2, H * 2, W * 2), (C_out * 2, (C_in * 2) // groups, kH + 2, kW + 2), groups=groups)
helper((N, C_in * 2, H * 2, W * 2), (C_out * 2, (C_in * 2) // groups, kH + 2, kW + 2), groups=groups)
helper((N, C_in * 2, H * 2, W * 2), (C_out * 2, (C_in * 2) // groups,
kH + 2, kW + 2), bias_shape=(C_out * 2), groups=groups)
helper((N, C_in * 2, H * 2, W * 2), (C_out * 2, (C_in * 2) // groups,
kH + 2, kW + 2), bias_shape=(C_out * 2), groups=groups)
# Test conv transpose 2d
def test_conv_transpose2d(self):
def helper(input_shape, wt_shape,
stride=1, padding=0,
output_padding=0,
dilation=1, groups=1,
bias_shape=None):
cpu_x = torch.randn(input_shape, device='cpu', dtype=torch.float, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
cpu_wt = torch.randn(wt_shape, device='cpu', dtype=torch.float, requires_grad=True)
wt = cpu_wt.detach().clone().to('mps').requires_grad_()
cpu_bias = None
bias = None
if(bias_shape is not None):
cpu_bias = torch.randn(bias_shape, device='cpu', dtype=torch.float, requires_grad=True)
bias = cpu_bias.detach().clone().to('mps').requires_grad_()
y = torch.nn.functional.conv_transpose2d(
x, wt, bias=bias, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation)
ref_y = torch.nn.functional.conv_transpose2d(
cpu_x, cpu_wt, bias=cpu_bias, stride=stride, padding=padding,
output_padding=output_padding, groups=groups, dilation=dilation)
cpu_grad = torch.randn(ref_y.shape)
grad = cpu_grad.to('mps')
y.backward(gradient=grad)
ref_y.backward(gradient=cpu_grad)
self.assertEqual(y, ref_y, rtol=2.6e-05, atol=2e-04)
self.assertEqual(x.grad, cpu_x.grad, rtol=2.6e-06, atol=2e-05)
self.assertEqual(wt.grad, cpu_wt.grad, atol=8e-04, rtol=10.4e-05)
# if(bias_shape is not None):
# print(cpu_bias.grad)
# print(bias.grad.to('cpu'))
# self.assertEqual(bias.grad, cpu_bias.grad)
N = 4
C_in = 2
H = 32
W = 32
C_out = 8
groups = 1
kH = 3
kW = 3
for stride in [1, 2, 3]:
for padding in [0, 1, 2]:
for output_padding in [0, 1, 2]:
for dilation in [1, 2]:
if(output_padding >= stride or output_padding >= dilation):
continue
helper((N, C_out, H, W), (C_out, C_in, kH, kW), stride=stride,
padding=padding, output_padding=output_padding, dilation=dilation)
helper((N, C_out, H, W), (C_out, C_in, kH, kW), stride=stride,
padding=padding, output_padding=output_padding, dilation=dilation)
helper((N, C_out, H, W), (C_out, C_in, kH, kW), bias_shape=(C_in), stride=stride,
padding=padding, output_padding=output_padding, dilation=dilation)
helper((N, C_out, H, W), (C_out, C_in, kH, kW), bias_shape=(C_in), stride=stride,
padding=padding, output_padding=output_padding, dilation=dilation)
def test_conv1d_channels_last(self):
model_cpu = torch.nn.Conv1d(1, 128, 3)
a_cpu = torch.arange((128 * 176), dtype=torch.float32)
a_cpu = a_cpu.view(128, 176, 1).permute(0, 2, 1)
out_cpu = model_cpu(a_cpu) # pass
a_mps = a_cpu.detach().clone().to("mps")
model_mps = model_cpu.to("mps")
out_mps = model_mps(a_mps)
self.assertEqual(out_cpu, out_mps.cpu(), rtol=2.6e-05, atol=2e-04)
def test_conv1d_contiguous(self):
model_cpu = torch.nn.Conv1d(1, 128, 3)
a_cpu = torch.ones(128, 1, 176)
out_cpu = model_cpu(a_cpu)
a_mps = a_cpu.detach().clone().to("mps")
model_mps = model_cpu.to("mps")
out_mps = model_mps(a_mps)
self.assertEqual(out_cpu.shape, out_mps.shape)
self.assertEqual(out_cpu, out_mps.cpu())
# Test sigmoid
def test_sigmoid(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
sigmoid_op = torch.nn.Sigmoid()
y = sigmoid_op(x)
ref_y = sigmoid_op(cpu_x)
cpu_grad = torch.ones_like(ref_y)
grad = cpu_grad.to('mps')
y.backward(gradient=grad)
ref_y.backward(gradient=cpu_grad)
self.assertEqual(y, ref_y)
self.assertEqual(x.grad, cpu_x.grad)
helper((2, 3, 4, 5))
helper((2, 3, 4))
helper((2, 8, 4, 5))
# Test tanh
def test_tanh(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
tanh_op = torch.nn.Tanh()
y = tanh_op(x)
ref_y = tanh_op(cpu_x)
cpu_grad = torch.ones_like(ref_y)
grad = cpu_grad.to('mps')
y.backward(gradient=grad)
ref_y.backward(gradient=cpu_grad)
self.assertEqual(y, ref_y)
self.assertEqual(x.grad, cpu_x.grad)
helper((2, 3, 4, 5))
helper((2, 3, 4))
helper((2, 8, 4, 5))
def test_threshold(self):
def helper(threshold, value, num_elems, inplace=False, requires_grad=True):
m = nn.Threshold(threshold=threshold, value=value, inplace=inplace)
input_cpu = torch.randn(num_elems, requires_grad=requires_grad, dtype=torch.float)
input_mps = input_cpu.detach().clone().to('mps').requires_grad_(requires_grad)
output_cpu = m(input_cpu)
output_mps = m(input_mps)
cpu_grad = torch.ones_like(output_cpu)
mps_grad = cpu_grad.to('mps')
self.assertEqual(output_cpu, output_mps)
if requires_grad:
output_cpu.backward(gradient=cpu_grad)
output_mps.backward(gradient=mps_grad)
self.assertEqual(input_cpu.grad, input_mps.grad)
helper(threshold=0.1, value=20, num_elems=2)
helper(threshold=-0.1, value=10, num_elems=10)
helper(threshold=0.5, value=-15, num_elems=100)
helper(threshold=1, value=10, num_elems=100, inplace=True, requires_grad=False)
# Test pow
def test_pow(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
cpu_y = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
y = cpu_y.detach().clone().to('mps')
z = torch.pow(x, y)
ref_z = torch.pow(cpu_x, cpu_y)
self.assertEqual(z, ref_z)
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
exp = random.random()
z = torch.pow(x, exp)
ref_z = torch.pow(cpu_x, exp)
self.assertEqual(z, ref_z)
helper((2, 8, 4, 5))
# Test addcmul
def test_addcmul(self):
def helper(shape, value):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
cpu_y = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
y = cpu_y.detach().clone().to('mps')
cpu_z = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
z = cpu_z.detach().clone().to('mps')
y = torch.addcmul(x, y, z, value=value)
ref_y = torch.addcmul(cpu_x, cpu_y, cpu_z, value=value)
self.assertEqual(y, ref_y)
helper((2, 3, 4, 5), 0.1)
helper((2, 8, 4, 5), 0.1)
helper((2, 3, 4, 5), 0.2)
helper((2, 8, 4, 5), 0.2)
# Test addcdiv
def test_addcdiv(self):
def helper(shape, value):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
cpu_y = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
# clamp to avoid division by 0
cpu_z = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False).clamp_min_(0.1)
cpu_out = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
mps_x = cpu_x.detach().clone().to('mps')
mps_y = cpu_y.detach().clone().to('mps')
mps_z = cpu_z.detach().clone().to('mps')
mps_out = cpu_out.detach().clone().to('mps')
result_div_mps = torch.addcdiv(mps_x, mps_y, mps_z, value=value)
result_div_cpu = torch.addcdiv(cpu_x, cpu_y, cpu_z, value=value)
self.assertEqual(result_div_mps, result_div_cpu)
# test .out variant
self.assertEqual(torch.addcdiv(mps_x, mps_y, mps_z, out=mps_out, value=value), result_div_cpu)
helper((2, 3, 4, 5), 0.1)
helper((2, 8, 4, 5), 0.2)
helper((2, 3, 4, 5), 1.0) # value of 1 should be ignored internally
def test_buffer_size_match(self):
# this test shouldn't cause any crash
size = 16
cpu_A = torch.rand(size, device='cpu')
cpu_F = torch.rand(size, size, size, device='cpu')
mps_A = cpu_A.to('mps')
mps_F = cpu_F.to('mps')
self.assertEqual(cpu_A @ cpu_F, mps_A @ mps_F)
def test_transpose_inplace(self):
values = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]
cpu_x = torch.tensor(values, device='cpu')
mps_x = torch.tensor(values, device='mps')
cpu_x.transpose_(0, 1)
mps_x.transpose_(0, 1)
self.assertEqual(cpu_x, mps_x.to('cpu'))
def test_expand_cpu_to_mps_copy(self):
# https://github.com/pytorch/pytorch/issues/78642
x = torch.tensor(1).expand([10]).to("mps")
x_cpu = torch.tensor(1).expand([10])
self.assertEqual(x_cpu, x.cpu())
def test_slice(self):
values = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]
cpu_x = torch.tensor(values, device='cpu')
mps_x = torch.tensor(values, device='mps', dtype=torch.float)
cpu_slice1 = cpu_x[:2, :]
mps_slice1 = mps_x[:2, :]
self.assertEqual(cpu_slice1, mps_slice1)
cpu_slice2 = cpu_x[:, :1]
mps_slice2 = mps_x[:, :1]
self.assertEqual(cpu_slice2, mps_slice2)
cpu_slice3 = cpu_x[1:2, :]
mps_slice3 = mps_x[1:2, :]
self.assertEqual(cpu_slice3, mps_slice3.to('cpu'))
cpu_slice4 = cpu_x[1, :]
mps_slice4 = mps_x[1, :].to('cpu')
self.assertEqual(cpu_slice4, mps_slice4)
def test_slice_contiguous_view(self):
# https://github.com/pytorch/pytorch/issues/77750
def helper(operator):
t_mps = torch.tensor([1, 2, 3, 4], device="mps")
t_cpu = torch.tensor([1, 2, 3, 4], device="cpu")
# contiguous view
x_mps = t_mps[2:] # 3, 4
y_mps = t_mps[:2] # 1, 2
x_cpu = t_cpu[2:]
y_cpu = t_cpu[:2]
res_mps = res_cpu = None
if operator == "<=":
res_mps = x_mps <= y_mps
res_cpu = x_cpu <= y_cpu
if operator == "<":
res_mps = x_mps < y_mps
res_cpu = x_cpu < y_cpu
if operator == ">=":
res_mps = x_mps >= y_mps
res_cpu = x_cpu >= y_cpu
if operator == ">":
res_mps = x_mps > y_mps
res_cpu = x_cpu > y_cpu
if operator == "==":
res_mps = x_mps == y_mps
res_cpu = x_cpu == y_cpu
if operator == "!=":
res_mps = x_mps != y_mps
res_cpu = x_cpu != y_cpu
self.assertEqual(res_mps, res_cpu)
for op in ["<=", "<", ">=", ">", "==", "!="]:
helper(op)
def test_index_storage_offset(self):
# https://github.com/pytorch/pytorch/issues/78107
a = torch.tensor([8.2670e-01, -1.0293e+00])
b_cpu = a[0]
c_cpu = a[1]
# both 'b' and 'c' are views of 'a'
# 'b' has a storage offset of 0, while 'c' has a storage offset of 1
# when copying from 'cpu' to 'mps', 'c' will have a storage_offset of 1 which needs to be taken into account,
# otherwise it would end up with the same value as 'b'
b = b_cpu.to('mps')
c = c_cpu.to('mps')
res_mps = b > c
res_cpu = b_cpu > c_cpu
self.assertEqual(res_mps, res_cpu)
res_mps = c > b
res_cpu = c_cpu > b_cpu
self.assertEqual(res_mps, res_cpu)
def test_flatten(self):
values = [[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], [[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]]]
cpu_x = torch.tensor(values, device='cpu')
mps_x = torch.tensor(values, device='mps')
cpu_flatten1 = cpu_x.flatten()
mps_flatten1 = mps_x.flatten().to('cpu')
self.assertEqual(cpu_flatten1, mps_flatten1)
cpu_flatten2 = cpu_x.flatten(start_dim=1)
mps_flatten2 = mps_x.flatten(start_dim=1).to('cpu')
self.assertEqual(cpu_flatten2, mps_flatten2)
cpu_flatten3 = cpu_x.flatten(end_dim=1)
mps_flatten3 = mps_x.flatten(end_dim=1).to('cpu')
self.assertEqual(cpu_flatten3, mps_flatten3)
# Test repeat
def test_repeat(self):
def helper(shape, repeats):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
y = x.repeat(repeats)
ref_y = cpu_x.repeat(repeats)
cpu_grad = torch.randn(ref_y.shape)
grad = cpu_grad.to('mps')
y.backward(gradient=grad)
ref_y.backward(gradient=cpu_grad)
self.assertEqual(y, ref_y)
self.assertEqual(x.grad, cpu_x.grad)
helper((2, 3, 4, 5), (2, 3, 4, 5))
helper((2, 3, 4), (4, 3, 2, 5, 7, 2))
helper((3, 4, 5), (2, 3, 4, 5))
helper((3, 4, 5), (2, 2, 2))
def test_count_nonzero(self):
def helper(dtype):
n = [
[[1, 0, 2], [3, 0, 2], [7, 9, -4]],
[[0, 2, 3], [3, 2, 1], [2, 0, 0]],
]
cpu_x = torch.tensor(n, dtype=dtype)
mps_x = torch.tensor(n, dtype=dtype).to('mps')
# All non-zeros
self.assertEqual(
torch.count_nonzero(cpu_x),
torch.count_nonzero(mps_x)
)
# dim=1
self.assertEqual(
torch.count_nonzero(cpu_x, dim=1),
torch.count_nonzero(mps_x, dim=1)
)
# dim=(0, 1)
self.assertEqual(
torch.count_nonzero(cpu_x, dim=(0, 1)),
torch.count_nonzero(mps_x, dim=(0, 1))
)
helper(torch.int32)
helper(torch.int64)
helper(torch.float16)
helper(torch.float32)
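# Shared helper: runs `module` on `inp` (intended to be an empty input, per the name),
# backpropagates a random gradient, and checks that the parameter and input gradients are all zeros.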
def _test_module_empty_input(self, module, inp, check_size=True):
inp.requires_grad_(True)
out = module(inp)
gO = torch.rand_like(out)
out.backward(gO)
if check_size:
self.assertEqual(out.size(), inp.size())
for p in module.parameters():
if p.requires_grad:
self.assertEqual(p.grad, torch.zeros_like(p.grad))
self.assertEqual(inp.grad, torch.zeros_like(inp))
# Test dtype casting, with and without simultaneous device change
def test_to(self):
values = [[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], [[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]]]
cpu_x = torch.tensor(values, device='cpu')
mps_x = torch.tensor(values, device='mps')
self.assertEqual(cpu_x.int(), mps_x.int().cpu())
self.assertEqual(cpu_x.bool(), mps_x.bool().cpu())
self.assertEqual(cpu_x.float(), mps_x.float().cpu())
self.assertEqual(torch.tensor(1.3, device='mps').int().cpu(),
torch.tensor(1, dtype=torch.int32))
self.assertEqual(torch.tensor(0.0, device='mps').bool().cpu(), torch.tensor(False))
self.assertEqual(torch.tensor(0.1, device='mps').bool().cpu(), torch.tensor(True))
self.assertEqual(torch.tensor(0.1, device='mps').bool().int().cpu(),
torch.tensor(1, dtype=torch.int32))
self.assertEqual(torch.tensor(0.1, device='mps').bool().int().float().cpu(),
torch.tensor(1.0))
self.assertEqual(torch.tensor(4.25, device='mps').to('cpu', torch.int),
torch.tensor(4, dtype=torch.int32))
self.assertEqual(torch.tensor(4.25, device='cpu').to('mps', torch.int).cpu(),
torch.tensor(4, dtype=torch.int32))
self.assertEqual(torch.tensor(-8.34, device='cpu').to('mps', torch.int),
torch.tensor(-8.34, device='cpu').to('mps').to(torch.int))
# Cast int8 and uint8 to float and compare results
# See https://github.com/pytorch/pytorch/issues/80009 for more details
cpu_byte = torch.tensor([60, 160, 20, 220], dtype=torch.uint8)
cpu_char = torch.tensor([60, -60, 20, -120], dtype=torch.int8)
for x_cpu in [cpu_byte, cpu_char]:
x_mps = x_cpu.to('mps')
self.assertEqual(x_mps.to(torch.float32), x_cpu.to(torch.float32))
def test_setitem_scalar(self) -> None:
device = 'mps'
for dtype in [torch.int32, torch.float32, torch.int64]:
for i in range(3, 6):
for j in range(3, 6):
t = torch.zeros(i, j, dtype=dtype, device=device)
self.assertEqual(t.sum(), 0)
t[1, 1] = 1
t[2, 1] = j
t[1, 2] = i
self.assertEqual(t[1, 1], 1)
self.assertEqual(t[1, 2], i)
self.assertEqual(t[2, 1], j)
self.assertEqual(t.sum(), 1 + i + j)
def test_stride_of_strides(self) -> None:
x = torch.rand(32, 1, device='mps')
y = x.as_strided(size=(32, 2), stride=(1, 0))
# Taking a strided view of an already-strided tensor and copying it to the CPU used to crash with the "buffer is not large enough." assert
# See https://github.com/pytorch/pytorch/issues/79181#issuecomment-1154683435
z = y.as_strided(size=(32, 3), stride=(1, 0)).to("cpu")
self.assertEqual(x.to("cpu").as_strided(size=(32, 3), stride=(1, 0)), z)
def test_type_casting(self):
# https://github.com/pytorch/pytorch/issues/81567
def helper(data, to_dtype):
a_cpu = torch.tensor(data)
a_mps = a_cpu.to(torch.device('mps'))
res_cpu = a_cpu.type(to_dtype)
res_mps = a_mps.type(to_dtype)
self.assertEqual(res_cpu, res_mps)
helper([9.0, 3.0, 5.0, 4.0], torch.LongTensor)
helper([9.0, 3.0, 5.0, 4.0], torch.FloatTensor)
helper([9.0, 3.0, 5.0, 4.0], torch.IntTensor)
helper([9.0, 3.0, 5.0, 4.0], torch.ShortTensor)
helper([9.0, 3.0, 5.0, 4.0], torch.HalfTensor)
helper([9.0, 3.0, 5.0, 4.0], torch.CharTensor)
helper([9.0, 3.0, 5.0, 4.0], torch.ByteTensor)
def test_to_casting(self):
# https://github.com/pytorch/pytorch/issues/81567
def helper(data, to_dtype):
a_cpu = torch.tensor(data)
a_mps = a_cpu.to(torch.device('mps'))
res_cpu = a_cpu.to(to_dtype)
res_mps = a_mps.to(to_dtype)
self.assertEqual(res_cpu, res_mps)
helper([9.0, 3.0, 5.0, 4.0], torch.int64)
helper([9.0, 3.0, 5.0, 4.0], torch.float)
helper([9.0, 3.0, 5.0, 4.0], torch.int32)
helper([9.0, 3.0, 5.0, 4.0], torch.short)
helper([9.0, 3.0, 5.0, 4.0], torch.half)
helper([9.0, 3.0, 5.0, 4.0], torch.int8)
helper([9.0, 3.0, 5.0, 4.0], torch.uint8)
def test_storage_offset_greater_than_src_nbytes(self):
# https://github.com/pytorch/pytorch/issues/80844
n_tensors = 100
n_tensor_elems = 784
elems = torch.arange(n_tensors * n_tensor_elems, dtype=torch.float32)
tensor_list = []
for i in range(0, n_tensors - 1):
# create a list of contiguous view tensors (view tensor created by the slice op)
t = elems[n_tensor_elems * i : n_tensor_elems * (i + 1)]
tensor_list.append(t)
for i in range(0, n_tensors - 1):
t = tensor_list[i].view(1, 784)
t_mps = t.to("mps")
self.assertEqual(t, t_mps.cpu())
# See https://github.com/pytorch/pytorch/issues/82427
# Test should not crash
def test_bool_full(self):
x = torch.full((3, 3), True, device='mps')
# See https://github.com/pytorch/pytorch/issues/82663
def test_bool_expand(self):
x = torch.tensor([[1], [0]], dtype=torch.bool, device='mps')
y = torch.tensor([0, 1], dtype=torch.bool, device='mps')
self.assertFalse(torch.equal(x.expand(2, 2), y.expand(2, 2)))
# Empty unary op should return tensor of the same size
def test_empty_neg(self):
x = torch.tensor([[]], device='mps')
y = -x
self.assertEqual(x, y)
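# Elementwise logical ops (logical_not/and/or/xor): compare MPS results against CPU
# for int, float, and bool inputs, including scalar tensors.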
class TestLogical(TestCase):
def _wrap_tensor(self, x, device="cpu", dtype=None, requires_grad=False):
return torch.tensor(x, device=device, dtype=dtype, requires_grad=requires_grad)
def test_logical_not(self):
def helper(x):
cpu_x = x
x = cpu_x.detach().clone().to('mps')
result = torch.logical_not(x)
result_cpu = torch.logical_not(cpu_x)
self.assertEqual(result, result_cpu)
helper(self._wrap_tensor([1, 1, 0, 0]))
helper(self._wrap_tensor([1, 1, 0, 0], dtype=torch.float, requires_grad=True))
helper(self._wrap_tensor([True, True, False, False]))
helper(self._wrap_tensor(1))
helper(self._wrap_tensor(0))
helper(self._wrap_tensor(True))
helper(self._wrap_tensor(False))
def test_logical_and(self):
def helper(x, other):
cpu_x = x
x = cpu_x.detach().clone().to('mps')
cpu_other = other
other = cpu_other.detach().clone().to('mps')
result = torch.logical_and(x, other)
result_cpu = torch.logical_and(cpu_x, cpu_other)
self.assertEqual(result, result_cpu)
helper(self._wrap_tensor([1, 1, 0, 0]), self._wrap_tensor(([1, 0, 0, 1])))
helper(
self._wrap_tensor([1, 1, 0, 0], dtype=torch.float, requires_grad=True),
self._wrap_tensor([1, 0, 0, 1], dtype=torch.float)
)
helper(self._wrap_tensor([True, True, False, False]), self._wrap_tensor([True, False, False, True]))
helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(1))
helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(0))
helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(True))
helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(False))
def test_logical_or(self):
def helper(x, other):
cpu_x = x
x = cpu_x.detach().clone().to('mps')
cpu_other = other
other = cpu_other.detach().clone().to('mps')
result = torch.logical_or(x, other)
result_cpu = torch.logical_or(cpu_x, cpu_other)
self.assertEqual(result, result_cpu)
helper(self._wrap_tensor([1, 1, 0, 0]), self._wrap_tensor(([1, 0, 0, 1])))
helper(
self._wrap_tensor([1, 1, 0, 0], dtype=torch.float, requires_grad=True),
self._wrap_tensor([1, 0, 0, 1], dtype=torch.float)
)
helper(self._wrap_tensor([True, True, False, False]), self._wrap_tensor([True, False, False, True]))
helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(1))
helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(0))
helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(True))
helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(False))
def test_logical_xor(self):
def helper(x, other):
cpu_x = x
x = cpu_x.detach().clone().to('mps')
cpu_other = other
other = cpu_other.detach().clone().to('mps')
result = torch.logical_xor(x, other)
result_cpu = torch.logical_xor(cpu_x, cpu_other)
self.assertEqual(result, result_cpu)
helper(self._wrap_tensor([1, 1, 0, 0]), self._wrap_tensor(([1, 0, 0, 1])))
helper(
self._wrap_tensor([1, 1, 0, 0], dtype=torch.float, requires_grad=True),
self._wrap_tensor([1, 0, 0, 1], dtype=torch.float)
)
helper(self._wrap_tensor([True, True, False, False]), self._wrap_tensor([True, False, False, True]))
helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(1))
helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(0))
helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(True))
helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(False))
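# Smooth L1 loss: forward (and optionally backward) parity between CPU and MPS
# for the 'none', 'mean', and 'sum' reductions.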
class TestSmoothL1Loss(TestCase):
def _smooth_l1_loss_helper(self, reduction="mean", requires_grad=False):
# CPU
input_cpu = torch.randn(4, 7, requires_grad=requires_grad)
target_cpu = torch.randn(4, 7)
# MPS
input_mps = input_cpu.detach().clone().to('mps').requires_grad_()
target_mps = target_cpu.detach().clone().to('mps')
smooth_l1_loss_cpu = F.smooth_l1_loss(input_cpu, target_cpu, beta=1.0, reduction=reduction)
smooth_l1_loss_mps = F.smooth_l1_loss(input_mps, target_mps, beta=1.0, reduction=reduction)
self.assertEqual(smooth_l1_loss_cpu, smooth_l1_loss_mps)
if requires_grad:
smooth_l1_loss_cpu.backward()
smooth_l1_loss_mps.backward()
self.assertEqual(input_cpu.grad, input_mps.grad.to("cpu"))
return smooth_l1_loss_cpu, smooth_l1_loss_mps
def test_smooth_l1_loss_reduction_none(self):
self._smooth_l1_loss_helper(reduction="none")
def test_smooth_l1_loss_reduction_mean(self):
self._smooth_l1_loss_helper(reduction="mean")
def test_smooth_l1_loss_reduction_sum(self):
self._smooth_l1_loss_helper(reduction="sum")
def test_smooth_l1_loss_reduction_mean_backward(self):
self._smooth_l1_loss_helper(reduction="mean", requires_grad=True)
def test_smooth_l1_loss_reduction_mean_sum_backward(self):
self._smooth_l1_loss_helper(reduction="sum", requires_grad=True)
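# NLL-loss tests, plus a number of other CPU/MPS parity checks (losses, comparisons,
# reductions) that are grouped under this class.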
class TestNLLLoss(TestCase):
def test_nll_loss_mismatched_batch(self, device='mps'):
x = torch.randn((10, 3), requires_grad=True, device=device)
# t should have size (10,)
t = torch.zeros((3,), dtype=torch.int64, device=device)
with self.assertRaisesRegex(ValueError, 'Expected.*batch_size'):
F.nll_loss(x, t)
def test_nll_loss_out_of_bounds_ignore_index(self):
def _test_nll_loss_out_of_bounds_ignore_index(device):
output = []
x = torch.tensor([[0.3, 0.5, 0.2], [0.1, 0.7, 0.2], [0.4, 0.5, 0.1], [
0.3, 0.5, 0.2], [0.1, 0.7, 0.2], [0.4, 0.5, 0.1]], device=device)
t = torch.tensor([0, 1, 255, 0, 1, 2], dtype=torch.int64, device=device)
for reduction in ['mean', 'none']:
output.append(F.nll_loss(x, t, ignore_index=255, reduction=reduction))
return output
output_cpu = _test_nll_loss_out_of_bounds_ignore_index(device='cpu')
output_mps = _test_nll_loss_out_of_bounds_ignore_index(device='mps')
for cpu, mps in zip(output_cpu, output_mps):
self.assertEqual(cpu, mps.to('cpu'))
def test_nll_loss_invalid_target_dim(self):
def _test_nll_loss_invalid_target_dim(device):
output = []
x = torch.tensor([[0.3, 0.5, 0.2], [0.1, 0.7, 0.2], [0.4, 0.5, 0.1], [
0.3, 0.5, 0.2], [0.1, 0.7, 0.2], [0.4, 0.5, 0.1]], device=device)
t = torch.zeros((6, 2), dtype=torch.int64, device=device)
with self.assertRaisesRegex(RuntimeError, "1D target tensor expected"):
F.nll_loss(x, t)
_test_nll_loss_invalid_target_dim(device='cpu')
_test_nll_loss_invalid_target_dim(device='mps')
def test_nll_loss_invalid_weights(self):
def _test_nll_loss_invalid_weights(device):
x = torch.tensor([[0.3, 0.5, 0.2], [0.1, 0.7, 0.2], [0.4, 0.5, 0.1], [
0.3, 0.5, 0.2], [0.1, 0.7, 0.2], [0.4, 0.5, 0.1]], device=device)
t = torch.tensor([0, 1, 2, 1, 1, 2], dtype=torch.int64, device=device)
invalid_weights = [
torch.zeros(4, device=device),
torch.zeros((1, 3), device=device),
]
msg = "weight tensor should be defined either for all 3 classes or no classes"
for weight in invalid_weights:
with self.assertRaisesRegex(RuntimeError, msg):
F.nll_loss(x, t, weight=weight)
_test_nll_loss_invalid_weights(device='cpu')
_test_nll_loss_invalid_weights(device='mps')
def _nll_loss_helper(self, input_size, reduction, expected):
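# NB: `expected` is currently unused here; the CPU output below serves as the reference instead.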
# CPU
input = torch.rand(input_size, requires_grad=True, device='cpu')
num_channels = input_size[1]
target_size = (input_size[0], ) + tuple(input_size[2:])
target = torch.randint(num_channels, target_size, device='cpu')
# MPS
input_mps = input.detach().clone().to('mps').requires_grad_()
target_mps = target.detach().clone().to('mps')
output_cpu = F.nll_loss(input, target, reduction=reduction)
output_mps = F.nll_loss(input_mps, target_mps, reduction=reduction)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(output_cpu, output_mps.to('cpu'))
output_cpu.sum().backward()
output_mps.sum().backward()
self.assertEqual(input.grad, input_mps.grad.to('cpu'))
def _nll_loss_1d_helper(self, input_size, reduction):
# CPU
input = torch.rand(input_size, requires_grad=True, device='cpu')
num_channels = input_size[0]
target = torch.randint(num_channels, [], device='cpu')
# MPS
input_mps = input.detach().clone().to('mps').requires_grad_()
target_mps = target.detach().clone().to('mps')
output_cpu = F.nll_loss(input, target, reduction=reduction)
output_mps = F.nll_loss(input_mps, target_mps, reduction=reduction)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(output_cpu, output_mps.to('cpu'))
output_cpu.sum().backward()
output_mps.sum().backward()
self.assertEqual(input.grad, input_mps.grad.to('cpu'))
def test_as_strided(self):
values = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]
values_1 = [[1.0, 1.0], [1.0, 1.0]]
cpu_x = torch.tensor(values, device='cpu')
ones1 = torch.tensor(values_1, device='mps')
x = cpu_x.detach().clone().to('mps').requires_grad_()
strided_cpu = torch.as_strided(cpu_x, (2, 2), (1, 2))
strided_mps = torch.as_strided(x, (2, 2), (1, 2))
self.assertEqual(strided_mps, strided_cpu)
strided_cpu_out = strided_cpu + ones1.to('cpu')
strided_mps_out = strided_mps + ones1
self.assertEqual(strided_cpu_out, strided_mps_out)
# test with storage offsets
cpu_x = torch.rand(3, 3, device='cpu')
mps_x = cpu_x.to('mps')
strided_cpu1 = torch.as_strided(cpu_x, (2, 2), (1, 2), 0)
strided_mps1 = torch.as_strided(mps_x, (2, 2), (1, 2), 0)
strided_cpu2 = torch.as_strided(cpu_x, (2, 2), (1, 2), 1)
strided_mps2 = torch.as_strided(mps_x, (2, 2), (1, 2), 1)
strided_cpu_out = strided_cpu1 - strided_cpu2
strided_mps_out = strided_mps1 - strided_mps2
self.assertEqual(strided_cpu_out, strided_mps_out)
def test_sum_backward(self):
def helper(n, c):
values = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]
cpu_x = torch.tensor(values, device='cpu', requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
all_sum = torch.sum(x)
all_sum_cpu = torch.sum(cpu_x)
all_sum.backward()
all_sum_cpu.backward()
self.assertEqual(all_sum, all_sum_cpu)
self.assertEqual(x.grad, cpu_x.grad)
helper(3, 3)
def test_nll_loss_1d(self, device='cpu'):
self._nll_loss_1d_helper([10], "none")
self._nll_loss_1d_helper([10], "mean")
self._nll_loss_1d_helper([10], "sum")
def test_nll_loss_empty_tensor_reduction_none(self, device='cpu'):
self._nll_loss_helper([1, 3], "none", torch.empty([0], device=device))
self._nll_loss_helper([3, 5, 7], "none", torch.empty([5, 7], device=device))
self._nll_loss_helper([2, 3, 1, 7], "none", torch.empty([2, 1, 7], device=device))
self._nll_loss_helper([2, 3, 5, 1], "none", torch.empty([2, 5, 1], device=device))
self._nll_loss_helper([2, 3, 5, 7, 1], "none", torch.empty([2, 5, 7, 1], device=device))
@unittest.skipIf(TEST_WITH_UBSAN, "division-by-zero error with UBSAN")
def test_nll_loss_empty_tensor_reduction_mean(self, device='cpu'):
nan = torch.tensor(float('nan'), device=device)
self._nll_loss_helper([1, 3], "mean", nan)
self._nll_loss_helper([1, 3, 5, 7], "mean", nan)
self._nll_loss_helper([2, 3, 1, 7], "mean", nan)
self._nll_loss_helper([2, 3, 5, 1], "mean", nan)
self._nll_loss_helper([2, 3, 5, 7, 1], "mean", nan)
def test_nll_loss_empty_tensor_reduction_sum(self, device='cpu'):
zero = torch.tensor(0, device=device)
self._nll_loss_helper([1, 3], "sum", zero)
self._nll_loss_helper([1, 3, 5, 7], "sum", zero)
self._nll_loss_helper([2, 3, 1, 7], "sum", zero)
self._nll_loss_helper([2, 3, 5, 1], "sum", zero)
self._nll_loss_helper([2, 3, 5, 7, 1], "sum", zero)
def test_nll_loss_byte_target_matches_long(self, device='cpu'):
N, C = 10, 4
input = torch.randn(N, C, device=device, requires_grad=True)
target = torch.empty(N, dtype=torch.long, device=device).random_(0, C)
def compute_result_and_gradient(reduction, target_dtype):
result, grad = {}, {}
for dev in ['cpu', 'mps']:
input_dev = input.to(dev)
input_ = input_dev.detach()
input_.requires_grad_()
target_dev = target.to(dev)
prob = F.log_softmax(input_, dim=-1)
loss = nn.NLLLoss(reduction=reduction)
result[dev] = loss(prob, target_dev.to(target_dtype))
result[dev].sum().backward()
grad[dev] = input_.grad
return result, grad
for reduction in ["none", "mean", "sum"]:
result_long, grad_long = compute_result_and_gradient(reduction, torch.long)
result_byte, grad_byte = compute_result_and_gradient(reduction, torch.uint8)
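# NB: the uint8-target results are computed but not compared below; only the
# long-target path is checked against CPU here.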
self.assertEqual(result_long['mps'].to('cpu'), result_long['cpu'])
self.assertEqual(grad_long['mps'].to('cpu'), grad_long['cpu'])
# L1 loss
def test_l1_loss(self):
def helper(shape, reduction):
# create the criterion
loss = torch.nn.L1Loss(reduction=reduction)
inputCPU = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
targetCPU = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
inputMPS = inputCPU.detach().clone().to('mps').requires_grad_()
targetMPS = targetCPU.detach().clone().to('mps')
# forward pass
outputCPU = loss(inputCPU, targetCPU)
outputMPS = loss(inputMPS, targetMPS)
self.assertEqual(outputCPU, outputMPS)
# backward pass
if reduction != 'none':
# chose 2 just to make the grad_output > 1 in backward pass
outputCPU.backward(gradient=torch.full_like(outputCPU, 2))
outputMPS.backward(gradient=torch.full_like(outputMPS, 2))
self.assertEqual(inputCPU.grad, inputMPS.grad)
helper([8, 5, 4], 'none')
helper([7, 5, 2, 4], 'sum')
# verify that a change in shape does not cause cached-graph lookup problems
helper([7, 5, 2, 4, 6], 'sum')
helper([8, 4, 5, 7, 6], 'mean')
# Mean Squared Error
def test_mse_loss(self):
def helper(shape, reduction):
# create the criterion
loss = torch.nn.MSELoss(reduction=reduction)
inputCPU = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
targetCPU = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
inputMPS = inputCPU.detach().clone().to('mps').requires_grad_()
targetMPS = targetCPU.detach().clone().to('mps')
# forward pass
outputCPU = loss(inputCPU, targetCPU)
outputMPS = loss(inputMPS, targetMPS)
self.assertEqual(outputCPU, outputMPS)
# backward pass
if reduction != 'none':
# chose 2 just to make the grad_output > 1 in backward pass
outputCPU.backward(gradient=torch.full_like(outputCPU, 2))
outputMPS.backward(gradient=torch.full_like(outputMPS, 2))
self.assertEqual(inputCPU.grad, inputMPS.grad)
helper([8, 5, 4], 'none')
helper([7, 5, 2, 4], 'sum')
# verify that a change in shape does not cause cached-graph lookup problems
helper([7, 5, 2, 4, 6], 'sum')
helper([8, 4, 5, 7, 6], 'mean')
# Binary Cross Entropy
def test_bce_loss_simple(self):
def helper(shape, reduction):
# create the criterion
loss = torch.nn.BCELoss(reduction=reduction)
# input and target must be within [0..1]
input_t = np.random.random_sample(size=shape).astype(np.float32)
target_t = np.random.random_sample(size=shape).astype(np.float32)
inputCPU = torch.tensor(input_t, device='cpu', dtype=torch.float, requires_grad=True)
targetCPU = torch.tensor(target_t, device='cpu', dtype=torch.float, requires_grad=False)
inputMPS = inputCPU.detach().clone().to('mps').requires_grad_()
targetMPS = targetCPU.detach().clone().to('mps')
# forward pass
outputCPU = loss(inputCPU, targetCPU)
outputMPS = loss(inputMPS, targetMPS)
self.assertEqual(outputCPU, outputMPS)
# backward pass
if reduction != 'none':
# chose 0.6 just to have the grad_output != 1
outputCPU.backward(gradient=torch.full_like(outputCPU, 0.6))
outputMPS.backward(gradient=torch.full_like(outputMPS, 0.6))
self.assertEqual(inputCPU.grad, inputMPS.grad)
helper([8, 5, 4], 'none')
helper([7, 5, 2, 4], 'sum')
# verify that a change in shape does not cause cached-graph lookup problems
helper([7, 5, 2, 4, 6], 'sum')
helper([8, 4, 5, 7, 6], 'mean')
helper([1, 1, 32, 32], 'mean')
def test_bce_loss_always_nonnegative(self):
target = torch.ones(5, device='mps')
input = torch.ones(5, device='mps')
self.assertEqual((nn.BCELoss()(input, target) < 0).sum(), 0)
target = torch.zeros(5, device='mps')
input = torch.zeros(5, device='mps')
self.assertEqual((nn.BCELoss()(input, target) < 0).sum(), 0)
def test_bce_loss_size_mismatch(self):
bceloss = nn.BCELoss()
a = torch.rand(25, device='mps')
b = torch.rand(25, 1, device='mps')
with self.assertRaisesRegex(ValueError, r'Using a target size \('):
bceloss(a, b)
def test_bce_with_logits_gives_same_result_as_sigmoid_and_bce_loss_large_tensors_with_grad(self):
x_size = 1024
y_size = 256
target = torch.rand(x_size, y_size, device='mps')
for reduction in ['none', 'mean', 'sum']:
output_sig = torch.rand(x_size, y_size, device='mps') - 0.5
output_logits = output_sig.clone().detach()
output_sig.requires_grad = True
output_logits.requires_grad = True
weight = torch.rand(y_size, device='mps')
loss_sig = nn.BCELoss(weight, reduction=reduction)(
torch.sigmoid(output_sig), target
)
loss_logits = nn.BCEWithLogitsLoss(weight, reduction=reduction)(
output_logits, target
)
self.assertEqual(loss_logits, loss_sig)
if reduction == 'none':
grad = torch.rand(x_size, y_size, device='mps')
loss_sig.backward(grad)
loss_logits.backward(grad)
else:
loss_sig.backward()
loss_logits.backward()
self.assertEqual(output_sig.grad, output_logits.grad)
def test_bce_with_logits_has_correct_grad_at_zero(self):
output = torch.zeros(3, 1, requires_grad=True, device='mps')
target = torch.zeros(3, 1, device='mps')
nn.BCEWithLogitsLoss(reduction='sum')(output, target).backward()
expected_grad = torch.empty(3, 1, device='mps').fill_(0.5)
self.assertEqual(output.grad, expected_grad)
def test_bce_with_logits_broadcasts_weights(self):
target = torch.rand(16, 4, device='mps')
output = torch.rand(16, 4, device='mps') - 0.5
weight = torch.rand(4, device='mps')
out1 = nn.BCEWithLogitsLoss(weight)(output, target)
weight = weight.expand(16, 4).contiguous()
out2 = nn.BCEWithLogitsLoss(weight)(output, target)
self.assertEqual(out1, out2)
weight = torch.rand(16, 1, device='mps')
out1 = nn.BCEWithLogitsLoss(weight)(output, target)
weight = weight.expand(16, 4).contiguous()
out2 = nn.BCEWithLogitsLoss(weight)(output, target)
self.assertEqual(out1, out2)
def test_bce_with_logits_ones_in_pos_weights_are_the_same_as_none(self):
target = torch.rand(64, 4, device='mps')
output = torch.rand(64, 4, device='mps') - 0.5
pos_weight = torch.ones(64, 4, device='mps')
self.assertEqual(nn.BCEWithLogitsLoss()(output, target),
nn.BCEWithLogitsLoss(pos_weight=pos_weight)(output, target))
def test_bce_with_logits_broadcasts_pos_weights(self):
target = torch.rand(64, 4, device='mps')
output = torch.rand(64, 4, device='mps') - 0.5
pos_weight = torch.rand(4, device='mps')
out1 = nn.BCEWithLogitsLoss(pos_weight=pos_weight)(output, target)
pos_weight1 = pos_weight.expand(1, 4)
out2 = nn.BCEWithLogitsLoss(pos_weight=pos_weight1)(output, target)
pos_weight2 = pos_weight.expand(64, 4)
out3 = nn.BCEWithLogitsLoss(pos_weight=pos_weight2)(output, target)
self.assertEqual(out1, out2)
self.assertEqual(out1, out3)
def test_bce_with_logits_with_pos_weight_has_correct_grad_at_zero(self):
output = torch.zeros(3, 1, requires_grad=True, device='mps')
target = torch.zeros(3, 1, device='mps')
pos_weight = torch.ones(3, 1, device='mps')
nn.BCEWithLogitsLoss(pos_weight=pos_weight, reduction='sum')(output, target).backward()
expected_grad = torch.empty(3, 1, device='mps').fill_(0.5)
grad = output.grad
self.assertEqual(grad, expected_grad)
def test_bce_with_logits_stability(self):
output = torch.tensor([0., -120.], device='mps')
target = torch.tensor([0., 1.], device='mps')
pos_weight = torch.tensor([1., 1.], device='mps')
out1 = nn.BCEWithLogitsLoss()(output, target)
self.assertTrue(torch.isfinite(out1).all().item())
out2 = nn.BCEWithLogitsLoss(pos_weight=pos_weight)(output, target)
self.assertTrue(torch.isfinite(out2).all().item())
def test_bce_loss_broadcasts_weights(self):
sigmoid = nn.Sigmoid()
target = torch.rand(16, 4, device='mps')
output = torch.rand(16, 4, device='mps') - 0.5
weight = torch.rand(4, device='mps')
out1 = nn.BCELoss(weight)(sigmoid(output), target)
weight = weight.expand(16, 4).contiguous()
out2 = nn.BCELoss(weight)(sigmoid(output), target)
self.assertEqual(out1, out2)
weight = torch.rand(16, 1, device='mps')
out1 = nn.BCELoss(weight)(sigmoid(output), target)
weight = weight.expand(16, 4).contiguous()
out2 = nn.BCELoss(weight)(sigmoid(output), target)
self.assertEqual(out1, out2)
def test_log_softmax(self):
values = [[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], [[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]]]
cpu_x = torch.tensor(values, device='cpu', requires_grad=True)
mps_x = torch.tensor(values, device='mps', requires_grad=True)
cpu_log_softmax = F.log_softmax(cpu_x, dim=0)
mps_log_softmax = F.log_softmax(mps_x, dim=0)
self.assertEqual(cpu_log_softmax, mps_log_softmax.to('cpu'))
cpu_grad = torch.ones_like(cpu_log_softmax)
mps_grad = torch.ones_like(cpu_log_softmax).to('mps')
cpu_log_softmax.backward(gradient=cpu_grad)
mps_log_softmax.backward(gradient=mps_grad)
self.assertEqual(cpu_x.grad, mps_x.grad.to('cpu'))
def test_eq(self):
values1 = [[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], [[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]]]
values2 = [[[1.0, 2.0, 15.0], [4.0, 5.0, 6.0]], [[7.0, 8.0, 9.0], [0.0, 11.0, 12.0]]]
mps_x = torch.tensor(values1, device='mps')
mps_y = torch.tensor(values2, device='mps')
cpu_x = torch.tensor(values1, device='cpu')
cpu_y = torch.tensor(values2, device='cpu')
result_mps = torch.eq(mps_x, mps_y)
result_cpu = torch.eq(cpu_x, cpu_y)
self.assertEqual(result_cpu, result_mps.to('cpu'))
def test_eq_int64(self):
values1 = [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]
values2 = [[[1, 2, 15], [4, 5, 6]], [[7, 8, 9], [0, 11, 12]]]
mps_x = torch.tensor(values1, device='mps')
mps_y = torch.tensor(values2, device='mps')
cpu_x = torch.tensor(values1, device='cpu')
cpu_y = torch.tensor(values2, device='cpu')
result_mps = torch.eq(mps_x, mps_y)
result_cpu = torch.eq(cpu_x, cpu_y)
self.assertEqual(result_cpu, result_mps.to('cpu'))
def test_ne(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float)
cpu_y = torch.randn(shape, device='cpu', dtype=torch.float)
mps_x = cpu_x.detach().clone().to('mps')
mps_y = cpu_y.detach().clone().to('mps')
result_mps = torch.ne(mps_x, mps_y)
result_cpu = torch.ne(cpu_x, cpu_y)
self.assertEqual(result_cpu, result_mps.to('cpu'))
helper((2, 3, 4, 5))
def test_ne_scalar(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float)
mps_x = cpu_x.detach().clone().to('mps')
result_mps = torch.ne(mps_x, 0.0)
result_cpu = torch.ne(cpu_x, 0.0)
self.assertEqual(result_cpu, result_mps.to('cpu'))
helper((2, 3, 4, 5))
def test_lt(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float)
cpu_y = torch.randn(shape, device='cpu', dtype=torch.float)
mps_x = cpu_x.detach().clone().to('mps')
mps_y = cpu_y.detach().clone().to('mps')
result_mps = torch.lt(mps_x, mps_y)
result_cpu = torch.lt(cpu_x, cpu_y)
self.assertEqual(result_cpu, result_mps.to('cpu'))
helper((2, 3, 4, 5))
def test_lt_scalar(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float)
mps_x = cpu_x.detach().clone().to('mps')
result_mps = torch.lt(mps_x, 0.0)
result_cpu = torch.lt(cpu_x, 0.0)
self.assertEqual(result_cpu, result_mps.to('cpu'))
helper((2, 3, 4, 5))
def test_le(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float)
cpu_y = torch.randn(shape, device='cpu', dtype=torch.float)
mps_x = cpu_x.detach().clone().to('mps')
mps_y = cpu_y.detach().clone().to('mps')
result_mps = torch.le(mps_x, mps_y)
result_cpu = torch.le(cpu_x, cpu_y)
self.assertEqual(result_cpu, result_mps.to('cpu'))
helper((2, 3, 4, 5))
def test_le_scalar(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float)
mps_x = cpu_x.detach().clone().to('mps')
result_mps = torch.le(mps_x, 0.0)
result_cpu = torch.le(cpu_x, 0.0)
self.assertEqual(result_cpu, result_mps.to('cpu'))
helper((2, 3, 4, 5))
def test_ge(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float)
cpu_y = torch.randn(shape, device='cpu', dtype=torch.float)
mps_x = cpu_x.detach().clone().to('mps')
mps_y = cpu_y.detach().clone().to('mps')
result_mps = torch.ge(mps_x, mps_y)
result_cpu = torch.ge(cpu_x, cpu_y)
self.assertEqual(result_cpu, result_mps.to('cpu'))
helper((2, 3, 4, 5))
def test_ge_scalar(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float)
mps_x = cpu_x.detach().clone().to('mps')
result_mps = torch.ge(mps_x, 0.0)
result_cpu = torch.ge(cpu_x, 0.0)
self.assertEqual(result_cpu, result_mps.to('cpu'))
helper((2, 3, 4, 5))
def test_gt(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float)
cpu_y = torch.randn(shape, device='cpu', dtype=torch.float)
mps_x = cpu_x.detach().clone().to('mps')
mps_y = cpu_y.detach().clone().to('mps')
result_mps = torch.gt(mps_x, mps_y)
result_cpu = torch.gt(cpu_x, cpu_y)
self.assertEqual(result_cpu, result_mps.to('cpu'))
helper((2, 3, 4, 5))
def test_gt_scalar(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float)
mps_x = cpu_x.detach().clone().to('mps')
result_mps = torch.gt(mps_x, 0.0)
result_cpu = torch.gt(cpu_x, 0.0)
self.assertEqual(result_cpu, result_mps.to('cpu'))
helper((2, 3, 4, 5))
# Test forward argmin argmax
def test_argmin_argmax(self):
def helper(n, c, h, w, reduction_type, dtype=torch.float32):
if reduction_type == "max":
arg_reduction_fn = torch.argmax
else:
arg_reduction_fn = torch.argmin
cpu_x = None
x = None
if dtype not in [torch.float32, torch.bool]:
cpu_x = torch.randint(50, (n, c, h, w), device='cpu', dtype=dtype, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
elif dtype == torch.bool:
cpu_x = torch.randint(2, (n, c, h, w), device='cpu', dtype=dtype, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
else:
cpu_x = torch.randn(n, c, h, w, device='cpu', dtype=dtype, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
y = arg_reduction_fn(x)
ref_y = arg_reduction_fn(cpu_x)
self.assertEqual(y, ref_y)
y_0 = arg_reduction_fn(x, dim=0)
refy_0 = arg_reduction_fn(cpu_x, dim=0)
self.assertEqual(y_0, refy_0)
y_0dim = arg_reduction_fn(x, dim=0, keepdim=True)
refy_0dim = arg_reduction_fn(cpu_x, dim=0, keepdim=True)
self.assertEqual(y_0dim, refy_0dim)
y_1 = arg_reduction_fn(x, dim=1)
refy_1 = arg_reduction_fn(cpu_x, dim=1)
self.assertEqual(y_1, refy_1)
y_1dim = arg_reduction_fn(x, dim=1, keepdim=True)
refy_1dim = arg_reduction_fn(cpu_x, dim=1, keepdim=True)
self.assertEqual(y_1dim, refy_1dim)
y_2 = arg_reduction_fn(x, dim=2)
refy_2 = arg_reduction_fn(cpu_x, dim=2)
self.assertEqual(y_2, refy_2)
y_2dim = arg_reduction_fn(x, dim=2, keepdim=True)
refy_2dim = arg_reduction_fn(cpu_x, dim=2, keepdim=True)
self.assertEqual(y_2dim, refy_2dim)
y_3 = arg_reduction_fn(x, dim=3)
refy_3 = arg_reduction_fn(cpu_x, dim=3)
self.assertEqual(y_3, refy_3)
y_3dim = arg_reduction_fn(x, dim=3, keepdim=True)
refy_3dim = arg_reduction_fn(cpu_x, dim=3, keepdim=True)
self.assertEqual(y_3dim, refy_3dim)
helper(2, 8, 4, 4, "max", torch.float32)
helper(2, 8, 4, 4, "max", torch.int32)
helper(2, 8, 4, 4, "max", torch.float16)
helper(2, 8, 4, 4, "max", torch.int64)
helper(2, 8, 4, 4, "min", torch.float32)
helper(2, 8, 4, 4, "min", torch.int32)
helper(2, 8, 4, 4, "min", torch.float16)
helper(2, 8, 4, 4, "min", torch.int64)
# Test forward max
# Note: gradients are not tested here yet
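# For each dim: checks torch.max with and without keepdim, and the out= variant
# that returns both values and indices.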
def test_max_el(self):
def helper(n, c, h, w, dtype=torch.float32):
if dtype not in [torch.float32, torch.bool]:
cpu_x = torch.randint(50, (n, c, h, w), device='cpu', dtype=dtype, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
elif dtype == torch.bool:
cpu_x = torch.randint(2, (n, c, h, w), device='cpu', dtype=dtype, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
else:
cpu_x = torch.randn(n, c, h, w, device='cpu', dtype=dtype, requires_grad=True)
x = cpu_x.detach().clone().to('mps')
ref_y = torch.max(cpu_x)
y = torch.max(x)
self.assertEqual(y, ref_y)
for dim in [0, 1, 2, 3]:
for keepdim in [True, False]:
y, idx = torch.max(x, dim=dim, keepdim=keepdim)
refy, refidx = torch.max(cpu_x, dim=dim, keepdim=keepdim)
self.assertEqual(y, refy)
self.assertEqual(idx, refidx)
y_0 = torch.ones(c, h, w, device='mps', dtype=dtype)
idx_0 = torch.ones(c, h, w, device='mps', dtype=torch.int64)
torch.max(x, dim=0, out=(y_0, idx_0))
refy_0, refidx_0 = torch.max(cpu_x, dim=0)
self.assertEqual(y_0, refy_0)
self.assertEqual(idx_0, refidx_0)
y_0dim = torch.ones(1, c, h, w, device='mps', dtype=dtype)
idx_0dim = torch.ones(1, c, h, w, device='mps', dtype=torch.int64)
torch.max(x, dim=0, keepdim=True, out=(y_0dim, idx_0dim))
refy_0dim, refidx_0dim = torch.max(cpu_x, dim=0, keepdim=True)
self.assertEqual(y_0dim, refy_0dim)
self.assertEqual(idx_0dim, refidx_0dim)
y_1 = torch.ones(n, h, w, device='mps', dtype=dtype)
idx_1 = torch.ones(n, h, w, device='mps', dtype=torch.int64)
torch.max(x, dim=1, out=(y_1, idx_1))
refy_1, refidx_1 = torch.max(cpu_x, dim=1)
self.assertEqual(y_1, refy_1)
self.assertEqual(idx_1, refidx_1)
y_1dim = torch.ones(n, 1, h, w, device='mps', dtype=dtype)
idx_1dim = torch.ones(n, 1, h, w, device='mps', dtype=torch.int64)
torch.max(x, dim=1, keepdim=True, out=(y_1dim, idx_1dim))
refy_1dim, refidx_1dim = torch.max(cpu_x, keepdim=True, dim=1)
self.assertEqual(y_1dim, refy_1dim)
self.assertEqual(idx_1dim, refidx_1dim)
y_2 = torch.ones(n, c, w, device='mps', dtype=dtype)
idx_2 = torch.ones(n, c, w, device='mps', dtype=torch.int64)
torch.max(x, dim=2, out=(y_2, idx_2))
refy_2, refidx_2 = torch.max(cpu_x, dim=2)
self.assertEqual(y_2, refy_2)
self.assertEqual(idx_2, refidx_2)
y_2dim = torch.ones(n, c, 1, w, device='mps', dtype=dtype)
idx_2dim = torch.ones(n, c, 1, w, device='mps', dtype=torch.int64)
torch.max(x, dim=2, keepdim=True, out=(y_2dim, idx_2dim))
refy_2dim, refidx_2dim = torch.max(cpu_x, dim=2, keepdim=True,)
self.assertEqual(y_2dim, refy_2dim)
self.assertEqual(idx_2dim, refidx_2dim)
y_3 = torch.ones(n, c, h, device='mps', dtype=dtype)
idx_3 = torch.ones(n, c, h, device='mps', dtype=torch.int64)
torch.max(x, dim=3, out=(y_3, idx_3))
refy_3, refidx_3 = torch.max(cpu_x, dim=3)
self.assertEqual(y_3, refy_3)
self.assertEqual(idx_3, refidx_3)
y_3dim = torch.ones(n, c, h, 1, device='mps', dtype=dtype)
idx_3dim = torch.ones(n, c, h, 1, device='mps', dtype=torch.int64)
torch.max(x, dim=3, keepdim=True, out=(y_3dim, idx_3dim))
refy_3dim, refidx_3dim = torch.max(cpu_x, dim=3, keepdim=True,)
self.assertEqual(y_3dim, refy_3dim)
self.assertEqual(idx_3dim, refidx_3dim)
helper(2, 8, 4, 5, torch.float32)
helper(2, 8, 4, 5, torch.int32)
# helper(2, 8, 4, 5, torch.int64)
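# assumption: the int64 case above is disabled because torch.max with indices may not
# yet be supported for int64 on MPS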
def test_any(self):
def helper(shape):
input_xs = []
prod = 1
for i in range(len(shape)):
prod *= shape[i]
input_xs.append(torch.randn(prod, dtype=torch.float).reshape(shape))
input_xs.append(torch.arange(0, prod, dtype=torch.float).reshape(shape))
input_xs.append(torch.ones(prod, dtype=torch.float).reshape(shape))
input_xs.append(torch.zeros(prod, dtype=torch.float).reshape(shape))
input_xs.append(torch.arange(0, prod, dtype=torch.int).reshape(shape))
input_xs.append(torch.ones(prod, dtype=torch.int).reshape(shape))
input_xs.append(torch.zeros(prod, dtype=torch.int).reshape(shape))
input_xs.append(torch.arange(0, prod, dtype=torch.int).reshape(shape).bool())
input_xs.append(torch.ones(prod, dtype=torch.int).reshape(shape).bool())
input_xs.append(torch.zeros(prod, dtype=torch.int).reshape(shape).bool())
for i, cpu_x in enumerate(input_xs):
x = cpu_x.detach().clone().to('mps')
y = torch.any(x)
ref_y = torch.any(cpu_x)
self.assertEqual(y, ref_y)
y_0 = torch.any(x, dim=0)
refy_0 = torch.any(cpu_x, dim=0)
self.assertEqual(y_0, refy_0)
y_0dim = torch.any(x, dim=0, keepdim=True)
refy_0dim = torch.any(cpu_x, dim=0, keepdim=True)
self.assertEqual(y_0dim, refy_0dim)
y_1 = torch.any(x, dim=1)
refy_1 = torch.any(cpu_x, dim=1)
self.assertEqual(y_1, refy_1)
y_1dim = torch.any(x, dim=1, keepdim=True)
refy_1dim = torch.any(cpu_x, dim=1, keepdim=True)
self.assertEqual(y_1dim, refy_1dim)
if len(shape) > 2:
y_2 = torch.any(x, dim=2)
refy_2 = torch.any(cpu_x, dim=2)
self.assertEqual(y_2, refy_2)
y_2dim = torch.any(x, dim=2, keepdim=True)
refy_2dim = torch.any(cpu_x, dim=2, keepdim=True)
self.assertEqual(y_2dim, refy_2dim)
y_3 = torch.any(x, dim=3)
refy_3 = torch.any(cpu_x, dim=3)
self.assertEqual(y_3, refy_3)
y_3dim = torch.any(x, dim=3, keepdim=True)
refy_3dim = torch.any(cpu_x, dim=3, keepdim=True)
self.assertEqual(y_3dim, refy_3dim)
helper((1, 1, 1, 1))
helper((1, 1, 3, 3))
helper((7, 13))
helper((2, 8, 4, 5))
def test_all(self):
def helper(shape):
input_xs = []
prod = 1
for i in range(len(shape)):
prod *= shape[i]
input_xs.append(torch.randn(prod, dtype=torch.float).reshape(shape))
input_xs.append(torch.arange(0, prod, dtype=torch.float).reshape(shape))
input_xs.append(torch.ones(prod, dtype=torch.float).reshape(shape))
input_xs.append(torch.zeros(prod, dtype=torch.float).reshape(shape))
input_xs.append(torch.arange(0, prod, dtype=torch.int).reshape(shape))
input_xs.append(torch.ones(prod, dtype=torch.int).reshape(shape))
input_xs.append(torch.zeros(prod, dtype=torch.int).reshape(shape))
input_xs.append(torch.arange(0, prod, dtype=torch.int).reshape(shape).bool())
input_xs.append(torch.ones(prod, dtype=torch.int).reshape(shape).bool())
input_xs.append(torch.zeros(prod, dtype=torch.int).reshape(shape).bool())
for i, cpu_x in enumerate(input_xs):
x = cpu_x.detach().clone().to('mps')
y = torch.all(x)
ref_y = torch.all(cpu_x)
self.assertEqual(y, ref_y)
y_0 = torch.all(x, dim=0)
refy_0 = torch.all(cpu_x, dim=0)
self.assertEqual(y_0, refy_0)
y_0dim = torch.all(x, dim=0, keepdim=True)
refy_0dim = torch.all(cpu_x, dim=0, keepdim=True)
self.assertEqual(y_0dim, refy_0dim)
y_1 = torch.all(x, dim=1)
refy_1 = torch.all(cpu_x, dim=1)
self.assertEqual(y_1, refy_1)
y_1dim = torch.all(x, dim=1, keepdim=True)
refy_1dim = torch.all(cpu_x, dim=1, keepdim=True)
self.assertEqual(y_1dim, refy_1dim)
if len(shape) > 2:
y_2 = torch.all(x, dim=2)
refy_2 = torch.all(cpu_x, dim=2)
self.assertEqual(y_2, refy_2)
y_2dim = torch.all(x, dim=2, keepdim=True)
refy_2dim = torch.all(cpu_x, dim=2, keepdim=True)
self.assertEqual(y_2dim, refy_2dim)
y_3 = torch.all(x, dim=3)
refy_3 = torch.all(cpu_x, dim=3)
self.assertEqual(y_3, refy_3)
y_3dim = torch.all(x, dim=3, keepdim=True)
refy_3dim = torch.all(cpu_x, dim=3, keepdim=True)
self.assertEqual(y_3dim, refy_3dim)
helper((1, 1, 1, 1))
helper((1, 1, 3, 3))
helper((7, 13))
helper((2, 8, 4, 5))
# Test forward min
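# Mirrors test_max_el for torch.min (float32 only): per-dim values/indices,
# keepdim, and out= variants.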
def test_min_el(self):
def helper(n, c, h, w):
cpu_x = torch.randn(n, c, h, w, device='cpu', dtype=torch.float, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
y = torch.min(x)
ref_y = torch.min(cpu_x)
self.assertEqual(y, ref_y)
y_0, idx_0 = torch.min(x, dim=0)
refy_0, refidx_0 = torch.min(cpu_x, dim=0)
self.assertEqual(y_0, refy_0)
self.assertEqual(idx_0, refidx_0)
y_0 = torch.ones(c, h, w, device='mps', dtype=torch.float)
idx_0 = torch.ones(c, h, w, device='mps', dtype=torch.int64)
torch.min(x, dim=0, out=(y_0, idx_0))
refy_0, refidx_0 = torch.min(cpu_x, dim=0)
self.assertEqual(y_0, refy_0)
self.assertEqual(idx_0, refidx_0)
y_0dim, idx_0dim = torch.min(x, dim=0, keepdim=True)
refy_0dim, refidx_0dim = torch.min(cpu_x, dim=0, keepdim=True)
self.assertEqual(y_0dim, refy_0dim)
self.assertEqual(idx_0dim, refidx_0dim)
y_0dim = torch.ones(1, c, h, w, device='mps', dtype=torch.float)
idx_0dim = torch.ones(1, c, h, w, device='mps', dtype=torch.int64)
torch.min(x, dim=0, keepdim=True, out=(y_0dim, idx_0dim))
refy_0dim, refidx_0dim = torch.min(cpu_x, dim=0, keepdim=True)
self.assertEqual(y_0dim, refy_0dim)
self.assertEqual(idx_0dim, refidx_0dim)
y_1, idx_1 = torch.min(x, dim=1)
refy_1, refidx_1 = torch.min(cpu_x, dim=1)
self.assertEqual(y_1, refy_1)
self.assertEqual(idx_1, refidx_1)
y_1 = torch.ones(n, h, w, device='mps', dtype=torch.float)
idx_1 = torch.ones(n, h, w, device='mps', dtype=torch.int64)
torch.min(x, dim=1, out=(y_1, idx_1))
refy_1, refidx_1 = torch.min(cpu_x, dim=1)
self.assertEqual(y_1, refy_1)
self.assertEqual(idx_1, refidx_1)
y_1dim, idx_1dim = torch.min(x, dim=1, keepdim=True)
refy_1dim, refidx_1dim = torch.min(cpu_x, dim=1, keepdim=True)
self.assertEqual(y_1dim, refy_1dim)
self.assertEqual(idx_1dim, refidx_1dim)
y_1dim = torch.ones(n, 1, h, w, device='mps', dtype=torch.float)
idx_1dim = torch.ones(n, 1, h, w, device='mps', dtype=torch.int64)
torch.min(x, dim=1, keepdim=True, out=(y_1dim, idx_1dim))
refy_1dim, refidx_1dim = torch.min(cpu_x, keepdim=True, dim=1)
self.assertEqual(y_1dim, refy_1dim)
self.assertEqual(idx_1dim, refidx_1dim)
y_2, idx_2 = torch.min(x, dim=2)
refy_2, refidx_2 = torch.min(cpu_x, dim=2)
self.assertEqual(y_2, refy_2)
self.assertEqual(idx_2, refidx_2)
y_2 = torch.ones(n, c, w, device='mps', dtype=torch.float)
idx_2 = torch.ones(n, c, w, device='mps', dtype=torch.int64)
torch.min(x, dim=2, out=(y_2, idx_2))
refy_2, refidx_2 = torch.min(cpu_x, dim=2)
self.assertEqual(y_2, refy_2)
self.assertEqual(idx_2, refidx_2)
y_2dim, idx_2dim = torch.min(x, dim=2, keepdim=True)
refy_2dim, refidx_2dim = torch.min(cpu_x, dim=2, keepdim=True)
self.assertEqual(y_2dim, refy_2dim)
self.assertEqual(idx_2dim, refidx_2dim)
y_2dim = torch.ones(n, c, 1, w, device='mps', dtype=torch.float)
idx_2dim = torch.ones(n, c, 1, w, device='mps', dtype=torch.int64)
torch.min(x, dim=2, keepdim=True, out=(y_2dim, idx_2dim))
refy_2dim, refidx_2dim = torch.min(cpu_x, dim=2, keepdim=True,)
self.assertEqual(y_2dim, refy_2dim)
self.assertEqual(idx_2dim, refidx_2dim)
y_3, idx_3 = torch.min(x, dim=3)
refy_3, refidx_3 = torch.min(cpu_x, dim=3)
self.assertEqual(y_3, refy_3)
self.assertEqual(idx_3, refidx_3)
y_3 = torch.ones(n, c, h, device='mps', dtype=torch.float)
idx_3 = torch.ones(n, c, h, device='mps', dtype=torch.int64)
torch.min(x, dim=3, out=(y_3, idx_3))
refy_3, refidx_3 = torch.min(cpu_x, dim=3)
self.assertEqual(y_3, refy_3)
self.assertEqual(idx_3, refidx_3)
y_3dim, idx_3dim = torch.min(x, dim=3, keepdim=True)
refy_3dim, refidx_3dim = torch.min(cpu_x, dim=3, keepdim=True)
self.assertEqual(y_3dim, refy_3dim)
self.assertEqual(idx_3dim, refidx_3dim)
y_3dim = torch.ones(n, c, h, 1, device='mps', dtype=torch.float)
idx_3dim = torch.ones(n, c, h, 1, device='mps', dtype=torch.int64)
torch.min(x, dim=3, keepdim=True, out=(y_3dim, idx_3dim))
refy_3dim, refidx_3dim = torch.min(cpu_x, dim=3, keepdim=True,)
self.assertEqual(y_3dim, refy_3dim)
self.assertEqual(idx_3dim, refidx_3dim)
helper(2, 8, 4, 5)
# Test forward sum
def test_sum(self):
def helper(n, c, h, w, dtype=torch.float32):
cpu_x = None
x = None
if dtype not in [torch.float32, torch.bool]:
cpu_x = torch.randint(50, (n, c, h, w), device='cpu', dtype=dtype, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
elif dtype == torch.bool:
cpu_x = torch.randint(2, (n, c, h, w), device='cpu', dtype=dtype, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
else:
cpu_x = torch.randn(n, c, h, w, device='cpu', dtype=dtype, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
all_sum = torch.sum(x)
all_sum_cpu = torch.sum(cpu_x)
self.assertEqual(all_sum, all_sum_cpu)
nil_dim_sum = torch.sum(x, dim=[])
nil_dim_sum_cpu = torch.sum(cpu_x, dim=[])
self.assertEqual(nil_dim_sum, nil_dim_sum_cpu)
nil_dim_sum_keepdim = torch.sum(x, dim=[], keepdim=True)
nil_dim_sum_cpu_keepdim = torch.sum(cpu_x, dim=[], keepdim=True)
self.assertEqual(nil_dim_sum_keepdim, nil_dim_sum_cpu_keepdim)
zero_dim_sum = torch.sum(x, dim=[0])
zero_dim_sum_cpu = torch.sum(cpu_x, dim=[0])
self.assertEqual(zero_dim_sum, zero_dim_sum_cpu)
zero_dim_sum_keepdim = torch.sum(x, dim=[0], keepdim=True)
zero_dim_sum_cpu_keepdim = torch.sum(cpu_x, dim=[0], keepdim=True)
self.assertEqual(zero_dim_sum_keepdim, zero_dim_sum_cpu_keepdim)
zero_one_dim_sum = torch.sum(x, dim=[0, 1])
zero_one_dim_sum_cpu = torch.sum(cpu_x, dim=[0, 1])
self.assertEqual(zero_one_dim_sum, zero_one_dim_sum_cpu)
zero_one_dim_sum_keepdim = torch.sum(x, dim=[0, 1], keepdim=True)
zero_one_dim_sum_cpu_keepdim = torch.sum(cpu_x, dim=[0, 1], keepdim=True)
self.assertEqual(zero_one_dim_sum_keepdim, zero_one_dim_sum_cpu_keepdim)
two_three_dim_sum = torch.sum(x, dim=[2, 3])
two_three_dim_sum_cpu = torch.sum(cpu_x, dim=[2, 3])
self.assertEqual(two_three_dim_sum, two_three_dim_sum_cpu)
two_three_keepdim_sum = torch.sum(x, dim=[2, 3], keepdim=True)
two_three_dim_keepsum_cpu = torch.sum(cpu_x, dim=[2, 3], keepdim=True)
self.assertEqual(two_three_keepdim_sum, two_three_dim_keepsum_cpu)
helper(2, 8, 4, 5)
helper(2, 8, 4, 5, dtype=torch.int32)
helper(2, 8, 4, 5, dtype=torch.int64)
helper(2, 8, 4, 5, dtype=torch.bool)
# Test forward prod
def test_prod(self):
def helper(shape, dtype=torch.float32):
cpu_x = None
x = None
if dtype not in [torch.float32, torch.bool]:
cpu_x = torch.randint(1, 6, shape, device='cpu', dtype=dtype, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
elif dtype == torch.bool:
cpu_x = torch.randint(2, shape, device='cpu', dtype=dtype, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
else:
cpu_x = torch.randn(shape, device='cpu', dtype=dtype, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
all_prod = torch.prod(x)
all_prod_cpu = torch.prod(cpu_x)
self.assertEqual(all_prod, all_prod_cpu)
for dim in range(len(shape)):
dim_prod = torch.prod(x, dim=dim)
dim_prod_cpu = torch.prod(cpu_x, dim=dim)
self.assertEqual(dim_prod, dim_prod_cpu)
dim_prod_keepdim = torch.prod(x, dim=dim, keepdim=True)
dim_prod_cpu_keepdim = torch.prod(cpu_x, dim=dim, keepdim=True)
self.assertEqual(dim_prod_keepdim, dim_prod_cpu_keepdim)
for dtype in [torch.float32, torch.int32, torch.int64, torch.bool]:
helper((2, 3), dtype)
# Test forward mean
def test_mean(self):
def helper(n, c, h, w):
cpu_x = torch.randn(n, c, h, w, device='cpu', dtype=torch.float, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
all_mean = torch.mean(x)
all_mean_cpu = torch.mean(cpu_x)
self.assertEqual(all_mean, all_mean_cpu)
nil_dim_mean = torch.mean(x, dim=[])
nil_dim_mean_cpu = torch.mean(cpu_x, dim=[])
self.assertEqual(nil_dim_mean, nil_dim_mean_cpu)
nil_dim_mean_keepdim = torch.mean(x, dim=[], keepdim=True)
nil_dim_mean_cpu_keepdim = torch.mean(cpu_x, dim=[], keepdim=True)
self.assertEqual(nil_dim_mean_keepdim, nil_dim_mean_cpu_keepdim)
zero_dim_mean = torch.mean(x, dim=[0])
zero_dim_mean_cpu = torch.mean(cpu_x, dim=[0])
self.assertEqual(zero_dim_mean, zero_dim_mean_cpu)
zero_dim_mean_keepdim = torch.mean(x, dim=[0], keepdim=True)
zero_dim_mean_cpu_keepdim = torch.mean(cpu_x, dim=[0], keepdim=True)
self.assertEqual(zero_dim_mean_keepdim, zero_dim_mean_cpu_keepdim)
zero_one_dim_mean = torch.mean(x, dim=[0, 1])
zero_one_dim_mean_cpu = torch.mean(cpu_x, dim=[0, 1])
self.assertEqual(zero_one_dim_mean, zero_one_dim_mean_cpu)
zero_one_dim_mean_keepdim = torch.mean(x, dim=[0, 1], keepdim=True)
zero_one_dim_mean_cpu_keepdim = torch.mean(cpu_x, dim=[0, 1], keepdim=True)
self.assertEqual(zero_one_dim_mean_keepdim, zero_one_dim_mean_cpu_keepdim)
two_three_dim_mean = torch.mean(x, dim=[2, 3])
two_three_dim_mean_cpu = torch.mean(cpu_x, dim=[2, 3])
self.assertEqual(two_three_dim_mean, two_three_dim_mean_cpu)
two_three_keepdim_mean = torch.mean(x, dim=[2, 3], keepdim=True)
two_three_dim_keepmean_cpu = torch.mean(cpu_x, dim=[2, 3], keepdim=True)
self.assertEqual(two_three_keepdim_mean, two_three_dim_keepmean_cpu)
helper(2, 8, 4, 5)
# Test std
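# Exercises torch.std with unbiased=False and unbiased=True over various dim subsets,
# with and without keepdim.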
def test_std(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
all_std = torch.std(x, unbiased=False)
all_std_cpu = torch.std(cpu_x, unbiased=False)
self.assertEqual(all_std, all_std_cpu)
nil_dim_std = torch.std(x, dim=[], unbiased=False)
nil_dim_std_cpu = torch.std(cpu_x, dim=[], unbiased=False)
self.assertEqual(nil_dim_std, nil_dim_std_cpu)
nil_dim_std_keepdim = torch.std(x, dim=[], keepdim=True, unbiased=False)
nil_dim_std_cpu_keepdim = torch.std(cpu_x, dim=[], keepdim=True, unbiased=False)
self.assertEqual(nil_dim_std_keepdim, nil_dim_std_cpu_keepdim)
zero_dim_std = torch.std(x, dim=[0], unbiased=False)
zero_dim_std_cpu = torch.std(cpu_x, dim=[0], unbiased=False)
self.assertEqual(zero_dim_std, zero_dim_std_cpu)
zero_dim_std_keepdim = torch.std(x, dim=[0], keepdim=True, unbiased=False)
zero_dim_std_cpu_keepdim = torch.std(cpu_x, dim=[0], keepdim=True, unbiased=False)
self.assertEqual(zero_dim_std_keepdim, zero_dim_std_cpu_keepdim)
zero_one_dim_std = torch.std(x, dim=[0, 1], unbiased=False)
zero_one_dim_std_cpu = torch.std(cpu_x, dim=[0, 1], unbiased=False)
self.assertEqual(zero_one_dim_std, zero_one_dim_std_cpu)
zero_one_dim_std_keepdim = torch.std(x, dim=[0, 1], keepdim=True, unbiased=False)
zero_one_dim_std_cpu_keepdim = torch.std(cpu_x, dim=[0, 1], keepdim=True, unbiased=False)
self.assertEqual(zero_one_dim_std_keepdim, zero_one_dim_std_cpu_keepdim)
two_three_dim_std = torch.std(x, dim=[2, 3], unbiased=False)
two_three_dim_std_cpu = torch.std(cpu_x, dim=[2, 3], unbiased=False)
self.assertEqual(two_three_dim_std, two_three_dim_std_cpu)
two_three_keepdim_std = torch.std(x, dim=[2, 3], keepdim=True, unbiased=False)
two_three_dim_keepstd_cpu = torch.std(cpu_x, dim=[2, 3], keepdim=True, unbiased=False)
self.assertEqual(two_three_keepdim_std, two_three_dim_keepstd_cpu)
all_std = torch.std(x, unbiased=True)
all_std_cpu = torch.std(cpu_x, unbiased=True)
self.assertEqual(all_std, all_std_cpu)
nil_dim_std = torch.std(x, dim=[], unbiased=True)
nil_dim_std_cpu = torch.std(cpu_x, dim=[], unbiased=True)
self.assertEqual(nil_dim_std, nil_dim_std_cpu)
nil_dim_std_keepdim = torch.std(x, dim=[], keepdim=True, unbiased=True)
nil_dim_std_cpu_keepdim = torch.std(cpu_x, dim=[], keepdim=True, unbiased=True)
self.assertEqual(nil_dim_std_keepdim, nil_dim_std_cpu_keepdim)
zero_dim_std = torch.std(x, dim=[0], unbiased=True)
zero_dim_std_cpu = torch.std(cpu_x, dim=[0], unbiased=True)
self.assertEqual(zero_dim_std, zero_dim_std_cpu)
zero_dim_std_keepdim = torch.std(x, dim=[0], keepdim=True, unbiased=True)
zero_dim_std_cpu_keepdim = torch.std(cpu_x, dim=[0], keepdim=True, unbiased=True)
self.assertEqual(zero_dim_std_keepdim, zero_dim_std_cpu_keepdim)
zero_one_dim_std = torch.std(x, dim=[0, 1], unbiased=True)
zero_one_dim_std_cpu = torch.std(cpu_x, dim=[0, 1], unbiased=True)
self.assertEqual(zero_one_dim_std, zero_one_dim_std_cpu)
zero_one_dim_std_keepdim = torch.std(x, dim=[0, 1], keepdim=True, unbiased=True)
zero_one_dim_std_cpu_keepdim = torch.std(cpu_x, dim=[0, 1], keepdim=True, unbiased=True)
self.assertEqual(zero_one_dim_std_keepdim, zero_one_dim_std_cpu_keepdim)
two_three_dim_std = torch.std(x, dim=[2, 3], unbiased=True)
two_three_dim_std_cpu = torch.std(cpu_x, dim=[2, 3], unbiased=True)
self.assertEqual(two_three_dim_std, two_three_dim_std_cpu)
two_three_keepdim_std = torch.std(x, dim=[2, 3], keepdim=True, unbiased=True)
two_three_dim_keepstd_cpu = torch.std(cpu_x, dim=[2, 3], keepdim=True, unbiased=True)
self.assertEqual(two_three_keepdim_std, two_three_dim_keepstd_cpu)
helper((4, 5, 6, 7))
# verify that a change in the input shape does not cause problems with graph caching
helper((9, 5, 6, 7))
# Test var
def test_var(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
all_var = torch.var(x, unbiased=False)
all_var_cpu = torch.var(cpu_x, unbiased=False)
self.assertEqual(all_var, all_var_cpu)
nil_dim_var = torch.var(x, dim=[], unbiased=False)
nil_dim_var_cpu = torch.var(cpu_x, dim=[], unbiased=False)
self.assertEqual(nil_dim_var, nil_dim_var_cpu)
nil_dim_var_keepdim = torch.var(x, dim=[], keepdim=True, unbiased=False)
nil_dim_var_cpu_keepdim = torch.var(cpu_x, dim=[], keepdim=True, unbiased=False)
self.assertEqual(nil_dim_var_keepdim, nil_dim_var_cpu_keepdim)
zero_dim_var = torch.var(x, dim=[0], unbiased=False)
zero_dim_var_cpu = torch.var(cpu_x, dim=[0], unbiased=False)
self.assertEqual(zero_dim_var, zero_dim_var_cpu)
zero_dim_var_keepdim = torch.var(x, dim=[0], keepdim=True, unbiased=False)
zero_dim_var_cpu_keepdim = torch.var(cpu_x, dim=[0], keepdim=True, unbiased=False)
self.assertEqual(zero_dim_var_keepdim, zero_dim_var_cpu_keepdim)
zero_one_dim_var = torch.var(x, dim=[0, 1], unbiased=False)
zero_one_dim_var_cpu = torch.var(cpu_x, dim=[0, 1], unbiased=False)
self.assertEqual(zero_one_dim_var, zero_one_dim_var_cpu)
zero_one_dim_var_keepdim = torch.var(x, dim=[0, 1], keepdim=True, unbiased=False)
zero_one_dim_var_cpu_keepdim = torch.var(cpu_x, dim=[0, 1], keepdim=True, unbiased=False)
self.assertEqual(zero_one_dim_var_keepdim, zero_one_dim_var_cpu_keepdim)
two_three_dim_var = torch.var(x, dim=[2, 3], unbiased=False)
two_three_dim_var_cpu = torch.var(cpu_x, dim=[2, 3], unbiased=False)
self.assertEqual(two_three_dim_var, two_three_dim_var_cpu)
two_three_keepdim_var = torch.var(x, dim=[2, 3], keepdim=True, unbiased=False)
two_three_dim_keepvar_cpu = torch.var(cpu_x, dim=[2, 3], keepdim=True, unbiased=False)
self.assertEqual(two_three_keepdim_var, two_three_dim_keepvar_cpu)
all_var = torch.var(x, unbiased=True)
all_var_cpu = torch.var(cpu_x, unbiased=True)
self.assertEqual(all_var, all_var_cpu)
nil_dim_var = torch.var(x, dim=[], unbiased=True)
nil_dim_var_cpu = torch.var(cpu_x, dim=[], unbiased=True)
self.assertEqual(nil_dim_var, nil_dim_var_cpu)
nil_dim_var_keepdim = torch.var(x, dim=[], keepdim=True, unbiased=True)
nil_dim_var_cpu_keepdim = torch.var(cpu_x, dim=[], keepdim=True, unbiased=True)
self.assertEqual(nil_dim_var_keepdim, nil_dim_var_cpu_keepdim)
zero_dim_var = torch.var(x, dim=[0], unbiased=True)
zero_dim_var_cpu = torch.var(cpu_x, dim=[0], unbiased=True)
self.assertEqual(zero_dim_var, zero_dim_var_cpu)
zero_dim_var_keepdim = torch.var(x, dim=[0], keepdim=True, unbiased=True)
zero_dim_var_cpu_keepdim = torch.var(cpu_x, dim=[0], keepdim=True, unbiased=True)
self.assertEqual(zero_dim_var_keepdim, zero_dim_var_cpu_keepdim)
zero_one_dim_var = torch.var(x, dim=[0, 1], unbiased=True)
zero_one_dim_var_cpu = torch.var(cpu_x, dim=[0, 1], unbiased=True)
self.assertEqual(zero_one_dim_var, zero_one_dim_var_cpu)
zero_one_dim_var_keepdim = torch.var(x, dim=[0, 1], keepdim=True, unbiased=True)
zero_one_dim_var_cpu_keepdim = torch.var(cpu_x, dim=[0, 1], keepdim=True, unbiased=True)
self.assertEqual(zero_one_dim_var_keepdim, zero_one_dim_var_cpu_keepdim)
two_three_dim_var = torch.var(x, dim=[2, 3], unbiased=True)
two_three_dim_var_cpu = torch.var(cpu_x, dim=[2, 3], unbiased=True)
self.assertEqual(two_three_dim_var, two_three_dim_var_cpu)
two_three_keepdim_var = torch.var(x, dim=[2, 3], keepdim=True, unbiased=True)
two_three_dim_keepvar_cpu = torch.var(cpu_x, dim=[2, 3], keepdim=True, unbiased=True)
self.assertEqual(two_three_keepdim_var, two_three_dim_keepvar_cpu)
helper((4, 5, 6, 7))
# verify that a change in the input shape does not cause problems with graph caching
helper((9, 5, 6, 7))
# Test forward amax
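# Unlike torch.max(dim), amax/amin distribute the gradient evenly among tied extreme
# values, so the backward results below are well defined even with duplicates.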
def test_amax(self):
def helper(shape, dim, keepdim):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
result = torch.amax(x, dim=dim, keepdim=keepdim)
result_cpu = torch.amax(cpu_x, dim=dim, keepdim=keepdim)
cpu_grad = torch.randn(result_cpu.shape)
grad = cpu_grad.to('mps')
result_cpu.backward(gradient=cpu_grad)
result.backward(gradient=grad)
self.assertEqual(result, result_cpu)
self.assertEqual(x.grad, cpu_x.grad)
for dim in ([], [0], [0, 1], [2, 3]):
for keepdim in [False, True]:
helper((2, 8, 4, 5), dim, keepdim)
# Test forward amin
def test_amin(self):
def helper(shape, dim, keepdim):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
result = torch.amin(x, dim=dim, keepdim=keepdim)
result_cpu = torch.amin(cpu_x, dim=dim, keepdim=keepdim)
cpu_grad = torch.randn(result_cpu.shape)
grad = cpu_grad.to('mps')
result_cpu.backward(gradient=cpu_grad)
result.backward(gradient=grad)
self.assertEqual(result, result_cpu)
self.assertEqual(x.grad, cpu_x.grad)
for dim in ([], [0], [0, 1], [2, 3]):
for keepdim in [False, True]:
helper((2, 8, 4, 5), dim, keepdim)
# Test minimum and maximum
def test_minimum_maximum(self):
def helper(n, c, h, w):
cpu_x = torch.randn(n, c, h, w, device='cpu', dtype=torch.float, requires_grad=False)
cpu_y = torch.randn(n, c, h, w, device='cpu', dtype=torch.float, requires_grad=False)
mps_x = cpu_x.detach().clone().to('mps')
mps_y = cpu_y.detach().clone().to('mps')
minimum_result_cpu = torch.minimum(cpu_x, cpu_y)
minimum_result_mps = torch.minimum(mps_x, mps_y)
self.assertEqual(minimum_result_cpu, minimum_result_mps)
maximum_result_cpu = torch.maximum(cpu_x, cpu_y)
maximum_result_mps = torch.maximum(mps_x, mps_y)
self.assertEqual(maximum_result_cpu, maximum_result_mps)
helper(1, 1, 4, 5)
# Test clamp_min
def test_clamp_min(self):
def helper(n, c, h, w):
cpu_x = torch.randn(n, c, h, w, device='cpu', dtype=torch.float, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
cpu_min_t = torch.randn(n, c, h, w, device='cpu', dtype=torch.float, requires_grad=False)
min_t = cpu_min_t.detach().clone().to('mps')
clamp_min_result = torch.clamp_min(x, min=5.0)
clamp_min_result_cpu = torch.clamp_min(cpu_x, min=5.0)
self.assertEqual(clamp_min_result, clamp_min_result_cpu)
clamp_min_t_result = torch.clamp_min(x, min=min_t)
clamp_min_t_result_cpu = torch.clamp_min(cpu_x, min=cpu_min_t)
self.assertEqual(clamp_min_t_result, clamp_min_t_result_cpu)
helper(2, 8, 4, 5)
# Test clamp_max
def test_clamp_max(self):
def helper(n, c, h, w):
cpu_x = torch.randn(n, c, h, w, device='cpu', dtype=torch.float, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
cpu_max_t = torch.randn(n, c, h, w, device='cpu', dtype=torch.float, requires_grad=False)
max_t = cpu_max_t.detach().clone().to('mps')
clamp_max_result = torch.clamp_max(x, max=100.0)
clamp_max_result_cpu = torch.clamp_max(cpu_x, max=100.0)
self.assertEqual(clamp_max_result, clamp_max_result_cpu)
clamp_max_t_result = torch.clamp_max(x, max=max_t)
clamp_max_t_result_cpu = torch.clamp_max(cpu_x, max=cpu_max_t)
self.assertEqual(clamp_max_t_result, clamp_max_t_result_cpu)
helper(2, 8, 4, 5)
# Test clamp
def test_clamp(self):
def helper(n, c, h, w):
import numpy as np
upper_bound = 1000
half_upper_bound = upper_bound / 2
# x=[0..1000)
x_arr = upper_bound * np.random.random_sample(size=(n, c, h, w)).astype(np.float32)
cpu_x = torch.tensor(x_arr, device='cpu', dtype=torch.float, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
# min=[0..500)
min_arr = half_upper_bound * np.random.random_sample(size=(n, c, h, w)).astype(np.float32)
cpu_min_t = torch.tensor(min_arr, device='cpu', dtype=torch.float, requires_grad=False)
min_t = cpu_min_t.detach().clone().to('mps')
# max=[500..1000), to ensure the max values are greater than the mins
max_arr = (half_upper_bound * np.random.random_sample(size=(n, c, h, w)).astype(np.float32)) + half_upper_bound
cpu_max_t = torch.tensor(max_arr, device='cpu', dtype=torch.float, requires_grad=False)
max_t = cpu_max_t.detach().clone().to('mps')
# [200..600]: just an arbitrary range between [0..1000]
clamp_result = torch.clamp(x, min=200.0, max=600.0)
clamp_result_cpu = torch.clamp(cpu_x, min=200.0, max=600.0)
self.assertEqual(clamp_result, clamp_result_cpu)
# test optional scalar refs and cached graph keys by passing only max
clamp_opt_result = torch.clamp(x, max=600.0)
clamp_opt_result_cpu = torch.clamp(cpu_x, max=600.0)
self.assertEqual(clamp_opt_result, clamp_opt_result_cpu)
clamp_t_result = torch.clamp(x, min=min_t, max=max_t)
clamp_t_result_cpu = torch.clamp(cpu_x, min=cpu_min_t, max=cpu_max_t)
self.assertEqual(clamp_t_result, clamp_t_result_cpu)
# test optional tensor refs and cached graph keys by passing only max
clamp_topt_result = torch.clamp(x, max=max_t)
clamp_topt_result_cpu = torch.clamp(cpu_x, max=cpu_max_t)
self.assertEqual(clamp_topt_result, clamp_topt_result_cpu)
# test inplace clamping
x.clamp_(min=200.0, max=600.0)
cpu_x.clamp_(min=200.0, max=600.0)
self.assertEqual(cpu_x, x)
helper(2, 8, 4, 5)
def test_divmode(self):
def helper(shape, rounding_mode):
for dtype in [torch.float32]:
cpu_x = torch.randn(shape, device='cpu', dtype=dtype, requires_grad=False)
mps_x = cpu_x.detach().clone().to('mps')
# note: the divisor below is random and not clamped, so values near zero are possible
cpu_y = torch.randn(shape, device='cpu', dtype=dtype, requires_grad=False)
mps_y = cpu_y.detach().clone().to('mps')
result_div_cpu = torch.div(cpu_x, cpu_y, rounding_mode=rounding_mode)
result_div_mps = torch.div(mps_x, mps_y, rounding_mode=rounding_mode)
self.assertEqual(result_div_mps, result_div_cpu)
helper((2, 8, 4, 5), None)
helper((2, 8, 4, 5), "floor")
helper((2, 8, 4, 5), "trunc")
def test_rounding(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
mps_x = cpu_x.detach().clone().to('mps')
result_floor_cpu = torch.floor(cpu_x)
result_floor_mps = torch.floor(mps_x)
self.assertEqual(result_floor_mps, result_floor_cpu)
result_ceil_cpu = torch.ceil(cpu_x)
result_ceil_mps = torch.ceil(mps_x)
self.assertEqual(result_ceil_mps, result_ceil_cpu)
result_trunc_cpu = torch.trunc(cpu_x)
result_trunc_mps = torch.trunc(mps_x)
self.assertEqual(result_trunc_mps, result_trunc_cpu)
result_round_cpu = torch.round(cpu_x)
result_round_mps = torch.round(mps_x)
self.assertEqual(result_round_mps, result_round_cpu)
helper((2, 6, 3, 5))
helper((2, 8, 4, 5))
def test_expand(self):
def helper(n, c):
values = [[1.0], [4.0], [7.0]]
cpu_x = torch.tensor(values, device='cpu')
x = cpu_x.detach().clone().to('mps')
strided_cpu = torch.as_strided(cpu_x, (3, 4), (1, 0))
strided_mps = torch.as_strided(x, (3, 4), (1, 0))
self.assertEqual(strided_mps, strided_cpu)
helper(3, 1)
def test_select(self):
def helper(n, c):
cpu_x = torch.randn(n, c, device='cpu', dtype=torch.float, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
strided_cpu = torch.as_strided(cpu_x, (3, 1), (3, 1))
strided_mps = torch.as_strided(x, (3, 1), (3, 1))
self.assertEqual(strided_mps, strided_cpu)
strided_cpu = torch.as_strided(cpu_x, (1, 3), (3, 1))
strided_mps = torch.as_strided(x, (1, 3), (3, 1))
self.assertEqual(strided_mps, strided_cpu)
strided_cpu = torch.as_strided(cpu_x, (3, 1), (3, 1), storage_offset=1)
strided_mps = torch.as_strided(x, (3, 1), (3, 1), storage_offset=1)
self.assertEqual(strided_mps, strided_cpu)
helper(3, 3)
def test_assert_topk(self):
# k > 16 raises an error, as expected
with self.assertRaisesRegex(RuntimeError, "Currently topk on mps works only for k<=16"):
xs = torch.arange(30).to('mps')
xs.topk(30)
# for k <= 16 it works fine
ys_cpu = torch.arange(30)
ys_mps = ys_cpu.to('mps')
self.assertEqual(ys_cpu.topk(16), ys_mps.topk(16))
def test_topk(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
for largest_val in [True, False]:
if (type(shape) == tuple):
for curr_dim in range(0, len(shape)):
dim_size = shape[curr_dim]
for k in range(1, dim_size + 1):
topk_values, topk_indices = torch.topk(x, k, dim=curr_dim, largest=largest_val)
topk_values_cpu, topk_indices_cpu = torch.topk(cpu_x, k, dim=curr_dim, largest=largest_val)
self.assertEqual(topk_values, topk_values_cpu)
self.assertEqual(topk_indices, topk_indices_cpu)
else:
for k in range(1, shape):
topk_values, topk_indices = torch.topk(x, k, dim=0, largest=largest_val)
topk_values_cpu, topk_indices_cpu = torch.topk(cpu_x, k, dim=0, largest=largest_val)
self.assertEqual(topk_values, topk_values_cpu)
self.assertEqual(topk_indices, topk_indices_cpu)
helper(2)
helper((5, 1))
helper((1, 5))
helper((5, 9, 7, 4))
def test_upsample_nearest_exact2d(self):
def helper(N, C, H, W):
inputCPU = torch.arange(N * C * H * W, device='cpu', dtype=torch.float,
requires_grad=True).reshape(N, C, H, W)
inputCPU.retain_grad()
inputMPS = inputCPU.detach().clone().to('mps').requires_grad_()
outputCPU = torch.nn.functional.interpolate(inputCPU, size=(5, 5), mode='nearest-exact')
outputMPS = torch.nn.functional.interpolate(inputMPS, size=(5, 5), mode='nearest-exact')
self.assertEqual(outputCPU, outputMPS)
outputCPU.backward(gradient=torch.full_like(outputCPU, 0.3))
outputMPS.backward(gradient=torch.full_like(outputMPS, 0.3))
self.assertEqual(inputCPU.grad, inputMPS.grad)
helper(1, 1, 4, 4)
helper(7, 5, 3, 2)
def test_upsample_nearest2d(self):
def helper(N, C, H, W):
inputCPU = torch.arange(N * C * H * W, device='cpu', dtype=torch.float,
requires_grad=True).reshape(N, C, H, W)
inputCPU.retain_grad()
inputMPS = inputCPU.detach().to('mps').requires_grad_()
values = [1, 2, 5, 10, 40]
for i in values:
for j in values:
upsample_nearest2d = nn.UpsamplingNearest2d(scale_factor=(i, j))
outputCPU = upsample_nearest2d(inputCPU)
outputMPS = upsample_nearest2d(inputMPS)
self.assertEqual(outputCPU, outputMPS)
upsample_nearest2d = nn.UpsamplingNearest2d((i * H, j * W))
outputCPU = upsample_nearest2d(inputCPU)
outputMPS = upsample_nearest2d(inputMPS)
self.assertEqual(outputCPU, outputMPS)
outputCPU.backward(gradient=torch.full_like(outputCPU, 0.3))
outputMPS.backward(gradient=torch.full_like(outputMPS, 0.3))
self.assertEqual(inputCPU.grad, inputMPS.grad)
helper(1, 1, 4, 4)
helper(7, 5, 3, 2)
def test_upsample_bilinear2d(self):
def helper(N, C, H, W):
inputCPU = torch.arange(N * C * H * W, device='cpu', dtype=torch.float,
requires_grad=True).reshape(N, C, H, W)
inputCPU.retain_grad()
inputMPS = inputCPU.detach().clone().to('mps').requires_grad_()
values = [1, 2, 5, 10, 40]
for i in values:
for j in values:
upsample_bilinear2d = nn.UpsamplingBilinear2d(scale_factor=(i, j))
outputCPU = upsample_bilinear2d(inputCPU)
outputMPS = upsample_bilinear2d(inputMPS)
self.assertEqual(outputCPU, outputMPS)
upsample_bilinear2d = nn.UpsamplingBilinear2d((i * H, j * W))
outputCPU = upsample_bilinear2d(inputCPU)
outputMPS = upsample_bilinear2d(inputMPS)
self.assertEqual(outputCPU, outputMPS)
outputCPU.backward(gradient=torch.full_like(outputCPU, 0.3))
outputMPS.backward(gradient=torch.full_like(outputMPS, 0.3))
self.assertEqual(inputCPU.grad, inputMPS.grad)
helper(1, 1, 4, 4)
helper(7, 5, 3, 2)
def test_upsample_nearest1d(self):
def helper(N, C, H, W):
inputCPU = torch.arange(C * H * W, device='cpu', dtype=torch.float,
requires_grad=True).reshape(C, H, W)
inputMPS = inputCPU.detach().clone().to('mps')
outputCPU = torch.nn.functional.interpolate(inputCPU, scale_factor=2.0, mode='nearest')
outputMPS = torch.nn.functional.interpolate(inputMPS, scale_factor=2.0, mode='nearest')
self.assertEqual(outputCPU, outputMPS)
helper(1, 1, 4, 4)
helper(7, 5, 3, 2)
# Test concat forward
def test_cat1(self):
def helper(shape_x, shape_y, shape_z):
cpu_x = torch.randn(shape_x, device='cpu', dtype=torch.float, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
cpu_y = torch.randn(shape_y, device='cpu', dtype=torch.float, requires_grad=False)
y = cpu_y.detach().clone().to('mps')
cpu_z = torch.randn(shape_z, device='cpu', dtype=torch.float, requires_grad=False)
z = cpu_z.detach().clone().to('mps')
cat = torch.cat([x, y, z], dim=1)
cat_cpu = torch.cat([cpu_x, cpu_y, cpu_z], dim=1)
self.assertEqual(cat, cat_cpu)
helper([2, 2, 4, 5], [2, 3, 4, 5], [2, 5, 4, 5])
helper([2, 2, 6, 5], [2, 3, 6, 5], [2, 5, 6, 5])
helper([0, 2, 4, 5], [0, 3, 4, 5], [0, 5, 4, 5])
helper([2, 2, 6, 5], [0], [2, 5, 6, 5])
helper([0], [2, 3, 6, 5], [2, 5, 6, 5])
helper([2, 3, 4, 5], [2, 5, 4, 5], [0])
helper([2, 2, 6, 5], [2, 0, 6, 5], [2, 5, 6, 5])
helper([2, 0, 6, 5], [2, 3, 6, 5], [2, 5, 6, 5])
helper([2, 0, 6, 5], [2, 3, 6, 5], [2, 0, 6, 5])
def test_constant_pad(self):
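# Negative padding values crop the input instead of padding it, so a (-2, -2, -2, -2)
# pad on a 16x16 input yields a 12x12 output.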
m = torch.nn.ConstantPad2d((-2, -2, -2, -2), 3.5)
input_cpu = torch.randn(1, 16, 16, 16)
input_mps = input_cpu.detach().clone().to("mps")
r_cpu = m(input_cpu)
r_mps = m(input_mps)
self.assertEqual(r_cpu, r_mps.to("cpu"))
def test_circular_pad(self):
# https://github.com/pytorch/pytorch/issues/80856
k_cpu = torch.ones(3, 3, 9, 9)
k_mps = k_cpu.detach().clone().to("mps")
x_cpu = torch.rand(1, 3, 32, 32)
x_mps = x_cpu.detach().clone().to("mps")
x_pad_cpu = F.pad(x_cpu, (2, 2, 2, 2), mode='circular')
x_pad_mps = F.pad(x_mps, (2, 2, 2, 2), mode='circular')
y_cpu = F.conv2d(x_pad_cpu, k_cpu)
y_mps = F.conv2d(x_pad_mps, k_mps)
self.assertEqual(y_cpu, y_mps.cpu())
def test_pad(self):
def helper(shape, padding, op, value=0):
inputCPU = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
inputCPU.retain_grad()
inputMPS = inputCPU.detach().clone().to('mps').requires_grad_()
if (op in [nn.ConstantPad1d, nn.ConstantPad2d, nn.ConstantPad3d]):
padCriteria = op(padding, value)
else:
padCriteria = op(padding)
outputCPU = padCriteria(inputCPU)
outputMPS = padCriteria(inputMPS)
self.assertEqual(outputCPU, outputMPS)
# backward pass (chose 0.6 just to have the grad_output != 1)
outputCPU.backward(gradient=torch.full_like(outputCPU, 0.6))
outputMPS.backward(gradient=torch.full_like(outputMPS, 0.6))
self.assertEqual(inputCPU.grad, inputMPS.grad)
# 1D Padding
helper((2, 4, 3), 2, nn.ReflectionPad1d)
# verify that a change in the input shape does not cause problems with graph caching
helper((2, 4, 4), (1, 3), nn.ReflectionPad1d)
# Replication 1D
helper((2, 1, 6), 3, nn.ReplicationPad1d)
# Constant Pad 1D
helper((2, 3, 4), 2, nn.ConstantPad1d)
# 2D Padding
helper((1, 2, 3, 4), (1, 1, 2, 0), nn.ReflectionPad2d)
# verify that a change in the input shape does not cause problems with graph caching
helper((2, 4, 3, 4), (1, 1, 2, 0), nn.ReflectionPad2d)
# this should make the padding (2, 2, 2, 2)
helper((2, 1, 6, 8), 2, nn.ReplicationPad2d)
# verify that a change in the padding shape does not cause problems with graph caching
helper((2, 1, 6, 8), (2, 4, 3, 5), nn.ReplicationPad2d)
# Constant Pad 2D
helper((2, 1, 6, 8), (2, 4, 3, 5), nn.ConstantPad2d)
# 3D Padding
helper((2, 4, 6, 8, 4), (1, 3, 3, 5, 3, 4), nn.ReflectionPad3d)
# verify that a change in the padding shape does not cause problems with graph caching
helper((2, 4, 6, 8, 4), (1, 3, 3, 5, 3, 4), nn.ReplicationPad3d)
# Constant Pad 3D
helper((2, 4, 6, 8, 4), (1, 3, 3, 5, 3, 4), nn.ConstantPad3d)
# Test stack forward
def test_stack(self):
# All shapes must be the same
def helper(shape, dtype=torch.float32):
x, cpu_x = None, None
y, cpu_y = None, None
z, cpu_z = None, None
if(dtype not in [torch.float32, torch.bool]):
cpu_x = torch.randint(50, shape, device='cpu', dtype=dtype, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
cpu_y = torch.randint(50, shape, device='cpu', dtype=dtype, requires_grad=False)
y = cpu_y.detach().clone().to('mps')
cpu_z = torch.randint(50, shape, device='cpu', dtype=dtype, requires_grad=False)
z = cpu_z.detach().clone().to('mps')
elif (dtype == torch.bool):
cpu_x = torch.randint(2, shape, device='cpu', dtype=dtype, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
cpu_y = torch.randint(2, shape, device='cpu', dtype=dtype, requires_grad=False)
y = cpu_y.detach().clone().to('mps')
cpu_z = torch.randint(2, shape, device='cpu', dtype=dtype, requires_grad=False)
z = cpu_z.detach().clone().to('mps')
else:
cpu_x = torch.randn(shape, device='cpu', dtype=dtype, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
cpu_y = torch.randn(shape, device='cpu', dtype=dtype, requires_grad=True)
y = cpu_y.detach().clone().to('mps').requires_grad_()
cpu_z = torch.randn(shape, device='cpu', dtype=dtype, requires_grad=True)
z = cpu_z.detach().clone().to('mps').requires_grad_()
stack = torch.stack([x, y, z], dim=1)
stack_cpu = torch.stack([cpu_x, cpu_y, cpu_z], dim=1)
self.assertEqual(stack, stack_cpu)
helper([2, 8, 4, 5])
helper([2, 8, 4, 5], dtype=torch.float16)
helper([2, 8, 4, 5], dtype=torch.int32)
helper([2, 8, 4, 5], dtype=torch.int64)
helper([2, 8, 4, 5], dtype=torch.bool)
# Empty test - Currently failing! Empty tensor not handled!
# helper([0, 2, 4, 5])
# Test abs
def test_abs(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
abs_result = torch.abs(x)
abs_result_cpu = torch.abs(cpu_x)
self.assertEqual(abs_result, abs_result_cpu)
helper((2, 8, 4, 5))
def test_log(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
log_result = torch.log(x)
log_result_cpu = torch.log(cpu_x)
self.assertEqual(log_result, log_result_cpu)
helper((2, 8, 4, 5))
def test_log_ten(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
log_ten_result = torch.log10(x)
log_ten_result_cpu = torch.log10(cpu_x)
self.assertEqual(log_ten_result, log_ten_result_cpu)
helper((2, 8, 4, 5))
def test_log_two(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
log_two_result = torch.log2(x)
log_two_result_cpu = torch.log2(cpu_x)
self.assertEqual(log_two_result, log_two_result_cpu)
helper((2, 8, 4, 5))
def test_log1p(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
log_result = torch.log1p(x)
log_result_cpu = torch.log1p(cpu_x)
self.assertEqual(log_result, log_result_cpu)
helper((2, 8, 4, 5))
def test_logaddexp(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
cpu_y = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
y = cpu_y.detach().clone().to('mps')
log_result = torch.logaddexp(x, y)
log_result_cpu = torch.logaddexp(cpu_x, cpu_y)
self.assertEqual(log_result, log_result_cpu)
helper((2, 8, 4, 5))
def test_logaddexp2(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
cpu_y = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
y = cpu_y.detach().clone().to('mps')
log_result = torch.logaddexp2(x, y)
log_result_cpu = torch.logaddexp2(cpu_x, cpu_y)
self.assertEqual(log_result, log_result_cpu)
helper((2, 8, 4, 5))
# Test concat forward
def test_cat2(self):
def helper1(shape_x, shape_y, shape_z, shape_w):
cpu_x = torch.randn(shape_x, device='cpu', dtype=torch.float, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
cpu_y = torch.randn(shape_y, device='cpu', dtype=torch.float, requires_grad=False)
y = cpu_y.detach().clone().to('mps')
cpu_z = torch.randn(shape_z, device='cpu', dtype=torch.float, requires_grad=False)
z = cpu_z.detach().clone().to('mps')
cpu_w = torch.randn(shape_w, device='cpu', dtype=torch.float, requires_grad=False)
w = cpu_w.detach().clone().to('mps')
cat = torch.cat([x, y, z, w], dim=1)
cat_cpu = torch.cat([cpu_x, cpu_y, cpu_z, cpu_w], dim=1)
self.assertEqual(cat, cat_cpu)
def helper(shape_x, shape_y, shape_z):
cpu_x = torch.randn(shape_x, device='cpu', dtype=torch.float, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
cpu_y = torch.randn(shape_y, device='cpu', dtype=torch.float, requires_grad=False)
y = cpu_y.detach().clone().to('mps')
cpu_z = torch.randn(shape_z, device='cpu', dtype=torch.float, requires_grad=False)
z = cpu_z.detach().clone().to('mps')
cat = torch.cat([x, y, z], dim=1)
cat_cpu = torch.cat([cpu_x, cpu_y, cpu_z], dim=1)
self.assertEqual(cat, cat_cpu)
helper([2, 8, 4, 5], [2, 10, 4, 5], [2, 6, 4, 5])
helper([2, 2, 4, 5], [2, 3, 4, 5], [2, 5, 4, 5])
# Empty test - Currently failing! Empty tensor not handled!
# helper([0, 2, 4, 5], [2, 0, 4, 5], [2, 5, 0, 5])
# Test isnan
def test_isnan(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
nan_index = [random.randrange(0, shape[0])]
# make a selected row nan
cpu_x.index_put_(indices=[torch.tensor(nan_index)], values=torch.tensor(float('nan')))
x = cpu_x.detach().clone().to('mps')
isnan_result = torch.isnan(x)
isnan_result_cpu = torch.isnan(cpu_x)
self.assertEqual(isnan_result, isnan_result_cpu)
helper((8, 2, 4, 5))
# Test reciprocal
def test_reciprocal(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
reciprocal_result = torch.reciprocal(x)
reciprocal_result_cpu = torch.reciprocal(cpu_x)
cpu_grad = torch.ones_like(reciprocal_result_cpu)
grad = cpu_grad.to('mps')
reciprocal_result.backward(gradient=grad)
reciprocal_result_cpu.backward(gradient=cpu_grad)
self.assertEqual(reciprocal_result, reciprocal_result_cpu)
self.assertEqual(x.grad, cpu_x.grad)
helper((2, 8, 4, 5))
# Test sqrt
def test_sqrt(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
sqrt_result = torch.sqrt(x)
sqrt_result_cpu = torch.sqrt(cpu_x)
cpu_grad = torch.ones_like(sqrt_result_cpu)
grad = cpu_grad.to('mps')
sqrt_result.backward(gradient=grad)
sqrt_result_cpu.backward(gradient=cpu_grad)
self.assertEqual(sqrt_result, sqrt_result_cpu)
self.assertEqual(x.grad, cpu_x.grad)
helper((2, 8, 4, 5))
# Test selu, elu, celu
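# ELU(x) = x for x > 0 and alpha * (exp(x) - 1) otherwise; CELU uses exp(x / alpha),
# and SELU applies fixed alpha and scale constants.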
def test_elu(self):
def helper(shape, alpha=1.0):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
for activation_func in [torch.nn.ELU(alpha=alpha), torch.nn.CELU(alpha=alpha), torch.nn.SELU()]:
elu_result = activation_func(x)
elu_result_cpu = activation_func(cpu_x)
cpu_grad = torch.randn(elu_result_cpu.shape)
grad = cpu_grad.to('mps')
elu_result.backward(gradient=grad)
elu_result_cpu.backward(gradient=cpu_grad)
self.assertEqual(elu_result, elu_result_cpu)
self.assertEqual(x.grad, cpu_x.grad)
# Test empty shape too
for shape in [[], (2, 3), (2, 8, 4, 5)]:
for alpha in [0.000001, 1.0, 2.3, 0.34, 23]:
helper(shape, alpha)
# Test glu
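# GLU splits the input into halves a and b along `dim` and returns a * sigmoid(b),
# so the size of the chosen dimension must be even.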
def test_glu(self):
def helper(shape, dim=0):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
for activation_func in [torch.nn.GLU(dim=dim)]:
glu_result = activation_func(x)
glu_result_cpu = activation_func(cpu_x)
cpu_grad = torch.randn(glu_result_cpu.shape)
grad = cpu_grad.to('mps')
glu_result.backward(gradient=grad)
glu_result_cpu.backward(gradient=cpu_grad)
self.assertEqual(glu_result, glu_result_cpu)
self.assertEqual(x.grad, cpu_x.grad)
for shape in [[4], (2, 4), (2, 8, 4, 6)]:
for dim in range(len(shape)):
helper(shape, dim)
# Test softplus
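# Softplus(x) = (1 / beta) * log(1 + exp(beta * x)); the implementation falls back to
# the linear function x when beta * x exceeds `threshold`, for numerical stability.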
def test_softplus(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
softplus_result = torch.nn.Softplus(beta=0.5, threshold=0.5)(x)
softplus_result_cpu = torch.nn.Softplus(beta=0.5, threshold=0.5)(cpu_x)
cpu_grad = torch.randn(softplus_result.shape)
grad = cpu_grad.to('mps')
softplus_result.backward(gradient=grad)
softplus_result_cpu.backward(gradient=cpu_grad)
self.assertEqual(softplus_result, softplus_result_cpu)
self.assertEqual(x.grad, cpu_x.grad)
# Test empty shape too
for shape in [(), (2, 3), (10, 10), (2, 3, 4, 5)]:
helper(shape)
# Test silu
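# SiLU(x) = x * sigmoid(x) (also known as swish).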
def test_silu(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
silu_result = torch.nn.SiLU()(x)
silu_result_cpu = torch.nn.SiLU()(cpu_x)
cpu_grad = torch.randn(silu_result_cpu.shape)
grad = cpu_grad.to('mps')
silu_result.backward(gradient=grad)
silu_result_cpu.backward(gradient=cpu_grad)
self.assertEqual(silu_result, silu_result_cpu)
self.assertEqual(x.grad, cpu_x.grad)
# Test empty shape too
for shape in [[], (2, 3), (2, 8, 4, 5)]:
helper(shape)
# Test adaptive avg pool2d - when the input size is a multiple of output size
# Not testing for channels last right now
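# When the input size is an integer multiple of the output size, adaptive pooling is
# equivalent to regular pooling with kernel_size = stride = input_size // output_size.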
def test_adaptive_avg_pool2d_simple(self):
def helper(input_shape, out_shape, channels_last):
cpu_x = torch.randn(input_shape, device='cpu', dtype=torch.float, requires_grad=True)
if(channels_last):
cpu_x = cpu_x.to(memory_format=torch.channels_last)
cpu_x.retain_grad()
x = cpu_x.detach().clone().to('mps').requires_grad_()
avg_result = torch.nn.AdaptiveAvgPool2d(out_shape)(x)
avg_result_cpu = torch.nn.AdaptiveAvgPool2d(out_shape)(cpu_x)
cpu_grad = torch.randn(avg_result_cpu.shape)
grad = cpu_grad.to('mps')
avg_result.backward(gradient=grad)
avg_result_cpu.backward(gradient=cpu_grad)
self.assertEqual(avg_result, avg_result_cpu)
self.assertEqual(x.grad, cpu_x.grad)
helper((2, 2, 4, 4), (2, 2), False)
helper((2, 2, 9, 9), (3, 3), False)
helper((2, 2, 9, 9), (9, 9), False)
helper((2, 2, 16, 16), (2, 2), False)
helper((2, 2, 16, 16), (2, 16), False)
helper((2, 16, 16), (4, 4), False)
# Test adaptive max pool2d - when the input size is a multiple of the output size
# Not testing for channels last right now
def test_adaptive_max_pool2d_simple(self):
def helper(input_shape, out_shape, return_indices, dtype, channels_last=False):
cpu_x = None
if(dtype in [torch.float16, torch.float32]):
cpu_x = torch.randn(input_shape, device='cpu', dtype=dtype, requires_grad=True)
else:
cpu_x = torch.randint(50, input_shape, device='cpu', dtype=dtype, requires_grad=True)
if(channels_last):
cpu_x = cpu_x.to(memory_format=torch.channels_last)
cpu_x.retain_grad()
x = cpu_x.detach().clone().to('mps').requires_grad_()
max_result, max_indices = None, None
max_result_cpu, max_indices_cpu = None, None
if(return_indices):
max_result, max_indices = torch.nn.AdaptiveMaxPool2d(out_shape, return_indices)(x)
max_result_cpu, max_indices_cpu = torch.nn.AdaptiveMaxPool2d(out_shape, return_indices)(cpu_x)
else:
max_result = torch.nn.AdaptiveMaxPool2d(out_shape, return_indices)(x)
max_result_cpu = torch.nn.AdaptiveMaxPool2d(out_shape, return_indices)(cpu_x)
cpu_grad = torch.randn(max_result_cpu.shape)
grad = cpu_grad.to('mps')
max_result.backward(gradient=grad)
max_result_cpu.backward(gradient=cpu_grad)
self.assertEqual(max_result, max_result_cpu)
if(return_indices):
self.assertEqual(max_indices, max_indices_cpu)
self.assertEqual(x.grad, cpu_x.grad)
for dtype in [torch.float32]:
for return_indices in [False, True]:
helper((2, 2, 4, 4), (2, 2), return_indices, dtype)
helper((2, 2, 9, 9), (3, 3), return_indices, dtype)
helper((2, 2, 9, 9), (9, 9), return_indices, dtype)
helper((2, 2, 16, 16), (2, 2), return_indices, dtype)
helper((2, 2, 16, 16), (2, 16), return_indices, dtype)
helper((2, 16, 16), (4, 4), return_indices, dtype)
def test_gelu_simple(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
gelu_result = torch.nn.GELU()(x)
gelu_result_cpu = torch.nn.GELU()(cpu_x)
cpu_grad = torch.ones_like(gelu_result_cpu)
grad = cpu_grad.to('mps')
gelu_result.backward(gradient=grad)
gelu_result_cpu.backward(gradient=cpu_grad)
self.assertEqual(gelu_result, gelu_result_cpu)
self.assertEqual(x.grad, cpu_x.grad)
# Test empty shape too
for shape in [(0, 3), [], (2, 3), (2, 8, 4, 5)]:
helper(shape)
def test_gelu(self):
def _test_gelu(n, m, dtype, contiguous, atol=None, rtol=None):
numpy_dtype = {
torch.bfloat16: torch.float, torch.float: torch.float, torch.double: torch.double
}[dtype]
devices = ['cpu']
devices += ['mps']
def _gelu_ref(X):
return X * stats.norm.cdf(X)
for d in devices:
X = torch.rand(n, m, dtype=dtype, requires_grad=True, device=d)[:, ::2]
res = X
ref = (X.to(numpy_dtype).cpu().detach().numpy())
self.assertEqual(res, ref, rtol=rtol, atol=atol, exact_dtype=False)
for n in [1, 5, 10]:
for m in [1, 5, 10]:
_test_gelu(n, m, torch.float32, True)
_test_gelu(n, m, torch.float32, False)
# Test multi threaded
num_threads = torch.get_num_threads()
torch.set_num_threads(4)
try:
_test_gelu(32, 32, torch.float32, False)
finally:
torch.set_num_threads(num_threads)
# Test hardtanh
def test_hardtanh(self):
def helper(shape, min_val, max_val, inplace=False):
cpu_x = None
x = None
if(not inplace):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
else:
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
hardtanh_result = torch.nn.Hardtanh(min_val=min_val, max_val=max_val, inplace=inplace)(x)
hardtanh_result_cpu = torch.nn.Hardtanh(min_val=min_val, max_val=max_val, inplace=inplace)(cpu_x)
self.assertEqual(hardtanh_result, hardtanh_result_cpu)
if(not inplace):
cpu_grad = torch.randn(hardtanh_result_cpu.shape)
grad = cpu_grad.to('mps')
hardtanh_result.backward(gradient=grad)
hardtanh_result_cpu.backward(gradient=cpu_grad)
self.assertEqual(x.grad, cpu_x.grad)
# Test empty shape too
for shape in [(0, 3), [], (2, 3), (2, 8, 4, 5)]:
for min_val, max_val in zip([-1, -2, 3], [1, -1, 4]):
helper(shape, min_val, max_val)
helper(shape, min_val, max_val, inplace=True)
def test_transpose_2D(self):
values = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]
values1 = [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]
cpu_x = torch.tensor(values, device='cpu')
mps_x = torch.tensor(values, device='mps')
mps_x1 = torch.tensor(values1, device='mps')
cpu_transpose = torch.transpose(cpu_x, 0, 1)
mps_transpose = torch.transpose(mps_x, 0, 1)
self.assertEqual(cpu_transpose, mps_transpose.to('cpu'))
def test_transpose_3D(self):
values = [[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], [[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]]]
cpu_x = torch.tensor(values, device='cpu')
mps_x = torch.tensor(values, device='mps')
cpu_transpose1 = torch.transpose(cpu_x, 0, 1)
mps_transpose1 = torch.transpose(mps_x, 0, 1).to('cpu')
self.assertEqual(cpu_transpose1, mps_transpose1)
cpu_transpose2 = torch.transpose(cpu_x, 0, 2)
mps_transpose2 = torch.transpose(mps_x, 0, 2).to('cpu')
self.assertEqual(cpu_transpose2, mps_transpose2)
cpu_transpose3 = torch.transpose(cpu_x, 1, 2)
mps_transpose3 = torch.transpose(mps_x, 1, 2).to('cpu')
self.assertEqual(cpu_transpose3, mps_transpose3)
def test_transpose_4D(self):
values = [[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], [[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]]],
[[[13.0, 14.0, 15.0], [16.0, 17.0, 18.0]], [[19.0, 20.0, 21.0], [22.0, 23.0, 24.0]]]]
cpu_x = torch.tensor(values, device='cpu')
mps_x = torch.tensor(values, device='mps')
cpu_transpose1 = torch.transpose(cpu_x, 0, 1)
mps_transpose1 = torch.transpose(mps_x, 0, 1).to('cpu')
self.assertEqual(cpu_transpose1, mps_transpose1)
cpu_transpose2 = torch.transpose(cpu_x, 0, 2)
mps_transpose2 = torch.transpose(mps_x, 0, 2).to('cpu')
self.assertEqual(cpu_transpose2, mps_transpose2)
cpu_transpose3 = torch.transpose(cpu_x, 0, 3)
mps_transpose3 = torch.transpose(mps_x, 0, 3).to('cpu')
self.assertEqual(cpu_transpose3, mps_transpose3)
cpu_transpose4 = torch.transpose(cpu_x, 3, 1)
mps_transpose4 = torch.transpose(mps_x, 3, 1).to('cpu')
self.assertEqual(cpu_transpose4, mps_transpose4)
cpu_transpose5 = torch.transpose(cpu_x, 3, 2)
mps_transpose5 = torch.transpose(mps_x, 3, 2).to('cpu')
self.assertEqual(cpu_transpose5, mps_transpose5)
cpu_transpose6 = torch.transpose(cpu_x, 1, 2)
mps_transpose6 = torch.transpose(mps_x, 1, 2).to('cpu')
self.assertEqual(cpu_transpose6, mps_transpose6)
# Test sign
def test_sign(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
sign_result = torch.sign(x)
sign_result_cpu = torch.sign(cpu_x)
cpu_grad = torch.ones_like(sign_result_cpu)
grad = cpu_grad.to('mps')
sign_result.backward(gradient=grad)
sign_result_cpu.backward(gradient=cpu_grad)
self.assertEqual(sign_result, sign_result_cpu)
helper((2, 8, 4, 5))
# Test neg
def test_neg(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
neg_result = torch.neg(x)
neg_result_cpu = torch.neg(cpu_x)
cpu_grad = torch.ones_like(neg_result_cpu)
grad = cpu_grad.to('mps')
neg_result.backward(gradient=grad)
neg_result_cpu.backward(gradient=cpu_grad)
self.assertEqual(neg_result, neg_result_cpu)
helper((2, 8, 4, 5))
# Test index add
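# index_add accumulates alpha * source slices into x at the positions given by `index`
# along `dim`, e.g. for dim=0: x[index[i]] += alpha * source[i].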
def test_index_add(self):
def helper(shape, dim, index, source_shape, alpha, idx_dtype=torch.int32):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
cpu_idx = torch.tensor(index, device='cpu', dtype=idx_dtype)
idx = cpu_idx.detach().clone().to('mps')
cpu_source = torch.randn(source_shape, device='cpu', dtype=torch.float, requires_grad=False)
source = cpu_source.detach().clone().to('mps')
idx_result = torch.index_add(x, dim=dim, index=idx, source=source, alpha=alpha)
idx_result_cpu = torch.index_add(cpu_x, dim=dim, index=cpu_idx, source=cpu_source, alpha=alpha)
self.assertEqual(idx_result, idx_result_cpu)
helper((2, 8, 4, 5), 0, [0, 1, 0], (3, 8, 4, 5), 5)
helper((8, 8, 4, 5), 0, [7], (1, 8, 4, 5), 6.0)
helper((2, 8, 4, 5), 1, [0, 3, 7], (2, 3, 4, 5), 5)
helper((2, 8, 4, 5), 2, [3, 0], (2, 8, 2, 5), 3.0)
helper((2, 8, 4, 5), 3, [2, 3, 0], (2, 8, 4, 3), 4)
helper((2, 3, 3), -1, [1, 2], (2, 3, 2), 6.0)
# test result dim=1
helper((2,), 0, [1], (1,), 6.0)
helper(2, 0, 1, 1, 6)
# Test flip
def test_flip(self):
def helper(shape, dims):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
flip_result = torch.flip(x, dims=dims)
flip_result_cpu = torch.flip(cpu_x, dims=dims)
self.assertEqual(flip_result, flip_result_cpu)
helper((2, 8, 4, 5), [0])
helper((8, 8, 4, 5), [0, 1])
helper((2, 8, 4, 5), (0, 1, 2, 3))
helper((2, 3, 3), (-1,))
# empty dims
helper((2, 8, 4, 5), [])
# input.numel() == 1
helper((1,), (0,))
# input.numel() == 0
helper((0,), (0,))
# Test index select
def test_index_select(self):
def helper(shape, dim, index, idx_dtype=torch.int32):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
cpu_idx = torch.tensor(index, device='cpu', dtype=idx_dtype)
idx = cpu_idx.detach().clone().to('mps')
idx_result = torch.index_select(x, dim=dim, index=idx)
idx_result_cpu = torch.index_select(cpu_x, dim=dim, index=cpu_idx)
self.assertEqual(idx_result, idx_result_cpu)
helper((2, 8, 4, 5), 0, [1])
helper((8, 8, 4, 5), 0, [0, 3, 2, 7, 6])
helper((2, 8, 4, 5), 1, [0, 3, 2, 7, 6])
helper((2, 8, 4, 5), 2, [3, 0, 1])
helper((2, 8, 4, 5), 3, [2, 3, 0])
helper((2, 3, 3), -1, [1, 2])
def test_embedding_dense_backward(self):
def helper(n, d, m):
embeddingMPS = nn.Embedding(n, d, max_norm=True, device='mps')
W_MPS = torch.randn((m, d), requires_grad=True, device='mps')
idx_MPS = torch.tensor([0, 1, 2]).to('mps')
a_MPS = embeddingMPS.weight.clone() @ W_MPS.t() # weight must be cloned for this to be differentiable
a_MPS.retain_grad()
b_MPS = embeddingMPS(idx_MPS) @ W_MPS.t() # modifies weight in-place
b_MPS.retain_grad()
out_MPS = (a_MPS.unsqueeze(0) + b_MPS.unsqueeze(1))
loss_MPS = out_MPS.sigmoid().prod()
loss_MPS.backward()
embeddingCPU = nn.Embedding(n, d, max_norm=True, scale_grad_by_freq=True)
W_CPU = W_MPS.to('cpu')
idx_CPU = torch.tensor([0, 1, 2])
a_CPU = embeddingCPU.weight.clone() @ W_CPU.t() # weight must be cloned for this to be differentiable
a_CPU.retain_grad()
b_CPU = embeddingCPU(idx_CPU) @ W_CPU.t() # modifies weight in-place
b_CPU.retain_grad()
out_CPU = (a_CPU.unsqueeze(0) + b_CPU.unsqueeze(1))
loss_CPU = out_CPU.sigmoid().prod()
loss_CPU.backward()
self.assertEqual(b_CPU.grad, b_MPS.grad)
self.assertEqual(a_CPU.grad, a_MPS.grad)
helper(3, 5, 7)
# Test pytorch gather
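# For dim=0, gather computes out[i][j][k] = x[index[i][j][k]][j][k]; the output has the
# same shape as the index tensor.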
def test_gather(self):
def helper(shape, dim, idx_shape, idx_dtype=torch.int64):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
# Indices should be taken from the range of the axis along which gathering is done
idx_np = np.random.randint(0, shape[dim], idx_shape)
cpu_idx = torch.tensor(idx_np, device='cpu', dtype=idx_dtype)
idx = cpu_idx.detach().clone().to('mps')
gather_result = torch.gather(x, dim=dim, index=idx)
gather_result_cpu = torch.gather(cpu_x, dim=dim, index=cpu_idx)
cpu_grad = torch.randn(idx_shape, device='cpu', dtype=torch.float)
grad = cpu_grad.to('mps')
gather_result.backward(gradient=grad)
gather_result_cpu.backward(gradient=cpu_grad)
self.assertEqual(gather_result, gather_result_cpu)
self.assertEqual(cpu_x.grad, x.grad)
helper((6, 3, 3), 0, (3, 3, 3))
helper((2, 3, 3, 3), 0, (10, 3, 3, 3))
helper((2, 8, 4, 5), 0, (10, 8, 4, 5))
helper((2, 8, 4, 5), 0, (10, 6, 3, 2))
helper((8, 8, 4, 5), 0, (6, 8, 4, 5))
helper((8, 8, 4, 5), 0, (6, 7, 2, 3))
helper((2, 8, 4, 5), 1, (2, 5, 3, 4))
helper((2, 8, 4, 5), 2, (1, 8, 10, 3))
helper((2, 8, 4, 5), 3, (2, 5, 3, 12))
# Test pytorch scatter_add and scatter
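# For dim=0, scatter_add computes x[index[i][j][k]][j][k] += src[i][j][k], while scatter
# overwrites the destination instead of accumulating.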
def test_scatter_add(self):
def helper(shape, dim, idx_shape, src_shape, idx_dtype=torch.int64, do_add=True):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
cpu_src = torch.randn(src_shape, device='cpu', dtype=torch.float, requires_grad=True)
src = cpu_src.detach().clone().to('mps').requires_grad_()
# Indices should be taken from the range of the axis along which scattering is done
idx_np = None
if(do_add):
idx_np = np.random.randint(0, shape[dim], idx_shape)
else:
idx_np = np.array([[0, 1, 2],
[1, 2, 3],
[2, 3, 4],
[3, 4, 5],
[4, 5, 6]])
cpu_idx = torch.tensor(idx_np, device='cpu', dtype=idx_dtype)
idx = cpu_idx.detach().clone().to('mps')
scatter_result = None
scatter_result_cpu = None
if(do_add):
scatter_result = torch.scatter_add(x, dim=dim, index=idx, src=src)
scatter_result_cpu = torch.scatter_add(cpu_x, dim=dim, index=cpu_idx, src=cpu_src)
else:
scatter_result = torch.scatter(x, dim=dim, index=idx, src=src)
scatter_result_cpu = torch.scatter(cpu_x, dim=dim, index=cpu_idx, src=cpu_src)
cpu_grad = None
grad = None
if(idx_shape == src_shape):
cpu_grad = torch.randn(shape, device='cpu', dtype=torch.float)
grad = cpu_grad.to('mps')
scatter_result.backward(gradient=grad)
scatter_result_cpu.backward(gradient=cpu_grad)
self.assertEqual(scatter_result, scatter_result_cpu)
if(idx_shape == src_shape):
self.assertEqual(cpu_x.grad, x.grad)
self.assertEqual(cpu_src.grad, src.grad)
helper((2, 3), 0, (5, 3), (5, 3))
helper((2, 8, 4, 5), 0, (10, 8, 4, 5), (10, 8, 4, 5))
helper((8, 8, 4, 5), 0, (10, 8, 4, 5), (10, 8, 4, 5))
helper((8, 8, 4, 5), 0, (4, 7, 3, 2), (4, 7, 3, 2))
helper((8, 8, 4, 5), 0, (4, 6, 3, 2), (4, 7, 3, 2))
helper((8, 8, 4, 5), 0, (4, 6, 3, 2), (8, 8, 4, 5))
helper((2, 8, 4, 5), 1, (2, 20, 4, 5), (2, 20, 4, 5))
helper((2, 8, 4, 5), 1, (2, 13, 3, 2), (2, 13, 3, 2))
helper((8, 8, 4, 5), 1, (6, 5, 2, 3), (6, 5, 2, 3))
helper((8, 8, 4, 5), 1, (3, 4, 2, 2), (6, 5, 2, 3))
helper((4, 5, 9, 8), 2, (4, 5, 13, 8), (4, 5, 13, 8))
helper((4, 5, 9, 8), 2, (3, 4, 10, 6), (3, 4, 10, 6))
helper((4, 5, 9, 8), 2, (3, 3, 7, 5), (3, 4, 10, 6))
# Test scatter src
helper((8, 3), 0, (5, 3), (5, 3), do_add=False)
helper((10, 3), 0, (5, 3), (5, 8), do_add=False)
# Test pytorch scatter_reduce
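# Passing reduce= to torch.scatter applies the reduction ("add" or "multiply") at
# colliding indices instead of overwriting the destination values.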
def test_scatter_reduce(self):
def helper(shape, dim, idx_shape, src_shape, idx_dtype=torch.int64, reduce_str="sum"):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
cpu_src = torch.randn(src_shape, device='cpu', dtype=torch.float, requires_grad=True)
src = cpu_src.detach().clone().to('mps').requires_grad_()
# Indices should be taken from the range of the axis along which scattering is done
idx_np = np.random.randint(0, shape[dim], idx_shape)
cpu_idx = torch.tensor(idx_np, device='cpu', dtype=idx_dtype)
idx = cpu_idx.detach().clone().to('mps')
scatter_result = torch.scatter(x, dim=dim, index=idx, src=src, reduce=reduce_str)
scatter_result_cpu = torch.scatter(cpu_x, dim=dim, index=cpu_idx, src=cpu_src, reduce=reduce_str)
self.assertEqual(scatter_result, scatter_result_cpu)
# for reduce in ["sum", "prod", "amax", "amin"]:
for reduce in ["add", "multiply"]:
helper((2, 3), 0, (5, 3), (5, 3), reduce_str=reduce)
helper((2, 8, 4, 5), 0, (10, 8, 4, 5), (10, 8, 4, 5), reduce_str=reduce)
helper((8, 8, 4, 5), 0, (10, 8, 4, 5), (10, 8, 4, 5), reduce_str=reduce)
helper((8, 8, 4, 5), 0, (4, 7, 3, 2), (4, 7, 3, 2), reduce_str=reduce)
helper((8, 8, 4, 5), 0, (4, 6, 3, 2), (4, 7, 3, 2), reduce_str=reduce)
helper((8, 8, 4, 5), 0, (4, 6, 3, 2), (8, 8, 4, 5), reduce_str=reduce)
helper((2, 8, 4, 5), 1, (2, 20, 4, 5), (2, 20, 4, 5), reduce_str=reduce)
helper((2, 8, 4, 5), 1, (2, 13, 3, 2), (2, 13, 3, 2), reduce_str=reduce)
helper((8, 8, 4, 5), 1, (6, 5, 2, 3), (6, 5, 2, 3), reduce_str=reduce)
helper((8, 8, 4, 5), 1, (3, 4, 2, 2), (6, 5, 2, 3), reduce_str=reduce)
helper((4, 5, 9, 8), 2, (4, 5, 13, 8), (4, 5, 13, 8), reduce_str=reduce)
helper((4, 5, 9, 8), 2, (3, 4, 10, 6), (3, 4, 10, 6), reduce_str=reduce)
helper((4, 5, 9, 8), 2, (3, 3, 7, 5), (3, 4, 10, 6), reduce_str=reduce)
def test_is_nonzero(self):
self.assertFalse(torch.is_nonzero(torch.tensor([0.]).to('mps')))
self.assertTrue(torch.is_nonzero(torch.tensor([1.5]).to('mps')))
self.assertFalse(torch.is_nonzero(torch.tensor([False]).to('mps')))
self.assertTrue(torch.is_nonzero(torch.tensor([3]).to('mps')))
# Test triu
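# triu keeps the elements on and above the `diag`-th diagonal and zeroes the rest;
# a positive diag shifts the retained band up, a negative one includes bands below.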
def test_triu(self):
def helper(shape, diag=0):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
triu_result = torch.triu(x, diag)
triu_result_cpu = torch.triu(cpu_x, diag)
cpu_grad = torch.randn(triu_result_cpu.shape)
grad = cpu_grad.to('mps')
triu_result.backward(gradient=grad)
triu_result_cpu.backward(gradient=cpu_grad)
self.assertEqual(triu_result, triu_result_cpu)
self.assertEqual(x.grad, cpu_x.grad)
helper((2, 8, 4, 5))
helper((2, 8, 4, 5), diag=1)
helper((2, 8, 4, 5), diag=2)
helper((2, 8, 4, 5), diag=3)
helper((2, 8, 4, 5), diag=-1)
helper((2, 8, 4, 5), diag=-2)
helper((2, 8, 4, 5), diag=-3)
# Test tril
def test_tril(self):
def helper(shape, diag=0):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
tril_result = torch.tril(x, diag)
tril_result_cpu = torch.tril(cpu_x, diag)
cpu_grad = torch.randn(tril_result_cpu.shape)
grad = cpu_grad.to('mps')
tril_result.backward(gradient=grad)
tril_result_cpu.backward(gradient=cpu_grad)
self.assertEqual(tril_result, tril_result_cpu)
self.assertEqual(x.grad, cpu_x.grad)
helper((2, 8, 4, 5))
helper((2, 8, 4, 5), diag=1)
helper((2, 8, 4, 5), diag=2)
helper((2, 8, 4, 5), diag=3)
helper((2, 8, 4, 5), diag=-1)
helper((2, 8, 4, 5), diag=-2)
helper((2, 8, 4, 5), diag=-3)
# test eye
def test_eye(self):
def helper(n, m, dtype):
cpu_result = None
result = None
if(n == m):
cpu_result = torch.eye(n, dtype=dtype, device='cpu')
result = torch.eye(n, dtype=dtype, device='mps')
else:
cpu_result = torch.eye(n, m, device='cpu')
result = torch.eye(n, m, device='mps')
self.assertEqual(result, cpu_result)
for dtype in [torch.float32, torch.int32, torch.int64]:
helper(2, 2, dtype)
helper(2, 3, dtype)
helper(0, 2, dtype)
helper(0, 0, dtype)
helper(3, 8, dtype)
helper(8, 3, dtype)
# Test diag
def test_diag(self):
def helper(shape, diag=0):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
diag_result = torch.diag(x, diag)
diag_result_cpu = torch.diag(cpu_x, diag)
# cpu_grad = torch.randn(diag_result_cpu.shape)
# grad = cpu_grad.to('mps')
# diag_result.backward(gradient=grad)
# diag_result_cpu.backward(gradient=cpu_grad)
self.assertEqual(diag_result, diag_result_cpu)
# self.assertEqual(x.grad, cpu_x.grad)
for shape in [(5, 5), (5, 6), (6, 5), (5,), (6,)]:
for diag in [0, 1, 2, 3, 4, -1, -2, -3, -4]:
helper(shape, diag=diag)
# Test linspace
def test_linspace(self):
def helper(start, end, steps, dtype=torch.float32):
cpu_result = torch.tensor(np.linspace(start, end, steps), dtype=dtype)
result = torch.linspace(start, end, steps, dtype=dtype, device='mps')
self.assertEqual(cpu_result, result)
for dtype in [torch.float32, torch.int32, torch.uint8, torch.int64]:
helper(2, 5, 10, dtype)
helper(2, 2, 10, dtype)
helper(5, 2, 10, dtype)
helper(2, 2, 0, dtype)
# Test argange
def test_arange(self):
self.assertEqual(np.arange(10), torch.arange(10, device='mps'))
self.assertEqual(np.arange(7, 1, -1), torch.arange(7, 1, -1, device='mps'))
self.assertEqual(np.arange(1, 2, .3, dtype=np.float32), torch.arange(1, 2, .3, device='mps'))
self.assertEqual(np.arange(6.3, dtype=np.float32), torch.arange(6.3, device='mps'))
# Test softmax
def test_softmax(self):
def helper(shape, dim, channels_last=False):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
if(channels_last):
cpu_x = cpu_x.to(memory_format=torch.channels_last)
cpu_x.retain_grad()
x = cpu_x.detach().clone().to('mps').requires_grad_()
softmax_result = torch.nn.functional.softmax(x, dim=dim)
softmax_result_cpu = torch.nn.functional.softmax(cpu_x, dim=dim)
# Currently NOT testing backward for channels last backward
cpu_grad = None
grad = None
if(not channels_last):
cpu_grad = torch.randn(shape, device='cpu', dtype=torch.float)
grad = cpu_grad.to('mps')
softmax_result.backward(gradient=grad)
softmax_result_cpu.backward(gradient=cpu_grad)
self.assertEqual(softmax_result, softmax_result_cpu)
if(not channels_last):
self.assertEqual(x.grad, cpu_x.grad)
def helper2(dim):
cpu_x = torch.tensor(1.23, device='cpu', dtype=torch.float, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
softmax_result = torch.nn.functional.softmax(x, dim=dim)
softmax_result_cpu = torch.nn.functional.softmax(cpu_x, dim=dim)
cpu_grad = torch.tensor(2.34, device='cpu', dtype=torch.float)
grad = cpu_grad.to('mps')
softmax_result.backward(gradient=grad)
softmax_result_cpu.backward(gradient=cpu_grad)
self.assertEqual(softmax_result, softmax_result_cpu)
self.assertEqual(x.grad, cpu_x.grad)
helper2(0)
for channels_last in [False]:
for shape in [(2, 4, 8, 5), (3, 4, 6, 7, 2)]:
if(len(shape) != 4 and channels_last):
continue
for dim in [0, 1, 2, 3, -1, -2, -3]:
helper(shape, dim, channels_last)
# Test sub
def test_sub(self):
def helper(shape, alpha):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
cpu_y = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
y = cpu_y.detach().clone().to('mps')
cpu_out = torch.sub(cpu_x, cpu_y, alpha=alpha)
out = torch.sub(x, y, alpha=alpha)
self.assertEqual(out, cpu_out)
helper((2, 8, 4, 5), 0.1)
helper((2, 8, 3, 5), 0.1)
helper((2, 8, 3, 5), 0.2)
# Test where
def test_where(self):
def helper(shape, x_shape, y_shape, cond_dtype=torch.bool, x_dtype=torch.float):
cpu_cond = torch.randint(2, shape, device='cpu', dtype=cond_dtype, requires_grad=False)
cond = cpu_cond.detach().clone().to('mps')
cpu_x = torch.randn(x_shape, device='cpu', dtype=x_dtype, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
cpu_y = torch.randn(y_shape, device='cpu', dtype=x_dtype, requires_grad=True)
y = cpu_y.detach().clone().to('mps').requires_grad_()
cpu_out = torch.where(cpu_cond, cpu_x, cpu_y)
out = torch.where(cond, x, y)
cpu_grad = torch.randn(cpu_out.shape)
grad = cpu_grad.to('mps')
cpu_out.backward(gradient=cpu_grad)
out.backward(gradient=grad)
self.assertEqual(out, cpu_out)
self.assertEqual(x.grad, cpu_x.grad)
self.assertEqual(y.grad, cpu_y.grad)
for shape in ([(0, 3), [], (2, 3), (9,)]):
helper(shape, shape, shape)
helper((2, 3, 1), (2, 3, 4), (2, 1, 4))
helper((2, 1, 1), (2, 3, 4), (1, 3, 4))
helper((1, 1, 1), (1, 1, 4), (2, 3, 1))
helper([], (1, 1, 4), (2, 3, 1))
helper([], (2, 3, 4), [])
# Test normal
def test_normal(self):
def helper(shape, mean=0.0, std=1.0):
mps_out = torch.normal(mean, std, shape, device='mps')
mean_array = np.ones(shape)
mean_array *= mean
cpu_mean_tensor = torch.tensor(mean_array, device='cpu', dtype=torch.float, requires_grad=False)
mean_tensor = cpu_mean_tensor.detach().clone().to('mps')
std_array = np.ones(shape)
std_array *= std
cpu_std_tensor = torch.tensor(std_array, device='cpu', dtype=torch.float, requires_grad=False)
std_tensor = cpu_std_tensor.detach().clone().to('mps')
# test out
mps_out = torch.zeros(shape, device='mps')
torch.normal(mean_tensor, std, out=mps_out)
mps_out = torch.zeros(shape, device='mps')
torch.normal(mean, std_tensor, out=mps_out)
mps_out = torch.zeros(shape, device='mps')
torch.normal(mean_tensor, std_tensor, out=mps_out)
# test without out
mps_out = torch.normal(mean_tensor, std)
self.assertEqual(mps_out.size(), mean_tensor.size())
mps_out = torch.normal(mean, std_tensor)
self.assertEqual(mps_out.size(), std_tensor.size())
inferred_shape = torch.broadcast_shapes(mean_tensor.size(), std_tensor.size())
mps_out = torch.normal(mean_tensor, std_tensor)
self.assertEqual(mps_out.size(), inferred_shape)
helper((2, 3, 4, 5, 6))
helper((100, 100), 2.5, 1.2)
def test_bernoulli(self):
def helper(shape, prob=0.5):
prob_array = np.ones(shape)
prob_array *= prob
cpu_prob_tensor = torch.tensor(prob_array, device='cpu', dtype=torch.float, requires_grad=False)
prob_tensor = cpu_prob_tensor.detach().clone().to('mps')
mps_out = torch.bernoulli(prob_tensor)
# We can't check reliably the mean and std.
# Just make sure we don't return constant values
self.assertNotEqual(mps_out.to('cpu').mean(), 0.)
self.assertNotEqual(mps_out.to('cpu').std() ** 2, 0.)
mps_out = torch.zeros(shape, device='mps')
mps_out = torch.bernoulli(mps_out, prob)
self.assertNotEqual(mps_out.to('cpu').mean(), 0.)
self.assertNotEqual(mps_out.to('cpu').std(), 0.)
helper((100, 100), 0.50)
helper((100, 100), 0.76)
helper((100, 100), 0.23)
# Test random integer generation via torch.randint
def test_random(self):
def helper(shape, low, high, dtype=torch.int32):
mps_out = torch.randint(low, high, shape, dtype=dtype, device='mps')
# We can't check reliably the mean and std.
# Just make sure we don't return constant values
self.assertNotEqual(mps_out.to('cpu').float().mean(), 0.)
self.assertNotEqual(mps_out.to('cpu').float().std(), 0.)
helper([100, 100], 0, 10)
helper([100, 100], 23, 89)
helper([100, 100], 23, 89, dtype=torch.float32)
helper([100, 100], 23, 89, dtype=torch.int64)
helper([100, 100], 0, 2, dtype=torch.bool)
# Test exponential
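# For an exponential distribution with rate lambda, the mean is 1 / lambda and the
# variance is 1 / lambda**2, which are the reference values printed below.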
def test_exponential(self):
def helper(shape, lamda, dtype=torch.float32):
mps_out = torch.zeros(shape, device='mps', dtype=dtype)
mps_out.exponential_(lamda)
print(mps_out.to('cpu').float().mean(), 1 / lamda)
print(mps_out.to('cpu').float().std() ** 2, 1 / (lamda**2))
for dtype in [torch.float32, torch.float16]:
helper([100, 100], 2, dtype)
helper([100, 100], 1, dtype)
helper([100, 100], 3, dtype)
helper([100, 100], 0.5, dtype)
def test_exponential_1(self):
rate = torch.randn(5, 5).abs().requires_grad_()
rate_1d = torch.randn(1).abs().requires_grad_()
self.assertEqual(Exponential(rate).sample().size(), (5, 5))
self.assertEqual(Exponential(rate).sample((7,)).size(), (7, 5, 5))
self.assertEqual(Exponential(rate_1d).sample((1,)).size(), (1, 1))
self.assertEqual(Exponential(rate_1d).sample().size(), (1,))
self.assertEqual(Exponential(0.2).sample((1,)).size(), (1,))
self.assertEqual(Exponential(50.0).sample((1,)).size(), (1,))
# Test add
def test_add_binary_op(self):
def helper(shape, alpha):
for dtype in [torch.float16, torch.float32]:
cpu_x = torch.randn(shape, device='cpu', dtype=dtype, requires_grad=False)
mps_x = cpu_x.detach().clone().to('mps')
cpu_y = torch.randn(shape, device='cpu', dtype=dtype, requires_grad=False)
mps_y = cpu_y.detach().clone().to('mps')
cpu_out = torch.add(cpu_x, cpu_y, alpha=alpha)
mps_out = torch.add(mps_x, mps_y, alpha=alpha)
# fp16 isn't accurate when alpha is passed
# TODO: remove or fix 'tol' when we fix problems with fp16
tol = 1e-3 if dtype is torch.float16 else None
self.assertEqual(mps_out, cpu_out, rtol=tol, atol=tol)
# create a scalar tensor
cpu_s = torch.tensor(2.3, device='cpu', dtype=dtype, requires_grad=False)
mps_s = cpu_s.detach().clone().to('mps')
# primary tensor is scalar
self.assertEqual(torch.add(cpu_s, cpu_y), torch.add(mps_s, mps_y))
# secondary tensor is scalar
self.assertEqual(torch.add(cpu_x, cpu_s), torch.add(mps_x, mps_s))
helper((2, 8, 4, 5), 1.0)
helper((2, 8, 4, 5), 0.0)
helper((2, 8, 4, 5), 0.1)
helper((2, 8, 3, 5), 0.1)
helper((2, 8, 3, 5), 0.2)
    # Test add with scalar inputs
def test_add_scalars(self):
def helper(alpha):
for dtype in [torch.float16, torch.float32]:
cpu_x = torch.tensor(2.3, device='cpu', dtype=dtype, requires_grad=False)
x = cpu_x.detach().clone().to('mps')
cpu_y = torch.tensor(3.4, device='cpu', dtype=dtype, requires_grad=False)
y = cpu_y.detach().clone().to('mps')
cpu_out = torch.add(cpu_x, cpu_y, alpha=alpha)
out = torch.add(x, y, alpha=alpha)
# fp16 isn't accurate when alpha is passed
tol = 1e-3 if dtype is torch.float16 else None
self.assertEqual(out, cpu_out, rtol=tol, atol=tol)
helper(1.0)
helper(0.0)
helper(0.1)
helper(0.2)
# Test int32 tensor + int64 scalar add
# see https://github.com/pytorch/pytorch/issues/79835#issuecomment-1164984534
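        # A bare Python int is a "weak" scalar, so int32 + 1 keeps dtype int32, while adding a Python float
        # promotes the result to the default floating dtype (float32).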
x = torch.ones(4, dtype=torch.int32, device='mps')
self.assertEqual(x + 1, torch.full((4,), 2, dtype=torch.int32, device='mps'))
self.assertTrue(torch.equal(x + 1.5, torch.full((4,), 2.5, device='mps')))
def test_types_binary_op(self):
# Float * Bool
cpu_x = torch.arange(5, dtype=torch.float32, device="cpu") * torch.tensor([True, False, True, False, True], device="cpu")
mps_x = torch.arange(5, dtype=torch.float32, device="mps") * torch.tensor([True, False, True, False, True], device="mps")
self.assertEqual(cpu_x, mps_x)
# Float * Int64
cpu_y = torch.arange(5, dtype=torch.float32, device="cpu") * torch.tensor([1, 0, 1, 0, 1], device="cpu")
mps_y = torch.arange(5, dtype=torch.float32, device="mps") * torch.tensor([1, 0, 1, 0, 1], device="mps")
self.assertEqual(cpu_y, mps_y)
def test_unary_ops(self):
def helper(shape, op):
for dtypef in [torch.float32]:
cpu_x = torch.randn(shape, device='cpu', dtype=dtypef, requires_grad=False)
mps_x = cpu_x.detach().clone().to('mps')
self.assertEqual(op(cpu_x), op(mps_x))
for dtypei in [torch.int32, torch.int16]:
cpu_x = torch.randint(0, 1000, shape, device='cpu', dtype=dtypei, requires_grad=False)
mps_x = cpu_x.to('mps')
self.assertEqual(op(cpu_x), op(mps_x), rtol=1e-4, atol=1e-4)
helper((2, 8, 4, 5), torch.exp)
helper((2, 8, 3, 5), torch.exp2)
helper((2, 8, 3, 5), torch.log)
helper((2, 8, 3, 5), torch.cos)
def test_atan2(self):
def helper(shape):
input_cpu = torch.randn(shape)
input_mps = input_cpu.detach().clone().to("mps")
other_cpu = torch.randn(shape)
other_mps = other_cpu.detach().clone().to("mps")
atan2_cpu = torch.atan2(input_cpu, other_cpu)
atan2_mps = torch.atan2(input_mps, other_mps)
self.assertEqual(atan2_cpu, atan2_mps.to("cpu"))
helper(4)
helper(10000)
helper((10000, 40))
class TestNNMPS(NNTestCase):
def _create_basic_net(self):
class Layer(nn.Module):
def __init__(self):
super(Layer, self).__init__()
self.layer_dummy_param = Parameter(torch.empty(3, 5))
self.register_buffer('layer_dummy_buf', torch.zeros(1, 3, 3, 7))
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.l1 = Layer()
self.dummy_param = Parameter(torch.empty(3, 5))
self.register_buffer('dummy_buf', torch.zeros(7, 3, 3, 1))
l = Layer()
n = Net()
s = nn.Sequential(n, n)
return l, n, s
def test_requires_grad_(self):
m = self._create_basic_net()[-1]
assert len(list(m.buffers())) > 0, 'invalid test'
        assert all(not b.requires_grad for b in m.buffers()), 'invalid test'
        assert len(list(m.parameters())) > 0, 'invalid test'
        assert all(p.requires_grad for p in m.parameters()), 'invalid test'
for requires_grad in (False, True):
self.assertIs(m.requires_grad_(requires_grad), m)
for p in m.parameters():
self.assertEqual(p.requires_grad, requires_grad)
for b in m.buffers():
self.assertFalse(b.requires_grad)
def test_module_backcompat(self):
from torch.serialization import SourceChangeWarning
path = download_file('https://download.pytorch.org/test_data/linear.pt')
with warnings.catch_warnings():
warnings.simplefilter('ignore', SourceChangeWarning)
m = torch.load(path)
input = torch.randn(2, 3, dtype=torch.float)
self.assertEqual(m(input).size(), (2, 5))
def test_conv_backcompat(self):
from torch.serialization import SourceChangeWarning
# This file was generated by running on PyTorch 1.0.1 on Python 2:
#
# import torch
# from torch import nn
# m = nn.Conv2d(1, 1, 1)
# torch.save(m, 'legacy_conv2d.pt')
#
# NB: This Pickle also contains some Unicode data!
path = download_file('https://download.pytorch.org/test_data/legacy_conv2d.pt')
with warnings.catch_warnings():
warnings.simplefilter('ignore', SourceChangeWarning)
m = torch.load(path, encoding='utf-8')
input = torch.randn((1, 1, 1, 1), dtype=torch.float)
self.assertEqual(m(input).size(), (1, 1, 1, 1))
def test_conv_expand(self):
device = 'mps'
input_ = torch.rand(2, 3, 16, 16, device=device)
kernel = torch.rand(1, 1, 3, 11, device=device)
tmp_kernel = kernel.expand(-1, 3, -1, -1)
output = F.conv2d(input_, tmp_kernel, groups=1, padding=0, stride=1)
# The test should not crash
def test_permute(self):
X = torch.randn(5, 5).to('mps')
torch.log(X)
X = X.permute(1, 0)
torch.log(X)
# Printing of non_contiguous should not crash
def test_print_non_contiguous(self):
print(torch.ones(100, 100, device='mps').nonzero())
print(torch.ones(100, 100, device='mps').nonzero().contiguous())
def test_zero_grad(self):
i = torch.randn(2, 5, requires_grad=True)
module = nn.Linear(5, 5)
for p in module.parameters():
p.requires_grad = False
module.zero_grad()
module.weight.requires_grad = True
module.zero_grad()
self.assertIsNone(module.weight.grad) # uninitialized grad
module(i).sum().backward()
self.assertIsNotNone(module.weight.grad)
self.assertGreater(module.weight.grad.data.abs().sum(), 0)
module.zero_grad()
self.assertEqual(module.weight.grad.data, module.weight.data.clone().zero_())
module.bias.requires_grad = True
module.zero_grad()
self.assertIsNotNone(module.weight.grad)
self.assertIsNone(module.bias.grad)
module(i).sum().backward()
self.assertIsNotNone(module.weight.grad)
self.assertIsNotNone(module.bias.grad)
self.assertGreater(module.weight.grad.data.abs().sum(), 0)
self.assertGreater(module.bias.grad.data.abs().sum(), 0)
module.zero_grad()
self.assertEqual(module.weight.grad.data, module.weight.data.clone().zero_())
self.assertEqual(module.bias.grad.data, module.bias.data.clone().zero_())
# Force set to None.
module.zero_grad(set_to_none=True)
self.assertIsNone(module.weight.grad)
def test_no_grad(self):
for dtype in [torch.bfloat16, torch.float, torch.double]:
module = nn.Conv2d(2, 5, kernel_size=3, padding=1).to(dtype)
input = torch.randn(1, 2, 10, 10).to(dtype)
x = input
y = input.clone()
output = module(x)
self.assertTrue(output.requires_grad)
output.backward(torch.ones(1, 5, 10, 10))
with torch.no_grad():
output2 = module(y)
self.assertFalse(output2.requires_grad)
self.assertRaises(RuntimeError, lambda: output2.backward(torch.ones(1, 5, 10, 10)))
def test_invalid_conv1d(self):
for dtype in [torch.bfloat16, torch.float, torch.double]:
module = nn.Conv1d(in_channels=3, out_channels=33, kernel_size=10, stride=1, bias=True).to(dtype)
input = torch.randn(1, 3, 4).to(dtype)
with self.assertRaisesRegex(RuntimeError,
r'Calculated padded input size per channel: \(4\). ' +
r'Kernel size: \(10\). Kernel size can\'t be greater than actual input size'):
module(input)
# Negative stride check
module = nn.Conv1d(in_channels=3, out_channels=6, kernel_size=3, stride=-1, bias=True).to(dtype)
input = torch.randn(1, 3, 4).to(dtype)
with self.assertRaisesRegex(RuntimeError, 'non-positive stride is not supported'):
module(input)
def test_conv2d_discontiguous_weight(self):
# Test for https://github.com/pytorch/pytorch/issues/55781
x = torch.ones(64, 16, 16, 16)
weight = torch.arange(0, 1.0, 1 / 2.0 ** 10).reshape(32, 16, 1, 2)[:, :, :, ::2]
self.assertFalse(weight.is_contiguous())
y = torch.nn.functional.conv2d(x, weight, None)
if torch.backends.mkldnn.is_available():
# Disable MKLDNN explicitly, so that either NNPACK or THCNN will be used
with torch.backends.mkldnn.flags(enabled=False):
y_ = torch.nn.functional.conv2d(x, weight, None)
self.assertEqual(y, y_)
self.assertEqual(y.sum(), 4186112.)
def test_invalid_conv2d(self):
for dtype in [torch.bfloat16, torch.float, torch.double]:
module = torch.nn.Conv2d(1, 1, kernel_size=3, dilation=2, stride=2).to(dtype)
input = torch.empty(1, 1, 4, 4).to(dtype)
self.assertRaises(RuntimeError, lambda: module(input))
module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, stride=1, bias=True)
input = torch.randn(1, 3, 1, 1)
with self.assertRaisesRegex(RuntimeError,
r'Calculated padded input size per channel: \(1 x 1\). ' +
r'Kernel size: \(10 x 10\). Kernel size can\'t be greater than actual input size'):
module(input)
# Negative stride check
module = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=4, stride=-1, bias=True).to(dtype)
input = torch.randn(1, 3, 4, 4).to(dtype)
with self.assertRaisesRegex(RuntimeError, 'non-positive stride is not supported'):
module(input)
# Zero stride check
module = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=4, stride=0, bias=True).to(dtype)
input = torch.randn(1, 3, 4, 4).to(dtype)
with self.assertRaisesRegex(RuntimeError, 'non-positive stride is not supported'):
module(input)
def test_conv2d_valid_padding(self, device='mps'):
# Test F.conv2d padding='valid' is the same as no padding
x = torch.rand(1, 1, 1, 10, device=device).to(torch.float)
y = torch.rand(1, 1, 1, 4, device=device).to(torch.float)
expect = F.conv2d(x, y)
actual = F.conv2d(x, y, padding='valid')
self.assertEqual(expect.to('cpu'), actual.to('cpu'))
def test_gemm_permute_transpose(self):
batch_size = 32
n = 20
hidden = 768
num_attention_heads = 12
attention_head_size = hidden // num_attention_heads
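        # transpose_for_scores/attention2 mimic the key projection of multi-head attention:
        # (batch, seq, hidden) -> (batch, heads, seq, head_size), followed by a transpose of the last two dims.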
def transpose_for_scores(x: torch.Tensor) -> torch.Tensor:
new_x_shape = x.size()[:-1] + (num_attention_heads, attention_head_size)
x = x.view(new_x_shape)
return x.permute(0, 2, 1, 3)
def attention2(key, *, workaround=False, device):
key = transpose_for_scores(key)
res = key.transpose(-1, -2)
return res
A = torch.randn(batch_size, n, hidden)
A_mps = A.detach().clone().to("mps")
r1 = attention2(A, device="cpu")
r2 = attention2(A_mps, device="mps")
r2_cpu = r2.to("cpu")
self.assertEqual(r1, r2_cpu)
# def test_conv2d_same_padding(self, device='mps'):
# x = torch.rand(1, 1, 10, 11, device=device)
# y = torch.rand(1, 1, 4, 5, device=device)
# expect = F.conv2d(x, y, padding=(2, 2))[..., 1:, :]
# actual = F.conv2d(x, y, padding='same')
# self.assertEqual(expect.to('cpu'), actual.to('cpu'))
# # With dilation
# y = torch.rand(1, 1, 3, 4, device=device)
# expect = F.conv2d(x, y, padding=(2, 3), dilation=2)
# actual = F.conv2d(x, y, padding='same', dilation=2)
# self.assertEqual(expect, actual)
# # Dilation with asymmetric padding
# y = torch.rand(1, 1, 4, 4, device=device)
# expect = F.conv2d(x, y, padding=5, dilation=3)[..., 1:, 1:]
# actual = F.conv2d(x, y, padding='same', dilation=3)
# self.assertEqual(expect, actual)
class TestConstantPadNd(TestCase):
def test_preserves_memory_format(self):
nchw_tensor = torch.rand((1, 2, 5, 3))
nchw_padded = torch.constant_pad_nd(nchw_tensor, [1, 2], 0.5)
self.assertTrue(nchw_padded.is_contiguous(memory_format=torch.contiguous_format))
nhwc_tensor = nchw_tensor.contiguous(memory_format=torch.channels_last)
nhwc_padded = torch.constant_pad_nd(nhwc_tensor, [1, 2], 0.5)
self.assertTrue(nhwc_padded.is_contiguous(memory_format=torch.channels_last))
class TestLinalgMPS(TestCase):
def _test_addmm_addmv(self, f, t, m, v, *, alpha=None, beta=None, transpose_out=False):
dtype = t.dtype
numpy_dtype = dtype
alpha = 1.2 if alpha is None else alpha
beta = 0.8 if beta is None else beta
res1 = f(t, m, v, alpha=alpha, beta=beta)
res2 = torch.full_like(res1, math.nan)
if transpose_out:
res2 = res2.t().clone(memory_format=torch.contiguous_format).t()
f(t, m, v, alpha=alpha, beta=beta, out=res2)
res3 = alpha * (m.to(numpy_dtype).cpu().numpy() @ v.to(numpy_dtype).cpu().numpy())
if beta != 0:
res3 += (torch.mul(t, beta)).to(numpy_dtype).cpu().numpy()
res3 = torch.from_numpy(res3).to(dtype)
self.assertEqual(res1, res2)
self.assertEqual(res1, res3)
def test_addmm(self, device="mps", dtype=torch.float32):
M = torch.randn(10, 25, device=device).to(dtype)
m1 = torch.randn(10, 50, device=device).to(dtype)
m2 = torch.randn(50, 25, device=device).to(dtype)
self._test_addmm_addmv(torch.addmm, M, m1, m2)
# Test beta=0, M=nan
M = torch.full((10, 25), math.nan, device=device).to(dtype)
m1 = torch.randn(10, 50, device=device).to(dtype)
m2 = torch.randn(50, 25, device=device).to(dtype)
self._test_addmm_addmv(torch.addmm, M, m1, m2, beta=0)
# Test transpose
for t1, t2, t3, t4 in itertools.product([True, False], repeat=4):
def maybe_transpose(cond, m):
if not cond:
return m
return m.t().clone(memory_format=torch.contiguous_format).t()
M = maybe_transpose(t1, torch.randn(10, 25, device=device).to(dtype))
m1 = maybe_transpose(t2, torch.randn(10, 50, device=device).to(dtype))
m2 = maybe_transpose(t3, torch.randn(50, 25, device=device).to(dtype))
self._test_addmm_addmv(torch.addmm, M, m1, m2, transpose_out=t4)
class TestGatherScatter(TestCase):
def test_slicing_with_step(self):
# Slicing with step
# https://github.com/pytorch/pytorch/issues/78886
x_mps = torch.zeros(10, dtype=torch.float32, device="mps")
x_mps[::2] = 1.0
x_cpu = torch.zeros(10, dtype=torch.float32, device="cpu")
x_cpu[::2] = 1.0
self.assertEqual(x_cpu, x_mps)
def test_slicing_replace_column(self):
# https://github.com/pytorch/pytorch/issues/78074
def _helper(tensor_data):
x_cpu = torch.tensor(tensor_data)
x_mps = x_cpu.to('mps')
x_cpu[:, 0] = 7
x_mps[:, 0] = 7
self.assertEqual(x_cpu, x_mps)
_helper([[1, 2, 3], [4, 5, 6]])
_helper([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
_helper([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
def test_inplace_scatter(self):
# https://github.com/pytorch/pytorch/issues/79672
a_mps = torch.ones((2, 2),).to(torch.device("mps"))
b_mps = torch.ones((2, 2),).to(torch.device("mps"))
a_cpu = torch.ones((2, 2),).to(torch.device("cpu"))
b_cpu = torch.ones((2, 2),).to(torch.device("cpu"))
a_mps[:, 0] += b_mps[:, 0]
a_cpu[:, 0] += b_cpu[:, 0]
self.assertEqual(a_cpu, a_mps)
a_mps[:, 0] = a_mps[:, 0] + b_mps[:, 0]
a_cpu[:, 0] = a_cpu[:, 0] + b_cpu[:, 0]
self.assertEqual(a_cpu, a_mps)
# These tests were taken from test/test_view_ops.py
# They are a subset of those tests, as currently only this subset is working.
# This whole `class` will be removed when we add generic device testing. There
# are no additional tests added apart from what is part of test_view_ops.py
class TestViewOpsMPS(TestCase):
exact_dtype = True
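    # `other` counts as a view of `base` only if it reports _is_view(), has `base` as its _base, lives on the
    # same device and (on native device types such as mps) shares the same storage pointer.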
def is_view_of(self, base, other):
if (not other._is_view() or
other is base or
other._base is not base or
base.device != other.device):
return False
# Note: only validates storage on native device types
# because some accelerators, like XLA, do not expose storage
if base.device.type == 'mps':
if base.storage().data_ptr() != other.storage().data_ptr():
return False
return True
# Returns true if v1 and v2 are views of the same base
def is_view_of_same_base(self, v1, v2):
if (not v1._is_view() or v1 is v2):
return False
return self.is_view_of(v1._base, v2)
# Performs transpose if contiguous=True, else returns the input tensor as is
def _do_transpose(self, x, contiguous=False, dim0=0, dim1=1):
if contiguous:
return x
else:
return x.transpose(dim0, dim1)
def test_diagonal_view(self, device="mps"):
t = torch.ones((5, 5), device=device)
v = torch.diagonal(t)
self.assertTrue(self.is_view_of(t, v))
v[0] = 0
self.assertEqual(t[0, 0], v[0])
t = torch.ones((3, 3, 3), device="mps")
v = torch.diagonal(t, offset=1, dim1=1, dim2=2)
self.assertTrue(self.is_view_of(t, v))
v[0, 0] = 0
self.assertEqual(t[0, 0, 1], v[0, 0])
def test_select_view(self, device="mps") -> None:
t = torch.ones((5, 5), device=device)
v = t.select(0, 2)
self.assertTrue(self.is_view_of(t, v))
v[0] = 0
self.assertEqual(t[2, 0], v[0])
def test_unbind_view(self, device="mps") -> None:
t = torch.zeros((5, 5), device=device)
tup = torch.unbind(t)
for idx, v in enumerate(tup):
self.assertTrue(self.is_view_of(t, v))
v[0] = idx + 1
self.assertEqual(t[idx, 0], v[0])
def test_expand_view(self, device="mps") -> None:
t = torch.ones((5, 1), device=device)
v = t.expand(5, 5)
self.assertTrue(self.is_view_of(t, v))
v[2, 2] = 0
self.assertEqual(t[2, 0], v[2, 2])
def test_expand_as_view(self, device="mps"):
t = torch.ones((5, 1), device=device)
e = torch.empty((5, 5), device=device)
v = t.expand_as(e)
self.assertTrue(self.is_view_of(t, v))
v[2, 2] = 0
self.assertEqual(t[2, 0], v[2, 2])
def test_narrow_view(self, device="mps"):
t = torch.ones((5, 5), device=device)
v = torch.narrow(t, 1, 2, 2)
self.assertTrue(self.is_view_of(t, v))
v[0, 0] = 0
self.assertEqual(t[0, 2], v[0, 0])
def test_permute_view(self, device="mps") -> None:
t = torch.ones((5, 5), device=device)
v = t.permute(1, 0)
self.assertTrue(self.is_view_of(t, v))
v[0, 1] = 0
self.assertEqual(t[1, 0], v[0, 1])
def test_transpose_view(self, device="mps"):
for fn in (torch.swapdims, torch.swapaxes, torch.transpose):
t = torch.ones((5, 5), device=device)
v = fn(t, 0, 1)
self.assertTrue(self.is_view_of(t, v))
v[0, 1] = 0
self.assertEqual(t[1, 0], v[0, 1])
def test_transpose_inplace_view(self, device="mps"):
t = torch.ones(5, 5, device=device)
v = t.view_as(t)
v = v.swapdims_(0, 1)
self.assertTrue(self.is_view_of(t, v))
v[0, 1] = 0
self.assertEqual(t[1, 0], v[0, 1])
t = torch.ones(5, 5, device=device)
v = t.view_as(t)
v = v.swapaxes_(0, 1)
self.assertTrue(self.is_view_of(t, v))
v[0, 1] = 0
self.assertEqual(t[1, 0], v[0, 1])
t = torch.ones(5, 5, device=device)
v = t.view_as(t)
v = v.transpose_(0, 1)
self.assertTrue(self.is_view_of(t, v))
v[0, 1] = 0
self.assertEqual(t[1, 0], v[0, 1])
def test_t_view(self, device="mps"):
t = torch.ones((5, 5), device=device)
v = t.t()
self.assertTrue(self.is_view_of(t, v))
v[0, 1] = 0
self.assertEqual(t[1, 0], v[0, 1])
def test_t_inplace_view(self, device="mps"):
t = torch.ones(5, 5, device=device)
v = t.view_as(t)
v = v.t_()
self.assertTrue(self.is_view_of(t, v))
v[0, 1] = 0
self.assertEqual(t[1, 0], v[0, 1])
def test_T_view(self, device="mps"):
for op in ("T", "H", "mT", "mH"):
t = torch.ones((5, 5), device=device)
v = getattr(t, op)
self.assertTrue(self.is_view_of(t, v))
v[0, 1] = 0
self.assertEqual(t[1, 0], v[0, 1])
# requires aten::unfold
# def test_unfold_view(self, device="mps"):
# t = torch.ones(10, device=device)
# v = t.unfold(0, 3, 2)
# self.assertTrue(self.is_view_of(t, v))
# v[1, 0] = 0
# self.assertEqual(t[2], v[1, 0])
def test_squeeze_view(self, device="mps"):
t = torch.ones(5, 1, 5, device=device)
v = torch.squeeze(t)
self.assertTrue(self.is_view_of(t, v))
v[0, 1] = 0
self.assertTrue(t is v._base)
def test_squeeze_inplace_view(self, device="mps"):
t = torch.ones(5, 5, device=device)
v = t.view_as(t)
v = v.squeeze_()
self.assertTrue(self.is_view_of(t, v))
v[0, 1] = 0
self.assertTrue(t is v._base)
def test_unsqueeze_view(self, device="mps"):
t = torch.ones(5, 5, device=device)
v = torch.unsqueeze(t, 1)
self.assertTrue(self.is_view_of(t, v))
v[0, 0, 1] = 0
self.assertEqual(t[0, 1], v[0, 0, 1])
def test_unsqueeze_inplace_view(self, device="mps"):
t = torch.ones(5, 5, device=device)
v = t.view_as(t)
v = v.unsqueeze_(1)
self.assertTrue(self.is_view_of(t, v))
v[0, 0, 1] = 0
self.assertEqual(t[0, 1], v[0, 0, 1])
def test_as_strided_view(self, device="mps"):
t = torch.ones(5, 5, device=device)
v = torch.as_strided(t, (25,), (1,))
self.assertTrue(self.is_view_of(t, v))
v[6] = 0
self.assertEqual(t[1, 1], v[6])
def test_as_strided_inplace_view(self, device="mps"):
t = torch.ones(5, 5, device=device)
v = t.view_as(t)
v = v.as_strided_((25,), (1,))
self.assertTrue(self.is_view_of(t, v))
v[6] = 0
self.assertEqual(t[1, 1], v[6])
def test_view_view(self, device="mps"):
t = torch.ones(5, 5, device=device)
v = t.view(25)
self.assertTrue(self.is_view_of(t, v))
v[6] = 0
self.assertEqual(t[1, 1], v[6])
def test_view_as_view(self, device="mps"):
t = torch.ones(5, 5, device=device)
e = torch.empty((25,))
v = t.view_as(e)
self.assertTrue(self.is_view_of(t, v))
v[6] = 0
self.assertEqual(t[1, 1], v[6])
def test_contiguous_self(self, device="mps"):
t = torch.ones(5, 5, device=device)
s = t.contiguous()
self.assertTrue(s is t)
def test_contiguous_nonview(self, device="mps"):
t = torch.ones(5, 5, device=device)
nv = t.t().contiguous()
self.assertTrue(not self.is_view_of(t, nv))
nv[0, 0] = 0
self.assertNotEqual(t[0, 0], nv[0, 0])
def test_reshape_view(self, device="mps"):
t = torch.ones(5, 5, device=device)
v = torch.reshape(t, (25,))
self.assertTrue(self.is_view_of(t, v))
v[6] = 0
self.assertEqual(t[1, 1], v[6])
def test_reshape_as_view(self, device="mps"):
t = torch.ones(5, 5, device=device)
e = torch.empty((25,), device=device)
v = t.reshape_as(e)
self.assertTrue(self.is_view_of(t, v))
v[6] = 0
self.assertEqual(t[1, 1], v[6])
def test_reshape_nonview(self, device="mps"):
t = torch.ones(5, 5, device=device)
nv = torch.reshape(t.t(), (25,))
self.assertTrue(not self.is_view_of(t, nv))
nv[6] = 0
self.assertNotEqual(t[1, 1], nv[6])
def test_flatten_view(self, device="mps"):
def test_writes_propagate(t, v):
idx_t = (0,) * t.ndim
idx_v = (0,) * v.ndim
v[idx_v] = 0
self.assertEqual(t[idx_t], v[idx_v])
t = torch.ones(1, 2, 3, 4, device=device)
v = t.flatten()
self.assertTrue(self.is_view_of(t, v))
test_writes_propagate(t, v)
# zero-dimensional tensor
t = torch.tensor(1, device=device)
v = t.flatten()
test_writes_propagate(t, v)
self.assertTrue(self.is_view_of(t, v))
t = torch.ones(1, 2, 3, 4, device=device).transpose(2, 3)
v = t.flatten(0, 1)
test_writes_propagate(t, v)
self.assertTrue(self.is_view_of_same_base(t, v))
# stride[i] = stride[i + 1] * size[i + 1] is satisfied for 3 groups:
t = torch.ones(720, device=device) \
.as_strided((2, 3, 2, 3, 5, 4), (6, 2, 15, 5, 1, 0))
# [--1--|---2---|-3-] [--1--|----2---|-3-]
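        # Each flatten below collapses dims inside one of the groups marked above, so the stride condition
        # holds and every result can remain a view of the same base.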
v1 = t.flatten(0, 1)
v2 = v1.flatten(1, 3)
v3 = v2.flatten(2, 2)
test_writes_propagate(t, v1)
self.assertTrue(self.is_view_of_same_base(t, v1))
test_writes_propagate(t, v2)
self.assertTrue(self.is_view_of_same_base(t, v2))
test_writes_propagate(t, v3)
self.assertTrue(self.is_view_of_same_base(t, v3))
def test_flatten_nonview(self, device="mps"):
def assert_is_nonview(t, nv):
idx_t = (0,) * t.ndim
idx_nv = (0,) * nv.ndim
self.assertTrue(not nv._is_view())
nv[idx_nv] = 0
self.assertNotEqual(t[idx_t], nv[idx_nv])
t = torch.ones(2, 3, 2, 3, device=device).transpose(2, 3)
nv = t.flatten(1, 3)
assert_is_nonview(t, nv)
t = torch.ones(2, 2, device=device).T
nv = t.flatten()
assert_is_nonview(t, nv)
# flatten returns the original object if start_dim=end_dim
        t = torch.ones(2, 2, device=device)
nv = t.flatten(1, 1)
self.assertTrue(t is nv)
def test_basic_indexing_slice_view(self, device="mps"):
t = torch.ones(5, 5, device=device)
v = t[:2, :3]
self.assertTrue(self.is_view_of(t, v))
v[0, 0] = 0
self.assertEqual(t[0, 0], v[0, 0])
def test_basic_indexing_ellipses_view(self, device="mps"):
t = torch.ones(5, 5, device=device)
v = t[..., :2]
self.assertTrue(self.is_view_of(t, v))
v[0, 0] = 0
self.assertEqual(t[0, 0], v[0, 0])
def test_basic_indexing_newaxis_view(self, device="mps"):
t = torch.ones(5, 5, device=device)
v = t[None, :2, 3]
self.assertTrue(self.is_view_of(t, v))
v[0, 0] = 0
self.assertEqual(t[0, 3], v[0, 0])
def test_chunk_view(self, device="mps"):
t = torch.zeros(3, 3, device=device)
l = torch.chunk(t, 3)
for idx, v in enumerate(l):
self.assertTrue(self.is_view_of(t, v))
v[0, 0] = idx + 1
self.assertEqual(t[idx, 0], v[0, 0])
def test_split_view(self, device="mps"):
t = torch.zeros(3, 3, device=device)
l = torch.split(t, [1, 1, 1])
for idx, v in enumerate(l):
self.assertTrue(self.is_view_of(t, v))
v[0, 0] = idx + 1
self.assertEqual(t[idx, 0], v[0, 0])
def test_movedim_view(self, device="mps"):
def run_test(device, op):
t = torch.zeros(3, 3, device=device)
out = op(t)
self.assertTrue(self.is_view_of(t, out))
# Randomly change values in output
# and verify that original is changed
# as well.
for _ in range(3):
idx_1, idx_2 = random.randint(0, 2), random.randint(0, 2)
out[idx_1, idx_2] = random.random()
self.assertEqual(t[idx_2, idx_1], out[idx_1, idx_2])
for fn in [torch.movedim, torch.moveaxis]:
op = partial(fn, source=(0, 1), destination=(1, 0))
run_test(device, op)
op = partial(fn, source=0, destination=1)
run_test(device, op)
# Testing that the generated view_copy kernel and its derivative are implemented correctly
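    # view_copy ops are the out-of-place counterparts of view ops: they return a tensor with the viewed shape
    # but freshly allocated storage, so they must match the view variant in both forward values and gradients.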
def test_view_copy(self, device="mps"):
a = torch.randn(4, device=device, requires_grad=True)
a_ref = a.clone().detach().requires_grad_()
a_view = a_ref.view(2, 2)
a_view_copy = torch.view_copy(a, (2, 2))
# view_copy ops don't preserve view relationship
self.assertTrue(self.is_view_of(a_ref, a_view))
self.assertFalse(self.is_view_of(a, a_view_copy))
a_view_copy.sum().backward()
a_view.sum().backward()
# forward and backward give the same shape + result
self.assertEqual(a_view_copy, a_view)
self.assertEqual(a.grad, a_ref.grad)
def test_view_copy_out(self, device="mps"):
a = torch.randn(2, 2, device=device)
out = torch.empty(2, device=device)
torch.diagonal_copy(a, out=out)
expected = torch.diagonal_copy(a)
self.assertEqual(expected, out)
a = torch.randn(4, device=device)
out1 = torch.empty(2, device=device)
out2 = torch.empty(2, device=device)
torch.split_copy(a, 2, out=(out1, out2))
expected1, expected2 = torch.split_copy(a, 2)
self.assertEqual(expected1, out1)
self.assertEqual(expected2, out2)
def test_empty_reshape(self, device="mps"):
x = torch.randn(0, 6, device=device)
self.assertEqual((1, 0, 6, 1, 1), x.reshape(1, 0, 6, 1, 1).shape)
# should be viewable -- i.e. data_ptr is the same.
self.assertEqual(x.data_ptr(), x.reshape(1, 0, 6, 1, 1).data_ptr())
# match NumPy semantics -- don't infer the size of dimension with a degree of freedom
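        # with zero elements the -1 dimension could take any size, so reshape must reject it (as NumPy does)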
self.assertRaises(RuntimeError, lambda: x.reshape(0, -1))
def test_expand(self, device="mps"):
tensor = torch.rand(1, 8, 1, device=device)
tensor2 = torch.rand(5, device=device)
template = torch.rand(4, 8, 5, device=device)
target = template.size()
self.assertEqual(tensor.expand_as(template).size(), target)
self.assertEqual(tensor.expand(4, 8, 5).size(), target)
self.assertEqual(tensor.expand(target).size(), target)
self.assertEqual(tensor2.expand_as(template).size(), target)
self.assertEqual(tensor2.expand(4, 8, 5).size(), target)
self.assertEqual(tensor2.expand(target).size(), target)
# test double expand
self.assertEqual(tensor2.expand(1, 5).expand(2, 2, 5), tensor2.repeat(2, 2, 1))
# test non-contiguous
noncontig = torch.randn(5, 2, 1, 3, device=device)[:, 0]
self.assertFalse(noncontig.is_contiguous())
self.assertEqual(noncontig.expand(2, 5, 4, 3), noncontig.contiguous().repeat(2, 1, 4, 1))
# make sure it's compatible with unsqueeze
expanded = tensor2.expand(1, 1, 5)
unsqueezed = tensor2.unsqueeze(0).unsqueeze(1)
self.assertEqual(expanded, unsqueezed)
self.assertEqual(expanded.stride(), unsqueezed.stride())
# test -1 as target size
self.assertEqual(tensor.expand(4, -1, 5), tensor.expand(4, 8, 5))
self.assertRaises(RuntimeError, lambda: tensor2.expand(-1, -1))
# test expanding empty to empty
self.assertEqual(torch.zeros(0, device=device).expand((0,)), torch.zeros(0, device=device))
def test_view_empty(self, device="mps"):
x = torch.randn(0, 6, device=device)
self.assertEqual((1, 0, 6, 1, 1), x.view(1, 0, 6, 1, 1).shape)
def test_reshape(self, device="mps"):
x = torch.randn(3, 3, device=device)
self.assertEqual(x.data_ptr(), x.reshape(-1).data_ptr())
self.assertEqual(x.data_ptr(), x.reshape(1, 9, 1).data_ptr())
self.assertEqual(torch.reshape(x, (9,)), x.reshape(9))
self.assertRaises(RuntimeError, lambda: x.reshape(-1, -1))
y = torch.randn(4, 4, 4, device=device)[:, 0, :]
# .data_ptr() on meta tensors is always 0 so they are equal regardless of the reshape
if device != "meta":
self.assertNotEqual(y.data_ptr(), y.reshape(-1).data_ptr())
self.assertEqual(y.contiguous().view(-1), y.reshape(-1))
self.assertEqual(y.reshape(2, 2, 4).data_ptr(), y.data_ptr())
s = torch.randn((), device=device)
self.assertEqual(s.data_ptr(), s.reshape(()).data_ptr())
self.assertEqual(s.reshape(-1).shape, (1,))
self.assertRaises(RuntimeError, lambda: s.reshape(2))
empty = torch.tensor([], device=device)
self.assertEqual(empty, empty.reshape(-1))
self.assertEqual(empty, empty.reshape([0]))
# TODO: fix these once we have multi-dimensional empty tensors
self.assertEqual(empty.reshape([0, 1]).shape, (0, 1))
self.assertEqual(empty.reshape([1, -1]).shape, (1, 0))
self.assertRaises(RuntimeError, lambda: empty.reshape(1))
x = torch.randn(3, 3, device=device)
self.assertEqual(x.data_ptr(), x.reshape_as(torch.rand(9)).data_ptr())
self.assertEqual(x.data_ptr(), x.reshape_as(torch.rand(1, 9, 1)).data_ptr())
self.assertRaises(RuntimeError, lambda: x.reshape_as(torch.rand(10, device=device)))
def test_narrow(self, device="mps"):
x = torch.tensor([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
self.assertEqual(x.narrow(0, 0, 1), torch.tensor([[0, 1, 2]]))
self.assertEqual(x.narrow(0, 0, 2), torch.tensor([[0, 1, 2], [3, 4, 5]]))
self.assertEqual(x.narrow(0, 1, 1), torch.tensor([[3, 4, 5]]))
self.assertEqual(x.narrow(0, -1, 1), torch.tensor([[6, 7, 8]]))
self.assertEqual(x.narrow(0, -2, 2), torch.tensor([[3, 4, 5], [6, 7, 8]]))
self.assertEqual(x.narrow(0, -3, 3), torch.tensor([[0, 1, 2], [3, 4, 5], [6, 7, 8]]))
self.assertEqual(x.narrow(-1, -1, 1), torch.tensor([[2], [5], [8]]))
self.assertEqual(x.narrow(-2, -1, 1), torch.tensor([[6, 7, 8]]))
def test_narrow_tensor(self, device="mps"):
x = torch.tensor([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
self.assertEqual(x.narrow(0, torch.tensor(0), 1), torch.tensor([[0, 1, 2]]))
with self.assertRaises(Exception):
x.narrow(0, torch.tensor(0.), 1)
with self.assertRaises(Exception):
x.narrow(0, torch.tensor([0]), 1)
with self.assertRaises(Exception):
x.narrow(0, torch.tensor([0, 1]), 1)
def test_t(self, device="mps"):
# Test 0D tensors
x = torch.randn(())
self.assertEqual(x, x.t())
x = x.to_sparse()
self.assertEqual(x, x.t())
# Test 1D tensors
x = torch.arange(4)
self.assertEqual(x, x.t())
x = x.to_sparse()
self.assertEqual(x, x.t())
# Test 2D tensors
x = torch.rand((2, 2))
self.assertEqual(x.t(), x.transpose(0, 1))
x = x.to_sparse()
self.assertEqual(x.t(), x.transpose(0, 1))
# Test 3D tensor
x = torch.rand((2, 2, 2))
with self.assertRaisesRegex(RuntimeError, 'expects a tensor with <= 2 dimensions, but self is 3D'):
x.t()
x = x.to_sparse()
with self.assertRaisesRegex(RuntimeError, 'expects a tensor with <= 2 sparse and 0 dense dimensions'):
x.t()
def test_split(self, device="mps"):
tensor = torch.rand(7, 4)
split_size = 3
dim = 0
target_sizes = ([3, 4], [3, 4], [1, 4])
splits = tensor.split(split_size, dim)
start = 0
for target_size, split in zip(target_sizes, splits):
self.assertEqual(split.size(), target_size)
self.assertEqual(tensor.narrow(dim, start, target_size[dim]), split, atol=0, rtol=0)
start = start + target_size[dim]
# Variable sections split
tensor = torch.randn(20, 10)
dim = 0
split_sizes = [5, 5, 10]
target_sizes = ([[5, 10], [5, 10], [10, 10]])
splits = tensor.split(split_sizes, dim)
start = 0
for target_size, split in zip(target_sizes, splits):
self.assertEqual(split.size(), target_size)
self.assertEqual(tensor.narrow(dim, start, target_size[dim]), split, atol=0, rtol=0)
start = start + target_size[dim]
split_sizes = [2, 2, 6]
target_sizes = ([20, 2], [20, 2], [20, 6])
dim = 1
splits = tensor.split(split_sizes, dim)
start = 0
for target_size, split in zip(target_sizes, splits):
self.assertEqual(split.size(), target_size)
self.assertEqual(tensor.narrow(dim, start, target_size[dim]), split, atol=0, rtol=0)
start = start + target_size[dim]
def test_chunk(self, device="mps"):
tensor = torch.rand(4, 7)
num_chunks = 3
dim = 1
target_sizes = ([4, 3], [4, 3], [4, 1])
splits = tensor.chunk(num_chunks, dim)
start = 0
for target_size, split in zip(target_sizes, splits):
self.assertEqual(split.size(), target_size)
self.assertEqual(tensor.narrow(dim, start, target_size[dim]), split,
atol=0, rtol=0)
start = start + target_size[dim]
# Invalid chunk sizes
error_regex = 'chunk expects.*greater than 0'
with self.assertRaisesRegex(RuntimeError, error_regex):
tensor.chunk(0)
with self.assertRaisesRegex(RuntimeError, error_regex):
tensor.chunk(-2)
def test_unsqueeze(self, device="mps") -> None:
x = torch.randn(2, 3, 4)
y = x.unsqueeze(1)
self.assertEqual(y, x.view(2, 1, 3, 4))
y = x.clone().unsqueeze_(2)
self.assertEqual(y, x.view(2, 3, 1, 4))
x = x[:, 1]
self.assertFalse(x.is_contiguous())
y = x.unsqueeze(1)
self.assertEqual(y, x.contiguous().view(2, 1, 4))
y = x.clone().unsqueeze_(2)
self.assertEqual(y, x.contiguous().view(2, 4, 1))
# unit test for special case transposed copy (see ATen/native/Copy.cpp for details)
def test_big_transpose(self, device="mps"):
t = torch.rand(456, 789, device=device)
t1 = t.t().contiguous()
t2 = torch.from_numpy(t.cpu().numpy().transpose())
self.assertEqual(t1, t2)
def test_T(self, device="mps"):
a = torch.randn(2, 3, 4, device=device)
t1 = a.T
t2 = a.permute(2, 1, 0)
self.assertEqual(t2, t1)
b = torch.randn(10, device=device)
self.assertEqual(b, b.T)
scalar = torch.tensor(5, device=device)
self.assertEqual(scalar, scalar.T)
def test_transposes(self, device="mps", dtype=torch.float32):
for op in ("T", "H", "mT", "mH", "adjoint"):
shapes = ((), (2, 3), (2, 3, 4)) if op[0] == "m" or op == "adjoint" else ((), (2, 3),)
for shape in shapes:
a = make_tensor(shape, device=device, dtype=dtype)
t1 = getattr(a, op)
if op == "adjoint":
t1 = t1()
t2 = a
if a.ndim != 0:
t2 = t2.transpose(-2, -1)
if op[-1] == "H" or op == "adjoint":
t2 = t2.conj()
self.assertEqual(t2, t1)
def test_transposes_errors(self, device="mps", dtype=torch.float32):
for op in ("H", "mT", "mH", "adjoint"):
shapes = ((2,), (2, 3, 4)) if op == "H" else ((2,),)
for shape in shapes:
a = make_tensor(shape, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "only supported on matrices"):
t1 = getattr(a, op)
if op == "adjoint":
t1 = t1()
def test_python_types(self, device="mps"):
a1 = torch.randn((1, 2), device=device, dtype=torch.float32)
a2 = torch.randn((1, 2), device=device, dtype=torch.float32)
self.assertEqual(a1.dtype, a2.dtype)
b1 = torch.arange(10, 20, dtype=torch.int64, device=device)
b2 = torch.arange(10, 20, dtype=int, device=device)
self.assertEqual(b1.dtype, b2.dtype)
c1 = torch.tensor([True, False], dtype=torch.bool, device=device)
c2 = torch.tensor([True, False], dtype=bool, device=device)
self.assertEqual(c1.dtype, c2.dtype)
# TODO: is resize best put in test_view_ops?
def test_resize_as_preserves_strides(self, device="mps"):
x = torch.empty(2, 3).t()
old_strides = x.stride()
x.resize_as_(x)
self.assertEqual(x.stride(), old_strides)
def test_memory_format_resize_as(self, device="mps"):
def test_helper(shape, memory_format, device="mps"):
xc = torch.randn(shape, device=device).contiguous(memory_format=memory_format)
flat = torch.randn(xc.numel(), device=device)
flat.resize_as_(xc, memory_format=torch.preserve_format)
self.assertTrue(flat.is_contiguous(memory_format=memory_format))
test_helper((10, 3, 32, 32), torch.channels_last, device="mps")
test_helper((3, 10, 3, 32, 32), torch.channels_last_3d, device="mps")
def test_memory_format_resize_(self, device="mps"):
def test_helper(shape, numel, memory_format, device="mps"):
flat = torch.randn(numel, device=device)
flat.resize_(shape, memory_format=memory_format)
self.assertTrue(flat.is_contiguous(memory_format=memory_format))
test_helper((10, 3, 32, 32), 10 * 3 * 32 * 32, torch.channels_last, device="mps")
test_helper((3, 10, 3, 32, 32), 3 * 10 * 3 * 32 * 32, torch.channels_last_3d, device="mps")
# TODO: OpInfo this
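    # gradcheck/gradgradcheck compare analytic gradients against finite-difference estimates, hence the
    # double-precision, requires_grad inputs below.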
def _test_atleast(self, device, torch_fn):
# 0-dim
s = torch.tensor(0.5, dtype=torch.double, requires_grad=True)
gradcheck(lambda x: torch_fn(x), s)
gradgradcheck(lambda x: torch_fn(x), s)
# 1-dim
a = torch.rand(4, dtype=torch.double, requires_grad=True)
gradcheck(lambda x: torch_fn(x), a)
gradgradcheck(lambda x: torch_fn(x), a)
# 2,3,4-dim
b = torch.rand(4, 3, dtype=torch.double, requires_grad=True)
c = torch.rand(4, 3, 2, dtype=torch.double, requires_grad=True)
d = torch.rand(4, 3, 2, 1, dtype=torch.double, requires_grad=True)
input_tuple = (s, a, b, c, d)
gradcheck(lambda s, w, x, y, z: torch_fn(s, w, x, y, z), input_tuple)
gradgradcheck(lambda s, w, x, y, z: torch_fn(s, w, x, y, z), input_tuple)
def test_atleast_gradient(self, device="mps"):
self._test_atleast(device, torch.atleast_1d)
self._test_atleast(device, torch.atleast_2d)
self._test_atleast(device, torch.atleast_3d)
def test_view(self, device="mps"):
tensor = torch.rand(15, device=device)
template = torch.rand(3, 5, device=device)
empty = torch.empty(0, device=device)
target = template.size()
self.assertEqual(tensor.view_as(template).size(), target)
self.assertEqual(tensor.view(3, 5).size(), target)
self.assertEqual(tensor.view(torch.Size([3, 5])).size(), target)
self.assertEqual(tensor.view(-1, 5).size(), target)
self.assertEqual(tensor.view(3, -1).size(), target)
tensor_view = tensor.view(5, 3)
tensor_view.fill_(random.uniform(0, 1))
self.assertEqual(empty.view_as(empty), empty)
self.assertEqual(empty.view(0), empty)
self.assertEqual(empty.view(0, 3, 0, 1).size(), torch.Size([0, 3, 0, 1]))
self.assertEqual(empty.view(0, 3, 0, 1).view(0), empty)
# test size inference with empty tensors
self.assertEqual(empty.view(-1).size(), torch.Size([0]))
self.assertEqual(empty.view(10, 3, -1).size(), torch.Size([10, 3, 0]))
with self.assertRaisesRegex(RuntimeError, r"because the unspecified dimension size -1 can be any value"):
empty.view(-1, 0)
with self.assertRaisesRegex(RuntimeError, r"because the unspecified dimension size -1 can be any value"):
empty.view(3, 0, -1, 0)
self.assertRaises(RuntimeError, lambda: tensor.view(15, 0))
self.assertRaises(RuntimeError, lambda: tensor.view(7, -1))
self.assertRaises(RuntimeError, lambda: tensor.view(15, -1, -1))
# RuntimeError: Invalid device for storage: mps
def test_contiguous(self, device="mps"):
x = torch.randn(1, 16, 5, 5, device=device)
self.assertTrue(x.is_contiguous())
stride = list(x.stride())
stride[0] = 20
# change the stride in dimension 0. the tensor is still contiguous because size[0] is 1
x.set_(x.storage(), 0, x.size(), stride)
self.assertTrue(x.is_contiguous())
def test_resize_all_dtypes_and_devices(self, device="mps"):
shape = (2, 2)
for dt in (torch.half, torch.bfloat16, torch.bool):
x = torch.tensor([[1, 2], [3, 4], [5, 6]], dtype=dt, device=device)
x.resize_(shape)
self.assertEqual(shape, x.shape)
def test_resize_as_all_dtypes_and_devices(self, device="mps"):
for dt in (torch.half, torch.bfloat16, torch.bool):
x = torch.tensor([[1, 2], [3, 4], [5, 6]], dtype=dt, device=device)
y = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=dt, device=device)
x.resize_as_(y)
self.assertEqual(y.shape, x.shape)
def test_resize_overflow(self, device="mps"):
x = torch.empty((), dtype=torch.float64)
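        # 2 * 4 * 2**29 * 2**29 elements at 8 bytes each is 2**64 bytes, which overflows the signed 64-bit
        # storage-size computation; the second shape (2**64 elements) presumably overflows on the element count itself.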
with self.assertRaisesRegex(RuntimeError, 'Storage size calculation overflowed'):
x.resize_([2, 4, 2**29, 2**29])
with self.assertRaisesRegex(RuntimeError, 'overflow'):
x.resize_([8, 8, 2**29, 2**29])
def test_view_all_dtypes_and_devices(self, device="mps"):
for dt in (torch.float, torch.bool):
x = torch.tensor([[1, 2], [3, 4], [5, 6]], dtype=dt, device=device)
self.assertEqual(x.view(6).shape, [6])
class TestRNNMPS(TestCase):
def test_lstm_1(self, device="mps", dtype=torch.float32):
rnn = nn.LSTM(1, 4, 2, device="cpu")
input = torch.randn(2, 3, 1, device="cpu")
hx = torch.zeros(2, 3, 4, device="cpu")
cx = torch.zeros(2, 3, 4, device="cpu")
cpu_output, (cpu_hn, cpu_cn) = rnn(input, (hx, cx))
rnn = rnn.to(device)
input = input.to(device)
hx = hx.to(device)
cx = cx.to(device)
output, (hn, cn) = rnn(input, (hx, cx))
self.assertEqual(cpu_output, output)
self.assertEqual(cpu_hn, hn)
self.assertEqual(cpu_cn, cn)
# test batch_first
rnn = nn.LSTM(1, 4, 2, device="cpu", batch_first=True)
input = torch.randn(3, 2, 1, device="cpu")
hx = torch.zeros(2, 3, 4, device="cpu")
cx = torch.zeros(2, 3, 4, device="cpu")
cpu_output, (cpu_hn, cpu_cn) = rnn(input, (hx, cx))
rnn = rnn.to(device)
input = input.to(device)
hx = hx.to(device)
cx = cx.to(device)
output, (hn, cn) = rnn(input, (hx, cx))
self.assertEqual(cpu_output, output)
self.assertEqual(cpu_hn, hn)
self.assertEqual(cpu_cn, cn)
@unittest.skipIf(True, "Backward of lstm returns wrong result")
def test_lstm_2(self, device="mps", dtype=torch.float32):
def get_results(device):
rnn = nn.LSTM(1, 4, 1, device=device)
inp = torch.randn(2, 3, 1, device=device, requires_grad=True)
hx = torch.zeros(1, 3, 4, device=device)
cx = torch.zeros(1, 3, 4, device=device)
output, _ = rnn(inp, (hx, cx))
output.sum().backward()
weight_grad = rnn.weight_ih_l0.grad.clone()
input_grad = inp.grad.clone()
return output, weight_grad, input_grad
cpu_output, cpu_weight_grad, cpu_input_grad = get_results("cpu")
mps_output, mps_weight_grad, mps_input_grad = get_results("mps")
self.assertEqual(cpu_output, mps_output)
self.assertEqual(cpu_input_grad, mps_input_grad)
self.assertEqual(cpu_weight_grad, mps_weight_grad)
class TestFallbackWarning(TestCase):
# TODO: Remove once test_testing.py is running on MPS devices
def test_no_warning_on_import(self):
out = subprocess.check_output(
[sys.executable, "-W", "all", "-c", "import torch"],
stderr=subprocess.STDOUT,
# On Windows, opening the subprocess with the default CWD makes `import torch`
# fail, so just set CWD to this script's directory
cwd=os.path.dirname(os.path.realpath(__file__)),).decode("utf-8")
        self.assertEqual(out, "")
def _get_not_implemented_op(self):
# This can be changed once we actually implement `torch.bincount`
# Should return fn, args, kwargs, string_version
return (torch.bincount,
torch.tensor([4], device='mps'), {},
"torch.bincount(torch.tensor([4, 3, 6, 3, 4], device='mps'))")
def test_error_on_not_implemented(self):
fn, args, kwargs, _ = self._get_not_implemented_op()
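        # The pattern below matches the backend's runtime message verbatim (including the 'not current
        # implemented' wording), so keep it in sync with the error actually raised by the MPS dispatcher.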
with self.assertRaisesRegex(NotImplementedError, "not current implemented for the MPS device"):
fn(*args, **kwargs)
def test_warn_on_not_implemented_with_fallback(self):
_, _, _, op = self._get_not_implemented_op()
        script = f"""
import os
# MUST happen before pytorch's import
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
import warnings
with warnings.catch_warnings(record=True) as w:
    import torch
if len(w) > 0:
    print(w)
    exit(1)
# This should run just fine and raise warning about perf
with warnings.catch_warnings(record=True) as w:
    {op}
if len(w) != 1:
    print(w)
    exit(2)
"""
try:
subprocess.check_output(
[sys.executable, '-W', 'all', '-c', script],
stderr=subprocess.STDOUT,
# On Windows, opening the subprocess with the default CWD makes `import torch`
# fail, so just set CWD to this script's directory
cwd=os.path.dirname(os.path.realpath(__file__)),)
except subprocess.CalledProcessError as e:
            if e.returncode == 1:
                self.fail("There was a warning when importing torch when PYTORCH_ENABLE_MPS_FALLBACK is set." +
                          e.output.decode("utf-8"))
            elif e.returncode == 2:
                self.fail("There wasn't exactly one warning when running not implemented op with "
                          f"PYTORCH_ENABLE_MPS_FALLBACK set. {e.output}")
            else:
                self.fail("Running a not implemented op failed even though PYTORCH_ENABLE_MPS_FALLBACK is set. " +
                          e.output.decode("utf-8"))
class TestNoRegression(TestCase):
def test_assert_close(self):
a = torch.ones(1, device="mps")
b = torch.zeros(1, device="mps")
inf = a / b
nan = b / b
with self.assertRaisesRegex(AssertionError, "Tensor-likes are not close!"):
torch.testing.assert_close(a, inf)
# TODO: The NaN test is failing when all the tests in test_mps are run
# together but passes when run separately. There seems to be memory
# corruption which needs to be fixed for this test to be enabled.
# with self.assertRaisesRegex(AssertionError, "Tensor-likes are not close!"):
# torch.testing.assert_close(a, nan)
@unittest.expectedFailure
def test_mps_compat(self):
# If this test is successful, that means that all operations in the comparison logic are supported natively on
# the MPS backend. Please remove this test as well as the compatibility logic in
# torch.testing._comparison.TensorLikePair._equalize_attributes
actual = torch.tensor(1.0, device="mps")
expected = actual.clone()
# We can't use assert_close or TensorLikePair.compare() directly, since that would hit the compatibility logic
# in torch.testing._comparison.TensorLikePair._equalize_attributes that we want to circumvent here
pair = TensorLikePair(actual, expected)
pair._compare_values(actual, expected)
def test_double_error(self):
with self.assertRaisesRegex(TypeError, "the MPS framework doesn't support float64"):
a = torch.ones(2, dtype=torch.float64, device="mps")
a = torch.ones(2, device="mps")
with self.assertRaisesRegex(TypeError, "the MPS framework doesn't support float64"):
a = a.double()
def test_legacy_constructor(self):
a = torch.ones(2, device="mps")
b = a.new(1)
def test_serialization_map_location(self):
# Ensures that cpu Tensor can be loaded on mps
with tempfile.NamedTemporaryFile() as f:
x = torch.rand(2)
torch.save(x, f)
f.seek(0)
x2 = torch.load(f, map_location="mps")
self.assertEqual(x, x2)
self.assertEqual(x2.device.type, "mps")
# Ensures that mps Tensors can be loaded on mps
with tempfile.NamedTemporaryFile() as f:
x = torch.rand(2, device="mps")
torch.save(x, f)
f.seek(0)
x2 = torch.load(f)
self.assertEqual(x, x2)
self.assertEqual(x2.device.type, "mps")
# Ensures that mps Tensors can be loaded on cpu
with tempfile.NamedTemporaryFile() as f:
x = torch.rand(2, device="mps")
torch.save(x, f)
f.seek(0)
x2 = torch.load(f, map_location="cpu")
self.assertEqual(x, x2)
self.assertEqual(x2.device.type, "cpu")
MPS_DTYPES = get_all_dtypes()
for t in [torch.double, torch.cdouble, torch.cfloat, torch.int8, torch.bfloat16]:
del MPS_DTYPES[MPS_DTYPES.index(t)]
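# float64, the complex dtypes, int8 and bfloat16 are dropped above, presumably because the MPS backend
# did not support (or reliably handle) them at the time this test suite was written.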
class TestConsistency(TestCase):
# TODO: This is only used while some ops are being added.
# This list should contain all ops and dtypes eventually
# This can be generated automatically in the `new_mps_allowlist.txt` file
# by doing `EXPECTTEST_ACCEPT=1 python test_mps.py TestConsistencyCPU`
# You most likely do NOT want to modify this manually
ALLOWLIST_OP = {
'__radd__': ['torch.float32',
'torch.int16',
'torch.int32',
'torch.int64'],
'__rand__': ['torch.bool',
'torch.int16',
'torch.int32',
'torch.int64'],
'__rmul__': ['torch.bool',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64'],
'__ror__': ['torch.bool',
'torch.int16',
'torch.int32',
'torch.int64'],
'__rxor__': ['torch.bool',
'torch.int16',
'torch.int32',
'torch.int64'],
'_masked.normalize': ['torch.float32'],
'abs': ['torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.uint8'],
'add': ['torch.float32',
'torch.int16',
'torch.int32',
'torch.int64'],
'addcdiv': ['torch.float32'],
'addcmul': ['torch.float32',
'torch.int16',
'torch.int32',
'torch.int64'],
'addmv': ['torch.float32'],
'addr': ['torch.float32'],
'all': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64'],
'any': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64'],
'argmax': ['torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64'],
'asin': ['torch.float32'],
'asinh': ['torch.float32'],
'atan': ['torch.float32'],
'atan2': ['torch.float32'],
'atanh': ['torch.float32'],
'atleast_1d': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'atleast_2d': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'atleast_3d': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'baddbmm': ['torch.float32'],
'bitwise_and': ['torch.bool',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'bitwise_left_shift': ['torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'bitwise_not': ['torch.bool',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'bitwise_or': ['torch.bool',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'bitwise_right_shift': ['torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'bitwise_xor': ['torch.bool',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'bmm': ['torch.float32'],
'ceil': ['torch.float32'],
'chunk': ['torch.float16', 'torch.float32', 'torch.int64'],
'clone': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'column_stack': ['torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'conj': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'conj_physical': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'contiguous': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'corrcoef': ['torch.float32'],
'deg2rad': ['torch.float32'],
'diag': ['torch.float32', 'torch.int32'],
'diagflat': ['torch.int32'],
'diff': ['torch.float32'],
'dist': ['torch.float32'],
'dot': ['torch.float32', 'torch.int32'],
'einsum': ['torch.float32'],
'erf': ['torch.float32'],
'fill': ['torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64'],
'flatten': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64'],
'floor': ['torch.float32'],
'hstack': ['torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64'],
'index_select': ['torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64'],
'isinf': ['torch.float16', 'torch.float32'],
'isnan': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'kron': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
        'linalg.norm': ['torch.float16', 'torch.float32'],
'linalg.svd': ['torch.float32'],
'linalg.vector_norm': ['torch.float16'],
'log1p': ['torch.float32'],
'log_softmax': ['torch.float32'],
'logaddexp': ['torch.float32'],
'logaddexp2': ['torch.float32'],
'masked_select': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'mm': ['torch.float32'],
'mv': ['torch.float32'],
'neg': ['torch.float16',
'torch.float32',
'torch.int16',
'torch.int32'],
'nn.functional.adaptive_max_pool1d': ['torch.float32'],
'nn.functional.adaptive_max_pool2d': ['torch.float32'],
'nn.functional.binary_cross_entropy': ['torch.float32'],
'nn.functional.celu': ['torch.float32'],
'nn.functional.elu': ['torch.float32'],
'nn.functional.embedding': ['torch.float16', 'torch.float32'],
'nn.functional.feature_alpha_dropout': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'nn.functional.hardtanh': ['torch.float32',
'torch.int16',
'torch.int32',
'torch.int64'],
'nn.functional.hinge_embedding_loss': ['torch.float32'],
'nn.functional.kl_div': ['torch.float32'],
'nn.functional.l1_loss': ['torch.float32'],
'nn.functional.linear': ['torch.float32'],
'nn.functional.huber_loss': ['torch.float32'],
'nn.functional.leaky_relu': ['torch.float32'],
'nn.functional.mse_loss': ['torch.float16', 'torch.float32'],
'nn.functional.relu': ['torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'nn.functional.relu6': ['torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'nn.functional.prelu': ['torch.float32'],
'nn.functional.selu': ['torch.float32'],
'nn.functional.silu': ['torch.float32'],
'nn.functional.smooth_l1_loss': ['torch.float32',
'torch.float16'],
'nn.functional.softmin': ['torch.float32'],
'nn.functional.threshold': ['torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'nn.functional.upsample_bilinear': ['torch.float32'],
        'norm': ['torch.float32', 'torch.float16'],
'positive': ['torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'rad2deg': ['torch.float32'],
'ravel': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'real': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'repeat': ['torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'repeat_interleave': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'resize_': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'resize_as_': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'resolve_conj': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'resolve_neg': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'round': ['torch.float32'],
'sgn': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'sign': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.uint8'],
'sin': ['torch.float32'],
'sinh': ['torch.float32'],
'softmax': ['torch.float32'],
'split': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'sqrt': ['torch.float32'],
'square': ['torch.float32'],
'squeeze': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'stack': ['torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'sub': ['torch.float32',
'torch.int16',
'torch.int32',
'torch.int64'],
'sum_to_size': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'svd': ['torch.float32'],
't': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'tanh': ['torch.float32'],
'tensordot': ['torch.float32'],
'topk': ['torch.float32'],
'tril': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'triu': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'true_divide': ['torch.float32'],
'trunc': ['torch.float32'],
'unsqueeze': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'view': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'view_as': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'vsplit': ['torch.bool',
'torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8'],
'vstack': ['torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64'],
'zero_': ['torch.float16',
'torch.float32',
'torch.int16',
'torch.int32',
'torch.int64',
'torch.uint8']}
    # These ops are problematic, so never run them even when
    # generating the new allowlist.
# If the dtype list is None, all dtypes are excluded.
# All the entries in this list should be removed
BLOCKLIST = {
# Functions that hang
'masked_fill': [torch.bool, torch.uint8, torch.float32], 'where': [torch.bool],
# Functions that hard crash
'nn.functional.kl_div': [torch.int16, torch.int32, torch.int64],
'nn.functional.nll_loss': [torch.float32],
'nn.functional.padreflect': [torch.float32], 'nn.functional.padreplicate': [torch.float32],
'std': [torch.float16],
'stft': [torch.float32], 'var': [torch.float16],
# These were moved from ALLOWLIST to BLOCK as they are not working
# locally
'tile': ['torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'__radd__': ['torch.bool', 'torch.uint8'],
'__rmul__': ['torch.uint8'],
'add': ['torch.bool', 'torch.uint8'],
'square': ['torch.int32', 'torch.int64', 'torch.uint8'],
'addr': ['torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'diag': ['torch.int64'],
'diagflat': ['torch.int64'],
# Functions that are flaky
# These are detected as "ok" by the expect case but actually fail to run sometimes
'H': None,
'T': None,
'as_strided': None,
'broadcast_tensors': None,
'broadcast': None,
'broadcast_to': None,
'diagonal': None,
'divfloor_rounding': None,
'divno_rounding_mode': None,
'divtrunc_rounding': None,
'dsplit': None,
'hsplit': None,
'empty': None,
'expand_as': None,
'expand': None,
'ge': None,
'ne': None,
'le': None,
'lt': None,
'gt': None,
'transpose': None,
'splitlist_args': None,
'select': None,
'reshape': None,
'reshape_as': None,
'permute': None,
'norm': None,
'nn.functional.pixel_unshuffle': None,
'nn.functional.pixel_shuffle': None,
'nn.functional.cross_entropy': None,
'nn.functional.one_hot': None,
'narrow': None,
'movedim': None,
'minreduction_with_dim': None,
'minreduction_no_dim': None,
'minbinary': None,
'meshgridvariadic_tensors': None,
'meshgridlist_of_tensors': None,
'maxreduction_with_dim': None,
'maxreduction_no_dim': None,
'maxbinary': None,
'maximum': None,
'minimum': None,
'mT': None,
'mH': None,
'outer': None,
'softmaxwith_dtype': None,
'rounddecimals_neg_3': None,
'rounddecimals_3': None,
'rounddecimals_0': None,
'normnuc': None,
'nn.functional.softminwith_dtype': None,
'nn.functional.feature_alpha_dropoutwith_train': None,
'log_softmaxdtype': None,
'split_with_sizes': None,
'trapezoid': None,
'eq': None,
'mul': None,
'cartesian_prod': None,
'nonzero': None,
'bool': None,
'inner': None,
'dstack': None,
'take_along_dim': None,
}
# Used for accept mode only
NEW_ALLOW_LIST = defaultdict(list)
@ops(op_db, allowed_dtypes=MPS_DTYPES)
def test_output_match(self, device, dtype, op):
self.assertEqual(device, "cpu")
if not torch.backends.mps.is_available():
self.skipTest("MPS is not available")
key = op.name + op.variant_test_name
if key in self.BLOCKLIST:
if self.BLOCKLIST[key] is None or dtype in self.BLOCKLIST[key]:
self.skipTest(f"Running test with {op.name} hangs so skipping")
# Make this an expecttest manually
# When this env variable is set, generate a new ALLOWLIST_OP
# that reflects the current state of what passes or not
if os.environ.get("EXPECTTEST_ACCEPT", None) == "1":
generate_new_truth = True
else:
generate_new_truth = False
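        # Illustrative regeneration workflow (the exact command line is an
        # assumption), e.g.:
        #   EXPECTTEST_ACCEPT=1 python test/test_mps.py -k test_output_match
        # re-runs every op/dtype combination and dumps the passing ones into
        # new_mps_allowlist.txt (see the end of this test).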
if not generate_new_truth:
if op.name not in self.ALLOWLIST_OP:
self.skipTest(f"{op.name} is not in the allow list for test on MPS")
else:
if str(dtype) not in self.ALLOWLIST_OP[op.name]:
self.skipTest(f"{op.name} is in the allow list for MPS but {dtype} is excluded")
try:
cpu_samples = op.sample_inputs(device, dtype)
for cpu_sample in cpu_samples:
mps_sample = cpu_sample.transform(lambda x: x.to("mps") if isinstance(x, torch.Tensor) else x)
# TODO: This checks only the function variant. We should also check the method and inplace version
# when they exist
cpu_args = [cpu_sample.input] + list(cpu_sample.args)
cpu_kwargs = cpu_sample.kwargs
mps_args = [mps_sample.input] + list(mps_sample.args)
mps_kwargs = mps_sample.kwargs
cpu_out = op(*cpu_args, **cpu_kwargs)
mps_out = op(*mps_args, **mps_kwargs)
self.assertEqual(cpu_out, mps_out)
except Exception as e:
if not generate_new_truth:
raise e
else:
if generate_new_truth:
self.NEW_ALLOW_LIST[op.name].append(str(dtype))
            # We could write it only once, but I don't know how to detect that the current test is the last one,
            # so each test appends to the dict and rewrites the file.
with open("new_mps_allowlist.txt", "w") as f:
pprint.pprint(self.NEW_ALLOW_LIST, stream=f)
# TODO: Actually instantiate that test for the "mps" device to better reflect what it is doing.
# This requires mps to be properly registered in the device generic test framework which is not the
# case right now.
instantiate_device_type_tests(TestConsistency, globals(), only_for="cpu")
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/test_mps.py
|
# Owner(s): ["oncall: jit"]
import torch
from torch.cuda.amp import autocast
from typing import Optional, Tuple
import unittest
from test_jit import JitTestCase
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import run_tests
from torch.testing import FileCheck
from jit.test_models import MnistNet
TEST_BFLOAT16 = TEST_CUDA and torch.cuda.is_bf16_supported()
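# A minimal eager-mode reference sketch (not used by the tests below) for the
# dtype behavior TestAutocast asserts on scripted functions: under CUDA
# autocast, matmul runs in float16 while the reduction stays float32.
def _eager_autocast_reference(a, b):
    with autocast():
        x = torch.mm(a, b)   # expected dtype under autocast: torch.float16
        y = torch.sum(x)     # reductions stay in torch.float32
    return x, y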
class TestAutocast(JitTestCase):
def setUp(self):
# common input tensors
if TEST_CUDA:
self.a_fp16 = torch.rand((2, 2), dtype=torch.float16, device='cuda')
self.b_fp16 = torch.rand((2, 2), dtype=torch.float16, device='cuda')
self.c_fp16 = torch.rand((2, 2), dtype=torch.float16, device='cuda')
self.d_fp16 = torch.rand((2, 2), dtype=torch.float16, device='cuda')
self.a_fp32 = torch.rand((2, 2), dtype=torch.float32, device='cuda')
self.b_fp32 = torch.rand((2, 2), dtype=torch.float32, device='cuda')
self.c_fp32 = torch.rand((2, 2), dtype=torch.float32, device='cuda')
self.d_fp32 = torch.rand((2, 2), dtype=torch.float32, device='cuda')
self.old_value = torch._C._jit_set_autocast_mode(True)
super().setUp()
def tearDown(self):
torch._C._jit_set_autocast_mode(self.old_value)
super().tearDown()
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_minimal(self):
@torch.jit.script
def fn(a, b):
with autocast():
x = torch.mm(a, b)
y = torch.sum(x)
return x, y
x, y = fn(self.a_fp32, self.b_fp32)
self.assertEqual(x.dtype, torch.float16)
self.assertEqual(y.dtype, torch.float32)
@unittest.skipIf(not TEST_CUDA or not TEST_BFLOAT16, "No cuda bfloat16 support")
def test_linear_bf16(self):
@torch.jit.script
def fn(a, b):
with autocast(dtype=torch.bfloat16):
x = torch.mm(a, b)
y = torch.sum(x)
return x, y
x, y = fn(self.a_fp32, self.b_fp32)
self.assertEqual(x.dtype, torch.bfloat16)
self.assertEqual(y.dtype, torch.float32)
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_minimal_cpu(self):
@torch.jit.script
def fn(a, b):
with autocast():
return torch.mm(a, b)
result = fn(self.a_fp32.to('cpu'), self.b_fp32.to('cpu'))
self.assertEqual(result.dtype, torch.float32)
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_minimal_off(self):
@torch.jit.script
def fn(a, b):
with autocast(enabled=False):
return torch.mm(a, b)
result = fn(self.a_fp32, self.b_fp32)
self.assertEqual(result.dtype, torch.float32)
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_runtime_autocast_state(self):
@torch.jit.script
def fn(a, b, use_amp: bool):
with autocast(enabled=use_amp):
return torch.mm(a, b)
# runtime values for autocast enable argument are not supported
with self.assertRaises(RuntimeError):
fn(self.a_fp32, self.b_fp32, True)
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_runtime_autocast_state_expr(self):
@torch.jit.script
def fn(a, b):
with autocast(enabled=True if a[0][0] > 0.5 else False):
return torch.mm(a, b)
# runtime values for autocast enable argument are not supported
with self.assertRaises(RuntimeError):
fn(self.a_fp32, self.b_fp32)
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_explicit_casts(self):
@torch.jit.script
def fn(a, b, c, d):
with autocast():
e = torch.mm(a.double(), b.double()).float()
f = torch.mm(c, d).double()
g = torch.mm(c.double(), f)
return e, f, g
e, f, g = fn(self.a_fp32, self.b_fp32, self.c_fp32, self.d_fp32)
self.assertEqual(e.dtype, torch.float32)
self.assertEqual(f.dtype, torch.float64)
self.assertEqual(g.dtype, torch.float64)
# multiple uses of the same input value
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_duplicate_inputs(self):
@torch.jit.script
def fn(a, b):
with autocast():
e = torch.mm(a, a)
f = torch.mm(e, e)
return e, f
e, f = fn(self.a_fp32, self.b_fp32)
self.assertEqual(e.dtype, torch.float16)
self.assertEqual(f.dtype, torch.float16)
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_fp32_policy(self):
@torch.jit.script
def fn(a):
with autocast(enabled=True):
return torch.log(a)
result = fn(self.a_fp16)
self.assertEqual(result.dtype, torch.float32)
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_fp32_policy_with_fp64(self):
@torch.jit.script
def fn(a):
with autocast(enabled=True):
return torch.log(a)
# fp32 policy should not narrow fp64 to fp32!
result = fn(self.a_fp32.double())
self.assertEqual(result.dtype, torch.float64)
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_promote_policy(self):
@torch.jit.script
def fn(a, b, c, d):
with autocast():
e = torch.mm(a, b)
f = torch.addcmul(e, c, d, value=0.1)
return e, f
e, f = fn(self.a_fp32, self.b_fp32, self.c_fp32, self.d_fp32)
self.assertEqual(e.dtype, torch.float16)
self.assertEqual(f.dtype, torch.float32)
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_promote_policy_fp64(self):
@torch.jit.script
def fn(a, b):
with autocast(enabled=True):
return torch.addcmul(a, a, b, value=0.1)
result = fn(self.a_fp32.double(), self.b_fp32.double())
self.assertEqual(result.dtype, torch.float64)
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_fp32_set_opt_dtype_policy(self):
@torch.jit.script
def fn(a, b, c, d, dtype: Optional[int]):
with autocast(enabled=True):
x = torch.softmax(a, 0)
y = torch.softmax(b, 0, None)
z = torch.softmax(c, 0, torch.float64)
w = torch.softmax(d, 0, dtype)
return x, y, z, w
x, y, z, w = fn(self.a_fp16, self.b_fp16, self.c_fp16, self.d_fp16, None)
self.assertEqual(x.dtype, torch.float32)
self.assertEqual(y.dtype, torch.float32)
self.assertEqual(z.dtype, torch.float64)
self.assertEqual(w.dtype, torch.float16)
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_fp32_set_opt_dtype_policy_fp64(self):
@torch.jit.script
def fn(a, b, c, d, dtype: Optional[int]):
with autocast(enabled=True):
x = torch.softmax(a, 0)
y = torch.softmax(b, 0, None)
z = torch.softmax(c, 0, torch.float64)
w = torch.softmax(d, 0, dtype)
return x, y, z, w
x, y, z, w = fn(self.a_fp32.double(), self.b_fp32.double(), self.c_fp32.double(), self.d_fp32.double(), None)
self.assertEqual(x.dtype, torch.float64)
self.assertEqual(y.dtype, torch.float64)
self.assertEqual(z.dtype, torch.float64)
self.assertEqual(w.dtype, torch.float64)
@unittest.skipIf(True, "broken due to lack of type propagation")
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_control_flow(self):
@torch.jit.script
def fn(a, b, c, d):
with autocast():
if a[0][0] > 0.5:
e = torch.mm(a, b)
x = 1
else:
e = torch.mm(c, d)
x = 2
f = torch.mm(d, e) * x
return e, f
e, f = fn(self.a_fp32, self.b_fp32, self.c_fp32, self.d_fp32)
self.assertEqual(e.dtype, torch.float16)
self.assertEqual(f.dtype, torch.float16)
    # this works fine in regular Python, but it creates a delicate
# situation in TorchScript where the types are not consistent across
# the then/else branches
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_divergent_types(self):
@torch.jit.script
def fn(a, b, c, d):
with autocast():
if a[0][0] > 0.5:
e = torch.mm(a, b)
f = torch.mm(a, b).float()
else:
e = torch.mm(c, d).float()
f = torch.mm(a, b)
return torch.mm(e.float(), f.float())
result = fn(self.a_fp32, self.b_fp32, self.c_fp32, self.d_fp32)
self.assertEqual(result.dtype, torch.float32)
# another, more complex case of divergent types
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_divergent_autocast(self):
@torch.jit.script
def fn(a, b, c, d):
autocast_on = autocast(enabled=True)
autocast_off = autocast(enabled=False)
if a[0][0] > 0.5:
with autocast_on:
e = torch.mm(a, b)
else:
with autocast_off:
e = torch.mm(c, d)
return torch.mm(e, e)
fn(self.a_fp32, self.b_fp32, self.c_fp32, self.d_fp32)
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_conditional_autocast(self):
@torch.jit.script
def fn(a, b):
autocast_on = autocast(enabled=True)
autocast_off = autocast(enabled=False)
with autocast_on if a[0][0] > 0.5 else autocast_off:
return torch.mm(a, b)
# conditional autocast expressions are not supported
with self.assertRaises(RuntimeError):
fn(self.a_fp32, self.b_fp32)
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_nested_autocast(self):
@torch.jit.script
def fn(a, b, c, d):
with autocast(enabled=False):
e = torch.mm(a, b)
with autocast(enabled=True):
f = torch.mm(e, c)
with autocast(enabled=False):
g = torch.mm(e, d)
return e, f, g
e, f, g = fn(self.a_fp32, self.b_fp32, self.c_fp32, self.d_fp32)
self.assertEqual(e.dtype, torch.float32)
self.assertEqual(f.dtype, torch.float16)
self.assertEqual(g.dtype, torch.float32)
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_implicitly_nested_autocast(self):
@torch.jit.script
def fn(a, b):
with autocast(enabled=False), autocast(enabled=True):
return torch.mm(a, b)
result = fn(self.a_fp32, self.b_fp32)
self.assertEqual(result.dtype, torch.float16)
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_reused_autocast(self):
@torch.jit.script
def fn(a, b, c, d):
autocast_instance = autocast(enabled=True)
with autocast_instance:
e = torch.mm(a, b)
with autocast_instance:
e = torch.mm(c, d)
f = torch.mm(d, e)
g = torch.mm(e, f)
return e, f, g
e, f, g = fn(self.a_fp32, self.b_fp32, self.c_fp32, self.d_fp32)
self.assertEqual(e.dtype, torch.float16)
self.assertEqual(f.dtype, torch.float16)
self.assertEqual(g.dtype, torch.float16)
# TODO: fix and enable this test?
# (we could technically fix this, but is it really worth it?)
@unittest.skipIf(True, "unsuported autocast syntax")
def test_reused_autocast_expr(self):
@torch.jit.script
def fn(a, b, c, d):
with autocast(enabled=True) as autocast_instance:
e = torch.mm(a, b)
with autocast_instance:
e = torch.mm(c, d)
f = torch.mm(d, e)
g = torch.mm(e, f)
return e, f, g
e, f, g = fn(self.a_fp32, self.b_fp32, self.c_fp32, self.d_fp32)
self.assertEqual(e.dtype, torch.float16)
self.assertEqual(f.dtype, torch.float16)
self.assertEqual(g.dtype, torch.float16)
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_callees(self):
def helper(a, b):
return torch.mm(a, b)
@torch.jit.script
def fn(a, b):
with autocast(enabled=True):
tmp = helper(a, b)
tmp = helper(tmp, tmp)
tmp = helper(tmp, tmp)
tmp = helper(tmp, tmp)
return helper(tmp, b)
result = fn(self.a_fp32, self.b_fp32)
self.assertEqual(result.dtype, torch.float16)
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_callees_with_autocast_on(self):
def helper(a, b):
with autocast(enabled=True):
return torch.mm(a, b)
@torch.jit.script
def fn(a, b):
with autocast(enabled=False):
return helper(a, b)
result = fn(self.a_fp32, self.b_fp32)
self.assertEqual(result.dtype, torch.float16)
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_callees_with_autocast_off(self):
def helper(a, b):
with autocast(enabled=False):
return torch.mm(a, b)
@torch.jit.script
def fn(a, b):
with autocast(enabled=True):
return helper(a, b)
result = fn(self.a_fp32, self.b_fp32)
self.assertEqual(result.dtype, torch.float32)
# scripting inside eager autocast
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_eager_and_script(self):
@torch.jit.script
def fn(a, b):
return torch.mm(a, b)
for i in range(8):
use_autocast = (i % 2 == 0)
expected_dtype = torch.float16 if use_autocast else torch.float32
with autocast(enabled=use_autocast):
result = fn(self.a_fp32, self.b_fp32)
self.assertEqual(result.dtype, expected_dtype)
# traced inside scripting
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_script_and_tracing(self):
def helper(a, b):
return torch.mm(a, b)
traced = torch.jit.trace(helper, (self.a_fp32, self.a_fp32))
@torch.jit.script
def fn(a, b):
with autocast(enabled=True):
return traced(a, b)
result = fn(self.a_fp32, self.b_fp32)
self.assertEqual(result.dtype, torch.float16)
# traced with autocast inside scripting
@unittest.skipIf(True, "autocast(False) is ignored inside traced functions")
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_script_and_tracing_with_autocast(self):
def helper(a, b):
with autocast(enabled=False):
return torch.mm(a, b) * 2.0
traced = torch.jit.trace(helper, (self.a_fp32, self.a_fp32))
@torch.jit.script
def fn(a, b):
with autocast(enabled=True):
return traced(a, b)
result = fn(self.a_fp32, self.b_fp32)
self.assertEqual(result.dtype, torch.float32)
# scripted called from traced
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_tracing_and_script(self):
@torch.jit.script
def fn(a, b):
with autocast():
return torch.mm(a, b)
def traced(a, b):
return fn(a, b)
traced = torch.jit.trace(traced, (self.a_fp32, self.b_fp32))
result = traced(self.a_fp32, self.b_fp32)
self.assertEqual(result.dtype, torch.float16)
# scripted called from traced with autocast
@unittest.skipIf(True, "scripted called from traced TorchScript is not yet working")
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_tracing_with_autocast_and_script(self):
@torch.jit.script
def fn(a, b):
return torch.mm(a, b)
def traced(a, b):
with autocast(enabled=True):
return fn(a, b)
traced = torch.jit.trace(traced, (self.a_fp32, self.b_fp32))
result = traced(self.a_fp32, self.b_fp32)
self.assertEqual(result.dtype, torch.float16)
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_script_module(self):
class TestModule(torch.nn.Module):
def __init__(self, N, M):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand((N, M), dtype=torch.float32))
self.linear = torch.nn.Linear(N, M).float()
def forward(self, input):
with autocast(enabled=True):
output = self.weight.mv(input)
output = self.linear(output)
return output
scripted_module = torch.jit.script(TestModule(2, 3)).cuda()
input = torch.rand(3, dtype=torch.float32, device='cuda')
result = scripted_module(input)
self.assertEqual(result.dtype, torch.float16)
@unittest.skipIf(True, "autocast decorators not supported")
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_autocast_decorator(self):
@torch.jit.script
@autocast(enabled=True)
def fn(a, b):
return torch.mm(a, b)
result = fn(self.a_fp32, self.b_fp32)
self.assertEqual(result.dtype, torch.float16)
    # this is equivalent to running scripted functions inside autocast
# (see also test_eager_and_script)
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_autocast_decorator_outside_jit(self):
@autocast(enabled=True)
@torch.jit.script
def fn(a, b):
return torch.mm(a, b)
result = fn(self.a_fp32, self.b_fp32)
self.assertEqual(result.dtype, torch.float16)
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_inplace(self):
@torch.jit.script
def fn(a, b, c):
with autocast(enabled=True):
x = torch.addmm(a, b, c)
y = torch.addmm(a, b, c, out=a)
z = a.addmm_(b, c)
return x, y, z
x, y, z = fn(self.a_fp32, self.b_fp32, self.c_fp32)
self.assertEqual(x.dtype, torch.float16)
self.assertEqual(y.dtype, torch.float32)
self.assertEqual(z.dtype, torch.float32)
def _test_autocast(self, func, cast_op, *args):
jit_func = torch.jit.script(func)
o = func(*args)
jit_o = jit_func(*args)
if cast_op is not None:
FileCheck().check(cast_op).run(jit_func.graph_for(*args))
for o0, o1 in zip(o, jit_o):
self.assertEqual(o0.dtype, o1.dtype)
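    # Passing cast_op=None skips the FileCheck graph scan and only compares the
    # output dtypes between eager and scripted execution (used below for the
    # case that runs outside any autocast context).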
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_autocast_api(self):
def t_autocast_cpu(x, y):
with torch.autocast("cpu", dtype=torch.bfloat16):
return torch.mm(x, y)
def t_autocast_cuda(x, y):
with torch.autocast("cuda", dtype=torch.half):
return torch.mm(x, y)
def t_cuda_amp_autocast(x, y):
with torch.cuda.amp.autocast():
return torch.mm(x, y)
def t_cpu_amp_autocast(x, y):
with torch.cpu.amp.autocast():
return torch.mm(x, y)
x = torch.randn(5, 5, device="cuda", dtype=torch.float32)
y = torch.randn(5, 5, device="cuda", dtype=torch.float32)
self._test_autocast(t_autocast_cpu, "aten::_autocast_to_reduced_precision", x, y)
self._test_autocast(t_autocast_cuda, "aten::_autocast_to_reduced_precision", x, y)
self._test_autocast(t_cuda_amp_autocast, "aten::_autocast_to_reduced_precision", x, y)
self._test_autocast(t_cpu_amp_autocast, "aten::_autocast_to_reduced_precision", x, y)
@unittest.skipIf(True, "we need to provide dtype argument at this moment")
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_autocast_api_not_supported(self):
def t_autocast_cpu(x, y):
# no dtype provided is not currently supported
with torch.autocast("cpu"):
return torch.mm(x, y)
def t_autocast_cuda(x, y):
# no dtype provided is not currently supported
with torch.autocast("cuda"):
return torch.mm(x, y)
x = torch.randn(5, 5, device="cuda", dtype=torch.float32)
y = torch.randn(5, 5, device="cuda", dtype=torch.float32)
self._test_autocast(t_autocast_cpu, "aten::_autocast_to_reduced_precision", x, y)
self._test_autocast(t_autocast_cuda, "aten::_autocast_to_reduced_precision", x, y)
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_autocast_mixed_dtypes(self):
def t(cpu0, cpu1, cuda0, cuda1):
with torch.autocast("cpu", torch.bfloat16):
with torch.autocast("cuda", torch.float16):
cpu_o = torch.mm(cpu0, cpu1)
cuda_o = torch.mm(cuda0, cuda1)
return cpu_o, cuda_o
jit_t = torch.jit.script(t)
cpu0 = torch.randn(5, 5, device="cpu", dtype=torch.float32)
cpu1 = torch.randn(5, 5, device="cpu", dtype=torch.float32)
cuda0 = torch.randn(5, 5, device="cuda", dtype=torch.float32)
cuda1 = torch.randn(5, 5, device="cuda", dtype=torch.float32)
self._test_autocast(t, "aten::_autocast_to_reduced_precision", cpu0, cpu1, cuda0, cuda1)
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_jit_executor_under_autocast(self):
def t(cpu0, cpu1, cuda0, cuda1):
cpu_o = torch.mm(cpu0, cpu1)
cuda_o = torch.mm(cuda0, cuda1)
return cpu_o, cuda_o
jit_t = torch.jit.script(t)
cpu0 = torch.randn(5, 5, device="cpu", dtype=torch.float32)
cpu1 = torch.randn(5, 5, device="cpu", dtype=torch.float32)
cuda0 = torch.randn(5, 5, device="cuda", dtype=torch.float32)
cuda1 = torch.randn(5, 5, device="cuda", dtype=torch.float32)
with torch.autocast("cpu", torch.bfloat16):
with torch.autocast("cuda", torch.float16):
self._test_autocast(t, "aten::_autocast_to_reduced_precision", cpu0, cpu1, cuda0, cuda1)
with torch.autocast("cpu", torch.bfloat16):
self._test_autocast(t, "aten::_autocast_to_reduced_precision", cpu0, cpu1, cuda0, cuda1)
with torch.autocast("cuda", torch.float16):
self._test_autocast(t, "aten::_autocast_to_reduced_precision", cpu0, cpu1, cuda0, cuda1)
# no cast op should be observed when executing outside autocast context
self._test_autocast(t, None, cpu0, cpu1, cuda0, cuda1)
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_autocast_autodiff(self):
def t(t0, t1):
o = torch.mm(t0, t1)
return o.relu()
jit_t = torch.jit.script(t)
t0 = torch.randn(5, 5, device="cuda", dtype=torch.float32).requires_grad_()
t1 = torch.randn(5, 5, device="cuda", dtype=torch.float32).requires_grad_()
# run optimization
for i in range(5):
with torch.autocast("cuda", torch.float16):
jit_o = jit_t(t0, t1)
jit_o.sum().backward()
t0.grad = None
t1.grad = None
ref_t0 = t0.detach().requires_grad_()
ref_t1 = t1.detach().requires_grad_()
with torch.autocast("cuda", torch.float16):
o = t(ref_t0, ref_t1)
jit_o = jit_t(t0, t1)
jit_o.sum().backward()
o.sum().backward()
self.assertEqual(o, jit_o)
self.assertEqual(t0.grad, ref_t0.grad)
self.assertEqual(t1.grad, ref_t1.grad)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(t0.grad.dtype, ref_t0.grad.dtype)
self.assertEqual(t1.grad.dtype, ref_t1.grad.dtype)
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_jit_call_method_under_autocast(self):
@torch.jit.interface
class Iface(torch.nn.Module):
def forward(self, x, y) -> torch.Tensor:
pass
class Impl(Iface):
def forward(self, x, y):
return torch.mm(x, y)
class Thing1(torch.nn.Module):
impl: Iface
def forward(self, x, y):
with torch.cuda.amp.autocast():
a = torch.mm(x, y)
b = self.impl.forward(a, x)
return b
scripted_impl = torch.jit.script(Impl())
thing1 = Thing1()
thing1.impl = scripted_impl
scripted_thing1 = torch.jit.script(thing1)
x = torch.rand([2, 2])
y = torch.rand([2, 2])
# make sure this doesn't throw an error
with torch.cuda.amp.autocast():
ans = scripted_thing1.forward(x, y)
self.assertEqual(torch.mm(torch.mm(x, y), x), ans)
# sanity check: this isn't supported currently when global autocasting
# isn't enabled
self.assertRaises(RuntimeError, lambda: scripted_thing1.forward(x, y))
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_jit_freeze_autocast_basic(self):
class TestModule(torch.nn.Module):
def __init__(self):
super(TestModule, self).__init__()
def forward(self, x, y):
with torch.cuda.amp.autocast():
return torch.mm(x, y)
x = torch.rand((3, 4), dtype=torch.float).cuda()
y = torch.rand((4, 5), dtype=torch.float).cuda()
mod = TestModule().eval()
# sanity check
self._test_autocast(mod, "aten::_autocast_to_reduced_precision", x, y)
frozen_mod = torch.jit.freeze(torch.jit.script(mod).eval())
FileCheck().check_count("aten::_autocast_to_reduced_precision", 2, True).run(frozen_mod.graph)
# make sure that the runtime pass doesn't duplicate autocast nodes
frozen_mod(x, y)
optimized_graph = frozen_mod.graph_for(x, y)
FileCheck().check_count("aten::_autocast_to_reduced_precision", 2, True).run(optimized_graph)
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_jit_freeze_autocast_constants(self):
class TestModule(torch.nn.Module):
def __init__(self):
super(TestModule, self).__init__()
self.x = torch.rand((3, 4), dtype=torch.float).cuda()
def forward(self, y):
with torch.cuda.amp.autocast():
return torch.mm(self.x, y)
y = torch.rand((4, 5), dtype=torch.float).cuda()
mod = TestModule().eval()
frozen_mod = torch.jit.freeze(torch.jit.script(mod).eval())
# freezing should pre-cast the constant self.x to remove one autocast call
FileCheck().check_count("aten::_autocast_to_reduced_precision", 1, True).run(frozen_mod.graph)
# the runtime autocasting pass will re-insert the second autocast call,
# but constant propagation will merge it with the constant that it's casting.
frozen_mod(y)
optimized_graph = frozen_mod.graph_for(y)
FileCheck().check_count("aten::_autocast_to_reduced_precision", 1, True).run(optimized_graph)
@unittest.skipIf(TEST_CUDA, "CPU-only test")
def test_jit_autocast_softmax_cpu(self):
def fn(x):
with torch.cpu.amp.autocast():
return torch.nn.functional.softmax(x, dim=0)
fn_s = torch.jit.script(fn)
x = torch.rand((2, 2), dtype=torch.bfloat16)
fn_s(x)
y = fn_s(x)
self.assertTrue(y.dtype == torch.bfloat16)
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_jit_autocast_softmax_gpu(self):
def fn(x):
with torch.cuda.amp.autocast():
return torch.nn.functional.softmax(x, dim=0)
fn_s = torch.jit.script(fn)
x = torch.rand((2, 2), dtype=torch.half).cuda()
fn_s(x)
y = fn_s(x)
self.assertTrue(y.dtype == torch.float)
def test_ignore_amp(self):
@torch.jit.script
def foo(x):
return torch.mm(x, x)
inp = torch.rand([10, 10], dtype=torch.float)
foo._set_ignore_amp(True)
with torch.cpu.amp.autocast():
foo(inp)
foo(inp)
g = torch.jit.last_executed_optimized_graph()
FileCheck().check_not("_autocast_to_reduced").run(g)
class convbn(torch.nn.Module):
def __init__(self, bias_enabled=True):
super(convbn, self).__init__()
self.conv = torch.nn.Conv2d(3, 64, 7, stride=2, bias=bias_enabled)
self.bn = torch.nn.BatchNorm2d(64)
def forward(self, x):
return self.bn(self.conv(x))
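# Minimal sketch (illustration only) of the trace-and-freeze flow exercised by
# TestJitTraceAutocast below; cache_enabled=False disables autocast's weight
# cast cache while tracing, matching the tests that follow.
def _trace_under_cpu_autocast(model, example_input):
    model.eval()
    with torch.cpu.amp.autocast(cache_enabled=False), torch.no_grad():
        traced_model = torch.jit.trace(model, example_input)
        traced_model = torch.jit.freeze(traced_model)
    return traced_model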
class TestJitTraceAutocast(JitTestCase):
def setUp(self):
super(TestJitTraceAutocast, self).setUp()
self.previous_default_dtype = torch.get_default_dtype()
torch.set_default_dtype(torch.float32)
self.models = [MnistNet(),
convbn(bias_enabled=True),
convbn(bias_enabled=False)]
self.inputs = [torch.randn(5, 1, 28, 28, device='cpu'),
torch.randn(32, 3, 224, 224, device='cpu'),
torch.randn(32, 3, 224, 224, device='cpu')]
self.previous_jit_autocast_pass = torch._C._jit_set_autocast_mode(False)
def tearDown(self):
torch._C._jit_set_autocast_mode(self.previous_jit_autocast_pass)
torch.set_default_dtype(self.previous_default_dtype)
super(TestJitTraceAutocast, self).tearDown()
def test_generate_autocast_jit_trace_model(self):
def test_generate_autocast_jit_trace_model(model, x):
model.eval()
with torch.cpu.amp.autocast(cache_enabled=False), torch.no_grad():
traced_model = torch.jit.trace(model, x)
traced_model = torch.jit.freeze(traced_model)
for i in range(self.models.__len__()):
test_generate_autocast_jit_trace_model(self.models[i], self.inputs[i])
def test_nchw_autocast_jit_trace_model(self):
def test_nchw_autocast_jit_trace_model(model, x):
model.eval()
with torch.cpu.amp.autocast(cache_enabled=False), torch.no_grad():
traced_model = torch.jit.trace(model, x)
traced_model = torch.jit.freeze(traced_model)
with torch.no_grad():
y = traced_model(x.clone())
with torch.cpu.amp.autocast(), torch.no_grad():
y2 = model(x.clone())
torch.testing.assert_allclose(y.double(), y2.double(), rtol=1e-03, atol=1e-03)
for i in range(self.models.__len__()):
test_nchw_autocast_jit_trace_model(self.models[i], self.inputs[i])
def test_nhwc_autocast_jit_trace_model(self):
def test_nhwc_autocast_jit_trace_model(model, x):
model = model.to(memory_format=torch.channels_last)
model.eval()
with torch.cpu.amp.autocast(cache_enabled=False), torch.no_grad():
traced_model = torch.jit.trace(model, x.to(memory_format=torch.channels_last))
traced_model = torch.jit.freeze(traced_model)
with torch.no_grad():
y = traced_model(x.clone().to(memory_format=torch.channels_last))
with torch.cpu.amp.autocast(), torch.no_grad():
y2 = model(x.clone().to(memory_format=torch.channels_last))
torch.testing.assert_allclose(y.double(), y2.double(), rtol=1e-03, atol=1e-03)
for i in range(self.models.__len__()):
if self.inputs[i].size().__len__() == 5:
                # NHWC 3D case is not supported yet
continue
test_nhwc_autocast_jit_trace_model(self.models[i], self.inputs[i])
def test_script_autocast_cpu(self):
def fn(x):
if torch.is_autocast_cpu_enabled():
return x.relu()
else:
return x.sin()
fn_s = torch.jit.script(fn)
x = torch.rand((4, 4)) - 0.5
with torch.cpu.amp.autocast():
self.assertEqual(fn_s(x), fn(x))
with torch.cpu.amp.autocast(enabled=True):
self.assertEqual(fn_s(x), fn(x))
self.assertTrue(any(["is_autocast_cpu_enabled" in x.kind() for x in fn_s.graph.nodes()]))
@unittest.skipIf(not TEST_CUDA, "No cuda")
def test_script_autocast_cuda(self):
def fn(x):
if torch.is_autocast_enabled():
return x.relu()
else:
return x.sin()
fn_s = torch.jit.script(fn)
x = torch.rand((4, 4)) - 0.5
with torch.cpu.amp.autocast():
self.assertEqual(fn_s(x), fn(x))
with torch.cuda.amp.autocast(enabled=True):
self.assertEqual(fn_s(x), fn(x))
self.assertTrue(any(["is_autocast_enabled" in x.kind() for x in fn_s.graph.nodes()]))
def test_scripted_aliasing(self):
# torch.is_autocast_enabled should not be able to move inside of the autocast context.
def fn(x):
if torch.is_autocast_enabled():
y = True
else:
y = False
with torch.cuda.amp.autocast(enabled=True):
z = x.relu()
return y, z
fn_s = torch.jit.script(fn)
graph = fn_s.graph
aliasdb = graph.alias_db()
is_enabled_nodes = graph.findAllNodes("aten::is_autocast_enabled")
enter_nodes = graph.findAllNodes("prim::Enter")
self.assertEqual(len(is_enabled_nodes), 1)
self.assertEqual(len(enter_nodes), 1)
self.assertFalse(aliasdb.move_after_topologically_valid(is_enabled_nodes[0], enter_nodes[0]))
def test_script_autocast_enable_and_check(self):
def fn(x, y) -> Tuple[torch.Tensor, bool, torch.Tensor, bool, torch.Tensor, bool]:
b1 = torch.is_autocast_cpu_enabled()
v1 = torch.mm(x, y)
with torch.cpu.amp.autocast(enabled=True):
b2 = torch.is_autocast_cpu_enabled()
v2 = torch.mm(x, y)
with torch.cpu.amp.autocast(enabled=False):
b3 = torch.is_autocast_cpu_enabled()
v3 = torch.mm(x, y)
return (v1, b1, v2, b2, v3, b3)
# bx = is_autocast_cpu_enabled() result should be False iff (vx = mm(x, y)).dtype is float
def check_fn_results(arr):
[v1, b1, v2, b2, v3, b3] = arr
self.assertTrue((v1.dtype == torch.float) != b1)
self.assertTrue((v2.dtype == torch.float) != b2)
self.assertTrue((v3.dtype == torch.float) != b3)
x = torch.rand((2, 2), dtype=torch.float)
y = torch.rand((2, 2), dtype=torch.float)
fn_s = torch.jit.script(fn)
with torch.cpu.amp.autocast(enabled=False):
check_fn_results(fn(x, y))
check_fn_results(fn_s(x, y))
with torch.cpu.amp.autocast(enabled=True):
check_fn_results(fn(x, y))
check_fn_results(fn_s(x, y))
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/test_jit_autocast.py
|
pytorch-master
|
test/delete.py
|
|
# Owner(s): ["module: ci"]
import os
import run_test
from torch.testing._internal.common_utils import TestCase, run_tests
class DummyOptions(object):
verbose = False
class DeterminationTest(TestCase):
# Test determination on a subset of tests
TESTS = [
"test_nn",
"test_jit_profiling",
"test_jit",
"test_torch",
"test_cpp_extensions_aot_ninja",
"test_cpp_extensions_aot_no_ninja",
"test_utils",
"test_determination",
"test_quantization",
]
@classmethod
def determined_tests(cls, changed_files):
changed_files = [os.path.normpath(path) for path in changed_files]
return [
test
for test in cls.TESTS
if run_test.should_run_test(run_test.TARGET_DET_LIST, test, changed_files, DummyOptions())
]
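    # For example, determined_tests(["test/run_test.py"]) narrows TESTS down to
    # just ["test_determination"]; see test_run_test below.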
def test_target_det_list_is_sorted(self):
# We keep TARGET_DET_LIST sorted to minimize merge conflicts
# but most importantly to allow us to comment on the absence
# of a test. It would be very difficult to add a file right
# next to a comment that says to keep it out of the list.
self.assertListEqual(run_test.TARGET_DET_LIST, sorted(run_test.TARGET_DET_LIST))
def test_config_change_only(self):
"""CI configs trigger all tests"""
self.assertEqual(
self.determined_tests([".jenkins/pytorch/test.sh"]), self.TESTS
)
def test_run_test(self):
"""run_test.py is imported by determination tests"""
self.assertEqual(
self.determined_tests(["test/run_test.py"]), ["test_determination"]
)
def test_non_code_change(self):
"""Non-code changes don't trigger any tests"""
self.assertEqual(
self.determined_tests(["CODEOWNERS", "README.md", "docs/doc.md"]), []
)
def test_cpp_file(self):
"""CPP files trigger all tests"""
self.assertEqual(
self.determined_tests(["aten/src/ATen/native/cpu/Activation.cpp"]),
self.TESTS,
)
def test_test_file(self):
"""Test files trigger themselves and dependent tests"""
self.assertEqual(
self.determined_tests(["test/test_jit.py"]), ["test_jit_profiling", "test_jit"]
)
self.assertEqual(
self.determined_tests(["test/jit/test_custom_operators.py"]),
["test_jit_profiling", "test_jit"],
)
self.assertEqual(
self.determined_tests(["test/quantization/eager/test_quantize_eager_ptq.py"]),
["test_quantization"],
)
def test_test_internal_file(self):
"""testing/_internal files trigger dependent tests"""
self.assertEqual(
self.determined_tests(["torch/testing/_internal/common_quantization.py"]),
[
"test_jit_profiling",
"test_jit",
"test_quantization",
],
)
def test_torch_file(self):
"""Torch files trigger dependent tests"""
self.assertEqual(
# Many files are force-imported to all tests,
# due to the layout of the project.
self.determined_tests(["torch/onnx/utils.py"]),
self.TESTS,
)
self.assertEqual(
self.determined_tests(
[
"torch/autograd/_functions/utils.py",
"torch/autograd/_functions/utils.pyi",
]
),
["test_utils"],
)
self.assertEqual(
self.determined_tests(["torch/utils/cpp_extension.py"]),
[
"test_cpp_extensions_aot_ninja",
"test_cpp_extensions_aot_no_ninja",
"test_utils",
"test_determination",
],
)
def test_caffe2_file(self):
"""Caffe2 files trigger dependent tests"""
self.assertEqual(self.determined_tests(["caffe2/python/brew_test.py"]), [])
self.assertEqual(
self.determined_tests(["caffe2/python/context.py"]), self.TESTS
)
def test_new_folder(self):
"""New top-level Python folder triggers all tests"""
self.assertEqual(self.determined_tests(["new_module/file.py"]), self.TESTS)
def test_new_test_script(self):
"""New test script triggers nothing (since it's not in run_tests.py)"""
self.assertEqual(self.determined_tests(["test/test_new_test_script.py"]), [])
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/test_determination.py
|
# Owner(s): ["module: dispatch"]
import torch._C as C
from torch.testing._internal.common_utils import TestCase, run_tests
from torch._python_dispatcher import PythonDispatcher
from collections import namedtuple
import itertools
import os
import re
import torch.utils.cpp_extension
# TODO: Expand the dispatcher API to be a generic API for interfacing with
# the dispatcher from Python!
#
# These are exhaustive tests for commutativity of dispatch behavior. If you're
# looking for more usage-info style tests, check op_registration_test.cpp
#
# Things not tested here:
# - Listeners
# - Top level namespace registrations
# - Fallback
# - Exotic overloads of CppFunction/schema
#
# Things not directly tested here:
# - Internal state of Dispatcher makes sense. This is indirectly
# tested by the invariant testing
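# A small interactive sketch (not used by the tests in this file) of the
# PythonDispatcher helper imported above: register kernels for a few dispatch
# keys and return the computed dispatch table, the same kind of table the
# tests below dump via C._dispatch_dump_table. Method names follow
# torch._python_dispatcher.
def _python_dispatcher_sketch():
    dispatcher = PythonDispatcher()
    dispatcher.register(["CPU", "XLA", "CompositeImplicitAutograd"])
    return dispatcher.dispatchTable()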
Result = namedtuple('Result', 'state table provenance')
dispatch_keys_to_check = (
'Undefined',
'CPU',
'CUDA',
'XLA',
'AutogradOther',
'AutogradCPU',
'AutogradCUDA',
'AutogradXLA')
def extract_dispatch_table_with_keys(table, dispatch_keys):
extracted = ''
table_entries = table.split('\n')
regex = re.compile(r"registered at .*FallbackKernel\.cpp.*(\[)")
for k in dispatch_keys:
for t in table_entries:
if t.startswith(k):
# mask out file:line info for in-tree backend fallback
entry = regex.sub('registered in pytorch framework [', t)
extracted += (entry + '\n')
return extracted
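# Example (illustrative): given the full dump produced by C._dispatch_dump_table,
# extract_dispatch_table_with_keys(table, ('CPU', 'XLA')) keeps only the lines
# starting with 'CPU' and 'XLA', rewriting any in-tree backend fallback's
# file:line provenance to a stable string so expected outputs don't depend on
# source locations.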
class TestDispatch(TestCase):
namespace_index = 0
def test_all_invariants(self):
# Check that the regular stuff is OK!
C._dispatch_check_all_invariants()
# You probably don't want to call this directly; if your constructors
# don't commute, you can still run commute with a fixed ctor_order
# so that you can test that the destructors still commute
def run_ops(self, name, ops, ctor_order=None, dtor_order=None,
results=None, expect_raises=False):
"""
Given a list of operator registrations, run the registrations in the
order specified by ctor_order, and then run the deregistrations in
dtor_order.
If results is specified, intermediate results are checked for consistency
with results stored in results (and stored in results if this is the
first time we've seen them). Results are expected to be equivalent
modulo commutativity and inverses (thus, results is keyed on a frozenset
        of the in-effect registrations from ops). Results stores the namedtuple
        Result[state, table, provenance], where state is a string that contains the
        non-derived kernels registered, or an error message if registration fails;
table is a string that contains computed dispatch table entries;
provenance is a string that describes how exactly we got this string.
If expect_raises is True, it is not an error to raise an exception. Instead,
we'll store the exception string (instead of the dispatcher state)
in results. In principle we should flag these differently, but it's
very obvious when you get an error in one case but not another.
"""
# By allocating every test into a fresh namespace, this makes it less
# likely that a bug in the testing framework will result in tests
# interfering with each other
self.__class__.namespace_index += 1
if results is None:
results = {}
if ctor_order is None:
ctor_order = list(range(len(ops)))
if dtor_order is None:
dtor_order = list(reversed(ctor_order))
# Refs which retain the c10::Module object so we can explicitly control
# when each deregistration happens (deregistration occurs when the
# object gets deallocated).
refs = [None] * len(ops)
        # Keep track of the set of "in effect" registrations
active_ops = set()
# double underscore to make it less likely we conflict with something
# else
test_namespace = "__test{}__".format(self.namespace_index)
def check_invariants(actual_provenance):
C._dispatch_check_invariants(name)
# Normalize the test namespace so that expected outputs are stable
actual_state = C._dispatch_dump(
"{}::{}".format(test_namespace, name)).replace(test_namespace, "test")
actual_table = C._dispatch_dump_table(
"{}::{}".format(test_namespace, name)).replace(test_namespace, "test")
expected_state, expected_table, expected_provenance = results.setdefault(
frozenset(active_ops),
Result(actual_state, actual_table, actual_provenance)
)
self.assertMultiLineEqual(
expected_state, actual_state,
"expected from {}; actual from {}"
.format(expected_provenance, actual_provenance)
)
self.assertMultiLineEqual(
expected_table, actual_table,
"expected from {}; actual from {}"
.format(expected_provenance, actual_provenance)
)
results.setdefault(frozenset(), Result("", "", "hardcoded initial state"))
check_invariants("initial state")
# In the order specified by ctor_order, run registrations
set_to_report = frozenset(range(len(ops)))
for i, op_ix in enumerate(ctor_order):
# It would be better to DEF here, but because we manage
# lifetime of multiple registrations with multiple Library
# references (refs), we can't deal with the strict checking
# from DEF.
refs[op_ix] = C._dispatch_library("FRAGMENT", test_namespace, "")
active_ops.add(op_ix)
try:
ops[op_ix](refs[op_ix])
check_invariants("running ctors {}".format(ctor_order[:i + 1]))
except RuntimeError as e:
if not expect_raises:
raise
actual = str(e).replace(test_namespace, "test")
actual = actual.split("\nException raised from ")[0]
expected, _, expected_provenance = results.setdefault(
frozenset(active_ops),
Result(actual, "", "error after running ctors {}".format(ctor_order[:i + 1]))
)
self.assertMultiLineEqual(expected, actual, expected_provenance)
set_to_report = frozenset(active_ops)
active_ops.remove(op_ix)
                # NB: this final check asserts that if a registration fails,
# the dispatcher is left in the same state *that it was before*!
check_invariants(
"running ctors {} and then failing to run ctor {} "
"(did this failure leave the dispatcher in a wedged state? "
"it shouldn't!)"
.format(ctor_order[:i], op_ix))
break
last_ctor = i
if expect_raises and len(active_ops) == len(ops):
# Destroy references first, as some test frameworks (like pytest)
# will retain references in the exception raised by assertTrue! EW!
refs = None
self.assertTrue(
False,
"expected exception to be raised, but nothing was raised "
"(after running ctors {})".format(ctor_order))
# In the order specified by dtor_order, run deregistrations
for i, op_ix in enumerate(dtor_order):
# Trigger a destruction
refs[op_ix] = None
            # discard, not remove, since we may not have actually deregistered
# anything if there was an error raised
if expect_raises:
active_ops.discard(op_ix)
else:
active_ops.remove(op_ix)
check_invariants(
"running ctors {}, then running dtors {}"
.format(ctor_order[:last_ctor + 1], dtor_order[:i + 1])
)
return results[set_to_report][0]
# Operator registrations are commutative (as static initializers can
# run in any order) and invertible (by deregistration). (Subject
    # to some caveats: some legacy behaviors in the system are not commutative --
# we want to get rid of these!)
#
# So while in principle we could simply test a set of operations
# by just running them one by one in the order specified by the user,
# we can get more assurance about these extra properties by doing
# more work:
#
# 1. Don't run the registrations once in a fixed order: run every possible
# permutation. Similarly, run every permutation of deregistration order.
#
# 2. Don't just check the end state of the dispatcher: for every
# subset of operator registrations, ensure that the computed
# intermediate state is path independent. One thing to note:
# in this function, we assume each operation is unique. In general,
# there may be duplicated registrations, but these are usually
# idempotent or legacy. We test for behavior here separately.
#
# NB: checking all permutations means this function is exponential in
# the length of ops! So don't pass too many ops to this function!
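    # For a concrete sense of the cost: with 3 ops and no fixed ctor_order,
    # commute runs 3! ctor orders x 3! dtor orders = 36 full registration and
    # deregistration cycles, each checking invariants at every intermediate step.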
def commute(self, name, ops, ctor_order=None, expect_raises=False):
results = {}
def go(ctor_order):
for dtor_order in itertools.permutations(range(len(ops))):
self.run_ops(
name, ops, ctor_order, dtor_order,
results=results, expect_raises=expect_raises)
if ctor_order is not None:
go(ctor_order)
else:
for ctor_order in itertools.permutations(range(len(ops))):
go(ctor_order)
# Return the "full" Result namedtuple after all operations are run.
# If this KeyErrors, that means that there did not exist any
# ordering of ctors which got us to the "end". That's an
# error in test construction: it means you could have
# factored the test into two smaller ones.
return results[frozenset(range(len(ops)))]
def test_def(self):
state = self.commute("foo", [
# m.def("foo(Tensor x) -> Tensor")
lambda m: m.def_("foo(Tensor x) -> Tensor"),
# m.impl("test_def", [](const Tensor& x) { return x })
lambda m: m.impl_t_t("foo"),
# m.impl("test_def", kCPU, [](const Tensor& x) { return x })
lambda m: m.impl_t_t("foo", dispatch="CPU"),
# m.impl("test_def", kAutograd, [](const Tensor& x) { return x })
lambda m: m.impl_t_t("foo", dispatch="Autograd"),
# m.impl("test_def", kAutogradCPU, [](const Tensor& x) { return x })
lambda m: m.impl_t_t("foo", dispatch="AutogradCPU")
]).state
self.assertExpectedInline(state, '''\
name: test::foo
schema: test::foo(Tensor x) -> Tensor
debug: registered at /dev/null:0
alias analysis kind: FROM_SCHEMA
CPU: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
AutogradCPU: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
Autograd[alias]: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
CompositeImplicitAutograd[alias]: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
''')
def test_def_impl_schema_mismatch(self):
# NB: an impl-impl mismatch is not reported eagerly; you'll find out
# about it because one of them won't match with def
state = self.commute("foo", [
# m.def("foo(Tensor x, Tensor y) -> Tensor")
lambda m: m.def_("foo(Tensor x, Tensor y) -> Tensor"),
# m.impl("foo", [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo"),
], expect_raises=True).state
self.assertExpectedInline(state, '''\
Inferred operator schema for a C++ kernel function doesn't match the expected function schema.
operator: test::foo
expected schema: test::foo(Tensor x, Tensor y) -> Tensor
registered at /dev/null:0
inferred schema: (Tensor _0) -> Tensor _0
impl_t_t
reason: The number of arguments is different. 2 vs 1.''')
def test_def_with_inference(self):
state = self.commute("foo", [
# m.def("foo", [](const Tensor & x) { return x })
lambda m: m.def_name_t_t("foo"),
# m.impl("foo", torch::kCPU, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "CPU"),
# m.impl("foo", torch::kAutograd, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "Autograd"),
# m.impl("foo", torch::kAutogradCPU, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "AutogradCPU")
]).state
self.assertExpectedInline(state, '''\
name: test::foo
schema: test::foo(Tensor _0) -> Tensor _0
debug: registered at /dev/null:0
alias analysis kind: CONSERVATIVE
CPU: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
AutogradCPU: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
Autograd[alias]: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
CompositeImplicitAutograd[alias]: default_def_name_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
''')
def test_def_only(self):
state = self.commute("foo", [
# m.def("foo(Tensor x, Tensor y) -> Tensor")
lambda m: m.def_("foo(Tensor x, Tensor y) -> Tensor"),
]).state
self.assertExpectedInline(state, '''\
name: test::foo
schema: test::foo(Tensor x, Tensor y) -> Tensor
debug: registered at /dev/null:0
alias analysis kind: FROM_SCHEMA
''')
def test_impl_only(self):
state = self.commute("foo", [
# m.impl("foo", [](const Tensor& x) { return x })
lambda m: m.impl_t_t("foo"),
# m.impl("foo", torch::kCPU, [](const Tensor& x) { return x })
lambda m: m.impl_t_t("foo", "CPU"),
# m.impl("foo", torch::kAutograd, [](const Tensor& x) { return x })
lambda m: m.impl_t_t("foo", "Autograd"),
# m.impl("foo", torch::kAutogradCPU, [](const Tensor& x) { return x })
lambda m: m.impl_t_t("foo", "AutogradCPU")
]).state
self.assertExpectedInline(state, '''\
name: test::foo
schema: (none)
CPU: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
AutogradCPU: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
Autograd[alias]: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
CompositeImplicitAutograd[alias]: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
''')
def test_computed_table(self):
result = self.commute("foo", [
# m.def("foo", [](const Tensor & x) { return x })
lambda m: m.def_name_t_t("foo"),
# m.impl("foo", torch::kCPU, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "CPU", debug="fn_cpu"),
# m.impl("foo", torch::kCUDA, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "XLA", debug="fn_xla"),
# m.impl("foo", torch::kAutograd, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "Autograd", debug="fn_autograd"),
# m.impl("foo", torch::kAutogradCPU, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "AutogradCPU", debug="fn_autogradcpu")
])
state, table = result.state, result.table
self.assertExpectedInline(state, '''\
name: test::foo
schema: test::foo(Tensor _0) -> Tensor _0
debug: registered at /dev/null:0
alias analysis kind: CONSERVATIVE
CPU: fn_cpu :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
XLA: fn_xla :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
AutogradCPU: fn_autogradcpu :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
Autograd[alias]: fn_autograd :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
CompositeImplicitAutograd[alias]: default_def_name_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
''')
# computed dispatch table is too big, so we only check on a few entries we're interested in.
extracted_table = extract_dispatch_table_with_keys(table, dispatch_keys_to_check)
self.assertExpectedInline(extracted_table, '''\
Undefined: default_def_name_t_t [math kernel]
CPU: fn_cpu [kernel]
CUDA: default_def_name_t_t [math kernel]
XLA: fn_xla [kernel]
AutogradOther: default_def_name_t_t [math kernel]
AutogradCPU: fn_autogradcpu [kernel]
AutogradCUDA: default_def_name_t_t [math kernel]
AutogradXLA: fn_autograd [autograd kernel]
''')
def test_computed_table_with_cpu_math_autogradcpu_fallthrough(self):
global_m = C._dispatch_library("IMPL", "_", "AutogradCPU")
result = self.commute("foo", [
# m.def("foo", [](const Tensor & x) { return x })
lambda m: m.def_name_t_t("foo"),
# m.impl("foo", torch::kCPU, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "CPU"),
])
state, table = result.state, result.table
self.assertExpectedInline(state, '''\
name: test::foo
schema: test::foo(Tensor _0) -> Tensor _0
debug: registered at /dev/null:0
alias analysis kind: CONSERVATIVE
CPU: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
CompositeImplicitAutograd[alias]: default_def_name_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
''')
# computed dispatch table is too big, so we only check on a few entries we're interested in.
extracted_table = extract_dispatch_table_with_keys(table, dispatch_keys_to_check)
self.assertExpectedInline(extracted_table, '''\
Undefined: default_def_name_t_t [math kernel]
CPU: impl_t_t [kernel]
CUDA: default_def_name_t_t [math kernel]
XLA: default_def_name_t_t [math kernel]
AutogradOther: default_def_name_t_t [math kernel]
AutogradCPU: fallthrough registered in pytorch framework [backend fallback]
AutogradCUDA: default_def_name_t_t [math kernel]
AutogradXLA: default_def_name_t_t [math kernel]
''')
def test_computed_table_with_math(self):
global_m = C._dispatch_library("IMPL", "_", "AutogradCPU")
result = self.commute("foo", [
# m.def("foo(Tensor x) -> Tensor")
lambda m: m.def_("foo(Tensor x) -> Tensor"),
# m.impl("foo", torch::kCompositeImplicitAutograd, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "CompositeImplicitAutograd"),
])
state, table = result.state, result.table
self.assertExpectedInline(state, '''\
name: test::foo
schema: test::foo(Tensor x) -> Tensor
debug: registered at /dev/null:0
alias analysis kind: FROM_SCHEMA
CompositeImplicitAutograd[alias]: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
''')
# computed dispatch table is too big, so we only check on a few entries we're interested in.
extracted_table = extract_dispatch_table_with_keys(table, dispatch_keys_to_check)
self.assertExpectedInline(extracted_table, '''\
Undefined: impl_t_t [math kernel]
CPU: impl_t_t [math kernel]
CUDA: impl_t_t [math kernel]
XLA: impl_t_t [math kernel]
AutogradOther: impl_t_t [math kernel]
AutogradCPU: impl_t_t [math kernel]
AutogradCUDA: impl_t_t [math kernel]
AutogradXLA: impl_t_t [math kernel]
''')
def test_computed_table_with_cpu_math(self):
global_m = C._dispatch_library("IMPL", "_", "AutogradCPU")
result = self.commute("foo", [
# m.def("foo(Tensor x) -> Tensor")
lambda m: m.def_("foo(Tensor x) -> Tensor"),
# m.impl("foo", torch::kCPU, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "CPU", debug="fn_cpu"),
# m.impl("foo", torch::kCompositeImplicitAutograd, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "CompositeImplicitAutograd", debug="fn_math"),
])
state, table = result.state, result.table
self.assertExpectedInline(state, '''\
name: test::foo
schema: test::foo(Tensor x) -> Tensor
debug: registered at /dev/null:0
alias analysis kind: FROM_SCHEMA
CPU: fn_cpu :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
CompositeImplicitAutograd[alias]: fn_math :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
''')
# computed dispatch table is too big, so we only check on a few entries we're interested in.
extracted_table = extract_dispatch_table_with_keys(table, dispatch_keys_to_check)
self.assertExpectedInline(extracted_table, '''\
Undefined: fn_math [math kernel]
CPU: fn_cpu [kernel]
CUDA: fn_math [math kernel]
XLA: fn_math [math kernel]
AutogradOther: fn_math [math kernel]
AutogradCPU: fallthrough registered in pytorch framework [backend fallback]
AutogradCUDA: fn_math [math kernel]
AutogradXLA: fn_math [math kernel]
''')
def test_computed_table_with_autograd(self):
global_m = C._dispatch_library("IMPL", "_", "AutogradCPU")
result = self.commute("foo", [
# m.def("foo(Tensor x) -> Tensor")
lambda m: m.def_("foo(Tensor x) -> Tensor"),
# m.impl("foo", torch::kAutograd, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "Autograd"),
])
state, table = result.state, result.table
self.assertExpectedInline(state, '''\
name: test::foo
schema: test::foo(Tensor x) -> Tensor
debug: registered at /dev/null:0
alias analysis kind: FROM_SCHEMA
Autograd[alias]: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
''')
# computed dispatch table is too big, so we only check on a few entries we're interested in.
extracted_table = extract_dispatch_table_with_keys(table, dispatch_keys_to_check)
self.assertExpectedInline(extracted_table, '''\
AutogradOther: impl_t_t [autograd kernel]
AutogradCPU: impl_t_t [autograd kernel]
AutogradCUDA: impl_t_t [autograd kernel]
AutogradXLA: impl_t_t [autograd kernel]
''')
# Now that catchAll maps to CompositeImplicitAutograd, registering to both
# catchAll and CompositeImplicitAutograd breaks commutativity.
def test_computed_table_with_cpu_autograd_math(self):
result = self.commute("foo", [
# m.def("foo(Tensor x) -> Tensor")
lambda m: m.def_("foo(Tensor x) -> Tensor"),
# m.impl("foo", torch::kCPU, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "CPU", debug="fn_cpu"),
# m.impl("foo", torch::kAutograd, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "Autograd", debug="fn_autograd"),
# m.impl("foo", torch::kCompositeImplicitAutograd, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "CompositeImplicitAutograd", debug="fn_math"),
])
state, table = result.state, result.table
self.assertExpectedInline(state, '''\
name: test::foo
schema: test::foo(Tensor x) -> Tensor
debug: registered at /dev/null:0
alias analysis kind: FROM_SCHEMA
CPU: fn_cpu :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
Autograd[alias]: fn_autograd :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
CompositeImplicitAutograd[alias]: fn_math :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
''')
# computed dispatch table is too big, so we only check on a few entries we're interested in.
extracted_table = extract_dispatch_table_with_keys(table, dispatch_keys_to_check)
self.assertExpectedInline(extracted_table, '''\
Undefined: fn_math [math kernel]
CPU: fn_cpu [kernel]
CUDA: fn_math [math kernel]
XLA: fn_math [math kernel]
AutogradOther: fn_math [math kernel]
AutogradCPU: fn_autograd [autograd kernel]
AutogradCUDA: fn_math [math kernel]
AutogradXLA: fn_math [math kernel]
''')
def test_computed_table_with_ambiguous_autogradother(self):
result = self.commute("foo", [
# m.def("foo(Tensor x) -> Tensor")
lambda m: m.def_("foo(Tensor x) -> Tensor"),
# m.impl("foo", torch::kCompositeImplicitAutograd, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "CompositeImplicitAutograd", debug="fn_math"),
# m.impl("foo", torch::kFPGA, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "FPGA", debug="fn_fpga"),
])
state, table = result.state, result.table
self.assertExpectedInline(state, '''\
name: test::foo
schema: test::foo(Tensor x) -> Tensor
debug: registered at /dev/null:0
alias analysis kind: FROM_SCHEMA
FPGA: fn_fpga :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
CompositeImplicitAutograd[alias]: fn_math :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
''')
# computed dispatch table is too big, so we only check on a few entries we're interested in.
extracted_table = extract_dispatch_table_with_keys(table, dispatch_keys_to_check + ('FPGA',))
self.assertExpectedInline(extracted_table, '''\
Undefined: fn_math [math kernel]
CPU: fn_math [math kernel]
CUDA: fn_math [math kernel]
XLA: fn_math [math kernel]
AutogradOther: ambiguous_autogradother [ambiguous autogradother]
AutogradCPU: fn_math [math kernel]
AutogradCUDA: fn_math [math kernel]
AutogradXLA: fn_math [math kernel]
FPGA: fn_fpga [kernel]
''')
def test_computed_table_with_cpu_defaultbackend(self):
result = self.commute("foo", [
# m.def("foo(Tensor x) -> Tensor")
lambda m: m.def_("foo(Tensor x) -> Tensor"),
# m.impl("foo", torch::kCPU, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "CPU", debug="fn_cpu"),
# m.impl("foo", torch::kCompositeExplicitAutograd, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "CompositeExplicitAutograd", debug="fn_defaultbackend"),
])
state, table = result.state, result.table
self.assertExpectedInline(state, '''\
name: test::foo
schema: test::foo(Tensor x) -> Tensor
debug: registered at /dev/null:0
alias analysis kind: FROM_SCHEMA
CPU: fn_cpu :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
CompositeExplicitAutograd[alias]: fn_defaultbackend :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
''')
# computed dispatch table is too big, so we only check on a few entries we're interested in.
extracted_table = extract_dispatch_table_with_keys(table, dispatch_keys_to_check)
self.assertExpectedInline(extracted_table, '''\
Undefined: fn_defaultbackend [default backend kernel]
CPU: fn_cpu [kernel]
CUDA: fn_defaultbackend [default backend kernel]
XLA: fn_defaultbackend [default backend kernel]
AutogradOther: fallthrough registered in pytorch framework [backend fallback]
AutogradCPU: fallthrough registered in pytorch framework [backend fallback]
AutogradCUDA: fallthrough registered in pytorch framework [backend fallback]
AutogradXLA: fallthrough registered in pytorch framework [backend fallback]
''')
def test_computed_table_with_cpu_autograd_defaultbackend(self):
result = self.commute("foo", [
# m.def("foo(Tensor x) -> Tensor")
lambda m: m.def_("foo(Tensor x) -> Tensor"),
# m.impl("foo", torch::kCPU, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "CPU", debug="fn_cpu"),
# m.impl("foo", torch::kAutograd, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "Autograd", debug="fn_autograd"),
# m.impl("foo", torch::kCompositeExplicitAutograd, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "CompositeExplicitAutograd", debug="fn_defaultbackend"),
])
state, table = result.state, result.table
self.assertExpectedInline(state, '''\
name: test::foo
schema: test::foo(Tensor x) -> Tensor
debug: registered at /dev/null:0
alias analysis kind: FROM_SCHEMA
CPU: fn_cpu :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
Autograd[alias]: fn_autograd :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
CompositeExplicitAutograd[alias]: fn_defaultbackend :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
''')
# computed dispatch table is too big, so we only check on a few entries we're interested in.
extracted_table = extract_dispatch_table_with_keys(table, dispatch_keys_to_check + ('FPGA',))
self.assertExpectedInline(extracted_table, '''\
Undefined: fn_defaultbackend [default backend kernel]
CPU: fn_cpu [kernel]
CUDA: fn_defaultbackend [default backend kernel]
XLA: fn_defaultbackend [default backend kernel]
AutogradOther: fn_autograd [autograd kernel]
AutogradCPU: fn_autograd [autograd kernel]
AutogradCUDA: fn_autograd [autograd kernel]
AutogradXLA: fn_autograd [autograd kernel]
FPGA: fn_defaultbackend [default backend kernel]
''')
def test_computed_table_with_cpu_autograd_math_defaultbackend(self):
result = self.commute("foo", [
# m.def("foo(Tensor x) -> Tensor")
lambda m: m.def_("foo(Tensor x) -> Tensor"),
# m.impl("foo", torch::kCPU, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "CPU", debug="fn_cpu"),
# m.impl("foo", torch::kAutograd, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "Autograd", debug="fn_autograd"),
# m.impl("foo", torch::kCompositeImplicitAutograd, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "CompositeImplicitAutograd", debug="fn_math"),
# m.impl("foo", torch::kCompositeExplicitAutograd, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "CompositeExplicitAutograd", debug="fn_defaultbackend"),
])
state, table = result.state, result.table
self.assertExpectedInline(state, '''\
name: test::foo
schema: test::foo(Tensor x) -> Tensor
debug: registered at /dev/null:0
alias analysis kind: FROM_SCHEMA
CPU: fn_cpu :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
Autograd[alias]: fn_autograd :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
CompositeImplicitAutograd[alias]: fn_math :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
CompositeExplicitAutograd[alias]: fn_defaultbackend :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
''')
# computed dispatch table is too big, so we only check on a few entries we're interested in.
extracted_table = extract_dispatch_table_with_keys(table, dispatch_keys_to_check)
self.assertExpectedInline(extracted_table, '''\
Undefined: fn_defaultbackend [default backend kernel]
CPU: fn_cpu [kernel]
CUDA: fn_defaultbackend [default backend kernel]
XLA: fn_defaultbackend [default backend kernel]
AutogradOther: fn_autograd [autograd kernel]
AutogradCPU: fn_autograd [autograd kernel]
AutogradCUDA: fn_autograd [autograd kernel]
AutogradXLA: fn_autograd [autograd kernel]
''')
def test_multiple_def_error(self):
ops = [
# m.def("foo(Tensor x, Tensor y) -> Tensor")
lambda m: m.def_("foo(Tensor x, Tensor y) -> Tensor"),
# m.def("foo(Tensor x, Tensor y) -> Tensor")
lambda m: m.def_("foo(Tensor x, Tensor y) -> Tensor"),
]
self.assertExpectedInline(
self.commute("foo", ops, expect_raises=True).state,
'''Tried to register an operator (test::foo(Tensor x, Tensor y) -> Tensor) with the same name and overload '''
'''name multiple times. Each overload's schema should only be registered with a single call to def(). '''
'''Duplicate registration: registered at /dev/null:0. Original registration: registered at /dev/null:0'''
)
def test_def_with_explicit_alias(self):
state = self.commute("foo", [
# m.def(torch::schema(
# "foo(Tensor x, Tensor y) -> Tensor",
# AliasAnalysisKind::PURE))
lambda m: m.def_("foo(Tensor x, Tensor y) -> Tensor",
alias="PURE_FUNCTION")
]).state
self.assertExpectedInline(state, '''\
name: test::foo
schema: test::foo(Tensor x, Tensor y) -> Tensor
debug: registered at /dev/null:0
alias analysis kind: PURE_FUNCTION
''')
def test_multiple_def_alias_defaulting(self):
ops = [
# m.def(torch::schema("foo(Tensor x) -> Tensor",
# c10::AliasAnalysisKind::PURE_FUNCTION))
lambda m: m.def_("foo(Tensor x) -> Tensor", alias="PURE_FUNCTION"),
# RegisterOperators().op("foo(Tensor x) -> Tensor")
lambda m: m.def_legacy("foo(Tensor x) -> Tensor"),
]
self.assertExpectedInline(
self.commute("foo", ops, expect_raises=True).state,
'''Tried to register an operator (test::foo(Tensor x) -> Tensor) with the same name and overload '''
'''name multiple times. Each overload's schema should only be registered with a single call to def(). '''
'''Duplicate registration: registered at /dev/null:0. Original registration: registered at /dev/null:0'''
)
def test_multiple_def_alias_mismatch(self):
ops = [
# m.def(torch::schema("foo(Tensor x) -> Tensor",
# c10::AliasAnalysisKind::PURE_FUNCTION))
lambda m: m.def_("foo(Tensor x) -> Tensor", alias="PURE_FUNCTION"),
# m.def(torch::schema("foo(Tensor x) -> Tensor",
# c10::AliasAnalysisKind::CONSERVATIVE))
lambda m: m.def_("foo(Tensor x) -> Tensor", alias="CONSERVATIVE"),
]
self.assertExpectedInline(
self.commute("foo", ops, expect_raises=True).state,
'''Tried to register an operator (test::foo(Tensor x) -> Tensor) with the same name and overload '''
'''name multiple times. Each overload's schema should only be registered with a single call to def(). '''
'''Duplicate registration: registered at /dev/null:0. Original registration: registered at /dev/null:0'''
)
def test_multiple_fallback(self):
global_m = C._dispatch_library("IMPL", "_", "XLA")
global_m.fallback_fallthrough()
try:
global_m.fallback_fallthrough()
except RuntimeError as e:
self.assertExpectedInline(
str(e),
'''Tried to register multiple backend fallbacks for the same dispatch key XLA; previous registration '''
'''registered at /dev/null:0, new registration registered at /dev/null:0'''
)
else:
self.assertTrue(False)
def test_overwrite_math(self):
ops = [
lambda m: m.impl_t_t("foo", debug="fn1"),
lambda m: m.impl_t_t("foo", debug="fn2"),
]
# Not commutative
self.assertExpectedInline(
self.commute("foo", ops, ctor_order=(0, 1)).state,
'''\
name: test::foo
schema: (none)
CompositeImplicitAutograd[alias]: fn2 :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
CompositeImplicitAutograd[alias] (inactive): fn1 :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
'''
)
def test_find_dangling_impls(self):
dangling_impls = C._dispatch_find_dangling_impls()
self.assertEqual(
0,
len(dangling_impls),
msg=f"Expect zero dangling impls, but found: {dangling_impls}"
)
def test_find_dangling_impls_ext(self):
extension_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'cpp_extensions', 'dangling_impl_extension.cpp')
module = torch.utils.cpp_extension.load(
name="dangling_impl_extension",
sources=[
extension_path,
],
extra_cflags=["-g"],
verbose=True,
)
impls = C._dispatch_find_dangling_impls()
self.assertEqual(1, len(impls))
self.assertEqual(
'''\
name: __test::foo
schema: (none)
CPU: registered at {}:5 :: () -> () [ boxed unboxed ]
'''.format(extension_path),
impls[0])
def test_dispatch_print_registrations_for_dispatch_key_invalid(self):
with self.assertRaisesRegex(
RuntimeError,
"could not parse dispatch key: invalid_key"):
C._dispatch_print_registrations_for_dispatch_key('invalid_key')
class TestPythonDispatcher(TestCase):
def test_basic(self):
dispatcher = PythonDispatcher()
dispatcher.register(["CPU", "XLA", "Lazy", "CompositeImplicitAutograd"])
self.assertExpectedInline(
dispatcher.dispatchTable(),
'''\
Computed Dispatch Table
key kernel
---------------------------
CPU fn_CPU [kernel]
XLA fn_XLA [kernel]
Lazy fn_Lazy [kernel]
FPGA fn_CompositeImplicitAutograd [math kernel]
AutogradOther fn_CompositeImplicitAutograd [math kernel]
AutogradCPU fallthrough [backend fallback]
AutogradXLA fallthrough [backend fallback]
AutogradLazy fallthrough [backend fallback]
'''
)
def test_math_autogradcpu(self):
dispatcher = PythonDispatcher()
dispatcher.register(["CPU", "XLA", "Lazy", "CompositeImplicitAutograd", "AutogradCPU"])
self.assertExpectedInline(
dispatcher.dispatchTable(),
'''\
Computed Dispatch Table
key kernel
---------------------------
CPU fn_CPU [kernel]
XLA fn_XLA [kernel]
Lazy fn_Lazy [kernel]
FPGA fn_CompositeImplicitAutograd [math kernel]
AutogradOther fn_CompositeImplicitAutograd [math kernel]
AutogradCPU fn_AutogradCPU [kernel]
AutogradXLA fallthrough [backend fallback]
AutogradLazy fallthrough [backend fallback]
'''
)
self.assertExpectedInline(
dispatcher.registrations(),
'''\
Registered Kernels
key kernel
---------------------------
CPU fn_CPU
XLA fn_XLA
Lazy fn_Lazy
AutogradCPU fn_AutogradCPU
CompositeImplicitAutograd[alias] fn_CompositeImplicitAutograd
'''
)
def test_defaultbackend_autogradcpu(self):
dispatcher = PythonDispatcher()
dispatcher.register(["CPU", "XLA", "Lazy", "CompositeExplicitAutograd", "AutogradCPU"])
self.assertExpectedInline(
dispatcher.dispatchTable(),
'''\
Computed Dispatch Table
key kernel
---------------------------
CPU fn_CPU [kernel]
XLA fn_XLA [kernel]
Lazy fn_Lazy [kernel]
FPGA fn_CompositeExplicitAutograd [default backend kernel]
AutogradOther fallthrough [backend fallback]
AutogradCPU fn_AutogradCPU [kernel]
AutogradXLA fallthrough [backend fallback]
AutogradLazy fallthrough [backend fallback]
'''
)
self.assertExpectedInline(
dispatcher.registrations(),
'''\
Registered Kernels
key kernel
---------------------------
CPU fn_CPU
XLA fn_XLA
Lazy fn_Lazy
AutogradCPU fn_AutogradCPU
CompositeExplicitAutograd[alias] fn_CompositeExplicitAutograd
'''
)
def test_autogradother(self):
dispatcher = PythonDispatcher()
dispatcher.register(["CPU", "FPGA", "CompositeImplicitAutograd"])
self.assertExpectedInline(
dispatcher.dispatchTable(),
'''\
Computed Dispatch Table
key kernel
---------------------------
CPU fn_CPU [kernel]
XLA fn_CompositeImplicitAutograd [math kernel]
Lazy fn_CompositeImplicitAutograd [math kernel]
FPGA fn_FPGA [kernel]
AutogradOther ambiguous_autogradother [ambiguous autogradother]
AutogradCPU fallthrough [backend fallback]
AutogradXLA fn_CompositeImplicitAutograd [math kernel]
AutogradLazy fn_CompositeImplicitAutograd [math kernel]
'''
)
self.assertExpectedInline(
dispatcher.registrations(),
'''\
Registered Kernels
key kernel
---------------------------
FPGA fn_FPGA
CPU fn_CPU
CompositeImplicitAutograd[alias] fn_CompositeImplicitAutograd
'''
)
def test_duplicate_registrations(self):
dispatcher = PythonDispatcher()
with self.assertRaisesRegex(RuntimeError, r"Overriden is not allowed"):
dispatcher.register(["CPU", "CPU"])
def test_defaultbackend_math(self):
dispatcher = PythonDispatcher()
with self.assertRaisesRegex(
RuntimeError,
r"Registration to both CompositeImplicitAutograd and CompositeExplicitAutograd is not allowed"):
dispatcher.register(["CompositeExplicitAutograd", "CompositeImplicitAutograd"])
def test_quantized_structured_not_implemented(self):
x = torch.zeros([1, 1, 1])
y = torch.zeros([1, 1, 1])
scale, zero_point = 1.0, 0
dtype = torch.qint8
qx = torch.quantize_per_tensor(x, scale, zero_point, dtype)
qy = torch.quantize_per_tensor(y, scale, zero_point, dtype)
# If bmm gets quantized support you need to update this to something
# else that is not implemented
self.assertRaisesRegex(
NotImplementedError,
"Could not run 'aten::bmm.out' with arguments from the 'QuantizedCPU' backend.",
lambda: torch.bmm(qx, qy)
)
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/test_dispatch.py
|
# Owner(s): ["module: scatter & gather ops"]
from itertools import product
from functools import partial
import numpy as np
import torch
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
dtypes,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
gradcheck,
parametrize,
)
reductions = ["max", "mean", "min", "sum", "prod"]
def get_default_value(initial_value, reduction):
if initial_value is not None:
return initial_value
if reduction == "max":
return -float("Inf")
elif reduction == "mean":
return float("nan")
elif reduction == "min":
return float("Inf")
elif reduction == "sum":
return 0.0
elif reduction == "prod":
return 1.0
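# A minimal illustrative sketch (for exposition only, not exercised by any test): the
# value returned above is the reduction identity used to fill zero-length segments,
# unless an explicit initial value is supplied.
def _get_default_value_examples():
    assert get_default_value(None, "max") == -float("Inf")
    assert get_default_value(None, "prod") == 1.0
    assert get_default_value(5, "sum") == 5  # an explicit initial value takes precedence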
class TestSegmentReductions(TestCase):
def _test_common(
self,
reduction,
device,
dtype,
unsafe,
axis,
initial_value,
data_arr,
lengths_arr,
expected_arr,
expected_grad_arr,
check_backward,
lengths_dtype=torch.int,
):
lengths = torch.tensor(lengths_arr, device=device, dtype=lengths_dtype)
# generate offsets from lengths
zeros_shape = list(lengths.shape)
zeros_shape[-1] = 1
offsets = torch.cat((lengths.new_zeros(zeros_shape), lengths), -1).cumsum_(-1)
data = torch.tensor(
data_arr,
device=device,
dtype=dtype,
requires_grad=True,
)
expected_result = torch.tensor(expected_arr, device=device, dtype=dtype)
expected_grad = torch.tensor(expected_grad_arr, device=device, dtype=dtype)
for mode in ['lengths', 'offsets']:
segment_reduce_kwargs = dict(
axis=axis,
unsafe=unsafe,
initial=initial_value)
if (mode == 'lengths'):
segment_reduce_kwargs['lengths'] = lengths
else:
segment_reduce_kwargs['offsets'] = offsets
actual_result = torch.segment_reduce(
data=data,
reduce=reduction,
**segment_reduce_kwargs
)
self.assertEqual(
expected_result, actual_result, rtol=1e-02, atol=1e-05, equal_nan=True
)
if not check_backward:
return
# Test backward
actual_result.sum().backward()
self.assertEqual(
expected_grad, data.grad, rtol=1e-02, atol=1e-05, equal_nan=True
)
data = data.clone().detach().requires_grad_(True)
# gradcheck does not work well with bfloat16 or fp16 cpu types
# also there is small numerical difference with fp32
if dtype not in [torch.half, torch.bfloat16, torch.float]:
# gradcheck does not like "nan" inputs, so replace nans with an arbitrary value (10)
d_non_nan = np.nan_to_num(data_arr, nan=10)
new_data = torch.tensor(
# [10 if v == float("nan") else v for v in data],
d_non_nan,
device=device,
dtype=dtype,
requires_grad=True,
)
self.assertTrue(
gradcheck(
lambda x: torch.segment_reduce(
data=x,
reduce=reduction,
**segment_reduce_kwargs
),
(new_data,),
)
)
@dtypes(
*product(
(torch.half, torch.bfloat16, torch.float, torch.double),
(torch.int, torch.int64),
)
)
def test_simple_1d(self, device, dtypes):
val_dtype, length_type = dtypes
lengths = [1, 2, 3, 0]
data = [1, float("nan"), 3, 4, 5, 5]
for reduction in reductions:
for initial in [0, None]:
check_backward = True if initial is not None else False
initial_value = initial
default_value = get_default_value(initial_value, reduction)
if reduction == "max":
expected_result = [1, float("nan"), 5, default_value]
expected_grad = [1, 1, 0, 0, 0.5, 0.5]
elif reduction == "mean":
expected_result = [1, float("nan"), 4.666, default_value]
expected_grad = [1.0, 0.5, 0.5, 0.333, 0.333, 0.333]
elif reduction == "min":
if initial is not None:
initial_value = 1000 # some high number
default_value = get_default_value(initial_value, reduction)
expected_result = [1, float("nan"), 4, default_value]
expected_grad = [1.0, 1.0, 0, 1, 0, 0]
elif reduction == "sum":
expected_result = [1, float("nan"), 14, default_value]
expected_grad = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
elif reduction == "prod":
if initial is not None:
initial_value = 2 # 0 initial_value will zero out everything for prod
default_value = get_default_value(initial_value, reduction)
expected_result = [2, float("nan"), 200, default_value]
expected_grad = [2.0, 6.0, float("nan"), 50.0, 40.0, 40.0]
else:
expected_result = [1, float("nan"), 100, default_value]
expected_grad = [1.0, 3.0, float("nan"), 25.0, 20.0, 20.0]
for axis in [0, -1]:
for unsafe in [True, False]:
self._test_common(
reduction,
device,
val_dtype,
unsafe,
axis,
initial_value,
data,
lengths,
expected_result,
expected_grad,
check_backward,
length_type,
)
@dtypes(
*product(
(torch.half, torch.bfloat16, torch.float, torch.double),
(torch.int, torch.int64),
)
)
def test_multi_d_simple(self, device, dtypes):
val_dtype, length_type = dtypes
axis = 0
lengths = [1, 2, 3, 0]
data = [[1, 1], [float("nan"), 1], [3, float("nan")], [4, 1], [3, 2], [2, 3]]
for reduction in reductions:
for initial in [0, None]:
check_backward = True if initial is not None else False
initial_value = initial
default_value = get_default_value(initial_value, reduction)
if reduction == "max":
expected_result = [
[1, 1],
[float("nan"), float("nan")],
[4, 3],
[default_value, default_value],
]
expected_grad = [
[1, 1],
[1, 0],
[0, 1],
[1, 0],
[0, 0],
[0, 1],
]
elif reduction == "mean":
expected_result = [
[1, 1],
[float("nan"), float("nan")],
[3, 2],
[default_value, default_value],
]
expected_grad = [
[1.0, 1.0],
[0.5, 0.5],
[0.5, 0.5],
[0.333, 0.333],
[0.333, 0.333],
[0.333, 0.333],
]
elif reduction == "min":
if initial is not None:
initial_value = 1000 # some high number
default_value = get_default_value(initial_value, reduction)
expected_result = [
[1, 1],
[float("nan"), float("nan")],
[2, 1],
[default_value, default_value],
]
expected_grad = [
[1.0, 1.0],
[1, 0],
[0, 1],
[0, 1],
[0, 0],
[1, 0],
]
elif reduction == "sum":
expected_result = [
[1, 1],
[float("nan"), float("nan")],
[9, 6],
[default_value, default_value],
]
expected_grad = [
[1.0, 1.0],
[1.0, 1.0],
[1.0, 1.0],
[1.0, 1.0],
[1.0, 1.0],
[1.0, 1.0],
]
elif reduction == "prod":
if initial is not None:
initial_value = 2 # 0 initial_value will zero out everything for prod
default_value = get_default_value(initial_value, reduction)
expected_result = [
[2, 2],
[float("nan"), float("nan")],
[48, 12],
[default_value, default_value],
]
expected_grad = [
[2.0, 2.0],
[6.0, float("nan")],
[float("nan"), 2.0],
[12.0, 12.0],
[16.0, 6.0],
[24.0, 4.0],
]
else:
expected_result = [
[1, 1],
[float("nan"), float("nan")],
[24, 6],
[default_value, default_value],
]
expected_grad = [
[1.0, 1.0],
[3.0, float("nan")],
[float("nan"), 1.0],
[6.0, 6.0],
[8.0, 3.0],
[12.0, 2.0],
]
for unsafe in [True, False]:
self._test_common(
reduction,
device,
val_dtype,
unsafe,
axis,
initial_value,
data,
lengths,
expected_result,
expected_grad,
check_backward,
)
@dtypes(
*product(
(torch.half, torch.bfloat16, torch.float, torch.double),
(torch.int, torch.int64),
)
)
@parametrize("reduce", ['sum', 'prod', 'min', 'max', 'mean'])
def test_pytorch_scatter_test_cases(self, device, dtypes, reduce):
val_dtype, length_dtype = dtypes
# unlike pytorch_scatter, zero-length segments are filled with the reduction identities.
tests = [
{
'src': [1, 2, 3, 4, 5, 6],
'index': [0, 0, 1, 1, 1, 3],
'indptr': [0, 2, 5, 5, 6],
'sum': [3, 12, 0, 6],
'prod': [2, 60, 1, 6],
'mean': [1.5, 4, float('nan'), 6],
'min': [1, 3, float('inf'), 6],
'max': [2, 5, -float('inf'), 6],
},
{
'src': [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]],
'index': [0, 0, 1, 1, 1, 3],
'indptr': [0, 2, 5, 5, 6],
'sum': [[4, 6], [21, 24], [0, 0], [11, 12]],
'prod': [[3, 8], [315, 480], [1, 1], [11, 12]],
'mean': [[2, 3], [7, 8], [float('nan'), float('nan')], [11, 12]],
'min': [[1, 2], [5, 6], [float('inf'), float('inf')], [11, 12]],
'max': [[3, 4], [9, 10], [-float('inf'), -float('inf')], [11, 12]],
},
{
'src': [[1, 3, 5, 7, 9, 11], [2, 4, 6, 8, 10, 12]],
'index': [[0, 0, 1, 1, 1, 3], [0, 0, 0, 1, 1, 2]],
'indptr': [[0, 2, 5, 5, 6], [0, 3, 5, 6, 6]],
'sum': [[4, 21, 0, 11], [12, 18, 12, 0]],
'prod': [[3, 315, 1, 11], [48, 80, 12, 1]],
'mean': [[2, 7, float('nan'), 11], [4, 9, 12, float('nan')]],
'min': [[1, 5, float('inf'), 11], [2, 8, 12, float('inf')]],
'max': [[3, 9, -float('inf'), 11], [6, 10, 12, -float('inf')]],
},
{
'src': [[[1, 2], [3, 4], [5, 6]], [[7, 9], [10, 11], [12, 13]]],
'index': [[0, 0, 1], [0, 2, 2]],
'indptr': [[0, 2, 3, 3], [0, 1, 1, 3]],
'sum': [[[4, 6], [5, 6], [0, 0]], [[7, 9], [0, 0], [22, 24]]],
'prod': [[[3, 8], [5, 6], [1, 1]], [[7, 9], [1, 1], [120, 143]]],
'mean': [[[2, 3], [5, 6], [float('nan'), float('nan')]],
[[7, 9], [float('nan'), float('nan')], [11, 12]]],
'min': [[[1, 2], [5, 6], [float('inf'), float('inf')]],
[[7, 9], [float('inf'), float('inf')], [10, 11]]],
'max': [[[3, 4], [5, 6], [-float('inf'), -float('inf')]],
[[7, 9], [-float('inf'), -float('inf')], [12, 13]]],
},
{
'src': [[1, 3], [2, 4]],
'index': [[0, 0], [0, 0]],
'indptr': [[0, 2], [0, 2]],
'sum': [[4], [6]],
'prod': [[3], [8]],
'mean': [[2], [3]],
'min': [[1], [2]],
'max': [[3], [4]],
},
{
'src': [[[1, 1], [3, 3]], [[2, 2], [4, 4]]],
'index': [[0, 0], [0, 0]],
'indptr': [[0, 2], [0, 2]],
'sum': [[[4, 4]], [[6, 6]]],
'prod': [[[3, 3]], [[8, 8]]],
'mean': [[[2, 2]], [[3, 3]]],
'min': [[[1, 1]], [[2, 2]]],
'max': [[[3, 3]], [[4, 4]]],
},
]
for test in tests:
data = torch.tensor(test['src'], dtype=val_dtype, device=device, requires_grad=True)
indptr = torch.tensor(test['indptr'], dtype=length_dtype, device=device)
dim = indptr.ndim - 1
# calculate lengths from indptr
lengths = torch.diff(indptr, dim=dim)
expected = torch.tensor(test[reduce], dtype=val_dtype, device=device)
actual_result = torch.segment_reduce(
data=data,
reduce=reduce,
lengths=lengths,
axis=dim,
unsafe=True,
)
self.assertEqual(actual_result, expected)
# test offsets
actual_result = torch.segment_reduce(
data=data,
reduce=reduce,
offsets=indptr,
axis=dim,
unsafe=True,
)
self.assertEqual(actual_result, expected)
if val_dtype == torch.float64:
def fn(x, mode='lengths'):
initial = 1
# supply initial values to prevent gradcheck from failing for 0 length segments
# where nan/inf are reduction identities that produce nans when calculating the numerical jacobian
if reduce == 'min':
initial = 1000
elif reduce == 'max':
initial = -1000
segment_reduce_args = (x, reduce)  # positional (data, reduce) for torch.segment_reduce
segment_reduce_kwargs = dict(axis=dim, unsafe=True, initial=initial)
if mode == 'lengths':
segment_reduce_kwargs[mode] = lengths
elif mode == 'offsets':
segment_reduce_kwargs[mode] = indptr
return torch.segment_reduce(*segment_reduce_args, **segment_reduce_kwargs)
self.assertTrue(gradcheck(partial(fn, mode='lengths'), (data.clone().detach().requires_grad_(True))))
self.assertTrue(gradcheck(partial(fn, mode='offsets'), (data.clone().detach().requires_grad_(True))))
@dtypes(
*product(
(torch.half, torch.bfloat16, torch.float, torch.double),
(torch.int, torch.int64),
)
)
def test_multi_d(self, device, dtypes):
val_dtype, length_type = dtypes
axis = 0
lengths = [0, 2, 3, 0]
data = np.arange(50).reshape(5, 2, 5).tolist()
expected_grad = []
# TODO: calculate grad and check correctness
check_backward = False
for reduction in reductions:
initial_value = 0
if reduction == "max":
expected_result = [
np.full((2, 5), initial_value).tolist(),
np.max(data[:2], axis=0).tolist(),
np.max(data[2:], axis=0).tolist(),
np.full((2, 5), initial_value).tolist(),
]
elif reduction == "mean":
expected_result = [
np.full((2, 5), initial_value).tolist(),
np.mean(data[:2], axis=0).tolist(),
np.mean(data[2:], axis=0).tolist(),
np.full((2, 5), initial_value).tolist(),
]
elif reduction == "min":
initial_value = 1000 # some high number
expected_result = [
np.full((2, 5), initial_value).tolist(),
np.min(data[:2], axis=0).tolist(),
np.min(data[2:], axis=0).tolist(),
np.full((2, 5), initial_value).tolist(),
]
elif reduction == "sum":
expected_result = [
np.full((2, 5), initial_value).tolist(),
np.sum(data[:2], axis=0).tolist(),
np.sum(data[2:], axis=0).tolist(),
np.full((2, 5), initial_value).tolist(),
]
elif reduction == "prod":
initial_value = 1
expected_result = [
np.full((2, 5), initial_value).tolist(),
np.prod(data[:2], axis=0).tolist(),
np.prod(data[2:], axis=0).tolist(),
np.full((2, 5), initial_value).tolist(),
]
for unsafe in [True, False]:
self._test_common(
reduction,
device,
val_dtype,
unsafe,
axis,
initial_value,
data,
lengths,
expected_result,
expected_grad,
check_backward,
)
@dtypes(torch.int, torch.int64)
def test_unsafe_flag(self, device, dtype):
length_type = dtype
lengths = torch.tensor([0, 2, 3, 0], device=device, dtype=length_type)
data = torch.arange(6, dtype=torch.float, device=device)
# test for error on 1-D lengths
with self.assertRaisesRegex(RuntimeError, "Expected all rows of lengths along axis"):
torch.segment_reduce(data, 'sum', lengths=lengths, axis=0, unsafe=False)
# test for error on multi-D lengths
nd_lengths = torch.tensor([[0, 3, 3, 0], [2, 3, 0, 0]], dtype=length_type, device=device)
nd_data = torch.arange(12, dtype=torch.float, device=device).reshape(2, 6)
with self.assertRaisesRegex(RuntimeError, "Expected all rows of lengths along axis"):
torch.segment_reduce(nd_data, 'sum', lengths=nd_lengths, axis=1, unsafe=False)
instantiate_device_type_tests(TestSegmentReductions, globals())
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/test_segment_reductions.py
|
# Owner(s): ["module: fx"]
import copy
import sys
import logging
from typing import List, Tuple
import torch
from torch.fx._symbolic_trace import symbolic_trace
from torch.fx.experimental.proxy_tensor import make_fx
from torch.fx.passes.backends.nvfuser import NvFuserBackend
from torch.testing._internal.common_utils import run_tests, TEST_CUDA, TestCase
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
skipCUDAIfRocm,
dtypes,
)
if not TEST_CUDA:
print('CUDA not available, skipping tests', file=sys.stderr)
TestCase = object # noqa: F811
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
class HF_T5_Partial(torch.nn.Module):
def inputs_meta(self):
return [
(torch.Size([512, 512]), torch.float32),
(torch.Size([512, 512]), torch.float32),
(torch.Size([512, 512]), torch.float32),
(torch.Size([512, 512]), torch.float32),
(torch.Size([512]), torch.float32),
(torch.Size([2048, 512]), torch.float32),
(torch.Size([512, 2048]), torch.float32),
(torch.Size([512]), torch.float32),
(torch.Size([8, 1024, 512]), torch.float32),
(torch.Size([8, 8, 1024, 1024]), torch.float32),
]
def forward(self, primals_1, primals_2, primals_3, primals_4, primals_5,
primals_6, primals_7, primals_8, primals_9, primals_10):
pow_1 = torch.ops.aten.pow(primals_9, 2)
mean = torch.ops.aten.mean(pow_1, [-1], True)
add = torch.ops.aten.add(mean, 1e-06)
rsqrt = torch.ops.aten.rsqrt(add)
mul = torch.ops.aten.mul(primals_9, rsqrt)
mul_1 = torch.ops.aten.mul(primals_5, mul)
t = torch.ops.aten.t(primals_3)
view = torch.ops.aten.view(mul_1, [8192, 512])
mm = torch.ops.aten.mm(view, t)
_unsafe_view = torch.ops.aten._unsafe_view(mm, [8, 1024, 512])
view_1 = torch.ops.aten.view(_unsafe_view, [8, -1, 8, 64])
transpose = torch.ops.aten.transpose(view_1, 1, 2)
t_1 = torch.ops.aten.t(primals_1)
view_2 = torch.ops.aten.view(mul_1, [8192, 512])
mm_1 = torch.ops.aten.mm(view_2, t_1)
_unsafe_view_1 = torch.ops.aten._unsafe_view(mm_1, [8, 1024, 512])
view_3 = torch.ops.aten.view(_unsafe_view_1, [8, -1, 8, 64])
transpose_1 = torch.ops.aten.transpose(view_3, 1, 2)
t_2 = torch.ops.aten.t(primals_4)
view_4 = torch.ops.aten.view(mul_1, [8192, 512])
mm_2 = torch.ops.aten.mm(view_4, t_2)
_unsafe_view_2 = torch.ops.aten._unsafe_view(mm_2, [8, 1024, 512])
view_5 = torch.ops.aten.view(_unsafe_view_2, [8, -1, 8, 64])
transpose_2 = torch.ops.aten.transpose(view_5, 1, 2)
transpose_3 = torch.ops.aten.transpose(transpose_1, 3, 2)
expand = torch.ops.aten.expand(transpose, [8, 8, 1024, 64])
clone = torch.ops.aten.clone(expand, memory_format=torch.contiguous_format)
_unsafe_view_3 = torch.ops.aten._unsafe_view(clone, [64, 1024, 64])
expand_1 = torch.ops.aten.expand(transpose_3, [8, 8, 64, 1024])
clone_1 = torch.ops.aten.clone(expand_1, memory_format=torch.contiguous_format)
_unsafe_view_4 = torch.ops.aten._unsafe_view(clone_1, [64, 64, 1024])
bmm = torch.ops.aten.bmm(_unsafe_view_3, _unsafe_view_4)
_unsafe_view_5 = torch.ops.aten._unsafe_view(bmm, [8, 8, 1024, 1024])
add_ = torch.ops.aten.add_(_unsafe_view_5, primals_10)
_softmax = torch.ops.aten._softmax(add_, -1, False)
expand_2 = torch.ops.aten.expand(_softmax, [8, 8, 1024, 1024])
view_6 = torch.ops.aten.view(expand_2, [64, 1024, 1024])
expand_3 = torch.ops.aten.expand(transpose_2, [8, 8, 1024, 64])
clone_2 = torch.ops.aten.clone(expand_3, memory_format=torch.contiguous_format)
_unsafe_view_6 = torch.ops.aten._unsafe_view(clone_2, [64, 1024, 64])
bmm_1 = torch.ops.aten.bmm(view_6, _unsafe_view_6)
_unsafe_view_7 = torch.ops.aten._unsafe_view(bmm_1, [8, 8, 1024, 64])
transpose_4 = torch.ops.aten.transpose(_unsafe_view_7, 1, 2)
clone_3 = torch.ops.aten.clone(transpose_4, memory_format=torch.contiguous_format)
view_7 = torch.ops.aten.view(clone_3, [8, -1, 512])
t_3 = torch.ops.aten.t(primals_2)
view_8 = torch.ops.aten.view(view_7, [8192, 512])
mm_3 = torch.ops.aten.mm(view_8, t_3)
_unsafe_view_8 = torch.ops.aten._unsafe_view(mm_3, [8, 1024, 512])
add_1 = torch.ops.aten.add(primals_9, _unsafe_view_8)
pow_2 = torch.ops.aten.pow(add_1, 2)
mean_1 = torch.ops.aten.mean(pow_2, [-1], True)
add_2 = torch.ops.aten.add(mean_1, 1e-06)
rsqrt_1 = torch.ops.aten.rsqrt(add_2)
mul_2 = torch.ops.aten.mul(add_1, rsqrt_1)
mul_3 = torch.ops.aten.mul(primals_8, mul_2)
t_4 = torch.ops.aten.t(primals_6)
view_9 = torch.ops.aten.view(mul_3, [8192, 512])
mm_4 = torch.ops.aten.mm(view_9, t_4)
_unsafe_view_9 = torch.ops.aten._unsafe_view(mm_4, [8, 1024, 2048])
relu = torch.ops.aten.relu(_unsafe_view_9)
t_5 = torch.ops.aten.t(primals_7)
view_10 = torch.ops.aten.view(relu, [8192, 2048])
mm_5 = torch.ops.aten.mm(view_10, t_5)
_unsafe_view_10 = torch.ops.aten._unsafe_view(mm_5, [8, 1024, 512])
add_3 = torch.ops.aten.add(add_1, _unsafe_view_10)
return [add_3, rsqrt, _unsafe_view_3, t_3, _softmax, view_6, mul_2, t, view_9, t_1, primals_5, add_1,
_unsafe_view_4, view_2, view_10, t_5, t_2, primals_8, view_4, view_8, rsqrt_1, primals_9, t_4,
mul, _unsafe_view_6, relu, view]
class TestFxNvFuserBackend(TestCase):
def _generate_random_inputs(self, device, inputs_meta: List[Tuple[torch.Size, torch.dtype]]):
inputs = []
for meta in inputs_meta:
shape, dtype = meta
if dtype in {torch.int, torch.int32, torch.int64, torch.bool, torch.uint8}:
input = torch.randint(0, 1, shape, dtype=dtype, device=device)
else:
input = torch.rand(shape, dtype=dtype, device=device)
inputs.append(input)
return inputs
@skipCUDAIfRocm
@dtypes(torch.float32)
def test_nvfuser_call_module_backend(self, device, dtype):
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
self.bn = torch.nn.BatchNorm2d(3)
self.relu = torch.nn.ReLU()
def forward(self, inp):
o = self.bn(inp)
o = self.relu(o)
return o
inp = torch.randn(2, 3, 4, 5).to(dtype=dtype, device=device)
m = Model().to(dtype=dtype, device=device)
# note that the traced module here contains only `call_module` nodes, which aren't
# fused by the nvfuser backend, but `nvfuser.compile` should still run without error
traced = symbolic_trace(m)
nvfuser = NvFuserBackend()
compiled_module = nvfuser.compile(traced)
eager_result = m(inp)
nvfuser_result = compiled_module(inp)
torch.testing.assert_close(eager_result, nvfuser_result, rtol=1e-5, atol=1e-5)
@skipCUDAIfRocm
@dtypes(torch.float32)
def test_nvfuser_backend(self, device, dtype):
m = HF_T5_Partial()
m.to(device)
traced = symbolic_trace(m)
nvfuser = NvFuserBackend()
compiled_module = nvfuser.compile(traced)
inputs = self._generate_random_inputs(device, m.inputs_meta())
eager_result = m(*inputs)
nvfuser_result = compiled_module(*inputs)
torch.testing.assert_close(eager_result, nvfuser_result, rtol=1e-5, atol=1e-5)
@skipCUDAIfRocm
@dtypes(torch.float32)
def test_aten_square(self, device, dtype):
def fn(x):
square = torch.square(x)
a = square + 1
b = a + 1
return b
inputs = torch.randn(4, device=device)
traced = make_fx(fn)(inputs)
nvfuser = NvFuserBackend()
compiled_module = nvfuser.compile(copy.deepcopy(traced))
for node in compiled_module.graph.nodes:
if node.op == "call_function":
assert "fused" in str(node.target), "the entire function should be fused into a single fusion group"
eager_result = traced(inputs)
nvfuser_result = compiled_module(inputs)
torch.testing.assert_close(eager_result, nvfuser_result, rtol=1e-5, atol=1e-5)
@skipCUDAIfRocm
@dtypes(torch.float32)
def test_aten_leakyrelu(self, device, dtype):
def fn(x):
y = torch.ops.aten.leaky_relu(x, 0.1)
a = y + 1
b = a + 1
return b
inputs = torch.randn(4, device=device)
traced = make_fx(fn)(inputs)
nvfuser = NvFuserBackend()
compiled_module = nvfuser.compile(copy.deepcopy(traced))
for node in compiled_module.graph.nodes:
if node.op == "call_function":
assert "fused" in str(node.target), "the entire function should be fused into a single fusion group"
eager_result = traced(inputs)
nvfuser_result = compiled_module(inputs)
torch.testing.assert_close(eager_result, nvfuser_result, rtol=1e-5, atol=1e-5)
@skipCUDAIfRocm
@dtypes(torch.float32)
def test_aten_where(self, device, dtype):
def fn(x):
where = torch.ops.aten.where(x < 0, -x, x)
a = where + 1
b = a + 1
return b
inputs = torch.randn(4, device=device)
traced = make_fx(fn)(inputs)
nvfuser = NvFuserBackend()
compiled_module = nvfuser.compile(copy.deepcopy(traced))
for node in compiled_module.graph.nodes:
if node.op == "call_function":
assert "fused" in str(node.target), "the entire function should be fused into a single fusion group"
eager_result = traced(inputs)
nvfuser_result = compiled_module(inputs)
torch.testing.assert_close(eager_result, nvfuser_result, rtol=1e-5, atol=1e-5)
instantiate_device_type_tests(TestFxNvFuserBackend, globals(), only_for="cuda")
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/test_fx_backends.py
|
# Owner(s): ["module: masked operators"]
"""Tests for masked operations.
"""
import itertools
import torch
from typing import List, Any
from functools import wraps
import unittest
from torch.testing._internal.common_utils import \
(TestCase, parametrize, suppress_warnings, _TestParametrizer, run_tests)
from torch.testing._internal.common_methods_invocations import \
(op_db, SampleInput)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, onlyNativeDeviceTypes, precisionOverride)
def apply_masked_reduction_along_dim(op, input, *args, **kwargs):
"""Applies reduction op along given dimension to strided x
elements that are valid according to mask tensor.
The op is applied to each elementary slice of input with args and
kwargs with the following constraints:
1. Prior to applying the op:
A. if kwargs contains an item with key 'dim_position' then it is
removed from kwargs. The value of 'dim_position' is an
integer that describes the dim argument position: while
typically the dim argument appears at the 0-th position of
the op arguments (excluding input), for instance, sum(input,
dim), there exist reductions that have extra arguments
before the dim argument, for instance, norm(input, ord, dim).
B. if args or kwargs contains dim or keepdim arguments, these
will be removed or replaced with None so that the op is
applied to elementary slice using the default dim and keepdim
value.
2. The elementary slice of the input is defined as the flattened
slice that has no masked out elements and when op is applied,
the result will be a scalar value (assuming keepdim=False). For
example, an input tensor to a reduction operation op having
dim=0 and keepdim=True argument:
[[1 * 2 * *]
[* 3 4 * 5]]
(* denotes masked out elements) has the following elementary
slices: [1, 2] and [3, 4, 5]. The result of
apply_masked_reduction_along_dim is
[[op([1, 2], *args0, **kwargs, dim=None, keepdim=False)]
[op([3, 4, 5], *args0, **kwargs, dim=None, keepdim=False)]]
where args0 is args where the dim value is replaced with None if
present.
Using the same example data, if the op is called with dim=(0, 1)
and keepdim=False, there is one elementary slice: [1, 2, 3, 4,
5]; and the corresponding result of the op is:
op([1, 2, 3, 4, 5], *args0, **kwargs, dim=None, keepdim=False)
3. If the elementary slice is empty, the corresponding output
value is nan if dtype is float, otherwise, 0. An empty
elementary slice corresponds to fully masked-out output, so the
specific value of the corresponding output is not important,
because we use a masked equality check when comparing the results
of masked operations. (A small worked example follows this
function.)
"""
# eliminate mask and dim_position keyword arguments:
mask = kwargs.pop('mask', None)
dim_pos = kwargs.pop('dim_position', 0)
dtype = kwargs.get('dtype', input.dtype)
if input.ndim == 0:
# scalar input is an elementary slice
return op(input, *args, **kwargs).to(dtype=dtype)
# eliminate keepdim keyword argument if specified:
keepdim = kwargs.pop('keepdim', False)
# eliminate dim argument that may appear both as args or kwargs
# element:
if dim_pos < len(args):
# dim is specified in args
assert 'dim' not in kwargs, (args, kwargs)
dim = args[dim_pos]
args0 = args[:dim_pos] + (None,) + args[dim_pos + 1:]
else:
# dim may be specified in kwargs
dim = kwargs.pop('dim', None)
args0 = args
# dimensions along which the reduction operation is applied:
dim_ = torch._masked._canonical_dim(dim, input.ndim)
# slices in product(*ranges) define all elementary slices:
ranges: List[Any] = []
# shape of output for the keepdim=True case:
shape = []
for i in range(input.ndim):
if i in dim_:
ranges.append((slice(None),))
shape.append(1)
else:
ranges.append(range(input.shape[i]))
shape.append(input.shape[i])
# keepdim=True version of the output, filled with nan or 0:
output = input.new_full(shape, float('nan') if dtype.is_floating_point else 0, dtype=dtype)
# apply op to all elementary slices:
if mask is None:
inpmask = input.new_ones([], dtype=torch.bool).expand(input.shape)
else:
inpmask = torch._masked._input_mask(input, mask=mask)
for s in itertools.product(*ranges):
# data of an elementary slice is 1D sequence and has only
# masked-in elements:
data = input[s].flatten()[inpmask[s].flatten().argwhere()]
if not data.numel():
# empty elementary slice
continue
output[s][0] = op(data, *args0, **kwargs)
if not keepdim:
# reshape output for the keepdim=False case
shape = [shape[i] for i in range(len(shape)) if i not in dim_]
output = output.reshape(shape)
return output
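# A minimal illustrative sketch (for exposition only, not exercised by any test): the
# "elementary slice" semantics described in the docstring above, worked out with plain
# boolean indexing for a sum reduction along dim=1. The masked-in elements of each row
# form one elementary slice, and the reduction is applied to that 1-D slice.
def _elementary_slice_example():
    x = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
    m = torch.tensor([[True, False, True], [False, True, True]])
    # elementary slices along dim=1: x[0][m[0]] == [1., 3.] and x[1][m[1]] == [5., 6.]
    return torch.stack([x[i][m[i]].sum() for i in range(x.shape[0])])  # tensor([4., 11.])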
def apply_masked_normalization_along_dim(op, input, *args, **kwargs):
"""Applies normalization op along given dimension to strided x
elements that are valid according to mask tensor.
"""
mask = kwargs.pop('mask', None)
dim_pos = kwargs.pop('dim_position', 0)
if input.ndim == 0: # scalar input
return op(input, *args, **kwargs)
dtype = kwargs.get('dtype', input.dtype)
dim = args[dim_pos]
args0 = args[:dim_pos] + (0,) + args[dim_pos + 1:]
output = torch.zeros_like(input, dtype=dtype)
if mask is None:
inpmask = input.new_ones([], dtype=torch.bool).expand(input.shape)
else:
inpmask = torch._masked._input_mask(input, mask=mask)
dim_ = dim % input.ndim
left_ranges = tuple(map(range, input.shape[:dim_]))
right_ranges = tuple(map(range, input.shape[dim_ + 1:]))
for s in itertools.product(*(left_ranges + ((slice(None),),) + right_ranges)):
indices = inpmask[s].argwhere()
output[s][indices] = op(input[s][indices], *args0, **kwargs)
return output
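# A minimal illustrative sketch (for exposition only, not exercised by any test): a tiny
# masked softmax computed with the reference helper above. Only the masked-in positions
# participate in the normalization; masked-out outputs stay zero.
def _masked_normalization_example():
    x = torch.tensor([[1., 2., 3.]])
    m = torch.tensor([[True, False, True]])
    # approximately tensor([[0.1192, 0.0000, 0.8808]])
    return apply_masked_normalization_along_dim(torch.softmax, x, 1, mask=m)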
reference_functions = dict(
norm=lambda *args, **kwargs: apply_masked_reduction_along_dim(torch.linalg.vector_norm, *args, **dict(kwargs, dim_position=1)),
var=lambda *args, **kwargs: apply_masked_reduction_along_dim(torch.var, *args, **dict(kwargs, dim_position=0)),
std=lambda *args, **kwargs: apply_masked_reduction_along_dim(torch.std, *args, **dict(kwargs, dim_position=0)),
softmax=lambda *args, **kwargs: apply_masked_normalization_along_dim(torch.softmax, *args, **kwargs),
log_softmax=lambda *args, **kwargs: apply_masked_normalization_along_dim(torch.log_softmax, *args, **kwargs),
softmin=lambda *args, **kwargs: apply_masked_normalization_along_dim(torch.nn.functional.softmin, *args, **kwargs),
normalize=lambda *args, **kwargs: apply_masked_normalization_along_dim(
torch.nn.functional.normalize, *args, **dict(kwargs, dim_position=1)),
)
masked_ops = [op for op in op_db if op.name.startswith('_masked.')]
masked_ops_with_references = [op for op in masked_ops if op.name.rsplit('.', 1)[-1] in reference_functions]
masked_ops_with_non_strided_support = [op for op in masked_ops if op.supports_sparse or op.supports_sparse_csr]
def _tensor_to_strided(obj):
# after gh-59958 is resolved, replace the usage of this function
# with torch.Tensor.to_dense
if torch.is_tensor(obj):
if obj.layout == torch.strided:
return obj
return obj.to_dense()
return obj
def to_strided(obj):
"""Convert the tensor content of object to strided tensor content.
"""
return torch.utils._pytree.tree_map(_tensor_to_strided, obj)
def to_sparse_coo(obj):
"""Convert the tensor content of object to sparse coo tensor content.
"""
return torch.utils._pytree.tree_map(torch.Tensor.to_sparse, obj)
def to_sparse_csr(obj):
"""Convert the tensor content of object to sparse csr tensor content.
"""
return torch.utils._pytree.tree_map(torch.Tensor.to_sparse_csr, obj)
class mask_layouts(_TestParametrizer):
"""Decorator class for parametrization of test function with an input
layout argument and an extra argument of sample inputs generator.
The sample_inputs generator provides samples with all supported
layouts for the mask argument.
"""
def _parametrize_test(self, test, generic_cls, device_cls):
@wraps(test)
def wrap(self, layout, device, dtype, op):
layout_name = str(layout).lstrip('torch.')
if layout == torch.strided:
# strided layouts are always supported
sample_inputs_func = op.sample_inputs
elif layout == torch.sparse_coo:
if not op.supports_sparse:
raise unittest.SkipTest(f"{op.name} does not support inputs with {layout_name} layout")
sample_inputs_func = op.sample_inputs_sparse_coo
elif layout == torch.sparse_csr:
if not op.supports_sparse_csr:
raise unittest.SkipTest(f"{op.name} does not support inputs with {layout_name} layout")
sample_inputs_func = op.sample_inputs_sparse_csr
else:
raise NotImplementedError(f'{layout}')
def sample_inputs_generator():
for sample_input in sample_inputs_func(device, dtype):
mask = sample_input.kwargs.get('mask')
if mask is None:
yield sample_input
else:
if layout == sample_input.input.layout:
yield sample_input
if layout != torch.strided:
sample_input_kwargs = sample_input.kwargs.copy()
sample_input_kwargs.update(mask=mask.to_dense())
yield SampleInput(sample_input.input.clone(),
args=sample_input.args,
kwargs=sample_input_kwargs)
if layout != torch.sparse_coo and op.supports_sparse:
sample_input_kwargs = sample_input.kwargs.copy()
sample_input_kwargs.update(mask=mask.to_sparse())
yield SampleInput(sample_input.input.clone(),
args=sample_input.args,
kwargs=sample_input_kwargs)
if layout != torch.sparse_csr and op.supports_sparse_csr and sample_input.input.ndim == 2:
sample_input_kwargs = sample_input.kwargs.copy()
sample_input_kwargs.update(mask=mask.to_sparse_csr())
yield SampleInput(sample_input.input.clone(),
args=sample_input.args,
kwargs=sample_input_kwargs)
test(self, layout, device, dtype, op, sample_inputs_generator())
for layout in (torch.strided, torch.sparse_coo, torch.sparse_csr):
yield (wrap, str(layout).lstrip('torch.'), {'layout': layout})
class TestMasked(TestCase):
def assertEqualMasked(self, actual, expected, mask):
strided = to_strided(actual)
if mask is not None:
strided = torch.where(mask, strided, strided.new_zeros([]))
expected = torch.where(mask, expected, expected.new_zeros([]))
self.assertEqual(strided, expected, exact_device=False)
@onlyNativeDeviceTypes
@suppress_warnings
@ops(masked_ops_with_references)
@precisionOverride({torch.bfloat16: 5e-4, torch.float16: 5e-4})
def test_reference_masked(self, device, dtype, op):
op_name = op.name.rsplit('.', 1)[-1]
ref_op = reference_functions[op_name]
sample_inputs = op.sample_inputs(device, dtype)
for sample_input in sample_inputs:
t_inp, t_args, t_kwargs = sample_input.input, sample_input.args, sample_input.kwargs
if op_name in {'var', 'std'} and not (t_inp.dtype.is_floating_point or t_inp.dtype.is_complex):
# torch.var/torch.std does not support integer inputs
continue
actual = op.op(t_inp, *t_args, **t_kwargs)
expected = ref_op(t_inp, *t_args, **t_kwargs)
if t_kwargs.get('mask') is None:
outmask = None
else:
outmask = torch._masked._output_mask(op.op, t_inp, *t_args, **t_kwargs)
self.assertEqualMasked(actual, expected, outmask)
@mask_layouts()
@onlyNativeDeviceTypes
@suppress_warnings
@ops(masked_ops_with_non_strided_support)
@precisionOverride({torch.bfloat16: 5e-3, torch.float16: 5e-3})
def test_mask_layout(self, layout, device, dtype, op, sample_inputs):
for sample in sample_inputs:
t_inp, t_args, t_kwargs = sample.input, sample.args, sample.kwargs
actual = op.op(t_inp, *t_args, **t_kwargs)
assert actual.layout == layout
# check masked invariance:
# op(inp, mask).to_dense() == op(inp.to_dense(), mask.to_dense()) at outmask
#
r_inp, r_args, r_kwargs = to_strided((t_inp, t_args, t_kwargs))
if r_kwargs.get('mask') is None:
outmask = None
else:
outmask = torch._masked._output_mask(op.op, r_inp, *r_args, **r_kwargs)
expected = op.op(r_inp, *r_args, **r_kwargs)
self.assertEqualMasked(actual, expected, outmask)
@parametrize("sparse_kind,fill_value", [('coo', 0), ('hybrid_coo', 0),
('coo', 123), ('hybrid_coo', 123),
('csr', 0), ('csr', 123)],
name_fn=lambda sparse_kind, fill_value: f'{sparse_kind}_fill_value_{fill_value}')
def test_where(self, sparse_kind, fill_value):
is_hybrid = False
if sparse_kind == 'coo':
def to_sparse(dense):
return dense.to_sparse(2)
def set_values(sparse, index, value):
sparse._values()[index] = value
elif sparse_kind == 'hybrid_coo':
is_hybrid = True
def to_sparse(dense):
return dense.to_sparse(1)
def set_values(sparse, index, value):
sparse._values()[index] = value
elif sparse_kind == 'csr':
def to_sparse(dense):
return dense.to_sparse_csr()
def set_values(sparse, index, value):
sparse.values()[index] = value
else:
assert 0, sparse_kind
mask = torch.tensor([[1, 0, 1, 0, 0],
[1, 1, 1, 1, 0],
[0, 1, 0, 1, 0],
[0, 0, 0, 0, 0],
[0, 0, 1, 1, 0],
[1, 1, 0, 0, 0]]).to(dtype=bool)
mask = to_sparse(mask)
# mark some specified mask elements as explicitly masked out:
if is_hybrid:
set_values(mask, (1, 1), False)
set_values(mask, (-2, -2), False)
else:
set_values(mask, 3, False)
set_values(mask, -3, False)
input = torch.tensor([[1, 0, 0, 0, -1],
[2, 3, 0, 0, -2],
[0, 4, 5, 0, -3],
[0, 0, 6, 7, 0],
[0, 8, 9, 0, -3],
[10, 11, 0, 0, -5]])
input = to_sparse(input)
# make specified input elements have zero values:
if is_hybrid:
set_values(input, (1, 1), 0)
set_values(input, (-1, 0), 0)
F = fill_value
else:
set_values(input, 3, 0)
set_values(input, -3, 0)
F = 0
# expected where result:
Z = 99
# The Z value corresponds to masked-in elements that are not
# specified in the input; it will be replaced with zero
tmp = torch.tensor([[1, F, Z, F, F],
[2, F, Z, Z, F],
[F, 4, F, Z, F],
[0, 0, 0, 0, 0],
[F, F, 9, F, F],
[Z, 11, F, F, F]])
tmp = to_sparse(tmp)
sparse = torch._masked._where(mask, input,
torch.tensor(fill_value, dtype=input.dtype, device=input.device))
if tmp.layout == torch.sparse_coo:
expected_sparse = torch.sparse_coo_tensor(
tmp.indices(),
torch.where(tmp.values() != Z, tmp.values(), tmp.values().new_full([], 0)),
input.shape)
outmask = torch.sparse_coo_tensor(sparse.indices(),
sparse.values().new_full(sparse.values().shape, 1).to(dtype=bool),
sparse.shape)._coalesced_(True)
elif tmp.layout == torch.sparse_csr:
expected_sparse = torch.sparse_csr_tensor(
tmp.crow_indices(),
tmp.col_indices(),
torch.where(tmp.values() != Z, tmp.values(), tmp.values().new_full([], 0)),
input.shape)
outmask = torch.sparse_csr_tensor(sparse.crow_indices(), sparse.col_indices(),
sparse.values().new_full(sparse.values().shape, 1).to(dtype=bool),
sparse.shape)
else:
assert 0
self.assertEqual(sparse, expected_sparse)
# check invariance:
# torch.where(mask.to_dense(), input.to_dense(), fill_value)
# == where(mask, input, fill_value).to_dense(fill_value)
expected = torch.where(mask.to_dense(), input.to_dense(), torch.full(input.shape, F))
dense = torch.where(outmask.to_dense(), sparse.to_dense(), torch.full(sparse.shape, F))
self.assertEqual(dense, expected)
instantiate_device_type_tests(TestMasked, globals(), except_for='meta')
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/test_masked.py
|