python_code | repo_name | file_path
---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
import warnings
import gym
import numpy as np
import pytest
import torch
from flaky import flaky
from ray.rllib.agents.ppo import PPOTrainer
from ray.tune.registry import register_env
from compiler_gym.wrappers.mlir import make_mlir_rl_wrapper_env
from tests.test_main import main
# Ignore import deprecation warnings from ray.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import ray
@flaky(max_runs=3, min_passes=1)
@pytest.mark.filterwarnings(
"ignore:`np\\.bool` is a deprecated alias for the builtin `bool`\\.",
"ignore:Mean of empty slice",
"ignore::ResourceWarning",
"ignore:using `dtype=` in comparisons is only useful for `dtype=object`",
)
def test_rllib_ppo_smoke():
ray.shutdown()
seed = 123
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
ray.init(local_mode=True) # Runs PPO training in the same process
register_env(
"mlir_rl_env-v0",
lambda env_config: make_mlir_rl_wrapper_env(env=gym.make("mlir-v0")),
)
config = {
"env": "mlir_rl_env-v0",
"framework": "torch",
"model": {
"fcnet_hiddens": [2, 2],
"fcnet_activation": "relu",
},
"num_workers": 0, # local worker only
"train_batch_size": 2,
"sgd_minibatch_size": 1,
"num_sgd_iter": 1,
"rollout_fragment_length": 2,
}
trainer = PPOTrainer(config=config)
with warnings.catch_warnings():
# Ignore deprecation warnings from internal rllib implementation.
warnings.filterwarnings("ignore", category=DeprecationWarning)
trainer.train()
ray.shutdown()
if __name__ == "__main__":
main()
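# Illustrative sketch (not part of the test): the same wrapper factory can be
# used outside of RLlib. It assumes only that the "mlir-v0" environment has
# been registered, as it is by the imports above. Not collected by pytest
# because the name does not start with "test_".
def _example_make_wrapped_env():  # pragma: no cover
    wrapped = make_mlir_rl_wrapper_env(env=gym.make("mlir-v0"))
    try:
        wrapped.reset()
    finally:
        wrapped.close()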
|
CompilerGym-development
|
tests/mlir/rllib_ppo_smoke_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/mlir/datasets/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the matmul dataset."""
import re
from copy import deepcopy
from itertools import islice
from pathlib import Path
import gym
import numpy as np
import pytest
import compiler_gym.envs.mlir # noqa register environments
from compiler_gym.envs.mlir import MlirEnv
from compiler_gym.envs.mlir.datasets import MatmulBenchmark, MatmulDataset
from tests.pytest_plugins.common import is_ci
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common", "tests.pytest_plugins.mlir"]
@pytest.fixture(scope="module")
def matmul_dataset() -> MatmulDataset:
with gym.make("mlir-v0") as env:
ds = env.datasets["generator://matmul-v0"]
yield ds
def test_matmul_size(matmul_dataset: MatmulDataset):
assert matmul_dataset.size == 1
assert len(matmul_dataset) == 1
@pytest.mark.parametrize("index", range(1) if is_ci() else range(1))
def test_matmul_random_select(
env: MlirEnv, matmul_dataset: MatmulDataset, index: int, tmpwd: Path
):
uri = next(islice(matmul_dataset.benchmark_uris(), index, None))
benchmark = matmul_dataset.benchmark(uri)
assert isinstance(benchmark, MatmulBenchmark)
env.reset(benchmark=benchmark)
assert benchmark.source
benchmark.write_sources_to_directory(tmpwd)
assert (tmpwd / "source.mlir").is_file()
def test_matmul_from_seed_retry_count_exceeded(matmul_dataset: MatmulDataset):
with pytest.raises(
OSError, match=re.escape("matmul failed after 5 attempts with size (4, 4, 4)")
):
matmul_dataset.benchmark_from_size(mnk=(4, 4, 4), max_retries=3, retry_count=5)
def test_matmul_positive_runtimes(env: MlirEnv, matmul_dataset: MatmulDataset):
benchmark = next(matmul_dataset.benchmarks())
env.reset(benchmark=benchmark)
action_space = deepcopy(env.action_space)
action_space.seed(123)
env.step(action_space.sample())
val = env.observation["Runtime"]
assert np.all(np.greater(val, 0))
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/mlir/datasets/matmul_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Utilities for random testing."""
import random
from time import time
from typing import List, Tuple
from compiler_gym.envs import CompilerEnv
from compiler_gym.util.gym_type_hints import ObservationType
def apply_random_trajectory(
env: CompilerEnv,
random_trajectory_length_range=(1, 50),
timeout: int = 0,
) -> List[Tuple[int, ObservationType, float, bool]]:
"""Evaluate and return a random trajectory."""
end_time = time() + timeout
num_actions = random.randint(*random_trajectory_length_range)
trajectory = []
for _ in range(num_actions):
action = env.action_space.sample()
observation, reward, done, _ = env.step(action)
if done:
break # Broken trajectory.
trajectory.append((action, observation, reward, done))
if timeout and time() > end_time:
break
return trajectory
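# Illustrative usage sketch, not exercised by the test suite. It assumes an
# installed LLVM environment and the cbench-v1 dataset; the benchmark URI is
# only an example.
def _example_random_trajectory():  # pragma: no cover
    import gym
    import compiler_gym  # noqa Register environments.
    with gym.make("llvm-v0") as env:
        env.reset(benchmark="benchmark://cbench-v1/crc32")
        trajectory = apply_random_trajectory(
            env, random_trajectory_length_range=(1, 10), timeout=60
        )
        print(f"Applied {len(trajectory)} random actions")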
|
CompilerGym-development
|
tests/pytest_plugins/random_util.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/pytest_plugins/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Pytest fixtures for the LLVM CompilerGym environments."""
import os
from pathlib import Path
from typing import Iterable, List
import gym
import pytest
from compiler_gym.datasets import Dataset
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.envs.llvm.datasets.cbench import VALIDATORS
from compiler_gym.third_party import llvm
from compiler_gym.util.runfiles_path import runfiles_path
BENCHMARKS_LIST = Path(runfiles_path("compiler_gym/third_party/cbench/benchmarks.txt"))
def _read_list_file(path: Path) -> Iterable[str]:
with open(str(path)) as f:
for action in f:
if action.strip():
yield action.strip()
BENCHMARK_NAMES = list(_read_list_file(BENCHMARKS_LIST))
# Skip ghostscript on CI as it is just too heavy.
if bool(os.environ.get("CI")):
BENCHMARK_NAMES = [
b for b in BENCHMARK_NAMES if b != "benchmark://cbench-v1/ghostscript"
]
with gym.make("llvm-v0") as env:
ACTION_NAMES = list(env.action_space.names)
OBSERVATION_SPACE_NAMES = sorted(env.observation.spaces.keys())
REWARD_SPACE_NAMES = sorted(env.reward.spaces.keys())
DATASET_NAMES = sorted(d.name for d in env.datasets)
@pytest.fixture(scope="module")
def action_names() -> List[str]:
"""A list of every action."""
return ACTION_NAMES
@pytest.fixture(scope="module", params=OBSERVATION_SPACE_NAMES)
def observation_space(request) -> str:
return request.param
@pytest.fixture(scope="module", params=REWARD_SPACE_NAMES)
def reward_space(request) -> str:
return request.param
@pytest.fixture(scope="module")
def benchmark_names() -> List[str]:
"""A list of every benchmarks."""
return BENCHMARK_NAMES
@pytest.fixture(scope="module", params=ACTION_NAMES)
def action_name(request) -> str:
"""Enumerate the names of actions."""
yield request.param
@pytest.fixture(scope="module", params=BENCHMARK_NAMES)
def benchmark_name(request) -> str:
"""Enumerate the names of benchmarks."""
yield request.param
VALIDATABLE_CBENCH_URIS = [b for b in BENCHMARK_NAMES if b in VALIDATORS]
NON_VALIDATABLE_CBENCH_URIS = [b for b in BENCHMARK_NAMES if b not in VALIDATORS]
@pytest.fixture(scope="module", params=VALIDATABLE_CBENCH_URIS)
def validatable_cbench_uri(request) -> str:
"""Enumerate the names of benchmarks whose semantics can be validated."""
yield request.param
@pytest.fixture(scope="module", params=NON_VALIDATABLE_CBENCH_URIS)
def non_validatable_cbench_uri(request) -> str:
"""Enumerate the names of benchmarks whose semantics cannot be validated."""
yield request.param
@pytest.fixture(scope="function")
def env() -> LlvmEnv:
"""Create an LLVM environment."""
with gym.make("llvm-v0") as env_:
yield env_
@pytest.fixture(scope="module")
def llvm_opt() -> Path:
"""Test fixture that yields the path of opt."""
return llvm.opt_path()
@pytest.fixture(scope="module")
def llvm_diff() -> Path:
"""Test fixture that yields the path of llvm-diff."""
return llvm.llvm_diff_path()
@pytest.fixture(scope="module")
def clang() -> Path:
"""Test fixture that yields the path of clang."""
return llvm.clang_path()
@pytest.fixture(scope="module", params=DATASET_NAMES)
def dataset_name(request) -> str:
return request.param
@pytest.fixture(scope="module", params=DATASET_NAMES)
def dataset(request) -> Dataset:
with gym.make("llvm-v0") as env:
return env.datasets[request.param]
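# Illustrative sketch of how a test consumes the parametrized fixtures above
# (hypothetical example; not collected by pytest because the name does not
# start with "test_"): each (env, observation_space) pair yields one case.
def _example_observation_space_exists(env: LlvmEnv, observation_space: str):
    assert observation_space in env.observation.spaces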
|
CompilerGym-development
|
tests/pytest_plugins/llvm.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Pytest fixtures for CompilerGym tests."""
import os
import sys
import tempfile
from pathlib import Path
from typing import List
import docker
import pytest
from absl import flags as absl_flags
from compiler_gym.util.runfiles_path import transient_cache_path
FLAGS = absl_flags.FLAGS
def is_ci() -> bool:
"""Return whether running in CI environment."""
return os.environ.get("CI", "") != ""
def in_bazel() -> bool:
"""Return whether running under bazel."""
return os.environ.get("TEST_WORKSPACE", "") != ""
def docker_is_available() -> bool:
"""Return whether docker is available."""
try:
docker.from_env()
return True
except docker.errors.DockerException:
return False
# Decorator to skip a test in the CI environment.
skip_on_ci = pytest.mark.skipif(is_ci(), reason="Skip on CI")
# Decorator to run a test only in the CI environment.
ci_only = pytest.mark.skipif(not is_ci(), reason="Runs only on CI")
# Decorator to mark a test as skipped if not on Linux.
linux_only = pytest.mark.skipif(
not sys.platform.lower().startswith("linux"), reason="Linux only"
)
# Decorator to mark a test as skipped if not on macOS.
macos_only = pytest.mark.skipif(
not sys.platform.lower().startswith("darwin"), reason="macOS only"
)
# Decorator to mark a test as skipped if not running under bazel.
bazel_only = pytest.mark.skipif(not in_bazel(), reason="bazel only")
# Decorator to mark a test as skipped if not running in the `make test`
# environment.
install_test_only = pytest.mark.skipif(in_bazel(), reason="test only")
# Decorator to skip a test if docker is not available.
with_docker = pytest.mark.skipif(
not docker_is_available(), reason="Docker is not available"
)
# Decorator to skip a test if docker is available.
without_docker = pytest.mark.skipif(
docker_is_available(), reason="Docker is available"
)
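# Illustrative sketch of applying the markers above (hypothetical example; the
# name deliberately does not start with "test_" so pytest will not collect it).
@skip_on_ci
@linux_only
def _example_marked_test():
    assert True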
@pytest.fixture(scope="function")
def tmpwd() -> Path:
"""A fixture that creates a temporary directory, changes to it, and yields the path."""
tmpdir_root = transient_cache_path("tests")
tmpdir_root.mkdir(exist_ok=True, parents=True)
with tempfile.TemporaryDirectory(dir=tmpdir_root, prefix="tmpwd-") as d:
pwd = os.getcwd()
try:
os.chdir(d)
yield Path(d)
finally:
os.chdir(pwd)
@pytest.fixture(scope="function")
def temporary_environ():
"""A fixture that allows you to modify os.environ without affecting other tests."""
old_env = os.environ.copy()
try:
yield os.environ
finally:
os.environ.clear()
os.environ.update(old_env)
def set_command_line_flags(flags: List[str]):
"""Set the command line flags."""
sys.argv = flags
FLAGS.unparse_flags()
FLAGS(flags)
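# Illustrative sketch (hypothetical helper): resetting the absl flags from a
# test. The first element is the program name, as required by absl's FLAGS()
# parser; passing only the program name clears previously parsed flag values.
def _example_reset_flags():  # pragma: no cover
    set_command_line_flags(["argv0"])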
|
CompilerGym-development
|
tests/pytest_plugins/common.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Pytest fixtures for the GCC CompilerGym environments."""
import subprocess
from functools import lru_cache
from typing import Iterable
import pytest
from tests.pytest_plugins.common import docker_is_available
@lru_cache(maxsize=2)
def system_gcc_is_available() -> bool:
"""Return whether there is a system GCC available."""
try:
stdout = subprocess.check_output(
["gcc", "--version"], universal_newlines=True, stderr=subprocess.DEVNULL
)
# On some systems "gcc" may alias to a different compiler, so check for
# the presence of the name "gcc" in the first line of output.
return "gcc" in stdout.split("\n")[0].lower()
except (subprocess.CalledProcessError, FileNotFoundError):
return False
def system_gcc_path() -> str:
"""Return the path of the system GCC as a string."""
return subprocess.check_output(
["which", "gcc"], universal_newlines=True, stderr=subprocess.DEVNULL
).strip()
def gcc_environment_is_supported() -> bool:
"""Return whether the requirements for the GCC environment are met."""
return docker_is_available() or system_gcc_is_available()
def gcc_bins() -> Iterable[str]:
"""Return a list of available GCCs."""
if docker_is_available():
yield "docker:gcc:11.2.0"
if system_gcc_is_available():
yield system_gcc_path()
@pytest.fixture(scope="module", params=gcc_bins())
def gcc_bin(request) -> str:
return request.param
# Decorator to skip a test if GCC environment is not supported.
with_gcc_support = pytest.mark.skipif(
not gcc_environment_is_supported(), reason="GCC is not supported"
)
# Decorator to skip a test if GCC environment is supported.
without_gcc_support = pytest.mark.skipif(
gcc_environment_is_supported(), reason="GCC is supported"
)
# Decorator to skip a test if system GCC is not available.
with_system_gcc = pytest.mark.skipif(
not system_gcc_is_available(), reason="GCC is not available"
)
# Decorator to skip a test if system GCC is available.
without_system_gcc = pytest.mark.skipif(
system_gcc_is_available(), reason="GCC is available"
)
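# Illustrative sketch (hypothetical test, not collected): constructing a GCC
# environment for each available binary via the gcc_bin fixture above. Assumes
# the gcc-v0 environment is registered by importing compiler_gym.
def _example_gcc_env(gcc_bin: str):  # pragma: no cover
    import gym
    import compiler_gym  # noqa Register environments.
    with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
        env.reset()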
|
CompilerGym-development
|
tests/pytest_plugins/gcc.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Pytest fixtures for the MLIR CompilerGym environments."""
from pathlib import Path
from typing import Iterable
import gym
import pytest
from compiler_gym.datasets import Dataset
from compiler_gym.envs.mlir import MlirEnv
def _read_list_file(path: Path) -> Iterable[str]:
with open(str(path)) as f:
for action in f:
if action.strip():
yield action.strip()
with gym.make("mlir-v0") as env:
OBSERVATION_SPACE_NAMES = sorted(env.observation.spaces.keys())
REWARD_SPACE_NAMES = sorted(env.reward.spaces.keys())
DATASET_NAMES = sorted(d.name for d in env.datasets)
@pytest.fixture(scope="module", params=OBSERVATION_SPACE_NAMES)
def observation_space(request) -> str:
return request.param
@pytest.fixture(scope="module", params=REWARD_SPACE_NAMES)
def reward_space(request) -> str:
return request.param
@pytest.fixture(scope="function")
def env() -> MlirEnv:
"""Create an LLVM environment."""
with gym.make("mlir-v0") as env_:
yield env_
@pytest.fixture(scope="module", params=DATASET_NAMES)
def dataset_name(request) -> str:
return request.param
@pytest.fixture(scope="module", params=DATASET_NAMES)
def dataset(request) -> Dataset:
with gym.make("mlir-v0") as env:
return env.datasets[request.param]
|
CompilerGym-development
|
tests/pytest_plugins/mlir.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/third_party/__init__.py
|
# Adapted from David Malcolm's gcc invocation library.
#
# Copyright 2013 David Malcolm <dmalcolm@redhat.com>
# Copyright 2013 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
from compiler_gym.third_party.gccinvocation.gccinvocation import (
GccInvocation,
cmdline_to_argv,
)
from tests.test_main import main
def test_cmdline_to_argv_simple():
argstr = (
"gcc -o scripts/genksyms/genksyms"
" scripts/genksyms/genksyms.o"
" scripts/genksyms/parse.tab.o"
" scripts/genksyms/lex.lex.o"
)
assert cmdline_to_argv(argstr) == [
"gcc",
"-o",
"scripts/genksyms/genksyms",
"scripts/genksyms/genksyms.o",
"scripts/genksyms/parse.tab.o",
"scripts/genksyms/lex.lex.o",
]
def test_cmdline_to_argv_quoted():
# (heavily edited from a kernel build)
argstr = (
"cc1 -quiet"
" -DCONFIG_AS_CFI_SIGNAL_FRAME=1"
# Here's the awkward argument:
' -DIPATH_IDSTR="QLogic kernel.org driver"'
" -DIPATH_KERN_TYPE=0 -DKBUILD_STR(s)=#s"
" -fprofile-arcs -"
)
assert cmdline_to_argv(argstr) == [
"cc1",
"-quiet",
"-DCONFIG_AS_CFI_SIGNAL_FRAME=1",
'-DIPATH_IDSTR="QLogic kernel.org driver"',
"-DIPATH_KERN_TYPE=0",
"-DKBUILD_STR(s)=#s",
"-fprofile-arcs",
"-",
]
def test_parse_compile():
args = (
"gcc -pthread -fno-strict-aliasing -O2 -g -pipe -Wall"
" -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector"
" --param=ssp-buffer-size=4 -m64 -mtune=generic -D_GNU_SOURCE"
" -fPIC -fwrapv -DNDEBUG -O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2"
" -fexceptions -fstack-protector --param=ssp-buffer-size=4 -m64"
' -mtune=generic -D_GNU_SOURCE -fPIC -fwrapv -fPIC -DVERSION="0.7"'
" -I/usr/include/python2.7 -c python-ethtool/ethtool.c"
" -o build/temp.linux-x86_64-2.7/python-ethtool/ethtool.o"
).split()
gccinv = GccInvocation(args)
assert gccinv.argv == args
assert gccinv.executable == "gcc"
assert gccinv.is_driver
assert gccinv.sources == ["python-ethtool/ethtool.c"]
assert gccinv.defines == ["_GNU_SOURCE", "NDEBUG", "_GNU_SOURCE", 'VERSION="0.7"']
assert gccinv.includepaths == ["/usr/include/python2.7"]
assert gccinv.otherargs == [
"-pthread",
"-fno-strict-aliasing",
"-O2",
"-g",
"-pipe",
"-Wall",
"-Wp,-D_FORTIFY_SOURCE=2",
"-fexceptions",
"-fstack-protector",
"--param=ssp-buffer-size=4",
"-m64",
"-mtune=generic",
"-fPIC",
"-fwrapv",
"-O2",
"-g",
"-pipe",
"-Wall",
"-Wp,-D_FORTIFY_SOURCE=2",
"-fexceptions",
"-fstack-protector",
"--param=ssp-buffer-size=4",
"-m64",
"-mtune=generic",
"-fPIC",
"-fwrapv",
"-fPIC",
"-c",
]
def test_parse_link():
args = (
"gcc -pthread -shared -Wl,-z,relro"
" build/temp.linux-x86_64-2.7/python-ethtool/ethtool.o"
" build/temp.linux-x86_64-2.7/python-ethtool/etherinfo.o"
" build/temp.linux-x86_64-2.7/python-ethtool/etherinfo_obj.o"
" build/temp.linux-x86_64-2.7/python-ethtool/etherinfo_ipv6_obj.o"
" -L/usr/lib64 -lnl -lpython2.7"
" -o build/lib.linux-x86_64-2.7/ethtool.so"
).split()
gccinv = GccInvocation(args)
assert gccinv.argv == args
assert gccinv.executable == "gcc"
assert gccinv.sources == [
"build/temp.linux-x86_64-2.7/python-ethtool/ethtool.o",
"build/temp.linux-x86_64-2.7/python-ethtool/etherinfo.o",
"build/temp.linux-x86_64-2.7/python-ethtool/etherinfo_obj.o",
"build/temp.linux-x86_64-2.7/python-ethtool/etherinfo_ipv6_obj.o",
]
assert gccinv.defines == []
assert gccinv.includepaths == []
def test_parse_cplusplus():
args = (
"/usr/bin/c++ -DPYSIDE_EXPORTS -DQT_GUI_LIB -DQT_CORE_LIB"
" -DQT_NO_DEBUG -O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2"
" -fexceptions -fstack-protector --param=ssp-buffer-size=4"
" -m64 -mtune=generic -Wall -fvisibility=hidden"
" -Wno-strict-aliasing -O3 -DNDEBUG -fPIC"
" -I/usr/include/QtGui -I/usr/include/QtCore"
" -I/builddir/build/BUILD/pyside-qt4.7+1.1.0/libpyside"
" -I/usr/include/shiboken -I/usr/include/python2.7"
" -o CMakeFiles/pyside.dir/dynamicqmetaobject.cpp.o"
" -c /builddir/build/BUILD/pyside-qt4.7+1.1.0/libpyside/dynamicqmetaobject.cpp"
)
gccinv = GccInvocation(args.split())
assert gccinv.executable == "/usr/bin/c++"
assert gccinv.progname == "c++"
assert gccinv.is_driver
assert gccinv.sources == [
"/builddir/build/BUILD/pyside-qt4.7+1.1.0/libpyside/dynamicqmetaobject.cpp"
]
assert "PYSIDE_EXPORTS" in gccinv.defines
assert "NDEBUG" in gccinv.defines
assert "/builddir/build/BUILD/pyside-qt4.7+1.1.0/libpyside" in gccinv.includepaths
assert "--param=ssp-buffer-size=4" in gccinv.otherargs
def test_complex_invocation():
# A command line taken from libreoffice/3.5.0.3/5.fc17/x86_64/build.log was:
# R=/builddir/build/BUILD && S=$R/libreoffice-3.5.0.3 &&
# O=$S/solver/unxlngx6.pro && W=$S/workdir/unxlngx6.pro &&
# mkdir -p $W/CxxObject/xml2cmp/source/support/ $W/Dep/CxxObject/xml2cmp/source/support/ &&
# g++ -DCPPU_ENV=gcc3 -DENABLE_GRAPHITE -DENABLE_GTK -DENABLE_KDE4 -DGCC
# -DGXX_INCLUDE_PATH=/usr/include/c++/4.7.2 -DHAVE_GCC_VISIBILITY_FEATURE
# -DHAVE_THREADSAFE_STATICS -DLINUX -DNDEBUG -DOPTIMIZE -DOSL_DEBUG_LEVEL=0
# -DPRODUCT -DSOLAR_JAVA -DSUPD=350 -DUNIX -DUNX -DVCL -DX86_64 -D_PTHREADS
# -D_REENTRANT -Wall -Wendif-labels -Wextra -fmessage-length=0 -fno-common
# -pipe -fPIC -Wshadow -Wsign-promo -Woverloaded-virtual -Wno-non-virtual-dtor
# -fvisibility=hidden -fvisibility-inlines-hidden -std=c++0x -ggdb2
# -Wp,-D_FORTIFY_SOURCE=2 -fstack-protector --param=ssp-buffer-size=4 -m64
# -mtune=generic -DEXCEPTIONS_ON -fexceptions -fno-enforce-eh-specs
# -Wp,-D_FORTIFY_SOURCE=2 -fstack-protector --param=ssp-buffer-size=4 -m64
# -mtune=generic -c $S/xml2cmp/source/support/cmdline.cxx
# -o $W/CxxObject/xml2cmp/source/support/cmdline.o -MMD
# -MT $W/CxxObject/xml2cmp/source/support/cmdline.o -MP
# -MF $W/Dep/CxxObject/xml2cmp/source/support/cmdline.d
# -I$S/xml2cmp/source/support/ -I$O/inc/stl -I$O/inc/external -I$O/inc
# -I$S/solenv/inc/unxlngx6 -I$S/solenv/inc -I$S/res
# -I/usr/lib/jvm/java-1.7.0-openjdk.x86_64/include
# -I/usr/lib/jvm/java-1.7.0-openjdk.x86_64/include/linux
# -I/usr/lib/jvm/java-1.7.0-openjdk.x86_64/include/native_threads/include
args = (
"g++ -DCPPU_ENV=gcc3 -DENABLE_GRAPHITE -DENABLE_GTK"
" -DENABLE_KDE4 -DGCC -DGXX_INCLUDE_PATH=/usr/include/c++/4.7.2"
" -DHAVE_GCC_VISIBILITY_FEATURE -DHAVE_THREADSAFE_STATICS"
" -DLINUX -DNDEBUG -DOPTIMIZE -DOSL_DEBUG_LEVEL=0 -DPRODUCT"
" -DSOLAR_JAVA -DSUPD=350 -DUNIX -DUNX -DVCL -DX86_64"
" -D_PTHREADS -D_REENTRANT -Wall -Wendif-labels -Wextra"
" -fmessage-length=0 -fno-common -pipe -fPIC -Wshadow"
" -Wsign-promo -Woverloaded-virtual -Wno-non-virtual-dtor"
" -fvisibility=hidden -fvisibility-inlines-hidden"
" -std=c++0x -ggdb2 -Wp,-D_FORTIFY_SOURCE=2"
" -fstack-protector --param=ssp-buffer-size=4 -m64"
" -mtune=generic -DEXCEPTIONS_ON -fexceptions"
" -fno-enforce-eh-specs -Wp,-D_FORTIFY_SOURCE=2"
" -fstack-protector --param=ssp-buffer-size=4 -m64"
" -mtune=generic -c $S/xml2cmp/source/support/cmdline.cxx"
" -o $W/CxxObject/xml2cmp/source/support/cmdline.o -MMD"
" -MT $W/CxxObject/xml2cmp/source/support/cmdline.o -MP"
" -MF $W/Dep/CxxObject/xml2cmp/source/support/cmdline.d"
" -I$S/xml2cmp/source/support/ -I$O/inc/stl"
" -I$O/inc/external -I$O/inc -I$S/solenv/inc/unxlngx6"
" -I$S/solenv/inc -I$S/res"
" -I/usr/lib/jvm/java-1.7.0-openjdk.x86_64/include"
" -I/usr/lib/jvm/java-1.7.0-openjdk.x86_64/include/linux"
" -I/usr/lib/jvm/java-1.7.0-openjdk.x86_64/include/native_threads/include"
)
# Expand the shell vars in the arguments:
args = args.replace("$W", "$S/workdir/unxlngx6.pro")
args = args.replace("$O", "$S/solver/unxlngx6.pro")
args = args.replace("$S", "$R/libreoffice-3.5.0.3")
args = args.replace("$R", "/builddir/build/BUILD")
assert "$" not in args
gccinv = GccInvocation(args.split())
assert gccinv.executable == "g++"
assert gccinv.sources == [
"/builddir/build/BUILD/libreoffice-3.5.0.3/xml2cmp/source/support/cmdline.cxx"
]
assert "CPPU_ENV=gcc3" in gccinv.defines
assert "EXCEPTIONS_ON" in gccinv.defines
assert (
"/builddir/build/BUILD/libreoffice-3.5.0.3/solver/unxlngx6.pro/inc/stl"
in gccinv.includepaths
)
assert (
"/usr/lib/jvm/java-1.7.0-openjdk.x86_64/include/native_threads/include"
in gccinv.includepaths
)
assert "-Wall" in gccinv.otherargs
def test_restrict_to_one_source():
args = (
"gcc -fPIC -shared -flto -flto-partition=none"
" -Isomepath -DFOO"
" -o output.o input-f.c input-g.c input-h.c"
)
gccinv = GccInvocation(args.split())
assert gccinv.sources == ["input-f.c", "input-g.c", "input-h.c"]
gccinv2 = gccinv.restrict_to_one_source("input-g.c")
assert gccinv2.sources == ["input-g.c"]
assert gccinv2.argv == [
"gcc",
"-DFOO",
"-Isomepath",
"-fPIC",
"-shared",
"-flto",
"-flto-partition=none",
"input-g.c",
]
def test_kernel_build():
argstr = (
"gcc -Wp,-MD,drivers/media/pci/mantis/.mantis_uart.o.d"
" -nostdinc -isystem /usr/lib/gcc/x86_64-redhat-linux/4.4.7/include"
" -I/home/david/linux-3.9.1/arch/x86/include"
" -Iarch/x86/include/generated -Iinclude"
" -I/home/david/linux-3.9.1/arch/x86/include/uapi"
" -Iarch/x86/include/generated/uapi"
" -I/home/david/linux-3.9.1/include/uapi"
" -Iinclude/generated/uapi"
" -include /home/david/linux-3.9.1/include/linux/kconfig.h"
" -D__KERNEL__ -Wall -Wundef -Wstrict-prototypes"
" -Wno-trigraphs -fno-strict-aliasing -fno-common"
" -Werror-implicit-function-declaration"
" -Wno-format-security -fno-delete-null-pointer-checks"
" -Os -m64 -mtune=generic -mno-red-zone -mcmodel=kernel"
" -funit-at-a-time -maccumulate-outgoing-args"
" -fstack-protector -DCONFIG_AS_CFI=1"
" -DCONFIG_AS_CFI_SIGNAL_FRAME=1"
" -DCONFIG_AS_CFI_SECTIONS=1 -DCONFIG_AS_FXSAVEQ=1"
" -DCONFIG_AS_AVX=1 -pipe -Wno-sign-compare"
" -fno-asynchronous-unwind-tables -mno-sse -mno-mmx"
" -mno-sse2 -mno-3dnow -mno-avx -fno-reorder-blocks"
" -fno-ipa-cp-clone -Wframe-larger-than=2048"
" -Wno-unused-but-set-variable -fno-omit-frame-pointer"
" -fno-optimize-sibling-calls -g"
" -femit-struct-debug-baseonly -fno-var-tracking -pg"
" -fno-inline-functions-called-once"
" -Wdeclaration-after-statement -Wno-pointer-sign"
" -fno-strict-overflow -fconserve-stack"
" -DCC_HAVE_ASM_GOTO -Idrivers/media/dvb-core/"
" -Idrivers/media/dvb-frontends/ -fprofile-arcs"
" -ftest-coverage -DKBUILD_STR(s)=#s"
" -DKBUILD_BASENAME=KBUILD_STR(mantis_uart)"
" -DKBUILD_MODNAME=KBUILD_STR(mantis_core) -c"
" -o drivers/media/pci/mantis/.tmp_mantis_uart.o"
" drivers/media/pci/mantis/mantis_uart.c"
)
gccinv = GccInvocation(argstr.split())
assert gccinv.executable == "gcc"
assert gccinv.progname == "gcc"
assert gccinv.sources == ["drivers/media/pci/mantis/mantis_uart.c"]
assert "__KERNEL__" in gccinv.defines
assert "KBUILD_STR(s)=#s" in gccinv.defines
def test_kernel_cc1():
argstr = (
"/usr/libexec/gcc/x86_64-redhat-linux/4.4.7/cc1 -quiet"
" -nostdinc"
" -I/home/david/linux-3.9.1/arch/x86/include"
" -Iarch/x86/include/generated -Iinclude"
" -I/home/david/linux-3.9.1/arch/x86/include/uapi"
" -Iarch/x86/include/generated/uapi"
" -I/home/david/linux-3.9.1/include/uapi"
" -Iinclude/generated/uapi -Idrivers/media/dvb-core/"
" -Idrivers/media/dvb-frontends/ -D__KERNEL__"
" -DCONFIG_AS_CFI=1 -DCONFIG_AS_CFI_SIGNAL_FRAME=1"
" -DCONFIG_AS_CFI_SECTIONS=1 -DCONFIG_AS_FXSAVEQ=1"
" -DCONFIG_AS_AVX=1 -DCC_HAVE_ASM_GOTO -DKBUILD_STR(s)=#s"
" -DKBUILD_BASENAME=KBUILD_STR(mantis_uart)"
" -DKBUILD_MODNAME=KBUILD_STR(mantis_core)"
" -isystem /usr/lib/gcc/x86_64-redhat-linux/4.4.7/include"
" -include /home/david/linux-3.9.1/include/linux/kconfig.h"
" -MD drivers/media/pci/mantis/.mantis_uart.o.d"
" drivers/media/pci/mantis/mantis_uart.c -quiet"
" -dumpbase mantis_uart.c -m64 -mtune=generic"
" -mno-red-zone -mcmodel=kernel -maccumulate-outgoing-args"
" -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx"
" -auxbase-strip drivers/media/pci/mantis/.tmp_mantis_uart.o"
" -g -Os -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs"
" -Werror-implicit-function-declaration -Wno-format-security"
" -Wno-sign-compare -Wframe-larger-than=2048"
" -Wno-unused-but-set-variable -Wdeclaration-after-statement"
" -Wno-pointer-sign -p -fno-strict-aliasing -fno-common"
" -fno-delete-null-pointer-checks -funit-at-a-time"
" -fstack-protector -fno-asynchronous-unwind-tables"
" -fno-reorder-blocks -fno-ipa-cp-clone"
" -fno-omit-frame-pointer -fno-optimize-sibling-calls"
" -femit-struct-debug-baseonly -fno-var-tracking"
" -fno-inline-functions-called-once -fno-strict-overflow"
" -fconserve-stack -fprofile-arcs -ftest-coverage -o -"
)
gccinv = GccInvocation(argstr.split())
assert gccinv.executable == "/usr/libexec/gcc/x86_64-redhat-linux/4.4.7/cc1"
assert gccinv.progname == "cc1"
assert not gccinv.is_driver
assert gccinv.sources == ["drivers/media/pci/mantis/mantis_uart.c"]
def test_not_gcc():
argstr = "objdump -h drivers/media/pci/mantis/.tmp_mantis_uart.o"
gccinv = GccInvocation(argstr.split())
assert gccinv.executable == "objdump"
assert gccinv.progname == "objdump"
assert not gccinv.is_driver
def test_dash_x():
argstr = (
"gcc -D__KERNEL__ -Wall -Wundef -Wstrict-prototypes"
" -Wno-trigraphs -fno-strict-aliasing -fno-common"
" -Werror-implicit-function-declaration"
" -Wno-format-security -fno-delete-null-pointer-checks"
" -Os -m64 -mno-sse -mpreferred-stack-boundary=3"
" -c -x c /dev/null -o .20355.tmp"
)
gccinv = GccInvocation(argstr.split())
assert gccinv.executable == "gcc"
assert gccinv.sources == ["/dev/null"]
def test_pipes():
argstr = (
"gcc -D__KERNEL__ -S -x c -c -O0 -mcmodel=kernel" " -fstack-protector" " - -o -"
)
gccinv = GccInvocation(argstr.split())
assert gccinv.sources == ["-"]
def test_print_file_name():
argstr = "gcc -print-file-name=include"
gccinv = GccInvocation(argstr.split())
assert gccinv.sources == []
assert "-print-file-name=include" in gccinv.otherargs
def test_collect2():
# From a kernel build:
argstr = (
"/usr/libexec/gcc/x86_64-redhat-linux/4.4.7/collect2"
" --eh-frame-hdr --build-id -m elf_x86_64"
" --hash-style=gnu -dynamic-linker"
" /lib64/ld-linux-x86-64.so.2 -o .20501.tmp"
" -L/usr/lib/gcc/x86_64-redhat-linux/4.4.7"
" -L/usr/lib/gcc/x86_64-redhat-linux/4.4.7"
" -L/usr/lib/gcc/x86_64-redhat-linux/4.4.7/../../../../lib64"
" -L/lib/../lib64 -L/usr/lib/../lib64"
" -L/usr/lib/gcc/x86_64-redhat-linux/4.4.7/../../.."
" --build-id /tmp/cckRREmI.o"
)
gccinv = GccInvocation(argstr.split())
assert gccinv.progname == "collect2"
assert not gccinv.is_driver
assert gccinv.sources == []
def test_link():
# From a kernel build:
argstr = (
"gcc -o scripts/genksyms/genksyms"
" scripts/genksyms/genksyms.o"
" scripts/genksyms/parse.tab.o"
" scripts/genksyms/lex.lex.o"
)
gccinv = GccInvocation(argstr.split())
assert gccinv.progname == "gcc"
assert gccinv.sources == [
"scripts/genksyms/genksyms.o",
"scripts/genksyms/parse.tab.o",
"scripts/genksyms/lex.lex.o",
]
def test_quoted_spaces():
# Ensure we can handle spaces within a quoted argument
argstr = (
"/usr/libexec/gcc/x86_64-redhat-linux/4.4.7/cc1 -quiet"
" -nostdinc"
" -I/home/david/linux-3.9.1/arch/x86/include"
" -Iarch/x86/include/generated -Iinclude"
" -I/home/david/linux-3.9.1/arch/x86/include/uapi"
" -Iarch/x86/include/generated/uapi"
" -I/home/david/linux-3.9.1/include/uapi"
" -Iinclude/generated/uapi -D__KERNEL__ -DCONFIG_AS_CFI=1"
" -DCONFIG_AS_CFI_SIGNAL_FRAME=1"
" -DCONFIG_AS_CFI_SECTIONS=1 -DCONFIG_AS_FXSAVEQ=1"
" -DCONFIG_AS_AVX=1 -DCC_HAVE_ASM_GOTO"
# Here's the awkward argument:
' -DIPATH_IDSTR="QLogic kernel.org driver"'
" -DIPATH_KERN_TYPE=0 -DKBUILD_STR(s)=#s"
" -DKBUILD_BASENAME=KBUILD_STR(ipath_cq)"
" -DKBUILD_MODNAME=KBUILD_STR(ib_ipath)"
" -isystem /usr/lib/gcc/x86_64-redhat-linux/4.4.7/include"
" -include /home/david/linux-3.9.1/include/linux/kconfig.h"
" -MD drivers/infiniband/hw/ipath/.ipath_cq.o.d"
" drivers/infiniband/hw/ipath/ipath_cq.c"
" -quiet -dumpbase ipath_cq.c -m64 -mtune=generic"
" -mno-red-zone -mcmodel=kernel"
" -maccumulate-outgoing-args -mno-sse -mno-mmx -mno-sse2"
" -mno-3dnow -mno-avx -auxbase-strip"
" drivers/infiniband/hw/ipath/.tmp_ipath_cq.o"
" -g -Os -Wall -Wundef -Wstrict-prototypes"
" -Wno-trigraphs -Werror-implicit-function-declaration"
" -Wno-format-security -Wno-sign-compare"
" -Wframe-larger-than=2048 -Wno-unused-but-set-variable"
" -Wdeclaration-after-statement -Wno-pointer-sign -p"
" -fno-strict-aliasing -fno-common"
" -fno-delete-null-pointer-checks -funit-at-a-time"
" -fstack-protector -fno-asynchronous-unwind-tables"
" -fno-reorder-blocks -fno-ipa-cp-clone"
" -fno-omit-frame-pointer -fno-optimize-sibling-calls"
" -femit-struct-debug-baseonly -fno-var-tracking"
" -fno-inline-functions-called-once"
" -fno-strict-overflow -fconserve-stack"
" -fprofile-arcs -ftest-coverage -o -"
)
gccinv = GccInvocation.from_cmdline(argstr)
assert gccinv.sources == ["drivers/infiniband/hw/ipath/ipath_cq.c"]
assert 'IPATH_IDSTR="QLogic kernel.org driver"' in gccinv.defines
assert "KBUILD_STR(s)=#s" in gccinv.defines
assert "KBUILD_BASENAME=KBUILD_STR(ipath_cq)" in gccinv.defines
def test_space_after_dash_D():
# Note the space between the -D and its argument:
argstr = (
"gcc -c -x c -D __KERNEL__ -D SOME_OTHER_DEFINE /dev/null -o /tmp/ccqbm5As.s"
)
gccinv = GccInvocation.from_cmdline(argstr)
assert gccinv.defines == ["__KERNEL__", "SOME_OTHER_DEFINE"]
assert gccinv.sources == ["/dev/null"]
def test_space_after_dash_I():
argstr = (
"./install/libexec/gcc/x86_64-unknown-linux-gnu/4.9.0/cc1 -quiet"
" -nostdinc"
" -I somedir"
" -I some/other/dir"
" -D __KERNEL__"
" -D CONFIG_AS_CFI=1"
" -D CONFIG_AS_CFI_SIGNAL_FRAME=1"
" -D KBUILD_STR(s)=#s"
" -D KBUILD_BASENAME=KBUILD_STR(empty)"
" -D KBUILD_MODNAME=KBUILD_STR(empty)"
" scripts/mod/empty.c"
" -o -"
)
gccinv = GccInvocation.from_cmdline(argstr)
assert gccinv.defines == [
"__KERNEL__",
"CONFIG_AS_CFI=1",
"CONFIG_AS_CFI_SIGNAL_FRAME=1",
"KBUILD_STR(s)=#s",
"KBUILD_BASENAME=KBUILD_STR(empty)",
"KBUILD_MODNAME=KBUILD_STR(empty)",
]
assert gccinv.sources == ["scripts/mod/empty.c"]
def test_space_after_dash_U():
argstr = (
"./install/libexec/gcc/x86_64-unknown-linux-gnu/4.9.0/cc1"
" -E -lang-asm -quiet -nostdinc -C -C"
"-P -P"
" -U x86"
" -isystem /some/dir"
" -include /some/path/to/kconfig.h"
" -MD arch/x86/vdso/.vdso.lds.d"
" arch/x86/vdso/vdso.lds.S"
" -o arch/x86/vdso/vdso.lds"
" -mtune=generic -march=x86-64 -fno-directives-only"
)
gccinv = GccInvocation.from_cmdline(argstr)
assert gccinv.sources == ["arch/x86/vdso/vdso.lds.S"]
def test_MD_without_arg():
argstr = (
"/usr/bin/gcc"
" -Wp,-MD,arch/x86/purgatory/.purgatory.o.d"
" -nostdinc"
" -isystem"
" /usr/lib/gcc/x86_64-redhat-linux/5.1.1/include"
" -I./arch/x86/include"
" -Iarch/x86/include/generated/uapi"
" -Iarch/x86/include/generated"
" -Iinclude"
" -I./arch/x86/include/uapi"
" -Iarch/x86/include/generated/uapi"
" -I./include/uapi"
" -Iinclude/generated/uapi"
" -include"
" ./include/linux/kconfig.h"
" -D__KERNEL__"
" -fno-strict-aliasing"
" -Wall"
" -Wstrict-prototypes"
" -fno-zero-initialized-in-bss"
" -fno-builtin"
" -ffreestanding"
" -c"
" -MD"
" -Os"
" -mcmodel=large"
" -m64"
" -DKBUILD_STR(s)=#s"
" -DKBUILD_BASENAME=KBUILD_STR(purgatory)"
" -DKBUILD_MODNAME=KBUILD_STR(purgatory)"
" -c"
" -o"
" arch/x86/purgatory/purgatory.o"
" arch/x86/purgatory/purgatory.c"
)
gccinv = GccInvocation.from_cmdline(argstr)
assert gccinv.sources == ["arch/x86/purgatory/purgatory.c"]
def test_openssl_invocation():
argstr = (
"/usr/bin/gcc"
" -Werror"
" -D"
" OPENSSL_DOING_MAKEDEPEND"
" -M"
" -fPIC"
" -DOPENSSL_PIC"
" -DZLIB"
" -DOPENSSL_THREADS"
" -D_REENTRANT"
" -DDSO_DLFCN"
" -DHAVE_DLFCN_H"
" -DKRB5_MIT"
" -m64"
" -DL_ENDIAN"
" -DTERMIO"
" -Wall"
" -O2"
" -g"
" -pipe"
" -Wall"
" -Werror=format-security"
" -Wp,-D_FORTIFY_SOURCE=2"
" -fexceptions"
" -fstack-protector-strong"
" --param=ssp-buffer-size=4"
" -grecord-gcc-switches"
" -m64"
" -mtune=generic"
" -Wa,--noexecstack"
" -DPURIFY"
" -DOPENSSL_IA32_SSE2"
" -DOPENSSL_BN_ASM_MONT"
" -DOPENSSL_BN_ASM_MONT5"
" -DOPENSSL_BN_ASM_GF2m"
" -DSHA1_ASM"
" -DSHA256_ASM"
" -DSHA512_ASM"
" -DMD5_ASM"
" -DAES_ASM"
" -DVPAES_ASM"
" -DBSAES_ASM"
" -DWHIRLPOOL_ASM"
" -DGHASH_ASM"
" -I."
" -I.."
" -I../include"
" -DOPENSSL_NO_DEPRECATED"
" -DOPENSSL_NO_EC2M"
" -DOPENSSL_NO_EC_NISTP_64_GCC_128"
" -DOPENSSL_NO_GMP"
" -DOPENSSL_NO_GOST"
" -DOPENSSL_NO_JPAKE"
" -DOPENSSL_NO_MDC2"
" -DOPENSSL_NO_RC5"
" -DOPENSSL_NO_RSAX"
" -DOPENSSL_NO_SCTP"
" -DOPENSSL_NO_SRP"
" -DOPENSSL_NO_STORE"
" -DOPENSSL_NO_UNIT_TEST"
" cryptlib.c"
" mem.c"
" mem_clr.c"
" mem_dbg.c"
" cversion.c"
" ex_data.c"
" cpt_err.c"
" ebcdic.c"
" uid.c"
" o_time.c"
" o_str.c"
" o_dir.c"
" o_fips.c"
" o_init.c"
" fips_ers.c"
)
gccinv = GccInvocation.from_cmdline(argstr)
assert gccinv.sources == [
"cryptlib.c",
"mem.c",
"mem_clr.c",
"mem_dbg.c",
"cversion.c",
"ex_data.c",
"cpt_err.c",
"ebcdic.c",
"uid.c",
"o_time.c",
"o_str.c",
"o_dir.c",
"o_fips.c",
"o_init.c",
"fips_ers.c",
]
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/third_party/gccinvocation/gccinvocation_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/third_party/gccinvocation/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for compiler_gym/service/service_cache.py."""
from compiler_gym.service.service_cache import ServiceCache
from tests.test_main import main
def test_service_cache():
cache = ServiceCache()
try:
# Test that expected files exist.
assert cache.path.is_dir()
assert (cache / "logs").is_dir()
assert (cache / "disk").exists()
# Test permissions by creating some empty files.
(cache / "foo.txt").touch()
(cache / "logs" / "foo.txt").touch()
(cache / "disk" / "foo.txt").touch()
finally:
cache.close()
assert not cache.path.is_dir()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/service/service_cache_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/service:connection."""
import gym
import pytest
import compiler_gym.envs # noqa Register LLVM environments.
from compiler_gym.errors import ServiceError
from compiler_gym.service import CompilerGymServiceConnection, ConnectionOpts
from compiler_gym.service.proto import GetSpacesRequest
from tests.test_main import main
@pytest.fixture(scope="function")
def connection() -> CompilerGymServiceConnection:
"""Yields a connection to a local service."""
with gym.make("llvm-v0") as env:
yield env.service
@pytest.fixture(scope="function")
def dead_connection() -> CompilerGymServiceConnection:
"""Yields a connection to a dead local service service."""
with gym.make("llvm-v0") as env:
# Kill the service.
env.service.connection.process.terminate()
env.service.connection.process.communicate()
yield env.service
def test_create_invalid_options():
with pytest.raises(TypeError, match="No endpoint provided for service connection"):
CompilerGymServiceConnection("")
def test_create_channel_failed_subprocess(
dead_connection: CompilerGymServiceConnection,
):
with pytest.raises(
(ServiceError, TimeoutError), match="Failed to create connection to localhost:"
):
CompilerGymServiceConnection(
f"{dead_connection.connection.url}",
ConnectionOpts(
init_max_seconds=1,
init_max_attempts=2,
rpc_init_max_seconds=0.1,
),
)
def test_create_channel_failed_subprocess_rpc_timeout(
dead_connection: CompilerGymServiceConnection,
):
"""Same as the above test, but RPC timeout is long enough that only a single
attempt can be made.
"""
with pytest.raises(
OSError,
match=(
r"Failed to create connection to localhost:\d+ after "
r"[\d\.]+ seconds \(1 attempt made\)"
),
):
CompilerGymServiceConnection(
f"{dead_connection.connection.url}",
ConnectionOpts(
init_max_seconds=0.1,
init_max_attempts=2,
rpc_init_max_seconds=1,
),
)
def test_call_stub_invalid_type(connection: CompilerGymServiceConnection):
with pytest.raises(
TypeError, match="Exception serializing request! Request type: type"
):
connection(connection.stub.GetSpaces, int)
def test_call_stub_negative_timeout(connection: CompilerGymServiceConnection):
with pytest.raises(TimeoutError, match=r"Deadline Exceeded \(-10.0 seconds\)"):
connection(connection.stub.GetSpaces, GetSpacesRequest(), timeout=-10)
def test_ManagedConnection_repr(connection: CompilerGymServiceConnection):
cnx = connection.connection
assert (
repr(cnx)
== f"Connection to service at {cnx.url} running on PID {cnx.process.pid}"
)
# Kill the service.
cnx.process.terminate()
cnx.process.communicate()
assert repr(cnx) == f"Connection to dead service at {cnx.url}"
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/service/connection_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/service/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym:validate."""
from collections.abc import Collection, Mapping
import google.protobuf.any_pb2 as any_pb2
import numpy as np
import pytest
from compiler_gym.service.proto import (
BooleanBox,
BooleanRange,
BooleanSequenceSpace,
BooleanTensor,
ByteBox,
ByteSequenceSpace,
BytesSequenceSpace,
ByteTensor,
CommandlineSpace,
DictEvent,
DictSpace,
DiscreteSpace,
DoubleBox,
DoubleRange,
DoubleSequenceSpace,
DoubleTensor,
Event,
FloatBox,
FloatRange,
FloatSequenceSpace,
FloatTensor,
Int64Box,
Int64Range,
Int64SequenceSpace,
Int64Tensor,
ListEvent,
ListSpace,
NamedDiscreteSpace,
Opaque,
Space,
SpaceSequenceSpace,
StringSpace,
StringTensor,
py_converters,
)
from compiler_gym.spaces import (
Box,
Commandline,
Dict,
Discrete,
NamedDiscrete,
Permutation,
Scalar,
Sequence,
SpaceSequence,
Tuple,
)
from tests.test_main import main
def test_convert_boolean_tensor_message_to_numpy():
shape = [1, 2, 3]
values = [True, False, True, True, False, False]
tensor_message = BooleanTensor(shape=shape, value=values)
np_array = py_converters.convert_tensor_message_to_numpy(tensor_message)
assert np_array.dtype == bool
assert np.array_equal(np_array.shape, shape)
flat_np_array = np_array.flatten()
assert np.array_equal(flat_np_array, values)
def test_convert_numpy_to_boolean_tensor_message():
tensor = np.array([[True], [False]], dtype=bool)
tensor_message = py_converters.convert_numpy_to_boolean_tensor_message(tensor)
assert isinstance(tensor_message, BooleanTensor)
assert np.array_equal(tensor.shape, tensor_message.shape)
assert np.array_equal(tensor.flatten(), tensor_message.value)
def test_convert_byte_tensor_message_to_numpy():
shape = [1, 2, 3]
values = [1, 2, 3, 4, 5, 6]
tensor_message = ByteTensor(shape=shape, value=bytes(values))
np_array = py_converters.convert_tensor_message_to_numpy(tensor_message)
assert np_array.dtype == np.byte
assert np.array_equal(np_array.shape, shape)
flat_np_array = np_array.flatten()
assert np.array_equal(flat_np_array, values)
def test_convert_numpy_to_byte_tensor_message():
tensor = np.array([[1], [2]], dtype=np.int8)
tensor_message = py_converters.convert_numpy_to_byte_tensor_message(tensor)
assert isinstance(tensor_message, ByteTensor)
assert np.array_equal(tensor.shape, tensor_message.shape)
assert tensor.tobytes() == tensor_message.value
def test_convert_int64_tensor_message_to_numpy():
shape = [1, 2, 3]
values = [1, 2, 3, 4, 5, 6]
tensor_message = Int64Tensor(shape=shape, value=values)
np_array = py_converters.convert_tensor_message_to_numpy(tensor_message)
assert np_array.dtype == np.int64
assert np.array_equal(np_array.shape, shape)
flat_np_array = np_array.flatten()
assert np.array_equal(flat_np_array, values)
def test_convert_numpy_to_int64_tensor_message():
tensor = np.array([[1], [2]], dtype=np.int64)
tensor_message = py_converters.convert_numpy_to_int64_tensor_message(tensor)
assert isinstance(tensor_message, Int64Tensor)
assert np.array_equal(tensor.shape, tensor_message.shape)
assert np.array_equal(tensor.flatten(), tensor_message.value)
def test_convert_float_tensor_message_to_numpy():
shape = [1, 2, 3]
values = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
tensor_message = FloatTensor(shape=shape, value=values)
np_array = py_converters.convert_tensor_message_to_numpy(tensor_message)
assert np_array.dtype == np.float32
assert np.array_equal(np_array.shape, shape)
flat_np_array = np_array.flatten()
assert np.array_equal(flat_np_array, values)
def test_convert_numpy_to_float_tensor_message():
tensor = np.array([[1], [2]], dtype=np.float32)
tensor_message = py_converters.convert_numpy_to_float_tensor_message(tensor)
assert isinstance(tensor_message, FloatTensor)
assert np.array_equal(tensor.shape, tensor_message.shape)
assert np.array_equal(tensor.flatten(), tensor_message.value)
def test_convert_double_tensor_message_to_numpy():
shape = [1, 2, 3]
values = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
tensor_message = DoubleTensor(shape=shape, value=values)
np_array = py_converters.convert_tensor_message_to_numpy(tensor_message)
assert np_array.dtype == np.float64
assert np.array_equal(np_array.shape, shape)
flat_np_array = np_array.flatten()
assert np.array_equal(flat_np_array, values)
def test_convert_numpy_to_double_tensor_message():
tensor = np.array([[1], [2]], dtype=float)
tensor_message = py_converters.convert_numpy_to_double_tensor_message(tensor)
assert isinstance(tensor_message, DoubleTensor)
assert np.array_equal(tensor.shape, tensor_message.shape)
assert np.array_equal(tensor.flatten(), tensor_message.value)
def test_convert_string_tensor_message_to_numpy():
shape = [1, 2]
values = ["a", "b"]
tensor_message = StringTensor(shape=shape, value=values)
np_array = py_converters.convert_tensor_message_to_numpy(tensor_message)
assert np_array.dtype == object
assert np.array_equal(np_array.shape, shape)
flat_np_array = np_array.flatten()
assert np.array_equal(flat_np_array, values)
def test_convert_numpy_to_string_tensor_message():
tensor = np.array([["a"], ["b"]], dtype=object)
tensor_message = py_converters.convert_numpy_to_string_tensor_message(tensor)
assert isinstance(tensor_message, StringTensor)
assert np.array_equal(tensor.shape, tensor_message.shape)
assert np.array_equal(tensor.flatten(), tensor_message.value)
def test_numpy_to_tensor_message_converter():
converter = py_converters.NumpyToTensorMessageConverter()
tensor = np.array([[1], [2]], dtype=float)
tensor_message = converter(tensor)
assert isinstance(tensor_message, DoubleTensor)
assert np.array_equal(tensor.shape, tensor_message.shape)
assert np.array_equal(tensor.flatten(), tensor_message.value)
def test_type_based_converter():
converter = py_converters.TypeBasedConverter(
conversion_map={FloatTensor: py_converters.convert_tensor_message_to_numpy}
)
tensor_message = FloatTensor(shape=[1], value=[1])
numpy_array = converter(tensor_message)
assert isinstance(numpy_array, np.ndarray)
def test_event_message_default_converter():
message_converter = py_converters.TypeBasedConverter(
conversion_map={FloatTensor: py_converters.convert_tensor_message_to_numpy}
)
event_converter = py_converters.EventMessageDefaultConverter(message_converter)
tensor_message = FloatTensor(shape=[1], value=[1])
event_message = Event(float_tensor=tensor_message)
numpy_array = event_converter(event_message)
assert isinstance(numpy_array, np.ndarray)
def test_list_event_message_converter():
message_converter = py_converters.TypeBasedConverter(
conversion_map={FloatTensor: py_converters.convert_tensor_message_to_numpy}
)
event_converter = py_converters.EventMessageDefaultConverter(message_converter)
list_converter = py_converters.ListEventMessageConverter(event_converter)
tensor_message = FloatTensor(shape=[1], value=[1])
event_message = Event(float_tensor=tensor_message)
list_message = ListEvent(event=[event_message])
converted_list = list_converter(list_message)
assert isinstance(converted_list, Collection)
assert len(converted_list) == 1
assert isinstance(converted_list[0], np.ndarray)
def test_to_list_event_message_converter():
converter = py_converters.TypeBasedConverter(
conversion_map={int: lambda x: Event(int64_value=x)}
)
list_converter = py_converters.ToListEventMessageConverter(converter)
original_list = [1, 2]
converted_list = list_converter(original_list)
assert isinstance(converted_list, ListEvent)
assert len(converted_list.event) == len(original_list)
assert converted_list.event[0].int64_value == original_list[0]
assert converted_list.event[1].int64_value == original_list[1]
def test_to_dict_event_message_converter():
converter = py_converters.TypeBasedConverter(
conversion_map={int: lambda x: Event(int64_value=x)}
)
dict_converter = py_converters.ToDictEventMessageConverter(converter)
original_dict = {"a": 1}
converted_dict = dict_converter(original_dict)
assert isinstance(converted_dict, DictEvent)
assert len(converted_dict.event) == len(original_dict)
assert converted_dict.event["a"].int64_value == original_dict["a"]
def test_dict_event_message_converter():
message_converter = py_converters.TypeBasedConverter(
conversion_map={FloatTensor: py_converters.convert_tensor_message_to_numpy}
)
event_converter = py_converters.EventMessageDefaultConverter(message_converter)
dict_converter = py_converters.DictEventMessageConverter(event_converter)
tensor_message = FloatTensor(shape=[1], value=[1])
event_message = Event(float_tensor=tensor_message)
dict_message = DictEvent(event={"event_message_key": event_message})
converted_list = dict_converter(dict_message)
assert isinstance(converted_list, Mapping)
assert len(converted_list) == 1
assert "event_message_key" in converted_list
assert isinstance(converted_list["event_message_key"], np.ndarray)
def test_protobuf_any_unpacker():
unpacker = py_converters.ProtobufAnyUnpacker(
{"compiler_gym.FloatTensor": FloatTensor}
)
any_msg = any_pb2.Any()
tensor_message = FloatTensor(shape=[1], value=[1])
any_msg.Pack(tensor_message)
unpacked_tensor_message = unpacker(any_msg)
assert tensor_message == unpacked_tensor_message
def test_protobuf_any_unpacker_value_error():
unpacker = py_converters.ProtobufAnyUnpacker(
{"IntentionallyWrongType": FloatTensor}
)
any_msg = any_pb2.Any()
tensor_message = FloatTensor(shape=[1], value=[1])
any_msg.Pack(tensor_message)
any_msg.type_url = "IntentionallyWrongType"
with pytest.raises(ValueError):
unpacker(any_msg)
def test_protobuf_any_converter():
unpacker = py_converters.ProtobufAnyUnpacker(
{"compiler_gym.FloatTensor": FloatTensor}
)
type_based_converter = py_converters.TypeBasedConverter(
conversion_map={FloatTensor: py_converters.convert_tensor_message_to_numpy}
)
converter = py_converters.ProtobufAnyConverter(
unpacker=unpacker, message_converter=type_based_converter
)
any_msg = any_pb2.Any()
tensor_message = FloatTensor(shape=[1], value=[1])
any_msg.Pack(tensor_message)
tensor = converter(any_msg)
assert isinstance(tensor, np.ndarray)
def test_message_default_converter():
value = 5
converter = py_converters.make_message_default_converter()
message = Event(int64_value=value)
converted = converter(message)
assert type(converted) == int
assert value == converted
def test_to_event_message_default_converter():
converter = py_converters.to_event_message_default_converter()
val = [{"a": 1}]
converted_val = converter(val)
assert isinstance(converted_val, Event)
assert isinstance(converted_val.event_list, ListEvent)
assert len(converted_val.event_list.event) == 1
assert isinstance(converted_val.event_list.event[0], Event)
assert isinstance(converted_val.event_list.event[0].event_dict, DictEvent)
assert (
converted_val.event_list.event[0].event_dict.event["a"].int64_value
== val[0]["a"]
)
def test_convert_boolean_range_message():
range = BooleanRange(min=False, max=True)
converted_range = py_converters.convert_range_message(range)
assert converted_range.dtype == bool
assert converted_range.min == range.min
assert converted_range.max == range.max
range_default = BooleanRange()
converted_range_default = py_converters.convert_range_message(range_default)
assert converted_range_default.min == False # noqa: E712
assert converted_range_default.max == True # noqa: E712
def test_convert_to_boolean_range_message():
scalar = Scalar(min=False, max=True, dtype=bool, name=None)
range = py_converters.convert_to_range_message(scalar)
assert isinstance(range, BooleanRange)
assert range.min == scalar.min
assert range.max == scalar.max
def test_convert_int64_range_message():
range = Int64Range(min=2, max=3)
converted_range = py_converters.convert_range_message(range)
assert converted_range.dtype == np.int64
assert converted_range.min == range.min
assert converted_range.max == range.max
range_default = Int64Range()
converted_range_default = py_converters.convert_range_message(range_default)
assert converted_range_default.min == np.iinfo(np.int64).min
assert converted_range_default.max == np.iinfo(np.int64).max
def test_convert_to_int64_range_message():
scalar = Scalar(min=2, max=3, dtype=np.int64, name=None)
range = py_converters.convert_to_range_message(scalar)
assert isinstance(range, Int64Range)
assert range.min == 2
assert range.max == 3
def test_convert_float_range_message():
range = FloatRange(min=2, max=3)
converted_range = py_converters.convert_range_message(range)
assert converted_range.dtype == np.float32
assert converted_range.min == range.min
assert converted_range.max == range.max
range_default = DoubleRange()
converted_range_default = py_converters.convert_range_message(range_default)
assert np.isneginf(converted_range_default.min)
assert np.isposinf(converted_range_default.max)
def test_convert_to_float_range_message():
scalar = Scalar(min=2, max=3, dtype=np.float32, name=None)
range = py_converters.convert_to_range_message(scalar)
assert isinstance(range, FloatRange)
assert range.min == 2
assert range.max == 3
def test_convert_double_range_message():
range = DoubleRange(min=2, max=3)
converted_range = py_converters.convert_range_message(range)
assert converted_range.dtype == float
assert converted_range.min == range.min
assert converted_range.max == range.max
range_default = DoubleRange()
converted_range_default = py_converters.convert_range_message(range_default)
assert np.isneginf(converted_range_default.min)
assert np.isposinf(converted_range_default.max)
def test_convert_to_double_range_message():
scalar = Scalar(min=2, max=3, dtype=np.float64, name=None)
range = py_converters.convert_to_range_message(scalar)
assert isinstance(range, DoubleRange)
assert range.min == 2
assert range.max == 3
def test_convert_boolean_box_message():
box = BooleanBox(
low=BooleanTensor(value=[1, 2], shape=[1, 2]),
high=BooleanTensor(value=[2, 3], shape=[1, 2]),
)
converted_box = py_converters.convert_box_message(box)
assert isinstance(converted_box, Box)
assert converted_box.dtype == bool
assert np.array_equal(box.low.shape, converted_box.shape)
assert np.array_equal(box.high.shape, converted_box.shape)
assert np.array_equal(box.low.value, converted_box.low.flatten())
assert np.array_equal(box.high.value, converted_box.high.flatten())
def test_convert_to_boolean_box_message():
box = Box(
low=np.array([[False], [True]]),
high=np.array([[False], [True]]),
name=None,
dtype=bool,
)
converted_box = py_converters.convert_to_box_message(box)
assert isinstance(converted_box, BooleanBox)
assert isinstance(converted_box.low, BooleanTensor)
assert np.array_equal(converted_box.low.shape, box.shape)
assert np.array_equal(converted_box.low.value, box.low.flatten())
assert isinstance(converted_box.high, BooleanTensor)
assert np.array_equal(converted_box.high.shape, box.shape)
assert np.array_equal(converted_box.high.value, box.high.flatten())
def test_convert_byte_box_message():
box = ByteBox(
low=ByteTensor(value=bytes([1, 2]), shape=[1, 2]),
high=ByteTensor(value=bytes([2, 3]), shape=[1, 2]),
)
converted_box = py_converters.convert_box_message(box)
assert isinstance(converted_box, Box)
assert converted_box.dtype == np.int8
assert np.array_equal(box.low.shape, converted_box.shape)
assert np.array_equal(box.high.shape, converted_box.shape)
assert np.array_equal(box.low.value, bytes(converted_box.low.flatten()))
assert np.array_equal(box.high.value, bytes(converted_box.high.flatten()))
def test_convert_to_byte_box_message():
box = Box(
low=np.array([[1], [2]]), high=np.array([[3], [4]]), name=None, dtype=np.int8
)
converted_box = py_converters.convert_to_box_message(box)
assert isinstance(converted_box, ByteBox)
assert isinstance(converted_box.low, ByteTensor)
assert np.array_equal(converted_box.low.shape, box.shape)
assert np.array_equal(
np.frombuffer(converted_box.low.value, dtype=np.int8), box.low.flatten()
)
assert isinstance(converted_box.high, ByteTensor)
assert np.array_equal(converted_box.high.shape, box.shape)
assert np.array_equal(
np.frombuffer(converted_box.high.value, dtype=np.int8), box.high.flatten()
)
def test_convert_int64_box_message():
box = Int64Box(
low=Int64Tensor(value=[1, 2], shape=[1, 2]),
high=Int64Tensor(value=[2, 3], shape=[1, 2]),
)
converted_box = py_converters.convert_box_message(box)
assert isinstance(converted_box, Box)
assert converted_box.dtype == np.int64
assert np.array_equal(box.low.shape, converted_box.shape)
assert np.array_equal(box.high.shape, converted_box.shape)
assert np.array_equal(box.low.value, converted_box.low.flatten())
assert np.array_equal(box.high.value, converted_box.high.flatten())
def test_convert_to_int64_box_message():
box = Box(
low=np.array([[1], [2]]), high=np.array([[3], [4]]), name=None, dtype=np.int64
)
converted_box = py_converters.convert_to_box_message(box)
assert isinstance(converted_box, Int64Box)
assert isinstance(converted_box.low, Int64Tensor)
assert np.array_equal(converted_box.low.shape, box.shape)
assert np.array_equal(converted_box.low.value, box.low.flatten())
assert isinstance(converted_box.high, Int64Tensor)
assert np.array_equal(converted_box.high.shape, box.shape)
assert np.array_equal(converted_box.high.value, box.high.flatten())
def test_convert_float_box_message():
box = FloatBox(
low=FloatTensor(value=[1, 2], shape=[1, 2]),
high=FloatTensor(value=[2, 3], shape=[1, 2]),
)
converted_box = py_converters.convert_box_message(box)
assert isinstance(converted_box, Box)
assert converted_box.dtype == np.float32
assert np.array_equal(box.low.shape, converted_box.shape)
assert np.array_equal(box.high.shape, converted_box.shape)
assert np.array_equal(box.low.value, converted_box.low.flatten())
assert np.array_equal(box.high.value, converted_box.high.flatten())
def test_convert_to_float_box_message():
box = Box(
low=np.array([[1], [2]], dtype=np.float32),
high=np.array([[3], [4]], dtype=np.float32),
name=None,
dtype=np.float32,
)
converted_box = py_converters.convert_to_box_message(box)
assert isinstance(converted_box, FloatBox)
assert isinstance(converted_box.low, FloatTensor)
assert np.array_equal(converted_box.low.shape, box.shape)
assert np.array_equal(converted_box.low.value, box.low.flatten())
assert isinstance(converted_box.high, FloatTensor)
assert np.array_equal(converted_box.high.shape, box.shape)
assert np.array_equal(converted_box.high.value, box.high.flatten())
def test_convert_double_box_message():
box = DoubleBox(
low=DoubleTensor(value=[1, 2], shape=[1, 2]),
high=DoubleTensor(value=[2, 3], shape=[1, 2]),
)
converted_box = py_converters.convert_box_message(box)
assert isinstance(converted_box, Box)
assert converted_box.dtype == float
assert np.array_equal(box.low.shape, converted_box.shape)
assert np.array_equal(box.high.shape, converted_box.shape)
assert np.array_equal(box.low.value, converted_box.low.flatten())
assert np.array_equal(box.high.value, converted_box.high.flatten())
def test_convert_to_double_box_message():
box = Box(
low=np.array([[1.0], [2.0]]),
high=np.array([[3.0], [4.0]]),
name=None,
dtype=np.float64,
)
converted_box = py_converters.convert_to_box_message(box)
assert isinstance(converted_box, DoubleBox)
assert isinstance(converted_box.low, DoubleTensor)
assert np.array_equal(converted_box.low.shape, box.shape)
assert np.array_equal(converted_box.low.value, box.low.flatten())
assert isinstance(converted_box.high, DoubleTensor)
assert np.array_equal(converted_box.high.shape, box.shape)
assert np.array_equal(converted_box.high.value, box.high.flatten())
def test_convert_discrete_space_message():
message = DiscreteSpace(n=5)
converted_message = py_converters.convert_discrete_space_message(message)
assert message.n == converted_message.n
def test_convert_to_discrete_space_message():
space = Discrete(name=None, n=5)
converted_space = py_converters.convert_to_discrete_space_message(space)
assert isinstance(converted_space, DiscreteSpace)
assert converted_space.n == 5
def test_convert_to_named_discrete_space_message():
space = NamedDiscrete(name=None, items=["a", "b"])
converted_space = py_converters.convert_to_named_discrete_space_message(space)
assert isinstance(converted_space, NamedDiscreteSpace)
assert np.array_equal(space.names, converted_space.name)
def test_convert_named_discrete_space_message():
message = NamedDiscreteSpace(name=["a", "b", "c"])
converted_message = py_converters.convert_named_discrete_space_message(message)
assert isinstance(converted_message, NamedDiscrete)
assert np.array_equal(message.name, converted_message.names)
def test_convert_commandline_space_message():
message = CommandlineSpace(name=["a", "b", "c"])
converted_message = py_converters.convert_commandline_space_message(message)
assert isinstance(converted_message, Commandline)
assert np.array_equal(message.name, converted_message.names)
def test_convert_boolean_sequence_space():
seq = BooleanSequenceSpace(
length_range=Int64Range(min=1, max=2),
scalar_range=BooleanRange(min=True, max=False),
)
converted_seq = py_converters.convert_sequence_space(seq)
assert isinstance(converted_seq, Sequence)
assert converted_seq.dtype == bool
assert converted_seq.size_range[0] == 1
assert converted_seq.size_range[1] == 2
assert isinstance(converted_seq.scalar_range, Scalar)
assert converted_seq.scalar_range.min == True # noqa: E712
assert converted_seq.scalar_range.max == False # noqa: E712
def test_convert_to_boolean_sequence_space():
seq = Sequence(
name=None,
dtype=bool,
size_range=(1, 2),
scalar_range=Scalar(name=None, min=True, max=False, dtype=bool),
)
converted_seq = py_converters.convert_to_ranged_sequence_space(seq)
assert isinstance(converted_seq, BooleanSequenceSpace)
assert converted_seq.length_range.min == 1
assert converted_seq.length_range.max == 2
assert isinstance(converted_seq.scalar_range, BooleanRange)
assert converted_seq.scalar_range.min == True # noqa: E712
assert converted_seq.scalar_range.max == False # noqa: E712
def test_convert_bytes_sequence_space():
seq = BytesSequenceSpace(length_range=Int64Range(min=1, max=2))
converted_seq = py_converters.convert_sequence_space(seq)
assert isinstance(converted_seq, Sequence)
assert converted_seq.dtype == bytes
assert converted_seq.size_range[0] == 1
assert converted_seq.size_range[1] == 2
def test_convert_to_bytes_sequence_space():
seq = Sequence(name=None, dtype=bytes, size_range=(1, 2))
converted_seq = py_converters.convert_to_bytes_sequence_space(seq)
assert isinstance(converted_seq, BytesSequenceSpace)
assert converted_seq.length_range.min == 1
assert converted_seq.length_range.max == 2
def test_convert_byte_sequence_space():
seq = ByteSequenceSpace(
length_range=Int64Range(min=1, max=2), scalar_range=Int64Range(min=3, max=4)
)
converted_seq = py_converters.convert_sequence_space(seq)
assert isinstance(converted_seq, Sequence)
assert converted_seq.dtype == np.int8
assert converted_seq.size_range[0] == 1
assert converted_seq.size_range[1] == 2
assert isinstance(converted_seq.scalar_range, Scalar)
assert converted_seq.scalar_range.min == 3
assert converted_seq.scalar_range.max == 4
def test_convert_to_byte_sequence_space():
seq = Sequence(
name=None,
dtype=np.int8,
size_range=(1, 2),
scalar_range=Scalar(name=None, min=4, max=5, dtype=np.int8),
)
converted_seq = py_converters.convert_to_ranged_sequence_space(seq)
assert isinstance(converted_seq, ByteSequenceSpace)
assert converted_seq.length_range.min == 1
assert converted_seq.length_range.max == 2
assert isinstance(converted_seq.scalar_range, Int64Range)
assert converted_seq.scalar_range.min == 4
assert converted_seq.scalar_range.max == 5
def test_convert_int64_sequence_space():
seq = Int64SequenceSpace(
length_range=Int64Range(min=1, max=2), scalar_range=Int64Range(min=3, max=4)
)
converted_seq = py_converters.convert_sequence_space(seq)
assert isinstance(converted_seq, Sequence)
assert converted_seq.dtype == np.int64
assert converted_seq.size_range[0] == 1
assert converted_seq.size_range[1] == 2
assert isinstance(converted_seq.scalar_range, Scalar)
assert converted_seq.scalar_range.min == 3
assert converted_seq.scalar_range.max == 4
def test_convert_to_int64_sequence_space():
seq = Sequence(
name=None,
dtype=np.int64,
size_range=(1, 2),
scalar_range=Scalar(name=None, min=4, max=5, dtype=np.int64),
)
converted_seq = py_converters.convert_to_ranged_sequence_space(seq)
assert isinstance(converted_seq, Int64SequenceSpace)
assert converted_seq.length_range.min == 1
assert converted_seq.length_range.max == 2
assert isinstance(converted_seq.scalar_range, Int64Range)
assert converted_seq.scalar_range.min == 4
assert converted_seq.scalar_range.max == 5
def test_convert_float_sequence_space():
seq = FloatSequenceSpace(
length_range=Int64Range(min=1, max=2), scalar_range=FloatRange(min=3.1, max=4)
)
converted_seq = py_converters.convert_sequence_space(seq)
assert isinstance(converted_seq, Sequence)
assert converted_seq.dtype == np.float32
assert converted_seq.size_range[0] == 1
assert converted_seq.size_range[1] == 2
assert isinstance(converted_seq.scalar_range, Scalar)
assert np.isclose(converted_seq.scalar_range.min, 3.1)
assert converted_seq.scalar_range.max == 4
def test_convert_to_float_sequence_space():
seq = Sequence(
name=None,
dtype=np.float32,
size_range=(1, 2),
scalar_range=Scalar(name=None, min=4, max=5, dtype=np.float32),
)
converted_seq = py_converters.convert_to_ranged_sequence_space(seq)
assert isinstance(converted_seq, FloatSequenceSpace)
assert converted_seq.length_range.min == 1
assert converted_seq.length_range.max == 2
assert isinstance(converted_seq.scalar_range, FloatRange)
assert np.isclose(converted_seq.scalar_range.min, 4)
assert np.isclose(converted_seq.scalar_range.max, 5)
def test_convert_double_sequence_space():
seq = DoubleSequenceSpace(
length_range=Int64Range(min=1, max=2), scalar_range=DoubleRange(min=3.1, max=4)
)
converted_seq = py_converters.convert_sequence_space(seq)
assert isinstance(converted_seq, Sequence)
assert converted_seq.dtype == float
assert converted_seq.size_range[0] == 1
assert converted_seq.size_range[1] == 2
assert isinstance(converted_seq.scalar_range, Scalar)
assert converted_seq.scalar_range.min == 3.1
assert converted_seq.scalar_range.max == 4
def test_convert_to_double_sequence_space():
seq = Sequence(
name=None,
dtype=np.float64,
size_range=(1, 2),
scalar_range=Scalar(name=None, min=4.0, max=5.0, dtype=np.float64),
)
converted_seq = py_converters.convert_to_ranged_sequence_space(seq)
assert isinstance(converted_seq, DoubleSequenceSpace)
assert converted_seq.length_range.min == 1
assert converted_seq.length_range.max == 2
assert isinstance(converted_seq.scalar_range, DoubleRange)
assert converted_seq.scalar_range.min == 4.0
assert converted_seq.scalar_range.max == 5.0
def test_convert_string_space():
space = StringSpace(length_range=Int64Range(min=1, max=2))
converted_space = py_converters.convert_sequence_space(space)
assert isinstance(converted_space, Sequence)
assert converted_space.dtype == str
assert converted_space.size_range[0] == 1
assert converted_space.size_range[1] == 2
def test_convert_to_string_space():
space = Sequence(name=None, size_range=(1, 2), dtype=str)
converted_space = py_converters.convert_to_string_space(space)
assert isinstance(converted_space, StringSpace)
assert converted_space.length_range.min == 1
assert converted_space.length_range.max == 2
def test_convert_space_sequence_space():
space = Space(
space_sequence=SpaceSequenceSpace(
length_range=Int64Range(min=0, max=2),
space=Space(int64_value=Int64Range(min=-1, max=1)),
),
)
converted_space = py_converters.message_default_converter(space)
assert isinstance(converted_space, SpaceSequence)
assert converted_space.size_range[0] == space.space_sequence.length_range.min
assert converted_space.size_range[1] == space.space_sequence.length_range.max
assert isinstance(converted_space.space, Scalar)
assert np.dtype(converted_space.space.dtype) == np.int64
assert converted_space.space.min == space.space_sequence.space.int64_value.min
assert converted_space.space.max == space.space_sequence.space.int64_value.max
def test_space_message_default_converter():
message_converter = py_converters.TypeBasedConverter(
conversion_map={StringSpace: py_converters.convert_sequence_space}
)
space_converter = py_converters.SpaceMessageDefaultConverter(message_converter)
val = StringSpace(length_range=Int64Range(min=1, max=2))
space_message = Space(string_value=val)
converted_space = space_converter(space_message)
assert isinstance(converted_space, Sequence)
assert converted_space.dtype == str
assert converted_space.size_range[0] == 1
assert converted_space.size_range[1] == 2
def test_list_space_message_converter():
message_converter = py_converters.TypeBasedConverter(
conversion_map={StringSpace: py_converters.convert_sequence_space}
)
space_converter = py_converters.SpaceMessageDefaultConverter(message_converter)
list_converter = py_converters.ListSpaceMessageConverter(space_converter)
space_message = ListSpace(
space=[
Space(
string_value=StringSpace(length_range=Int64Range(min=1, max=2)),
)
]
)
converted_space = list_converter(space_message)
assert isinstance(converted_space, Tuple)
assert len(converted_space.spaces) == 1
assert converted_space.spaces[0].dtype == str
assert converted_space.spaces[0].size_range[0] == 1
assert converted_space.spaces[0].size_range[1] == 2
def test_tuple_to_list_space_message_converter():
to_message_converter = py_converters.TypeBasedConverter(
conversion_map={Discrete: py_converters.convert_to_discrete_space_message}
)
to_space_converter = py_converters.ToSpaceMessageConverter(to_message_converter)
to_list_converter = py_converters.ToListSpaceMessageConverter(to_space_converter)
space = Tuple(name=None, spaces=[Discrete(name=None, n=5)])
converted_space = to_list_converter(space)
assert isinstance(converted_space, ListSpace)
assert len(converted_space.space) == 1
assert isinstance(converted_space.space[0], Space)
assert hasattr(converted_space.space[0], "discrete")
assert converted_space.space[0].discrete.n == 5
def test_to_list_space_message_converter():
to_message_converter = py_converters.TypeBasedConverter(
conversion_map={Discrete: py_converters.convert_to_discrete_space_message}
)
to_space_converter = py_converters.ToSpaceMessageConverter(to_message_converter)
to_list_converter = py_converters.ToListSpaceMessageConverter(to_space_converter)
space = Tuple(name=None, spaces=[Discrete(name=None, n=5)])
converted_space = to_list_converter(space)
assert isinstance(converted_space, ListSpace)
assert len(converted_space.space) == 1
assert isinstance(converted_space.space[0], Space)
assert hasattr(converted_space.space[0], "discrete")
assert converted_space.space[0].discrete.n == 5
def test_dict_space_message_converter():
message_converter = py_converters.TypeBasedConverter(
conversion_map={StringSpace: py_converters.convert_sequence_space}
)
space_converter = py_converters.SpaceMessageDefaultConverter(message_converter)
dict_converter = py_converters.DictSpaceMessageConverter(space_converter)
space_message = DictSpace(
space={
"key": Space(
string_value=StringSpace(length_range=Int64Range(min=1, max=2)),
)
}
)
converted_space = dict_converter(space_message)
assert isinstance(converted_space, Dict)
assert len(converted_space.spaces) == 1
assert "key" in converted_space.spaces
assert converted_space.spaces["key"].dtype == str
assert converted_space.spaces["key"].size_range[0] == 1
assert converted_space.spaces["key"].size_range[1] == 2
def test_to_dict_space_message_converter():
to_message_converter = py_converters.TypeBasedConverter(
conversion_map={Discrete: py_converters.convert_to_discrete_space_message}
)
to_space_converter = py_converters.ToSpaceMessageConverter(to_message_converter)
to_dict_converter = py_converters.ToDictSpaceMessageConverter(to_space_converter)
space = Dict(name=None, spaces={"key": Discrete(name=None, n=5)})
converted_space = to_dict_converter(space)
assert isinstance(converted_space, DictSpace)
assert len(converted_space.space) == 1
assert "key" in converted_space.space
assert isinstance(converted_space.space["key"], Space)
assert hasattr(converted_space.space["key"], "discrete")
assert converted_space.space["key"].discrete.n == 5
def test_to_space_message_default_converter():
space = Tuple(
name=None,
spaces=[
Dict(
name=None,
spaces={"key": Box(name=None, low=0, high=1, shape=[1, 2])},
)
],
)
converted_space = py_converters.to_space_message_default_converter()(space)
assert isinstance(converted_space, Space)
assert isinstance(
converted_space.space_list.space[0].space_dict.space["key"].float_box,
FloatBox,
)
def test_opaque_json_message_converter():
message = Opaque(format="json://", data='{"key": "val"}'.encode("utf-8"))
converted_message = py_converters.message_default_converter(message)
assert isinstance(converted_message, Mapping)
assert len(converted_message) == 1
assert "key" in converted_message
assert converted_message["key"] == "val"
def test_type_id_dispatch_converter():
def default_converter(msg):
return msg.string_value + "_default"
conversion_map = {
"type_1": lambda msg: msg.string_value + "_type_1",
"type_2": lambda msg: msg.string_value + "_type_2",
}
type_id_converter = py_converters.TypeIdDispatchConverter(
default_converter=default_converter, conversion_map=conversion_map
)
assert type_id_converter(Event(string_value="msg_val")) == "msg_val_default"
assert (
type_id_converter(Event(string_value="msg_val", type_id="type_1"))
== "msg_val_type_1"
)
assert (
type_id_converter(Event(string_value="msg_val", type_id="type_2"))
== "msg_val_type_2"
)
def test_convert_permutation_space_message():
msg = Space(
type_id="permutation",
int64_sequence=Int64SequenceSpace(
length_range=Int64Range(min=5, max=5), scalar_range=Int64Range(min=0, max=4)
),
)
permutation = py_converters.message_default_converter(msg)
assert isinstance(permutation, Permutation)
assert permutation.scalar_range.min == 0
assert permutation.scalar_range.max == 4
assert permutation.size_range[0] == 5
assert permutation.size_range[1] == 5
invalid_permutation_space_msg = Space(
type_id="permutation",
int64_sequence=Int64SequenceSpace(
length_range=Int64Range(min=3, max=5), scalar_range=Int64Range(min=0, max=4)
),
)
with pytest.raises(ValueError, match="Invalid permutation space message"):
py_converters.message_default_converter(invalid_permutation_space_msg)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/service/proto/py_converters_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/service/proto/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/service/runtime/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/service/runtime:benchmark_cache."""
import pytest
from compiler_gym.service.proto import Benchmark, File
from compiler_gym.service.runtime.benchmark_cache import BenchmarkCache, logger
from tests.test_main import main
def make_benchmark_of_size(size_in_bytes: int, target: int = 0) -> Benchmark:
"""Test helper. Generate a benchmark of the given size in bytes."""
target = target or size_in_bytes
bm = Benchmark(program=File(contents=("." * target).encode("utf-8")))
size_offset = bm.ByteSize() - size_in_bytes
if size_offset:
return make_benchmark_of_size(size_in_bytes, size_in_bytes - size_offset)
return bm
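# A brief illustration (hypothetical sizes): the protobuf wrapper adds a few
# bytes of overhead, so the helper first builds a message slightly larger than
# requested, then recurses with a smaller payload until bm.ByteSize() matches
# the requested size exactly, as the sanity-check test below verifies.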
@pytest.mark.parametrize("size", [5, 10, 100, 1024])
def test_make_benchmark_of_size(size: int):
"""Sanity check for test helper function."""
assert make_benchmark_of_size(size).ByteSize() == size
def test_oversized_benchmark_triggers_evict_to_capacity(mocker):
cache = BenchmarkCache(max_size_in_bytes=10)
mocker.spy(cache, "evict_to_capacity")
cache["test"] = make_benchmark_of_size(50)
assert cache.size == 1
assert cache.size_in_bytes == 50
cache.evict_to_capacity.assert_called_once()
def test_replace_existing_item():
cache = BenchmarkCache()
cache["a"] = make_benchmark_of_size(30)
assert cache.size == 1
assert cache.size_in_bytes == 30
cache["a"] = make_benchmark_of_size(50)
assert cache.size == 1
assert cache.size_in_bytes == 50
def test_evict_to_capacity_on_max_size_reached(mocker):
"""Test that cache is evict_to_capacityd when the maximum size is exceeded."""
cache = BenchmarkCache(max_size_in_bytes=100)
mocker.spy(cache, "evict_to_capacity")
mocker.spy(logger, "info")
cache["a"] = make_benchmark_of_size(30)
cache["b"] = make_benchmark_of_size(30)
cache["c"] = make_benchmark_of_size(30)
assert cache.evict_to_capacity.call_count == 0
cache["d"] = make_benchmark_of_size(30)
assert cache.evict_to_capacity.call_count == 1
assert cache.size == 2
assert cache.size_in_bytes == 60
logger.info.assert_called_once_with(
"Evicted %d benchmarks from cache. Benchmark cache size now %d bytes, "
"%d items",
2,
30,
1,
)
def test_oversized_benchmark_emits_warning(mocker):
"""Test that a warning is emitted when a single item is larger than the
entire target cache size.
"""
cache = BenchmarkCache(max_size_in_bytes=10)
mocker.spy(logger, "warning")
cache["test"] = make_benchmark_of_size(50)
logger.warning.assert_called_once_with(
"Adding new benchmark with size %d bytes exceeds total target cache "
"size of %d bytes",
50,
10,
)
def test_contains():
cache = BenchmarkCache(max_size_in_bytes=100)
cache["a"] = make_benchmark_of_size(30)
assert "a" in cache
assert "b" not in cache
def test_getter():
cache = BenchmarkCache(max_size_in_bytes=100)
a = make_benchmark_of_size(30)
b = make_benchmark_of_size(40)
cache["a"] = a
cache["b"] = b
assert cache["a"] == a
assert cache["a"] != b
assert cache["b"] == b
with pytest.raises(KeyError, match="c"):
cache["c"]
def test_evict_to_capacity_on_maximum_size_update(mocker):
"""Test that cache is evict_to_capacityd when the maximum size is exceeded."""
cache = BenchmarkCache(max_size_in_bytes=100)
mocker.spy(cache, "evict_to_capacity")
mocker.spy(logger, "info")
cache["a"] = make_benchmark_of_size(30)
cache["b"] = make_benchmark_of_size(30)
cache["c"] = make_benchmark_of_size(30)
assert cache.evict_to_capacity.call_count == 0
cache.max_size_in_bytes = 50
assert cache.evict_to_capacity.call_count == 1
assert cache.size_in_bytes == 30
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/service/runtime/benchmark_cache_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/views/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/views."""
import pytest
from compiler_gym.views import RewardView
from tests.test_main import main
class MockReward:
def __init__(self, name, ret=None):
self.name = name
self.ret = list(reversed(ret or []))
self.observation_spaces = []
def update(self, *args, **kwargs):
ret = self.ret[-1]
del self.ret[-1]
return ret
class MockObservationView:
pass
def test_empty_space():
reward = RewardView([], MockObservationView())
with pytest.raises(ValueError) as ctx:
_ = reward["foo"]
assert str(ctx.value) == "No reward spaces"
def test_invalid_reward_name():
reward = RewardView([MockReward(name="foo")], MockObservationView())
with pytest.raises(KeyError):
_ = reward["invalid"]
def test_reward_values():
spaces = [
MockReward(name="codesize", ret=[-5]),
MockReward(name="runtime", ret=[10]),
]
reward = RewardView(spaces, MockObservationView())
value = reward["codesize"]
assert value == -5
value = reward["runtime"]
assert value == 10
def test_reward_values_bound_methods():
spaces = [
MockReward(name="codesize", ret=[-5]),
MockReward(name="runtime", ret=[10]),
]
reward = RewardView(spaces, MockObservationView())
value = reward.codesize()
assert value == -5
value = reward.runtime()
assert value == 10
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/views/reward_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/views."""
import numpy as np
import pytest
from compiler_gym.errors import ServiceError
from compiler_gym.service.proto import (
DoubleBox,
DoubleTensor,
Int64Box,
Int64Range,
Int64Tensor,
ObservationSpace,
Space,
StringSpace,
)
from compiler_gym.views import ObservationView
from tests.test_main import main
class MockRawStep:
"""Mock for the raw_step callack of ObservationView."""
def __init__(self, ret=None):
self.called_observation_spaces = []
self.ret = list(reversed(ret or [None]))
def __call__(self, actions, observation_spaces, reward_spaces):
assert not actions
assert len(observation_spaces) == 1
assert not reward_spaces
self.called_observation_spaces.append(observation_spaces[0].id)
ret = self.ret[-1]
del self.ret[-1]
return [ret], [], False, {}
def test_empty_space():
with pytest.raises(ValueError) as ctx:
ObservationView(MockRawStep(), [])
assert str(ctx.value) == "No observation spaces"
def test_observed_value_types():
spaces = [
ObservationSpace(
name="ir",
space=Space(string_value=StringSpace(length_range=Int64Range(min=0))),
),
ObservationSpace(
name="features",
space=Space(
int64_box=Int64Box(
low=Int64Tensor(shape=[2], value=[-100, -100]),
high=Int64Tensor(shape=[2], value=[100, 100]),
),
),
),
ObservationSpace(
name="dfeat",
space=Space(
double_box=DoubleBox(
low=DoubleTensor(shape=[1], value=[0.5]),
high=DoubleTensor(shape=[1], value=[2.5]),
),
),
),
ObservationSpace(
name="binary",
space=Space(int64_value=Int64Range(min=5, max=5)),
),
]
mock = MockRawStep(
ret=[
"Hello, IR",
[1.0, 2.0],
[-5, 15],
b"Hello, bytes\0",
"Hello, IR",
[1.0, 2.0],
[-5, 15],
b"Hello, bytes\0",
]
)
observation = ObservationView(mock, spaces)
value = observation["ir"]
assert isinstance(value, str)
assert value == "Hello, IR"
value = observation["dfeat"]
np.testing.assert_array_almost_equal(value, [1.0, 2.0])
value = observation["features"]
np.testing.assert_array_equal(value, [-5, 15])
value = observation["binary"]
assert value == b"Hello, bytes\0"
# Check that the correct observation_space_list indices were used.
assert mock.called_observation_spaces == ["ir", "dfeat", "features", "binary"]
mock.called_observation_spaces = []
# Repeat the above tests using the generated bound methods.
value = observation.ir()
assert isinstance(value, str)
assert value == "Hello, IR"
value = observation.dfeat()
np.testing.assert_array_almost_equal(value, [1.0, 2.0])
value = observation.features()
np.testing.assert_array_equal(value, [-5, 15])
value = observation.binary()
assert value == b"Hello, bytes\0"
# Check that the correct observation_space_list indices were used.
assert mock.called_observation_spaces == ["ir", "dfeat", "features", "binary"]
def test_observation_when_raw_step_returns_incorrect_no_of_observations():
"""Test that a ServiceError is propagated when raw_step() returns unexpected
number of observations."""
def make_failing_raw_step(n: int):
def failing_raw_step(*args, **kwargs):
"""A callback that returns done=True."""
del args # Unused
del kwargs # Unused
return ["ir"] * n, None, False, {}
return failing_raw_step
spaces = [
ObservationSpace(
name="ir",
space=Space(int64_value=Int64Range(min=0)),
)
]
observation = ObservationView(make_failing_raw_step(0), spaces)
with pytest.raises(
ServiceError, match=r"^Expected 1 'ir' observation but the service returned 0$"
):
observation["ir"]
observation = ObservationView(make_failing_raw_step(3), spaces)
with pytest.raises(
ServiceError, match=r"^Expected 1 'ir' observation but the service returned 3$"
):
observation["ir"]
def test_observation_when_raw_step_returns_done():
"""Test that a SessionNotFoundError from the raw_step() callback propagates as a"""
def make_failing_raw_step(error_msg=None):
def failing_raw_step(*args, **kwargs):
"""A callback that returns done=True."""
info = {}
if error_msg:
info["error_details"] = error_msg
return [], None, True, info
return failing_raw_step
spaces = [
ObservationSpace(
name="ir",
space=Space(int64_value=Int64Range(min=0)),
)
]
observation = ObservationView(make_failing_raw_step(), spaces)
with pytest.raises(ServiceError, match=r"^Failed to compute observation 'ir'$"):
observation["ir"] # pylint: disable=pointless-statement
observation = ObservationView(make_failing_raw_step("Oh no!"), spaces)
with pytest.raises(
ServiceError, match=r"^Failed to compute observation 'ir': Oh no!$"
):
observation["ir"] # pylint: disable=pointless-statement
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/views/observation_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Evaluate tabular_q policy for leaderboard."""
import os
import sys
from typing import Dict
from absl import app, flags
from compiler_gym.envs import LlvmEnv
from compiler_gym.leaderboard.llvm_instcount import eval_llvm_instcount_policy
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + "/../../../examples")
from tabular_q import ( # noqa pylint: disable=wrong-import-position
StateActionTuple,
rollout,
train,
)
FLAGS = flags.FLAGS
def train_and_run(env: LlvmEnv) -> None:
"""Run tabular Q learning on an environment"""
FLAGS.log_every = 0 # Disable printing to stdout
q_table: Dict[StateActionTuple, float] = {}
env.observation_space = "Autophase"
training_env = env.fork()
train(q_table, training_env)
training_env.close()
rollout(q_table, env, printout=False)
if __name__ == "__main__":
app.run(eval_llvm_instcount_policy(train_and_run))
|
CompilerGym-development
|
leaderboard/llvm_instcount/tabular_q/tabular_q_eval.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for //leaderboard/llvm_instcount/tabular_q_eval."""
import pytest
from absl import flags
from compiler_gym.leaderboard.llvm_instcount import eval_llvm_instcount_policy
from leaderboard.llvm_instcount.tabular_q.tabular_q_eval import train_and_run
from tests.test_main import main as _test_main
FLAGS = flags.FLAGS
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_tabular_q():
FLAGS.unparse_flags()
FLAGS(
[
"argv0",
"--n=1",
"--max_benchmarks=1",
"--nproc=1",
"--novalidate",
]
)
with pytest.raises(SystemExit):
eval_llvm_instcount_policy(train_and_run)
if __name__ == "__main__":
_test_main()
|
CompilerGym-development
|
leaderboard/llvm_instcount/tabular_q/tabular_q_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""An implementation of a random search policy for the LLVM codesize task.
The search is the same as the included compiler_gym.bin.random_search. See
README.md in this directory for a detailed description.
"""
from time import sleep
import gym
from absl import flags
from compiler_gym.envs import LlvmEnv
from compiler_gym.leaderboard.llvm_instcount import eval_llvm_instcount_policy
from compiler_gym.random_search import RandomAgentWorker
flags.DEFINE_float(
"patience_ratio",
1.0,
"The ratio of patience to the size of the action space. "
"Patience = patience_ratio * action_space_size",
)
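# As a rough illustration (hypothetical numbers): with the default
# patience_ratio of 1.0 and an action space of ~120 flags, each random search
# worker abandons its current trajectory after roughly 120 consecutive steps
# without improvement.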
flags.DEFINE_integer(
"search_time",
60,
"The minimum number of seconds to run the random search for. After this "
"many seconds have elapsed the best results are aggregated from the "
"search threads and the search is terminated.",
)
FLAGS = flags.FLAGS
def random_search(env: LlvmEnv) -> None:
"""Run a random search on the given environment."""
patience = int(env.action_space.n * FLAGS.patience_ratio)
# Start parallel random search workers.
workers = [
RandomAgentWorker(
make_env=lambda: gym.make("llvm-ic-v0", benchmark=env.benchmark),
patience=patience,
)
for _ in range(FLAGS.nproc)
]
for worker in workers:
worker.start()
sleep(FLAGS.search_time)
# Stop the workers.
for worker in workers:
worker.alive = False
for worker in workers:
worker.join()
# Aggregate the best results.
best_actions = []
best_reward = -float("inf")
for worker in workers:
if worker.best_returns > best_reward:
best_reward, best_actions = worker.best_returns, list(worker.best_actions)
# Replay the best sequence of actions to produce the final environment
# state.
for action in best_actions:
_, _, done, _ = env.step(action)
assert not done
if __name__ == "__main__":
eval_llvm_instcount_policy(random_search)
|
CompilerGym-development
|
leaderboard/llvm_instcount/random_search/random_search.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for //leaderboard/llvm_instcount/random_search."""
import pytest
from leaderboard.llvm_instcount.random_search.random_search import (
eval_llvm_instcount_policy,
random_search,
)
from tests.pytest_plugins.common import set_command_line_flags
from tests.test_main import main as _test_main
def test_random_search():
set_command_line_flags(
[
"argv0",
"--n=1",
"--max_benchmarks=1",
"--search_time=1",
"--nproc=1",
"--patience_ratio=0.1",
"--novalidate",
]
)
with pytest.raises(SystemExit):
eval_llvm_instcount_policy(random_search)
if __name__ == "__main__":
_test_main()
|
CompilerGym-development
|
leaderboard/llvm_instcount/random_search/random_search_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for //leaderboard/llvm_instcount/e_greedy."""
import sys
from concurrent.futures import ThreadPoolExecutor
import pytest
from absl import flags
from compiler_gym.envs import LlvmEnv
from compiler_gym.leaderboard.llvm_instcount import eval_llvm_instcount_policy
from leaderboard.llvm_instcount.e_greedy.e_greedy import (
e_greedy_search,
select_best_action,
)
from tests.test_main import main as _test_main
FLAGS = flags.FLAGS
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_random_search():
sys.argv = [
"argv0",
"--n=1",
"--max_benchmarks=1",
"--nproc=1",
"--novalidate",
]
with pytest.raises(SystemExit):
eval_llvm_instcount_policy(e_greedy_search)
def test_select_best_action_closed_environment(env: LlvmEnv):
"""Test that select_best_action() recovers from an environment whose service
has closed."""
env.reward_space = "IrInstructionCount"
env.reset(benchmark="cbench-v1/crc32")
with ThreadPoolExecutor() as executor:
best_a = select_best_action(env, executor)
env.close()
best_b = select_best_action(env, executor)
assert best_a == best_b
if __name__ == "__main__":
_test_main()
|
CompilerGym-development
|
leaderboard/llvm_instcount/e_greedy/e_greedy_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""ϵ-greedy policy for LLVM codesize."""
import logging
import random
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import NamedTuple
from absl import flags
from compiler_gym.envs import CompilerEnv, LlvmEnv
from compiler_gym.leaderboard.llvm_instcount import eval_llvm_instcount_policy
flags.DEFINE_float(
"epsilon", 0, "The ratio of patience to the size of the action space. "
)
FLAGS = flags.FLAGS
class RewardAction(NamedTuple):
"""An action -> reward tuple for a single step()."""
    # Put reward first in the tuple so that RewardAction instances are ordered by reward.
reward: float
action: int
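# A small illustration (hypothetical values): NamedTuples compare element by
# element and reward is the first field, so ordering follows the reward:
#   RewardAction(reward=0.5, action=3) > RewardAction(reward=0.2, action=7)  # True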
def select_best_action(env: CompilerEnv, executor: ThreadPoolExecutor) -> RewardAction:
"""Determine the best action by trying all possible options and ranking them."""
def eval_action(fkd: CompilerEnv, action: int) -> RewardAction:
"""Evaluate the given action."""
try:
_, reward, _, _ = fkd.step(action)
finally:
fkd.close()
return RewardAction(reward=reward, action=action)
# Select the best action using the reward that the action produces, then
# action index as a tie-breaker. Do this by creating n forks of the
    # environment, one for every action, and evaluating the actions in parallel
# threads. Note that calls to fork() occur in the main thread for thread
# safety in case of environment restart.
futures = (
executor.submit(eval_action, env.fork(), action)
for action in range(env.action_space.n)
)
best_reward_action = RewardAction(reward=-float("inf"), action=0)
for future in as_completed(futures):
reward_action: RewardAction = future.result()
if reward_action > best_reward_action:
best_reward_action = reward_action
return best_reward_action
def e_greedy_search(env: LlvmEnv) -> None:
"""Run an ϵ-greedy search on an environment."""
step_count = 0
with ThreadPoolExecutor(max_workers=FLAGS.nproc) as executor:
while True:
step_count += 1
if random.random() < FLAGS.epsilon:
# Exploratory step. Randomly select and apply an action.
action = env.action_space.sample()
_, reward, done, _ = env.step(action)
logging.debug(
"Step %d, exploratory action %s, reward %.4f, cumulative %.4f",
step_count,
env.action_space.flags[action],
reward,
env.episode_reward,
)
else:
# Select the best reward and apply it, or terminate the search
# if no positive reward is attainable.
best = select_best_action(env, executor)
if best.reward <= 0:
logging.debug(
"Greedy search terminated after %d steps, "
"no further reward attainable",
step_count,
)
done = True
else:
_, reward, done, _ = env.step(best.action)
logging.debug(
"Step %d, greedy action %s, reward %.4f, cumulative %.4f",
step_count,
env.action_space.flags[best.action],
reward,
env.episode_reward,
)
if env.reward_space.deterministic and reward != best.reward:
logging.warning(
"Action %s produced different reward on replay, %.4f != %.4f",
env.action_space.flags[best.action],
best.reward,
reward,
)
# Stop the search if we have reached a terminal state.
if done:
return
if __name__ == "__main__":
eval_llvm_instcount_policy(e_greedy_search)
|
CompilerGym-development
|
leaderboard/llvm_instcount/e_greedy/e_greedy.py
|
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""A script to auto-populate RST files from the CompilerGym header files.
Usage:
$ python generate_cc_rst.py
"""
import os
from pathlib import Path
from typing import List
SOURCES = Path("../compiler_gym")
OUTPUT_DIR = Path("source/cc")
def header(message, underline="="):
underline = underline * (len(str(message)) // len(underline))
return f"{message}\n{underline}"
def main():
valid_files: List[Path] = []
for root, _, files in os.walk(SOURCES):
if "third_party" in root:
continue
headers = [
f
for f in files
if (f.endswith(".h") or f.endswith(".proto")) and not f.endswith("Impl.h")
]
if not headers:
continue
while root.startswith("../"):
root = root[len("../") :]
root = Path(root)
(OUTPUT_DIR / root).parent.mkdir(parents=True, exist_ok=True)
output_path = Path(f"{OUTPUT_DIR / root}.rst")
valid_files.append(output_path)
print("Generating", output_path)
with open(output_path, "w") as f:
print(header(str(root)), file=f)
print(file=f)
print(".. contents::", file=f)
print(" :local:", file=f)
for header_name in headers:
print(file=f)
print(header(header_name, "-"), file=f)
print(file=f)
print(f':code:`#include "{root}/{header_name}"`', file=f)
print(file=f)
print(f".. doxygenfile:: {root}/{header_name}", file=f)
for root, _, files in os.walk(OUTPUT_DIR):
for file in files:
path = Path(root) / file
if path not in valid_files:
print("rm", path)
path.unlink()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
docs/generate_cc_rst.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Configuration file for the Sphinx documentation builder.
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import sphinx.errors
sphinx.application.ExtensionError = sphinx.errors.ExtensionError
# -- Project information -----------------------------------------------------
project = "CompilerGym"
copyright = "Meta Platforms, Inc"
author = "Meta Platforms, Inc"
# Read the version from the //:VERSION file.
with open("../../VERSION") as f:
version = f.read().strip()
release = version
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
"sphinx_rtd_theme",
"sphinx.ext.autosectionlabel",
"sphinxemoji.sphinxemoji",
"breathe",
"sphinx_reredirects",
]
autosectionlabel_prefix_document = True
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
html_theme = "sphinx_rtd_theme"
html_theme_options = {
"analytics_id": "G-WJN2CKJJKH",
"collapse_navigation": True,
"display_version": True,
"logo_only": True,
}
html_css_files = [
"css/custom.css",
]
html_static_path = ["_static"]
html_logo = "_static/img/logo.png"
html_favicon = "_static/img/favicon.png"
# -- Breathe configuration -
breathe_default_project = "CompilerGym"
breathe_projects = {"CompilerGym": "../doxygen/xml"}
redirects = {
"explorer/index.html": "https://compilergym.metademolab.com/",
}
|
CompilerGym-development
|
docs/source/conf.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""A CompilerGym API and web frontend.
This exposes an API with two operations:
1. /api/v4/describe
Describe the CompilerGym interface. This generates a list of action
names and their numeric values, a list of benchmark datasets and the
benchmarks within them, and a list of reward spaces.
Example usage:
$ curl localhost:5000/api/v4/describe
{
"actions": {
"-adce": 1,
...
"-tailcallelim": 122
},
"benchmarks": {
"benchmark://anghabench-v1": [
"8cc/extr_buffer.c_buf_append",
...
"8cc/extr_buffer.c_quote_cstring_len"
],
"benchmark://blas-v0": [
...
],
"benchmark://cbench-v1": [
"adpcm",
...
"jpeg-c"
],
...
},
"rewards": [
"IrInstructionCount",
...
"ObjectTextSizeOz"
]
}
    2. /api/v4/step
Compute the state from the given environment description. Query
arguments:
benchmark: The name of the benchmark. If "benchmark_source" is set
(see below), this is the name of the local file that the user
selected.
benchmark_source: An inline string of code to use as the benchmark.
reward: The name of the reward signal to use.
        actions: An optional, comma-separated list of actions to run.
        all_states: An optional flag. If set to "1", a list of states is
            returned, one for each action; otherwise only the state for the
            final action is returned.
Example usage:
$ curl 'localhost:5000/api/v4/step?benchmark=benchmark://cbench-v1/adpcm&reward=IrInstructionCountOz&actions=1,2,3'
{
"commandline": "opt - ...",
"rewards": [0.003],
"done": false,
"ir": "...",
"states": [
{
"instcount": {...},
"autophase": {...},
"reward": 0.003
},
]
}
"""
import logging
import os
import sys
import tempfile
from functools import lru_cache
from itertools import islice
from pathlib import Path
from threading import Lock
from typing import Any, Dict, List, Optional
from flask import Flask, jsonify, request, send_file
from flask_cors import CORS
from pydantic import BaseModel
import compiler_gym
from compiler_gym.datasets.benchmark import Benchmark
from compiler_gym.envs import LlvmEnv
from compiler_gym.envs.llvm import make_benchmark
from compiler_gym.util.truncate import truncate
app = Flask("compiler_gym")
CORS(app)
resource_dir: Path = (Path(__file__).parent / "frontends/compiler_gym/build").absolute()
logger = logging.getLogger(__name__)
# A single compiler environment that is used to serve all endpoints.
env: LlvmEnv = compiler_gym.make("llvm-v0")
env_lock = Lock()
class StateToVisualize(BaseModel):
"""Encapsulates the state to visualize in the frontend."""
instcount: Dict[str, int]
autophase: Dict[str, int]
# The reward signal measures how "good" the previous action was. Over time
# the sequence of actions that produces the highest cumulative reward is the
# best:
reward: float
class StepRequest(BaseModel):
"""User arguments to /api/v4/step."""
# The name of the benchmark.
benchmark: str
# The inline source code for a benchmark.
benchmark_source: Optional[str]
# The reward space to use.
reward: str
# A comma-separated list of actions to perform.
actions: List[int]
# Whether to return a state for every action, or only the final action. See
# StepReply.states.
all_states: bool
@classmethod
def from_request(cls):
"""Parse the arguments from Flask's request arguments."""
def required_arg(name: str) -> str:
value = request.args.get(name)
if not value:
raise ValueError(f"Missing requirement argument: {name}")
return value
actions_str: str = request.args.get("actions")
actions: List[int] = (
[int(x) for x in actions_str.split(",")] if actions_str else []
)
return cls(
benchmark=required_arg("benchmark"),
benchmark_source=request.args.get("benchmark_source"),
reward=required_arg("reward"),
actions=actions,
all_states=request.args.get("all_states", "0") == "1",
)
class StepReply(BaseModel):
"""The data returned by a call to /api/v4/step."""
# This summarizes the sequence of actions that the user has selected so far:
commandline: str
# If the compiler environment dies, crashes, or encounters some
# unrecoverable error, this "done" flag is set. At this point the user
# should start a new session.
done: bool
# The current LLVM-IR:
ir: str
# A list of states to visualize, ordered from first to last.
states: List[StateToVisualize]
@app.route("/api/v4/describe")
def describe():
with env_lock:
env.reset()
return jsonify(
{
# A mapping from dataset name to benchmark name. To generate a full
# benchmark URI, join the two values with a '/'. E.g. given a benchmark
# "qsort" in the dataset "benchmark://cbench-v1", the full URI is
# "benchmark://cbench-v1/qsort".
"benchmarks": {
dataset.name: list(
islice(
(
x[len(dataset.name) + 1 :]
for x in dataset.benchmark_uris()
),
10,
)
)
for dataset in env.datasets
},
# A mapping from the name of an action to the numeric value. This
# numeric value is what is passed as argument to the step() function.
"actions": {k: v for v, k in enumerate(env.action_space.flags)},
                # A list of reward space names. The reward space to use is
                # selected in the /api/v4/step request.
"rewards": sorted(list(env.reward.spaces.keys())),
}
)
@lru_cache(maxsize=16)
def _make_benchmark(name: str, source: str) -> Benchmark:
"""Construct a benchmark from a file name and contents."""
with tempfile.TemporaryDirectory() as d:
tmpfile = Path(d) / Path(name).name
with open(tmpfile, "w") as f:
f.write(source)
try:
return make_benchmark(tmpfile, timeout=60)
except Exception as e:
raise ValueError(f"Failed to compiler benchmark {name}: {e}")
def _step(request: StepRequest) -> StepReply:
"""Run the actual step with parsed arguments."""
states: List[StateToVisualize] = []
with env_lock:
env.reward_space = request.reward
# Create a benchmark from user-supplied code, or just look up the
# benchmark by name.
if request.benchmark_source:
benchmark = _make_benchmark(request.benchmark, request.benchmark_source)
else:
benchmark = request.benchmark
env.reset(benchmark=benchmark)
# Replay all actions except the last one.
if request.all_states:
# Replay actions one at a time to receive incremental rewards. The
# first item represents the state prior to any actions.
(instcount, autophase), _, done, info = env.multistep(
actions=[],
observation_spaces=[
env.observation.spaces["InstCountDict"],
env.observation.spaces["AutophaseDict"],
],
)
if done:
raise ValueError(
f"Failed to compute initial state: {info['error_details']}"
)
states.append(
StateToVisualize(
instcount=instcount,
autophase=autophase,
reward=0,
)
)
for action in request.actions[:-1]:
(instcount, autophase), reward, done, info = env.step(
action,
observation_spaces=[
env.observation.spaces["InstCountDict"],
env.observation.spaces["AutophaseDict"],
],
)
states.append(
StateToVisualize(
instcount=instcount,
autophase=autophase,
reward=reward,
)
)
if done:
raise ValueError(
f"Failed to apply action {action}: {info['error_details']}"
)
else:
# Replay actions in a single batch.
_, _, done, info = env.step(request.actions[:-1])
if done:
raise ValueError(
f"Failed to apply actions {request.actions}: {info['error_details']}"
)
# Perform the final action.
(ir, instcount, autophase), (reward,), done, _ = env.multistep(
actions=request.actions[-1:],
observation_spaces=[
env.observation.spaces["Ir"],
env.observation.spaces["InstCountDict"],
env.observation.spaces["AutophaseDict"],
],
reward_spaces=[env.reward_space],
)
states.append(
StateToVisualize(
instcount=instcount,
autophase=autophase,
reward=reward,
)
)
return StepReply(
commandline=env.action_space.to_string(env.actions),
done=done,
ir=truncate(ir, max_line_len=250, max_lines=1024),
states=states,
)
@app.route("/api/v4/step")
def step() -> Dict[str, Any]:
try:
request = StepRequest.from_request()
except ValueError as e:
return jsonify({"error": f"Invalid actions: {e}"}), 400
try:
return jsonify(_step(request).dict())
except Exception as e:
return jsonify({"error": str(e)}), 400
# Web endpoints.
@app.route("/")
def index_resource():
return send_file(resource_dir / "index.html")
@app.route("/<path>")
def root_resource(path: str):
return send_file(resource_dir / path)
@app.route("/static/css/<path>")
def css_resource(path: str):
return send_file(resource_dir / "static/css/" / path)
@app.route("/static/js/<path>")
def js_resource(path: str):
return send_file(resource_dir / "static/js/" / path)
if __name__ == "__main__":
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stderr)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.info("Serving from %s", resource_dir)
app.run(port=int(os.environ.get("PORT", "5000")), host="0.0.0.0")
|
CompilerGym-development
|
www/www.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/bin:random_walk."""
import re
from absl.flags import FLAGS
from random_walk import run_random_walk
import compiler_gym
from compiler_gym.util.capture_output import capture_output
def test_run_random_walk_smoke_test():
FLAGS.unparse_flags()
FLAGS(["argv0"])
with capture_output() as out:
with compiler_gym.make("llvm-autophase-ic-v0") as env:
env.benchmark = "cbench-v1/crc32"
run_random_walk(env=env, step_count=5)
print(out.stdout)
# Note the ".*" before and after the step count to ignore the shell
# formatting.
assert re.search(r"Completed .*5.* steps in ", out.stdout)
|
CompilerGym-development
|
examples/random_walk_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import explore
def test_run_explore_smoke_test(capsys):
explore.main(
[
"explore",
"--env=llvm-ic-v0",
"--benchmark=cbench-v1/dijkstra",
"--episode_length=2",
"--explore_actions=-newgvn,-instcombine,-mem2reg",
"--nproc=2",
]
)
out, _ = capsys.readouterr()
assert "depth 2 of 2" in out
|
CompilerGym-development
|
examples/explore_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for //compiler_gym/bin:actor_critic."""
import sys
from absl import flags
from actor_critic import main
from compiler_gym.util.capture_output import capture_output
FLAGS = flags.FLAGS
def test_run_actor_critic_smoke_test():
flags = [
"argv0",
"--seed=0",
"--episode_len=2",
"--episodes=10",
"--log_interval=5",
"--benchmark=cbench-v1/crc32",
]
sys.argv = flags
FLAGS.unparse_flags()
FLAGS(flags)
with capture_output() as out:
main(["argv0"])
assert "Final performance (avg reward)" in out.stdout
|
CompilerGym-development
|
examples/actor_critic_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Simple compiler gym tabular q learning example.
Usage: python tabular_q.py --benchmark=<benchmark>
Using selected features from Autophase observation space, given a specific training
program as gym environment, find the best action sequence using online q learning.
"""
import random
from typing import Dict, NamedTuple
import gym
from absl import app, flags
import compiler_gym.util.flags.episode_length # noqa Flag definition.
import compiler_gym.util.flags.episodes # noqa Flag definition.
import compiler_gym.util.flags.learning_rate # noqa Flag definition.
from compiler_gym.util.flags.benchmark_from_flags import benchmark_from_flags
from compiler_gym.util.timer import Timer
flags.DEFINE_list(
"tabular_q_actions",
[
"-break-crit-edges",
"-early-cse-memssa",
"-gvn-hoist",
"-gvn",
"-instcombine",
"-instsimplify",
"-jump-threading",
"-loop-reduce",
"-loop-rotate",
"-loop-versioning",
"-mem2reg",
"-newgvn",
"-reg2mem",
"-simplifycfg",
"-sroa",
],
"A list of action names to explore from.",
)
flags.DEFINE_float("discount", 1.0, "The discount factor.")
flags.DEFINE_list(
"features_indices",
[19, 22, 51],
"Indices of Alphaphase features that are used to construct a state",
)
flags.DEFINE_integer(
"log_every", 50, "number of episode interval where progress is reported."
)
flags.DEFINE_float("epsilon", 0.2, "Epsilon rate of exploration. ")
FLAGS = flags.FLAGS
class StateActionTuple(NamedTuple):
"""An state action tuple used as q-table keys"""
autophase0: int
autophase1: int
autophase2: int
cur_step: int
action_index: int
def make_q_table_key(autophase_feature, action, step):
"""Create a hashable Q-table key.
For tabular learning we will be constructing a Q-table which maps a
(state, action) pair to an expected (remaining) reward. The purpose of this
function is to convert the (state, action) properties into a hashable tuple
that can be used as a key for a Q-table dictionary.
    In the CompilerGym setup, encoding the true state of the program is not
    obvious, so this solution uses observations from the Autophase feature space
    instead. The default flags hand-pick 3 indices of the Autophase features that
    appear to change a lot during optimization.
    The current step in the episode is also added to the state representation.
    In the fixed-episode-length setup used here, reaching the same state at
    different steps must be distinguished, since the number of remaining
    optimization steps affects the final reward.
Finally, we add the action index to the key.
"""
return StateActionTuple(
*autophase_feature[FLAGS.features_indices],
step,
FLAGS.tabular_q_actions.index(action),
)
def select_action(q_table, ob, step, epsilon=0.0):
qs = [
q_table.get(make_q_table_key(ob, act, step), -1)
for act in FLAGS.tabular_q_actions
]
if random.random() < epsilon:
return random.choice(FLAGS.tabular_q_actions)
max_indices = [i for i, x in enumerate(qs) if x == max(qs)]
# Breaking ties at random by selecting any of the indices.
return FLAGS.tabular_q_actions[random.choice(max_indices)]
def get_max_q_value(q_table, ob, step):
max_q = 0
for act in FLAGS.tabular_q_actions:
hashed = make_q_table_key(ob, act, step)
max_q = max(q_table.get(hashed, 0), max_q)
return max_q
def rollout(qtable, env, printout=False):
    # Roll out the policy greedily using the given Q-table.
observation = env.reset()
action_seq, rewards = [], []
for i in range(FLAGS.episode_length):
a = select_action(qtable, observation, i)
action_seq.append(a)
observation, reward, done, info = env.step(env.action_space.flags.index(a))
rewards.append(reward)
if done:
break
if printout:
print(
"Resulting sequence: ", ",".join(action_seq), f"total reward {sum(rewards)}"
)
return sum(rewards)
def train(q_table, env):
    # Keep an old copy of the Q-table to track training progress.
prev_q = {}
# Run the training process "online", where the policy evaluation and
# policy improvement happens directly after one another.
for i in range(1, FLAGS.episodes + 1):
current_length = 0
observation = env.reset()
while current_length < FLAGS.episode_length:
# Run epsilon greedy policy to allow exploration.
a = select_action(q_table, observation, current_length, FLAGS.epsilon)
hashed = make_q_table_key(observation, a, current_length)
if hashed not in q_table:
q_table[hashed] = 0
            # Take a step in the environment, recording the reward and state transition.
# Effectively we are evaluating the policy by taking a step in the
# environment.
observation, reward, done, info = env.step(env.action_space.flags.index(a))
if done:
break
current_length += 1
# Compute the target value of the current state, by using the current
# step-reward and bootstrapping from the next state. In Q-learning,
# a greedy policy is implied by the Q-table, thus we can approximate
# the expected reward at the next state as the maximum value of
# all the associated state-action pair rewards (Q values). A discount
            # factor can be used to emphasize immediate rewards and encourage the
            # agent to achieve high rewards sooner rather than later.
target = reward + FLAGS.discount * get_max_q_value(
q_table, observation, current_length
)
# Update Q value. Instead of replacing the Q value at the current
# state action pair directly, a learning rate is introduced to interpolate
# between the current value and target value, effectively damping the
            # changes. Updating the Q-table in this way also updates the implied policy.
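            # In standard Q-learning notation, the update below is
            #   Q(s, a) <- (1 - lr) * Q(s, a) + lr * (r + discount * max_a' Q(s', a'))
            # with lr = FLAGS.learning_rate and discount = FLAGS.discount.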
q_table[hashed] = (
FLAGS.learning_rate * target
+ (1 - FLAGS.learning_rate) * q_table[hashed]
)
if FLAGS.log_every and i % FLAGS.log_every == 0:
def compare_qs(q_old, q_new):
diff = [q_new[k] - v for k, v in q_old.items()]
return sum(diff) / len(diff) if diff else 0.0
difference = compare_qs(prev_q, q_table)
# Evaluate the current policy
cur_rewards = rollout(q_table, env)
print(
f"episode={i:4d}, cur_reward={cur_rewards:.5f}, Q-table_entries={len(q_table):5d}, Q-table_diff={difference:.7f}"
)
prev_q = q_table.copy()
def main(argv):
# Initialize a Q table.
q_table: Dict[StateActionTuple, float] = {}
benchmark = benchmark_from_flags()
assert benchmark, "You must specify a benchmark using the --benchmark flag"
with gym.make("llvm-ic-v0", benchmark=benchmark) as env:
env.observation_space = "Autophase"
# Train a Q-table.
with Timer("Constructing Q-table"):
train(q_table, env)
# Rollout resulting policy.
rollout(q_table, env, printout=True)
if __name__ == "__main__":
app.run(main)
|
CompilerGym-development
|
examples/tabular_q.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Simple PT compiler gym actor-critic RL example.
Usage: python actor_critic.py
Use --help to list the configurable options.
The objective is to minimize the size of a benchmark (program) using
LLVM compiler passes. At each step there is a choice of which pass to
pick next and an episode consists of a sequence of such choices,
yielding the number of saved instructions as the overall reward.
For simplification of the learning task, only a (configurable) subset
of LLVM passes are considered and every episode has the same
(configurable) length.
Based on the PyTorch actor-critic example:
https://github.com/pytorch/examples/blob/master/reinforcement_learning/actor_critic.py
"""
import random
import statistics
from collections import namedtuple
from typing import List
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from absl import app, flags
from torch.distributions import Categorical
import compiler_gym.util.flags.episodes # noqa Flag definition.
import compiler_gym.util.flags.learning_rate # noqa Flag definition.
import compiler_gym.util.flags.seed # noqa Flag definition.
from compiler_gym.util.flags.benchmark_from_flags import benchmark_from_flags
from compiler_gym.util.flags.env_from_flags import env_from_flags
from compiler_gym.wrappers import ConstrainedCommandline, TimeLimit
flags.DEFINE_list(
"flags",
[
"-break-crit-edges",
"-early-cse-memssa",
"-gvn-hoist",
"-gvn",
"-instcombine",
"-instsimplify",
"-jump-threading",
"-loop-reduce",
"-loop-rotate",
"-loop-versioning",
"-mem2reg",
"-newgvn",
"-reg2mem",
"-simplifycfg",
"-sroa",
],
"List of optimizatins to explore.",
)
flags.DEFINE_integer("episode_len", 5, "Number of transitions per episode.")
flags.DEFINE_integer("hidden_size", 64, "Latent vector size.")
flags.DEFINE_integer("log_interval", 100, "Episodes per log output.")
flags.DEFINE_integer("iterations", 1, "Times to redo entire training.")
flags.DEFINE_float("exploration", 0.0, "Rate to explore random transitions.")
flags.DEFINE_float("mean_smoothing", 0.95, "Smoothing factor for mean normalization.")
flags.DEFINE_float("std_smoothing", 0.4, "Smoothing factor for std dev normalization.")
eps = np.finfo(np.float32).eps.item()
SavedAction = namedtuple("SavedAction", ["log_prob", "value"])
FLAGS = flags.FLAGS
class MovingExponentialAverage:
"""Simple class to calculate exponential moving averages."""
def __init__(self, smoothing_factor):
self.smoothing_factor = smoothing_factor
self.value = None
def next(self, entry):
assert entry is not None
if self.value is None:
self.value = entry
else:
self.value = (
entry * (1 - self.smoothing_factor) + self.value * self.smoothing_factor
)
return self.value
class HistoryObservation(gym.ObservationWrapper):
"""For the input representation (state), if there are N possible
actions, then an action x is represented by a one-hot vector V(x)
with N entries. A sequence of M actions (x, y, ...) is represented
by an MxN matrix of 1-hot vectors (V(x), V(y), ...). Actions that
have not been taken yet are represented as the zero vector. This
way the input does not have a variable size since each episode has
a fixed number of actions.
"""
def __init__(self, env):
super().__init__(env=env)
self.observation_space = gym.spaces.Box(
low=np.full(len(FLAGS.flags), 0, dtype=np.float32),
high=np.full(len(FLAGS.flags), float("inf"), dtype=np.float32),
dtype=np.float32,
)
def reset(self, *args, **kwargs):
self._steps_taken = 0
self._state = np.zeros(
(FLAGS.episode_len - 1, self.action_space.n), dtype=np.int32
)
return super().reset(*args, **kwargs)
def step(self, action: int):
assert self._steps_taken < FLAGS.episode_len
if self._steps_taken < FLAGS.episode_len - 1:
# Don't need to record the last action since there are no
# further decisions to be made at that point, so that
# information need never be presented to the model.
self._state[self._steps_taken][action] = 1
self._steps_taken += 1
return super().step(action)
def observation(self, observation):
return self._state
class Policy(nn.Module):
"""A very simple actor critic policy model."""
def __init__(self):
super().__init__()
self.affine1 = nn.Linear(
(FLAGS.episode_len - 1) * len(FLAGS.flags), FLAGS.hidden_size
)
self.affine2 = nn.Linear(FLAGS.hidden_size, FLAGS.hidden_size)
self.affine3 = nn.Linear(FLAGS.hidden_size, FLAGS.hidden_size)
self.affine4 = nn.Linear(FLAGS.hidden_size, FLAGS.hidden_size)
# Actor's layer
self.action_head = nn.Linear(FLAGS.hidden_size, len(FLAGS.flags))
# Critic's layer
self.value_head = nn.Linear(FLAGS.hidden_size, 1)
# Action & reward buffer
self.saved_actions: List[SavedAction] = []
self.rewards: List[float] = []
# Keep exponential moving average of mean and standard
# deviation for use in normalization of the input.
self.moving_mean = MovingExponentialAverage(FLAGS.mean_smoothing)
self.moving_std = MovingExponentialAverage(FLAGS.std_smoothing)
def forward(self, x):
"""Forward of both actor and critic"""
# Initial layer maps the sequence of one-hot vectors into a
# vector of the hidden size. Next layers stay with the same
# size and use residual connections.
x = F.relu(self.affine1(x))
x = x.add(F.relu(self.affine2(x)))
x = x.add(F.relu(self.affine3(x)))
x = x.add(F.relu(self.affine4(x)))
        # actor: chooses the action to take from state s_t
# by returning probability of each action
action_prob = F.softmax(self.action_head(x), dim=-1)
# critic: evaluates being in the state s_t
state_values = self.value_head(x)
# return values for both actor and critic as a tuple of 2 values:
# 1. a list with the probability of each action over the action space
# 2. the value from state s_t
return action_prob, state_values
def select_action(model, state, exploration_rate=0.0):
"""Selects an action and registers it with the action buffer."""
state = torch.from_numpy(state.flatten()).float()
probs, state_value = model(state)
# Create a probability distribution where the probability of
# action i is probs[i].
m = Categorical(probs)
# Sample an action using the distribution, or pick an action
# uniformly at random if in an exploration mode.
if random.random() < exploration_rate:
action = torch.tensor(random.randrange(0, len(probs)))
else:
action = m.sample()
# Save to action buffer. The drawing of a sample above simply
# returns a constant integer that we cannot back-propagate
# through, so it is important here that log_prob() is symbolic.
model.saved_actions.append(SavedAction(m.log_prob(action), state_value))
# The action to take.
return action.item()
def finish_episode(model, optimizer) -> float:
"""The training code. Calculates actor and critic loss and performs backprop."""
R = 0
saved_actions = model.saved_actions
policy_losses = [] # list to save actor (policy) loss
value_losses = [] # list to save critic (value) loss
returns = [] # list to save the true values
# Calculate the true value using rewards returned from the
# environment. We are iterating in reverse order while inserting
# at each step to the front of the returns list, which implies
# that returns[i] is the sum of rewards[j] for j >= i. We do not
# use a discount factor as the episode length is fixed and not
# very long, but if we had used one, it would appear here.
for r in model.rewards[::-1]:
R += r
returns.insert(0, R)
# Update the moving averages for mean and standard deviation and
# use that to normalize the input.
returns = torch.tensor(returns)
model.moving_mean.next(returns.mean())
model.moving_std.next(returns.std())
returns = (returns - model.moving_mean.value) / (model.moving_std.value + eps)
for (log_prob, value), R in zip(saved_actions, returns):
# The advantage is how much better a situation turned out in
# this case than the critic expected it to.
advantage = R - value.item()
# Calculate the actor (policy) loss. Because log_prob is
# symbolic, back propagation will increase the probability of
# taking the action that was taken if advantage is positive
# and will decrease it if advantage is negative. In this way
# we are learning a probability distribution without directly
# being able to back propagate through the drawing of the
# sample from that distribution.
#
# It may seem that once the critic becomes accurate, so that
# the advantage is always 0, then the policy can no longer
# learn because multiplication by 0 impedes back
# propagation. However, the critic does not know which action
# will be taken, so as long as there are worse-than-average or
# better-than-average policies with a non-zero probability,
# then the critic has to be wrong sometimes because it can
# only make one prediction across all actions, so learning
# will proceed.
policy_losses.append(-log_prob * advantage)
# Calculate critic (value) loss using L1 smooth loss.
value_losses.append(F.smooth_l1_loss(value, torch.tensor([R])))
# Reset gradients.
optimizer.zero_grad()
# Sum up all the values of policy_losses and value_losses.
loss = torch.stack(policy_losses).sum() + torch.stack(value_losses).sum()
loss_value = loss.item()
# Perform backprop.
loss.backward()
optimizer.step()
# Reset rewards and action buffer.
del model.rewards[:]
del model.saved_actions[:]
return loss_value
def TrainActorCritic(env):
model = Policy()
optimizer = optim.Adam(model.parameters(), lr=FLAGS.learning_rate)
# These statistics are just for logging.
max_ep_reward = -float("inf")
avg_reward = MovingExponentialAverage(0.95)
avg_loss = MovingExponentialAverage(0.95)
for episode in range(1, FLAGS.episodes + 1):
# Reset environment and episode reward.
state = env.reset()
ep_reward = 0
# The environment keeps track of when the episode is done, so
# we can loop infinitely here.
while True:
# Select action from policy.
action = select_action(model, state, FLAGS.exploration)
# Take the action
state, reward, done, _ = env.step(action)
model.rewards.append(reward)
ep_reward += reward
if done:
break
# Perform back propagation.
loss = finish_episode(model, optimizer)
# Update statistics.
max_ep_reward = max(max_ep_reward, ep_reward)
avg_reward.next(ep_reward)
avg_loss.next(loss)
# Log statistics.
if (
episode == 1
or episode % FLAGS.log_interval == 0
or episode == FLAGS.episodes
):
print(
f"Episode {episode}\t"
f"Last reward: {ep_reward:.2f}\t"
f"Avg reward: {avg_reward.value:.2f}\t"
f"Best reward: {max_ep_reward:.2f}\t"
f"Last loss: {loss:.6f}\t"
f"Avg loss: {avg_loss.value:.6f}\t",
flush=True,
)
print(f"\nFinal performance (avg reward): {avg_reward.value:.2f}")
print(f"Final avg reward versus own best: {avg_reward.value - max_ep_reward:.2f}")
# One could also return the best found solution here, though that
# is more random and noisy, while the average reward indicates how
# well the model is working on a consistent basis.
return avg_reward.value
def make_env():
FLAGS.env = "llvm-v0"
if not FLAGS.reward:
FLAGS.reward = "IrInstructionCountOz"
env = env_from_flags(benchmark=benchmark_from_flags())
env = ConstrainedCommandline(env, flags=FLAGS.flags)
env = TimeLimit(env, max_episode_steps=FLAGS.episode_len)
env = HistoryObservation(env)
return env
def main(argv):
"""Main entry point."""
del argv # unused
torch.manual_seed(FLAGS.seed)
random.seed(FLAGS.seed)
with make_env() as env:
print(f"Seed: {FLAGS.seed}")
print(f"Episode length: {FLAGS.episode_len}")
print(f"Exploration: {FLAGS.exploration:.2%}")
print(f"Learning rate: {FLAGS.learning_rate}")
print(f"Reward: {FLAGS.reward}")
print(f"Benchmark: {FLAGS.benchmark}")
print(f"Action space: {env.action_space}")
if FLAGS.iterations == 1:
TrainActorCritic(env)
return
# Performance varies greatly with random initialization and
# other random choices, so run the process multiple times to
# determine the distribution of outcomes.
performances = []
for i in range(1, FLAGS.iterations + 1):
print(f"\n*** Iteration {i} of {FLAGS.iterations}")
performances.append(TrainActorCritic(env))
print("\n*** Summary")
print(f"Final performances: {performances}\n")
print(f" Best performance: {max(performances):.2f}")
print(f"Median performance: {statistics.median(performances):.2f}")
print(f" Avg performance: {statistics.mean(performances):.2f}")
print(f" Worst performance: {min(performances):.2f}")
if __name__ == "__main__":
app.run(main)
|
CompilerGym-development
|
examples/actor_critic.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/bin:brute_force."""
import tempfile
from pathlib import Path
import gym
from brute_force import run_brute_force
def test_run_brute_force_smoke_test():
with tempfile.TemporaryDirectory() as tmp:
outdir = Path(tmp)
run_brute_force(
make_env=lambda: gym.make("llvm-ic-v0", benchmark="cbench-v1/crc32"),
action_names=["-sroa", "-mem2reg"],
episode_length=2,
outdir=outdir,
nproc=1,
chunksize=2,
)
assert (outdir / "meta.json").is_file()
assert (outdir / "results.csv").is_file()
|
CompilerGym-development
|
examples/brute_force_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Sweep the inner loop size of CUDA loop nests."""
import logging
from itertools import product
from pathlib import Path
from typing import List, Optional
from typer import Typer
import compiler_gym
from compiler_gym.util.runfiles_path import create_user_logs_dir
logger = logging.getLogger(__name__)
app = Typer()
def wrapped_step(env, action):
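    # Retry until a step succeeds: in this environment a "done" signal here means
    # the step failed, so log the error, reset the environment, and try again.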
done = True
while done:
observation, reward, done, info = env.step(action)
if done:
logger.warning("Step failed: %s", info["error_details"])
env.reset()
return observation, reward, done, info
def flops_after_steps(env, num_steps):
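    # Advance num_steps - 1 steps, then measure FLOPS on the final step by
    # temporarily switching the observation space to "flops".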
wrapped_step(env, [1] * (num_steps - 1))
env.observation_space = "flops"
observation, _, _, _ = wrapped_step(env, 1)
env.observation_space = None
return observation
def run_one_sweep(
device: str,
k: int,
vectorize: int = 1,
linear: bool = False,
logdir: Optional[Path] = None,
):
"""Run a single sweep."""
logdir = logdir or create_user_logs_dir("loop_tool_sweep")
logfile = logdir / f"k{k}-v{vectorize}-{device}-{'linear' if linear else 'log'}.txt"
print("Logging results to", logfile)
print()
print("Device", "K", "Inner", "Vec.", "FLOPS", sep="\t")
with open(logfile, "w") as f:
print("device", "k", "inner", "vectorize", "flops", sep=",", file=f)
def log(k, inner, vectorize, flops):
print(device.upper(), k, inner, vectorize, flops, sep="\t", flush=True)
with open(logfile, "a") as f:
print(device, k, inner, vectorize, flops, sep=",", file=f)
actions = [3, 0, 1, 3, 0]
k *= 1024 # raw number of elements
with compiler_gym.make("loop_tool-v0") as env:
env.reset(
benchmark=env.datasets.benchmark(
uri=f"benchmark://loop_tool-{device}-v0/{k}"
),
action_space="simple",
)
if vectorize - 1:
vs = [1] * (vectorize - 1)
actions += vs + [0, 1, 0] + vs + [0, 2, 0]
for a in actions:
wrapped_step(env, a)
if linear:
for i in range(k // (vectorize * 1024)):
step_count = 1022 if i == 0 else 1023
flops = flops_after_steps(env, step_count)
log(k, (i + 1) * 1024, vectorize, flops)
else: # linear=False (log)
inner = 1
step = 512
wrapped_step(env, [1] * (step - 1))
inner += step - 1
while inner * vectorize <= k:
flops = flops_after_steps(env, step)
inner += step
log(k, inner, vectorize, flops)
step *= 2
@app.command()
def sweep(
device: List[str] = ["cuda"],
k: List[int] = [512, 1024, 2048, 4096, 8192],
vectorize: List[int] = [1],
linear: List[bool] = [False],
logdir: Optional[Path] = None,
):
logdir = logdir or create_user_logs_dir("loop_tool_sweep")
for device_, k_, vectorize_, linear_ in product(device, k, vectorize, linear):
run_one_sweep(
device=device_, k=k_, vectorize=vectorize_, linear=linear_, logdir=logdir
)
if __name__ == "__main__":
app()
|
CompilerGym-development
|
examples/loop_tool_sweep.py
|
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import distutils.util
import setuptools
with open("../VERSION") as f:
version = f.read().strip()
with open("requirements.txt") as f:
requirements = [ln.split("#")[0].rstrip() for ln in f.readlines()]
with open("../tests/requirements.txt") as f:
requirements += [ln.split("#")[0].rstrip() for ln in f.readlines()]
setuptools.setup(
name="compiler_gym_examples",
version=version,
description="Example code for CompilerGym",
author="Facebook AI Research",
url="https://github.com/facebookresearch/CompilerGym",
license="MIT",
install_requires=requirements,
packages=[
"llvm_autotuning",
"llvm_autotuning.autotuners",
"llvm_rl",
"llvm_rl.model",
],
package_data={
"llvm_autotuning": [
"config/*.yaml",
"config/**/*.yaml",
],
"llvm_rl": [
"config/*.yaml",
"config/**/*.yaml",
],
},
python_requires=">=3.8",
platforms=[distutils.util.get_platform()],
zip_safe=False,
)
|
CompilerGym-development
|
examples/setup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for //compiler_gym/bin:tabular_q."""
from absl import flags
from tabular_q import main
from compiler_gym.util.capture_output import capture_output
FLAGS = flags.FLAGS
def test_run_tabular_q_smoke_test():
FLAGS.unparse_flags()
FLAGS(
[
"argv0",
"--episode_length=5",
"--episodes=10",
"--log_every=2",
"--benchmark=cbench-v1/crc32",
]
)
with capture_output() as out:
main(["argv0"])
assert "Resulting sequence" in out.stdout
|
CompilerGym-development
|
examples/tabular_q_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Run a parallelized brute force of an action space.
This script enumerates all possible combinations of actions up to a finite
length and evaluates them, logging the incremental rewards of each.
Example usage:
$ python brute_force.py --env=llvm-ic-v0 --benchmark=cbench-v1/dijkstra \
--episode_length=8 --brute_force_action_list=-sroa,-mem2reg,-newgvn
Enumerating all episodes of 3 actions x 8 steps
Started 24 brute force workers for benchmark benchmark://cbench-v1/dijkstra using reward IrInstructionCountOz.
=== Running 6,561 trials ===
Runtime: 8 seconds. Progress: 100.00%. Best reward found: 0.8571428571428572.
Ending jobs ... I1014 12:04:51.671775 3245811 CreateAndRunCompilerGymServiceImpl.h:128] Service "/dev/shm/compiler_gym_cec/s/1014T120451-646797-5770" listening on 37505, PID = 3245811
completed 6,561 of 6,561 trials (100.000%), best sequence -mem2reg -mem2reg -sroa -sroa -mem2reg -sroa -sroa -newgvn
Use --help to list the configurable options.
"""
import itertools
import json
import logging
import math
import os
import sys
from pathlib import Path
from queue import Queue
from threading import Thread
from time import time
from typing import List
import humanize
from absl import app, flags
import compiler_gym.util.flags.episode_length # noqa Flag definition.
import compiler_gym.util.flags.nproc # noqa Flag definition.
import compiler_gym.util.flags.output_dir # noqa Flag definition.
from compiler_gym.envs import CompilerEnv
from compiler_gym.util.flags.benchmark_from_flags import benchmark_from_flags
from compiler_gym.util.flags.env_from_flags import env_from_flags
from compiler_gym.util.gym_type_hints import ActionType
from compiler_gym.util.runfiles_path import create_user_logs_dir
flags.DEFINE_list(
"brute_force_action_list",
[],
"A list of action names to enumerate. If not provided, all actions are used "
"(warning: this might make a long time!)",
)
FLAGS = flags.FLAGS
def grouper(iterable, n):
"""Split an iterable into chunks of length `n`, padded if required."""
args = [iter(iterable)] * n
return itertools.zip_longest(*args, fillvalue=None)
class BruteForceProducer(Thread):
"""A thread which enumerates all possible combinations of actions up to
length episode_length and writes chunks of these combinations to a queue.
"""
def __init__(
self,
in_q: Queue,
actions: List[ActionType],
episode_length: int,
nproc: int,
chunksize: int = 128,
):
super().__init__()
self.in_q = in_q
self.actions = actions
self.episode_length = episode_length
self.nproc = nproc
self.chunksize = chunksize
self.alive = True # Set this to False to signal the thread to stop.
def run(self):
for chunk in grouper(
itertools.product(*[self.actions] * self.episode_length), self.chunksize
):
if not self.alive:
break
self.in_q.put(chunk)
# Signal for each worker to end.
for _ in range(self.nproc):
self.in_q.put(None)
class BruteForceWorker(Thread):
"""Worker thread which reads chunks of action lists and evaluates them.
Chunks of action lists are read from in_q and written to out_q, along with
the incremental reward of each action.
"""
def __init__(
self,
worker_id: int,
in_q: Queue,
out_q: Queue,
env: CompilerEnv,
):
super().__init__()
self.id = worker_id
self.in_q = in_q
self.out_q = out_q
self.env = env
# Incremental progress.
self.num_trials = 0
self.alive = True # Set this to False to signal the thread to stop.
def log(self, *args, **kwargs):
        logging.debug(
            "Worker %d (%d trials): %s",
            self.id, self.num_trials, " ".join(map(str, args)), **kwargs,
        )
def run(self) -> None:
"""Grab chunks of work from in_q and write results to out_q."""
chunk = self.in_q.get()
while chunk and self.alive:
results = []
self.log("Processing chunk")
for actions in chunk:
# A "None" value is used to pad an incomplete chunk. There will
# be no more work to do after this.
if not actions:
break
self.num_trials += 1
rewards = self.run_one_episode(actions)
results.append((actions, rewards))
self.out_q.put(results)
chunk = self.in_q.get()
# Signal that we're done.
self.out_q.put(None)
self.env.close()
self.log("Worker is done")
def run_one_episode(self, actions: List[int]) -> List[float]:
"""Evaluate the reward of every action in a list."""
self.env.reset()
rewards = []
for action in actions:
_, reward, done, _ = self.env.step(action)
rewards.append(reward)
if done:
break
return rewards
def run_brute_force(
make_env,
action_names: List[str],
episode_length: int,
outdir: Path,
nproc: int,
chunksize: int = 128,
):
"""Run a brute force job."""
meta_path = outdir / "meta.json"
results_path = outdir / "results.csv"
with make_env() as env:
env.reset()
action_names = action_names or env.action_space.names
if not env.reward_space:
raise ValueError("A reward space must be specified for random search")
reward_space_name = env.reward_space.name
actions = [env.action_space.names.index(a) for a in action_names]
benchmark_uri = str(env.benchmark)
meta = {
"env": env.spec.id,
"action_names": action_names,
"benchmark": benchmark_uri,
"reward": reward_space_name,
"init_reward": env.reward[reward_space_name],
"episode_length": episode_length,
"nproc": nproc,
"chunksize": chunksize,
}
with open(str(meta_path), "w") as f:
json.dump(meta, f)
print(f"Wrote {meta_path}")
print(f"Writing results to {results_path}")
# A queue for communicating action sequences to workers, and a queue for
# workers to report <action_sequence, reward_sequence> results.
in_q = Queue(maxsize=32)
out_q = Queue(maxsize=128)
# Generate the action sequences to run.
producer = BruteForceProducer(
in_q=in_q,
nproc=nproc,
actions=actions,
episode_length=episode_length,
chunksize=chunksize,
)
producer.start()
# Worker threads that will consume the action sequences and produce rewards.
workers = [
BruteForceWorker(worker_id=i, env=make_env(), in_q=in_q, out_q=out_q)
for i in range(1, nproc + 1)
]
for worker in workers:
worker.start()
# The consumer loop. Read results from workers as they come in and write
# them to file.
started = time()
expected_trial_count = len(actions) ** episode_length
expected_chunk_count = math.ceil(expected_trial_count / chunksize)
chunk_count = 0
best_reward = -float("inf")
best_action_sequence = []
print(
f"Enumerating all episodes of {len(actions)} actions x {episode_length} steps"
)
print(
f"Started {len(workers)} brute force workers for benchmark "
f"{benchmark_uri} using reward {reward_space_name}."
)
print(f"=== Running {humanize.intcomma(expected_trial_count)} trials ===")
try:
with open(str(results_path), "w") as f:
print(
*[f"action_{i}" for i in range(1, episode_length + 1)],
*[f"reward_{i}" for i in range(1, episode_length + 1)],
sep=",",
file=f,
flush=True,
)
nproc_completed = 0
while nproc_completed < nproc:
chunk = out_q.get()
if not chunk:
nproc_completed += 1
continue
chunk_count += 1
print(
f"\r\033[KRuntime: {humanize.naturaldelta(time() - started)}. "
f"Progress: {chunk_count/expected_chunk_count:.2%}. "
f"Best reward found: {best_reward}.",
file=sys.stderr,
flush=True,
end="",
)
for actions, rewards in chunk:
print(*actions, *rewards, sep=",", file=f, flush=True)
if rewards and rewards[-1] is not None:
if sum(rewards) > best_reward:
best_reward = sum(rewards)
best_action_sequence = actions
except KeyboardInterrupt:
print("\nkeyboard interrupt", end="", flush=True)
print(file=sys.stderr, flush=True)
print("Ending jobs ... ", end="", flush=True)
# In case of early exit, signal to the threads to terminate.
producer.alive = False
for worker in workers:
worker.alive = False
# Wait for everyone to finish.
producer.join()
for worker in workers:
worker.join()
num_trials = sum(worker.num_trials for worker in workers)
with make_env() as env:
print(
f"completed {humanize.intcomma(num_trials)} of "
f"{humanize.intcomma(expected_trial_count)} trials "
f"({num_trials / expected_trial_count:.3%}), best sequence",
" ".join([env.action_space.flags[i] for i in best_action_sequence]),
)
def main(argv):
"""Main entry point."""
argv = FLAGS(argv)
if len(argv) != 1:
raise app.UsageError(f"Unknown command line arguments: {argv[1:]}")
# Use default logdir of <base>/brute_force/<benchmark> unless told
# otherwise.
benchmark = benchmark_from_flags()
if not benchmark:
raise app.UsageError("No benchmark specified.")
with env_from_flags(benchmark) as env:
env.reset()
logs_dir = Path(
FLAGS.output_dir
or create_user_logs_dir(
f'brute_force/{os.path.normpath(f"random/{env.benchmark.uri.scheme}/{env.benchmark.uri.path}")}'
)
)
run_brute_force(
make_env=lambda: env_from_flags(benchmark_from_flags()),
action_names=FLAGS.brute_force_action_list,
episode_length=FLAGS.episode_length,
outdir=logs_dir,
nproc=FLAGS.nproc,
)
if __name__ == "__main__":
app.run(main)
|
CompilerGym-development
|
examples/brute_force.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This script runs microbenchmarks of CompilerGym environment operations.
To collect new measurements, run one of the following commands:
$ python -m op_benchmarks {run,init,reset,step,observations} --env=llvm-v0 --n=100
To aggregate results from prior runs:
$ python -m op_benchmarks info
"""
import logging
import os
import re
from collections import defaultdict
from itertools import islice
from math import ceil
from multiprocessing import cpu_count
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional
import numpy as np
import pandas as pd
import typer
from tabulate import tabulate
import compiler_gym
from compiler_gym import CompilerEnv
from compiler_gym.errors import BenchmarkInitError
from compiler_gym.util.executor import Executor
from compiler_gym.util.logging import init_logging
from compiler_gym.util.runfiles_path import create_user_logs_dir
from compiler_gym.util.timer import Timer
app = typer.Typer()
logger = logging.getLogger(__name__)
def get_runtimes(op: Callable[[], Any], n: int):
"""Run `n` reptitions of function `op`, ignoring any errors."""
runtimes = []
for _ in range(n):
try:
with Timer() as timer:
op()
runtimes.append(timer.time)
except Exception as e: # pylint: disable=broad-except
logger.warning("Op failed: %s", e)
return runtimes
@app.command()
def init(
n: int = int(1e6),
j: int = cpu_count(),
env: str = "llvm-autophase-ic-v0",
outdir: Optional[Path] = None,
):
"""Benchmark the environment startup time."""
executor = Executor(type="local", cpus=j)
outdir = Path(outdir or create_user_logs_dir("op_benchmarks"))
with executor.get_executor(logs_dir=outdir) as session:
_init(n=n, outdir=outdir, j=j, env_name=env, session=session)
def _init(n: int, outdir: Path, j: int, env_name: str, session: Executor):
outdir.mkdir(exist_ok=True, parents=True)
for i in range(1, j + 1):
session.submit(
_init_worker,
env_name=env_name,
n=int(ceil(n / j)),
outfile=outdir / f".op:1:startup-shard-{i:02d}.txt",
)
def _init_worker(env_name: str, n: int, outfile: Path):
with open(outfile, "w") as f:
for _ in range(0, n, min(100, n)):
runtimes = get_runtimes(
lambda: compiler_gym.make(env_name).close(), min(100, n)
)
print("\n".join(f"{x:.8f}" for x in runtimes), file=f, flush=True)
def get_benchmarks(env_name: str, n: int, seed: int, outdir: Path) -> List[str]:
"""Get `n` benchmarks from all datasets.
If the dataset is smaller than `n`, benchmarks are repeated. If the dataset
is larger than `n`, `n` random unique programs are sampled.
"""
benchmarks = []
with compiler_gym.make(env_name) as env:
datasets = sorted(list(env.datasets))
benchmarks_per_dataset = int(ceil(n / len(datasets)))
for ds in datasets:
logger.info(
"Enumerating %s benchmarks from dataset from %s ...",
benchmarks_per_dataset,
ds,
)
if ds.size == 0 or ds.size > benchmarks_per_dataset:
rng = np.random.default_rng(seed)
uniq_bm_uris = set()
benchmarks_from_dataset = []
while len(benchmarks_from_dataset) < benchmarks_per_dataset:
bm = ds.random_benchmark(rng)
if bm.uri in uniq_bm_uris:
continue
uniq_bm_uris.add(bm.uri)
# Start an environment to check that the benchmark can be
# initialized.
try:
env.reset(benchmark=bm)
except (BenchmarkInitError, ValueError, TimeoutError):
continue
benchmarks_from_dataset.append(bm.uri)
benchmarks += benchmarks_from_dataset
else:
bms = list(ds.benchmark_uris())
bms *= int(ceil(benchmarks_per_dataset / len(bms)))
benchmarks += bms[:benchmarks_per_dataset]
benchmarks = sorted(benchmarks)
with open(outdir / "benchmarks.txt", "w") as f:
for bm in benchmarks:
print(bm, file=f)
return benchmarks
def chunkify(iterable, n):
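    # Example: list(chunkify(range(5), 2)) == [[0, 1], [2, 3], [4]].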
iterable = iter(iterable)
chunk = list(islice(iterable, n))
while chunk:
yield chunk
chunk = list(islice(iterable, n))
@app.command()
def reset(
n: int = int(1e6),
num_benchmarks: int = int(1e3),
env: str = "llvm-autophase-ic-v0",
j: int = cpu_count(),
seed: int = 0xCC,
outdir: Optional[Path] = None,
):
"""Benchmark the env.reset() operator."""
executor = Executor(type="local", cpus=j)
outdir = Path(outdir or create_user_logs_dir("op_benchmarks"))
benchmarks = get_benchmarks(
env_name=env, n=min(n, num_benchmarks), seed=seed, outdir=outdir
)
with executor.get_executor(logs_dir=outdir) as session:
_reset(
benchmarks=benchmarks,
n=n,
outdir=outdir,
j=j,
env_name=env,
session=session,
)
def _reset(
benchmarks: List[str],
n: int,
outdir: Path,
env_name: str,
j: int,
session: Executor,
):
outdir.mkdir(exist_ok=True, parents=True)
num_measurements_per_benchmark = int(ceil(n / len(benchmarks)))
for i, benchmarks_chunk in enumerate(chunkify(benchmarks, j), start=1):
session.submit(
_reset_worker,
num_measurements_per_benchmark=num_measurements_per_benchmark,
benchmarks=benchmarks_chunk,
env_name=env_name,
outfile=outdir / f".op:2:reset-shard-{i:02d}.txt",
)
def _reset_worker(
num_measurements_per_benchmark: int,
benchmarks: List[str],
env_name: str,
outfile: Path,
):
with compiler_gym.make(env_name) as env:
with open(outfile, "w") as f:
for benchmark in benchmarks:
env.reset(benchmark=benchmark)
runtimes = get_runtimes(
lambda: env.reset(benchmark=benchmark),
num_measurements_per_benchmark,
)
print("\n".join(f"{x:.8f} {benchmark}" for x in runtimes), file=f)
@app.command()
def step(
n: int = int(1e6),
num_benchmarks: int = int(1e3),
env: str = "llvm-autophase-ic-v0",
j: int = cpu_count(),
seed: int = 0xCC,
outdir: Optional[Path] = None,
):
"""Benchmark the env.step() operator."""
executor = Executor(type="local", cpus=j)
outdir = Path(outdir or create_user_logs_dir("op_benchmarks"))
benchmarks = get_benchmarks(
env_name=env, n=min(n, num_benchmarks), seed=seed, outdir=outdir
)
with executor.get_executor(logs_dir=outdir) as session:
_step(
session=session,
outdir=outdir,
benchmarks=benchmarks,
n=n,
j=j,
env_name=env,
seed=seed,
)
def _step(
n: int,
benchmarks: List[str],
env_name: str,
seed: int,
j: int,
outdir: Path,
session: Executor,
):
outdir.mkdir(exist_ok=True, parents=True)
num_measurements_per_benchmark = int(ceil(n / len(benchmarks)))
for i, benchmarks_chunk in enumerate(chunkify(benchmarks, j), start=1):
session.submit(
_step_worker,
num_measurements_per_benchmark=num_measurements_per_benchmark,
seed=seed + (i * len(benchmarks_chunk)),
benchmarks=benchmarks_chunk,
env_name=env_name,
step_outfile=outdir / f".op:3:step-shard-{i:02d}.txt",
batched_outfile=outdir / f".op:3:step-batched-shard-{i:02d}.txt",
)
def _step_worker(
num_measurements_per_benchmark: int,
benchmarks: List[str],
env_name: str,
seed: str,
step_outfile: Path,
batched_outfile: Path,
):
def get_step_times(env: CompilerEnv, num_steps: int, batched=False):
while batched:
# Run all actions in a single step().
steps = [env.action_space.sample() for _ in range(num_steps)]
with Timer() as timer:
_, _, done, _ = env.multistep(steps)
if not done:
return [timer.time / num_steps] * num_steps
env.reset()
# Run each action as a step().
runtimes = []
while len(runtimes) < num_steps:
with Timer() as timer:
_, _, done, _ = env.step(env.action_space.sample())
if done:
env.reset()
else:
runtimes.append(timer.time)
return runtimes
with compiler_gym.make(env_name) as env:
with open(step_outfile, "w") as f:
for i, benchmark in enumerate(benchmarks, start=seed):
env.reset(benchmark=benchmark)
env.seed(i)
runtimes = get_step_times(env, num_measurements_per_benchmark)
print("\n".join(f"{x:.8f} {benchmark}" for x in runtimes), file=f)
with open(batched_outfile, "w") as f:
for i, benchmark in enumerate(benchmarks, start=seed):
env.reset(benchmark=benchmark)
env.seed(i)
runtimes = get_step_times(
env, num_measurements_per_benchmark, batched=True
)
print("\n".join(f"{x:.8f} {benchmark}" for x in runtimes), file=f)
@app.command()
def observations(
env: str = "llvm-autophase-ic-v0",
observation_spaces: List[str] = [
"Ir",
"InstCount",
"Autophase",
"Inst2vec",
"Programl",
"IrInstructionCount",
"ObjectTextSizeBytes",
"Runtime",
],
n: int = int(1e6),
num_benchmarks: int = int(1e3),
j: int = cpu_count(),
seed: int = 0xCC,
outdir: Optional[Path] = None,
) -> List[float]:
"""Benchmark the environment observation spaces."""
executor = Executor(type="local", cpus=j)
outdir = Path(outdir or create_user_logs_dir("op_benchmarks"))
benchmarks = get_benchmarks(
env_name=env, n=min(n, num_benchmarks), seed=seed, outdir=outdir
)
with executor.get_executor(logs_dir=outdir) as session:
_observations(
session=session,
env_name=env,
benchmarks=benchmarks,
j=j,
outdir=outdir,
observation_spaces=observation_spaces,
n=n,
)
def _observations(
observation_spaces: List[str],
benchmarks: List[str],
n: int,
j: int,
session: Executor,
outdir: Path,
env_name: str,
):
outdir.mkdir(exist_ok=True, parents=True)
num_measurements_per_benchmark = int(ceil(n / len(benchmarks)))
for i, benchmarks_chunk in enumerate(chunkify(benchmarks, j), start=1):
for observation_space in observation_spaces:
session.submit(
_observations_worker,
observation_space=observation_space,
num_measurements_per_benchmark=num_measurements_per_benchmark,
benchmarks=benchmarks_chunk,
env_name=env_name,
outfile=outdir / f".observation:{observation_space}-shard-{i:02d}.txt",
)
def _observations_worker(
observation_space: str,
num_measurements_per_benchmark: int,
benchmarks: List[str],
env_name: str,
outfile: Path,
):
with compiler_gym.make(env_name) as env:
with open(outfile, "w") as f:
for benchmark in benchmarks:
env.reset(benchmark=benchmark)
if "llvm-" in env_name and observation_space == "Runtime":
if not env.observation.IsRunnable():
return []
env.runtime_observation_count = 1
env.runtime_warmups_count = 0
runtimes = get_runtimes(
lambda: env.observation[observation_space],
num_measurements_per_benchmark,
)
print("\n".join(f"{x:.8f}" for x in runtimes), file=f, flush=True)
@app.command()
def run(
env: str = "llvm-autophase-ic-v0",
observation_spaces: List[str] = [
"Ir",
"InstCount",
"Autophase",
"Inst2vec",
"Programl",
"IrInstructionCount",
"ObjectTextSizeBytes",
"Runtime",
],
n: int = int(1e6),
num_benchmarks: int = int(1e3),
j: int = cpu_count(),
outdir: Optional[Path] = None,
seed: int = 0xCC,
):
"""Run all of the environment benchmarks."""
executor = Executor(type="local", cpus=j)
outdir = Path(outdir or create_user_logs_dir("op_benchmarks"))
benchmarks = get_benchmarks(
env_name=env, n=min(n, num_benchmarks), seed=seed, outdir=outdir
)
with executor.get_executor(logs_dir=outdir) as session:
_init(env_name=env, session=session, j=j, n=n, outdir=outdir)
_reset(
benchmarks=benchmarks,
n=n,
outdir=outdir,
j=j,
env_name=env,
session=session,
)
_step(
n=n,
j=j,
benchmarks=benchmarks,
env_name=env,
seed=seed,
outdir=outdir,
session=session,
)
_observations(
n=n,
j=j,
benchmarks=benchmarks,
env_name=env,
outdir=outdir,
session=session,
observation_spaces=observation_spaces,
)
info([outdir])
def _aggregate(
root: Path, files: List[str], outfile: Path
) -> Optional[Dict[str, float]]:
if not files:
return
    if not outfile.is_file():
runtimes = []
for file in files:
with open(root / file) as f:
runtimes += [float(x.split()[0]) for x in f if x.strip()]
if not runtimes:
return
runtimes = np.sort(runtimes)
with open(outfile, "w") as f:
print("\n".join(map(str, sorted(runtimes))), file=f)
else:
with open(outfile) as f:
runtimes = np.array(list(map(float, f)))
return {
"n": len(runtimes),
"p50": np.median(runtimes),
"p99": np.percentile(runtimes, 99),
"mean": np.mean(runtimes),
}
@app.command()
def info(outdirs: List[Path] = []):
"""Aggregate logs from previous runs."""
outdirs = outdirs or ["~/logs/compiler_gym/op_benchmarks"]
rows = []
for outdir in outdirs:
for root, _, files in os.walk(Path(outdir).expanduser()):
root = Path(root)
timestamp = "-".join([root.parent.name, root.name])
shards = defaultdict(list)
for file in files:
match = re.match(r"\.([:\w-]+)-shard-\d+\.txt", file)
if match:
shards[match.group(1)].append(file)
for shard, files in shards.items():
agg = _aggregate(root, files, root / f"{shard}.txt")
if agg:
rows.append(
{
"timestamp": timestamp,
"op": shard,
**agg,
}
)
df = pd.DataFrame(rows)
df.sort_values(["op", "timestamp"], inplace=True)
# Scale to milliseconds.
df["p50"] *= 1000
df["p99"] *= 1000
df["mean"] *= 1000
df = df.rename(columns={"p50": "p50 (ms)", "p99": "p99 (ms)", "mean": "mean (ms)"})
print(tabulate(df, headers="keys", showindex=False, tablefmt="psql"))
if __name__ == "__main__":
init_logging()
app()
|
CompilerGym-development
|
examples/op_benchmarks.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Run a parallelized exhaustive search of an action space.
All possible combinations of actions up to a finite limit are
evaluated, but partial sequences of actions that end up in the same
state are deduplicated, sometimes dramatically reducing the size of
the search space. Can also be configured to do a beam search.
Example usage:
$ python explore.py --env=llvm-ic-v0 --benchmark=cbench-v1/dijkstra \
--episode_length=10 --actions=-simplifycfg,-instcombine,-mem2reg,-newgvn
Use --help to list the configurable options.
"""
import hashlib
import math
from enum import IntEnum
from heapq import nlargest
from multiprocessing.pool import ThreadPool
from queue import Queue
from threading import Lock
from time import time
import humanize
from absl import app, flags
import compiler_gym.util.flags.episode_length # noqa Flag definition.
import compiler_gym.util.flags.nproc # noqa Flag definition.
from compiler_gym.util.flags.benchmark_from_flags import benchmark_from_flags
from compiler_gym.util.flags.env_from_flags import env_from_flags
from compiler_gym.wrappers import ConstrainedCommandline
flags.DEFINE_list(
"explore_actions",
[],
"A list of flag names to enumerate. If not provided, all actions are used.",
)
flags.DEFINE_integer(
"topn",
0,
"If positive, explore only the top n states for each sequence length. "
"This is in effect the width of a beam search.",
)
flags.DEFINE_integer(
"show_topn", 3, "Show this many top sequences " "at each sequence length."
)
FLAGS = flags.FLAGS
def make_env():
env = env_from_flags(benchmark=benchmark_from_flags())
if FLAGS.explore_actions:
env = ConstrainedCommandline(env, flags=FLAGS.explore_actions)
return env
# Used to determine if two rewards are equal up to a small
# tolerance. Cannot use math.isclose with default parameters as it
# sets abs_tol to 0, which means that a zero reward will compare
# unequal with e.g. 1e-100, leading to bugs.
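# For instance, rewards_close(0.0, 1e-100) is True, whereas with the default
# abs_tol of 0.0, math.isclose(0.0, 1e-100) would be False.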
def rewards_close(a, b):
return math.isclose(a, b, rel_tol=1e-5, abs_tol=1e-10)
NO_EDGE = -1
class Node:
def __init__(self, reward_sum, edge_count):
self.reward_sum = reward_sum
self.edges = [NO_EDGE] * edge_count
self.back_edge = None
# Represents env states as nodes and actions as edges.
class StateGraph:
def __init__(self, edges_per_node):
self._edges_per_node = edges_per_node
self._nodes = []
self._fingerprint_to_index = dict()
def add_or_find_node(self, fingerprint, reward_sum):
if fingerprint in self._fingerprint_to_index:
node_index = self._fingerprint_to_index[fingerprint]
assert rewards_close(
self._nodes[node_index].reward_sum, reward_sum
), f"{self._nodes[node_index].reward_sum} != {reward_sum}"
return (node_index, False)
node_index = self.node_count()
self._fingerprint_to_index[fingerprint] = node_index
node = Node(reward_sum, self._edges_per_node)
self._nodes.append(node)
return (node_index, True)
def add_edge(self, from_node_index, edge_index, to_node_index):
assert edge_index in range(self._edges_per_node)
assert from_node_index in range(self.node_count())
assert to_node_index in range(self.node_count())
assert self.get_edge(from_node_index, edge_index) == NO_EDGE
from_node = self._nodes[from_node_index]
from_node.edges[edge_index] = to_node_index
to_node = self._nodes[to_node_index]
if to_node.back_edge is None:
to_node.back_edge = (from_node_index, edge_index)
def get_edge(self, from_node_index, edge_index):
assert edge_index < self._edges_per_node
assert from_node_index < self.node_count()
return self._nodes[from_node_index].edges[edge_index]
# Returns a path back to node 0. For this to work, edges have to
    # be added in an order so that the subgraph consisting of the first
    # incoming edge to each node defines a tree with node 0 as the
# root.
def node_path(self, node_index):
assert node_index < self.node_count()
path = []
while node_index != 0:
back_edge = self._nodes[node_index].back_edge
assert back_edge is not None
(prior_node_index, edge_index) = back_edge
node_index = prior_node_index
path.append(edge_index)
path.reverse()
return path
def reward_sum(self, node_index):
return self._nodes[node_index].reward_sum
def node_count(self):
return len(self._nodes)
def env_to_fingerprint(env):
# TODO: There is some sort of state in the env that is not
# captured by this. Figure out what it is and fix it. Also
# consider adding a fingerprint observation to env.
if False:
# BitcodeFile is slower, so using Ir instead.
path = env.observation["BitcodeFile"]
with open(path, "rb") as f:
data = f.read()
else:
data = env.observation["Ir"].encode()
return hashlib.sha256(data).digest()
def compute_edges(env, sequence):
edges = []
for action in range(env.action_space.n):
env.reset()
reward_sum = 0.0
        for a in sequence + [action]:
            _, reward, _, _ = env.step(a)
reward_sum += reward
edges.append((env_to_fingerprint(env), reward_sum))
return edges
class NodeTypeStats:
"""Keeps statistics on the exploration."""
class EdgeType(IntEnum):
unpruned = 0
self_pruned = 1
cross_pruned = 2
back_pruned = 3
dropped = 4
def __init__(self, action_count):
self._action_count = action_count
self._depth = 0
self._depth_start_time_in_seconds = time()
# Nodes added at this depth.
self._depth_stats = [0] * len(self.EdgeType)
# Nodes added across all depths.
self._all_stats = [0] * len(self.EdgeType)
# The full number of nodes that is theoretically in the graph
# at this depth if no nodes had been pruned anywhere.
self._full_depth_stats = [0] * len(self.EdgeType)
# The full number of nodes across depths if no nodes had been
# pruned anywhere.
self._full_all_stats = [0] * len(self.EdgeType)
def start_depth_and_print(self, episode_length):
self._depth += 1
print(
f"*** Processing depth {self._depth} of {episode_length} with",
f"{self._depth_stats[self.EdgeType.unpruned]} states and",
f"{self._action_count} actions.\n",
)
self._depth_start_time_in_seconds = time()
self._full_depth_stats[self.EdgeType.unpruned] = 0
for e in self.EdgeType:
self._depth_stats[e] = 0
if e != self.EdgeType.unpruned:
# The pruned nodes at the prior depth would have
# turned into this many more nodes at the next depth.
self._full_depth_stats[e] *= self._action_count
self._full_all_stats[e] += self._full_depth_stats[e]
# At a certain point these large numbers just clutter up
# the display.
if self._full_all_stats[e] > 1e9:
self._full_all_stats[e] = float("inf")
if self._full_depth_stats[e] > 1e9:
self._full_depth_stats[e] = float("inf")
def note_edge(self, edge_type):
self._adjust_edges(edge_type, 1)
def drop_unpruned_edge(self):
self._adjust_edges(self.EdgeType.unpruned, -1)
self._adjust_edges(self.EdgeType.dropped, 1)
def _adjust_edges(self, edge_type, adjustment):
self._depth_stats[edge_type] += adjustment
self._all_stats[edge_type] += adjustment
self._full_depth_stats[edge_type] += adjustment
self._full_all_stats[edge_type] += adjustment
def end_depth_and_print(self, env, graph, best_node):
align = 16
def number_list(stats):
return "".join(
[humanize.intcomma(n).rjust(align) for n in stats + [sum(stats)]]
)
legend = [e.name for e in self.EdgeType] + ["sum"]
print(
" ",
"".join([header.rjust(align) for header in legend]),
)
print(" added this depth", number_list(self._depth_stats))
print(" full nodes this depth", number_list(self._full_depth_stats))
print(" added across depths", number_list(self._all_stats))
print("full added across depths", number_list(self._full_all_stats))
# If this does not match then something was over or under
# counted. Based on x^0 + x^1 ... + x^n = (x^(n+1) - 1) / (x -
# 1), which is the number of nodes in a complete tree where
# every interior node has x children. If the numbers are too
# large then there may not be equality due to rounding, so do
# not check this in that case.
full_all_sum = sum(self._full_all_stats)
assert full_all_sum > 1e9 or full_all_sum == (
pow(env.action_space.n, self._depth + 1) - 1
) / (env.action_space.n - 1)
depth_time_in_seconds = time() - self._depth_start_time_in_seconds
print()
print(f"Time taken for depth: {depth_time_in_seconds:0.2f} s")
if FLAGS.show_topn >= 1:
print(f"Top {FLAGS.show_topn} sequence(s):")
for n in nlargest(
FLAGS.show_topn,
range(graph.node_count()),
key=lambda n: graph.reward_sum(n),
):
print(
f" {graph.reward_sum(n):0.4f} ",
", ".join(env.action_space.flags[f] for f in graph.node_path(n)),
)
print("\n")
# Compute an action graph and use it to find the optimal sequence
# within episode_length actions. Uses as many threads as there are
# elements in envs.
def compute_action_graph(pool, envs, episode_length):
assert len(envs) >= 1
env_queue = Queue()
for env in envs:
env_queue.put(env)
stats = NodeTypeStats(action_count=env.action_space.n)
graph = StateGraph(edges_per_node=env.action_space.n)
# Add the empty sequence of actions as the starting state.
envs[0].reset()
best_node, _ = graph.add_or_find_node(env_to_fingerprint(envs[0]), 0.0)
stats.note_edge(NodeTypeStats.EdgeType.unpruned)
# A node is defined by a sequence of actions that end up in that
# node. Nodes are deduplicated based on a hash (fingerprint) of
# their state, so that if two sequences of actions end up with the
    # same state then they will also converge on the same node in the
# graph.
#
# The outer loop goes through sequences by the depth/length of the
# sequence, first all sequences of one element, then all sequences
# of two elements and so on. This partition of the nodes creates
# multiple kinds of edges:
#
# Back edges. Edges pointing to the same or lower depth. These
# edges represent sequences that are equivalent to a shorter
# sequence. These edges are pruned as no new nodes can be
# discovered from them and they cannot participate in a minimal
# best sequence as they are not minimal. Self edges are excluded
# from this definition.
#
# Self edges. Loops, i.e. edges that go from a node to
# itself. This represents actions that do not change the
# state. These are pruned for the same reason as back edges and
# have their own category as they are a very common case.
#
# Cross edges. These are edges that go forward to the next depth
# but there is already another edge that goes to the same
# node. The edge itself is not pruned from the graph, as it can
# be part of a minimal optimal sequence, but since the
# destination node already exists there is no new node introduced
# by a cross edge, so you could consider that the hypothetical
# distinct node that this edge might have created is pruned
# through deduplication.
#
# Unpruned edges. These are edges that go forward to the next
# depth and there is not yet any other edge that goes to that
# node. This kind of edge causes a new node to be created that
# will be expanded at the next depth.
#
# Dropped. These are otherwise unpruned edges that end up
# getting dropped due to a limit on how many states to explore
# per depth.
#
# If there are N nodes, then they are indexed as [0, N) in order
# of insertion. New nodes are added to the graph when an unpruned
# edge is found that points to them. A node is expanded when its
# edges are computed and added to the graph, potentially causing
# new nodes to be added.
#
# The nodes are partitioned into 3 ranges:
#
# [0; depth_start) These nodes are already expanded and done with.
#
# [depth_start; next_depth_start) These are the nodes at the
# current depth that will be expanded to create nodes at the next
# depth.
#
# [next_depth_start, N) These are the nodes that have been added
# at this iteration of the loop to be expanded at the next
# iteration of the loop.
dropped = set()
next_depth_start = 0
for depth in range(episode_length):
stats.start_depth_and_print(episode_length)
depth_start = next_depth_start
next_depth_start = graph.node_count()
if depth_start == next_depth_start:
print("There are no more states to process, stopping early.")
break
lock = Lock()
def expand_node(node_index):
with lock:
if node_index in dropped:
return node_index, ()
path = graph.node_path(node_index)
# ThreadPool.map doesn't support giving each thread its
# own env, so we use a queue instead. Each thread gets
# some env and has exclusive use of it while it has it.
local_env = env_queue.get()
edges = compute_edges(local_env, path)
env_queue.put(local_env)
return node_index, edges
undropped = [
n for n in range(depth_start, next_depth_start) if n not in dropped
]
computed_edges = pool.map(expand_node, undropped)
        # This could also be done under a lock as above, which would save the
        # memory held by computed_edges, and in practice that lock is barely
        # contended. However, there is currently a non-determinism issue with
        # multithreading, so for now it is preferable to keep the node ordering
        # deterministic rather than add another source of non-determinism, even
        # though the node ordering should not matter.
for node_index, edges in computed_edges:
            for i, (fingerprint, reward_sum) in enumerate(edges):
target_node_index, inserted = graph.add_or_find_node(
fingerprint, reward_sum
)
if target_node_index == node_index: # self edge
assert not inserted
stats.note_edge(NodeTypeStats.EdgeType.self_pruned)
continue
if target_node_index < next_depth_start: # back edge
assert not inserted
stats.note_edge(NodeTypeStats.EdgeType.back_pruned)
continue
if not inserted: # cross edge
stats.note_edge(NodeTypeStats.EdgeType.cross_pruned)
else: # unpruned - node was added
stats.note_edge(NodeTypeStats.EdgeType.unpruned)
graph.add_edge(node_index, i, target_node_index)
best_reward = graph.reward_sum(best_node)
if reward_sum > best_reward and not rewards_close(
best_reward, reward_sum
):
best_node = target_node_index
if FLAGS.topn > 0:
top_nodes = list(range(next_depth_start, graph.node_count()))
top_nodes.sort(key=lambda n: graph.reward_sum(n), reverse=True)
for n in top_nodes[FLAGS.topn :]:
dropped.add(n)
stats.drop_unpruned_edge()
stats.end_depth_and_print(envs[0], graph, best_node)
def main(argv):
"""Main entry point."""
argv = FLAGS(argv)
if len(argv) != 1:
raise app.UsageError(f"Unknown command line arguments: {argv[1:]}")
print(f"Running with {FLAGS.nproc} threads.")
assert FLAGS.nproc >= 1
envs = []
try:
for _ in range(FLAGS.nproc):
envs.append(make_env())
with ThreadPool(len(envs)) as pool:
compute_action_graph(pool, envs, episode_length=FLAGS.episode_length)
finally:
for env in envs:
env.close()
if __name__ == "__main__":
app.run(main)
|
CompilerGym-development
|
examples/explore.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Perform a random walk of the action space of a CompilerGym environment.
Example usage:
# Run a random walk on cBench example program using instruction count reward.
$ python3 random_walk.py --env=llvm-v0 --step_min=100 --step_max=100 \
--benchmark=cbench-v1/dijkstra --reward=IrInstructionCount
"""
import random
import humanize
from absl import app, flags
from compiler_gym.envs import CompilerEnv
from compiler_gym.util.flags.benchmark_from_flags import benchmark_from_flags
from compiler_gym.util.flags.env_from_flags import env_from_flags
from compiler_gym.util.shell_format import emph
from compiler_gym.util.timer import Timer
flags.DEFINE_integer(
"step_min",
12,
"The minimum number of steps. Fewer steps may be performed if the "
"environment ends the episode early.",
)
flags.DEFINE_integer("step_max", 256, "The maximum number of steps.")
FLAGS = flags.FLAGS
def run_random_walk(env: CompilerEnv, step_count: int) -> None:
"""Perform a random walk of the action space.
:param env: The environment to use.
:param step_count: The number of steps to run. This value is an upper bound -
fewer steps will be performed if any of the actions lead the
environment to end the episode.
"""
rewards = []
step_num = 0
with Timer() as episode_time:
env.reset()
for step_num in range(1, step_count + 1):
action_index = env.action_space.sample()
with Timer() as step_time:
observation, reward, done, info = env.step(action_index)
print(
f"\n=== Step {humanize.intcomma(step_num)} ===\n"
f"Action: {env.action_space.names[action_index]} "
f"(changed={not info.get('action_had_no_effect')})\n"
f"Reward: {reward}"
)
rewards.append(reward)
if env.observation_space:
print(f"Observation:\n{observation}")
print(f"Step time: {step_time}")
if done:
print("Episode ended by environment")
break
def reward_percentage(reward, rewards):
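        # Express `reward` as a signed fraction of the total reward; for
        # example (illustrative values), reward=2 with sum(rewards)=8
        # formats as "+25.00%".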
if sum(rewards) == 0:
return 0
percentage = reward / sum(rewards)
return emph(f"{'+' if percentage >= 0 else ''}{percentage:.2%}")
print(
f"\nCompleted {emph(humanize.intcomma(step_num))} steps in {episode_time} "
f"({step_num / episode_time.time:.1f} steps / sec).\n"
f"Total reward: {sum(rewards)}\n"
f"Max reward: {max(rewards)} ({reward_percentage(max(rewards), rewards)} "
f"at step {humanize.intcomma(rewards.index(max(rewards)) + 1)})"
)
def main(argv):
"""Main entry point."""
assert len(argv) == 1, f"Unrecognized flags: {argv[1:]}"
with env_from_flags(benchmark=benchmark_from_flags()) as env:
step_min = min(FLAGS.step_min, FLAGS.step_max)
step_max = max(FLAGS.step_min, FLAGS.step_max)
run_random_walk(env=env, step_count=random.randint(step_min, step_max))
if __name__ == "__main__":
app.run(main)
|
CompilerGym-development
|
examples/random_walk.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the unrolling CompilerGym service example."""
import subprocess
from pathlib import Path
import gym
import numpy as np
import pytest
import compiler_gym
import examples.example_unrolling_service as unrolling_service
from compiler_gym.envs import CompilerEnv
from compiler_gym.errors import SessionNotFound
from compiler_gym.spaces import Box, NamedDiscrete, Scalar, Sequence
from compiler_gym.util.commands import Popen
from tests.test_main import main
@pytest.fixture(scope="function")
def env() -> CompilerEnv:
"""Text fixture that yields an environment."""
with gym.make("unrolling-py-v0") as env_:
yield env_
@pytest.fixture(scope="module")
def bin() -> Path:
return unrolling_service.UNROLLING_PY_SERVICE_BINARY
def test_invalid_arguments(bin: Path):
"""Test that running the binary with unrecognized arguments is an error."""
def run(cmd):
with Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
) as p:
stdout, stderr = p.communicate(timeout=10)
return p.returncode, stdout, stderr
returncode, _, stderr = run([str(bin), "foobar"])
assert "ERROR:" in stderr
assert "'foobar'" in stderr
assert returncode == 1
returncode, _, stderr = run([str(bin), "--foobar"])
# C++ and python flag parsing library emit slightly different error
# messages.
assert "ERROR:" in stderr or "FATAL" in stderr
assert "'foobar'" in stderr
assert returncode == 1
def test_versions(env: CompilerEnv):
"""Tests the GetVersion() RPC endpoint."""
assert env.version == compiler_gym.__version__
assert env.compiler_version == "1.0.0"
def test_action_space(env: CompilerEnv):
"""Test that the environment reports the service's action spaces."""
assert env.action_spaces == [
NamedDiscrete(
name="unrolling",
items=[
"-loop-unroll -unroll-count=2",
"-loop-unroll -unroll-count=4",
"-loop-unroll -unroll-count=8",
],
)
]
def test_observation_spaces(env: CompilerEnv):
"""Test that the environment reports the service's observation spaces."""
env.reset()
assert env.observation.spaces.keys() == {"ir", "features", "runtime", "size"}
assert env.observation.spaces["ir"].space == Sequence(
name="ir",
size_range=(0, np.iinfo(np.int64).max),
dtype=str,
opaque_data_format=None,
)
assert env.observation.spaces["features"].space == Box(
name="features", shape=(3,), low=0, high=100000, dtype=int
)
assert env.observation.spaces["runtime"].space == Scalar(
name="runtime", min=0, max=np.inf, dtype=float
)
assert env.observation.spaces["size"].space == Scalar(
name="size", min=0, max=np.inf, dtype=float
)
def test_reward_spaces(env: CompilerEnv):
"""Test that the environment reports the service's reward spaces."""
env.reset()
assert env.reward.spaces.keys() == {"runtime", "size"}
def test_step_before_reset(env: CompilerEnv):
"""Taking a step() before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
env.step(0)
def test_observation_before_reset(env: CompilerEnv):
"""Taking an observation before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
_ = env.observation["ir"]
def test_reward_before_reset(env: CompilerEnv):
"""Taking a reward before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
_ = env.reward["runtime"]
def test_reset_invalid_benchmark(env: CompilerEnv):
"""Test requesting a specific benchmark."""
with pytest.raises(LookupError) as ctx:
env.reset(benchmark="unrolling-v0/foobar")
assert str(ctx.value) == "Unknown program name"
def test_invalid_observation_space(env: CompilerEnv):
"""Test error handling with invalid observation space."""
with pytest.raises(LookupError):
env.observation_space = 100
def test_invalid_reward_space(env: CompilerEnv):
"""Test error handling with invalid reward space."""
with pytest.raises(LookupError):
env.reward_space = 100
def test_double_reset(env: CompilerEnv):
"""Test that reset() can be called twice."""
env.reset()
assert env.in_episode
env.reset()
assert env.in_episode
def test_Step_out_of_range(env: CompilerEnv):
"""Test error handling with an invalid action."""
env.reset()
with pytest.raises(ValueError) as ctx:
env.step(100)
assert str(ctx.value) == "Out-of-range"
def test_default_ir_observation(env: CompilerEnv):
"""Test default observation space."""
env.observation_space = "ir"
observation = env.reset()
assert len(observation) > 0
observation, reward, done, info = env.step(0)
assert not done, info
assert len(observation) > 0
assert reward is None
def test_default_features_observation(env: CompilerEnv):
"""Test default observation space."""
env.observation_space = "features"
observation = env.reset()
assert isinstance(observation, np.ndarray)
assert observation.shape == (3,)
assert observation.dtype == np.int64
assert all(obs >= 0 for obs in observation.tolist())
def test_default_reward(env: CompilerEnv):
"""Test default reward space."""
env.reward_space = "runtime"
env.reset()
observation, reward, done, info = env.step(0)
assert not done, info
assert observation is None
assert reward is not None
def test_observations(env: CompilerEnv):
"""Test observation spaces."""
env.reset()
assert len(env.observation["ir"]) > 0
np.testing.assert_array_less([-1, -1, -1], env.observation["features"])
def test_rewards(env: CompilerEnv):
"""Test reward spaces."""
env.reset()
assert env.reward["runtime"] is not None
def test_benchmarks(env: CompilerEnv):
assert list(env.datasets.benchmark_uris()) == [
"benchmark://unrolling-v0/offsets1",
"benchmark://unrolling-v0/conv2d",
]
def test_fork(env: CompilerEnv):
env.reset()
env.step(0)
env.step(1)
other_env = env.fork()
try:
assert env.benchmark == other_env.benchmark
assert other_env.actions == [0, 1]
finally:
other_env.close()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
examples/example_unrolling_service/env_tests.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the unrolling CompilerGym service example."""
import os
import subprocess
import sys
from getpass import getuser
from pathlib import Path
from typing import Iterable, List, Optional
import gym
import numpy as np
import pytest
import compiler_gym
from compiler_gym.datasets import Benchmark, Dataset
from compiler_gym.datasets.uri import BenchmarkUri
from compiler_gym.envs import CompilerEnv
from compiler_gym.envs.llvm.llvm_benchmark import get_system_library_flags
from compiler_gym.errors import SessionNotFound
from compiler_gym.spaces import Box, NamedDiscrete, Reward, Scalar, Sequence
from compiler_gym.third_party import llvm
from compiler_gym.util import debug_util as dbg
from compiler_gym.util.commands import Popen
from compiler_gym.util.registration import register
UNROLLING_PY_SERVICE_BINARY: Path = Path(
"example_unrolling_service/service_py/example_service.py"
)
assert UNROLLING_PY_SERVICE_BINARY.is_file(), "Service script not found"
BENCHMARKS_PATH: Path = Path("example_unrolling_service/benchmarks")
NEURO_VECTORIZER_HEADER: Path = Path(
"../compiler_gym/third_party/neuro-vectorizer/header.h"
)
class RuntimeReward(Reward):
"""An example reward that uses changes in the "runtime" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
name="runtime",
observation_spaces=["runtime"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.baseline_runtime = 0
def reset(self, benchmark: str, observation_view):
del benchmark # unused
self.baseline_runtime = observation_view["runtime"]
def update(self, action, observations, observation_view):
del action # unused
del observation_view # unused
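        # A worked example (illustrative values): if baseline_runtime is 10.0
        # and the new runtime observation is 8.0, the reward is
        # (10.0 - 8.0) / 10.0 = 0.2, i.e. a 20% improvement over the baseline.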
return float(self.baseline_runtime - observations[0]) / self.baseline_runtime
class SizeReward(Reward):
"""An example reward that uses changes in the "size" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
name="size",
observation_spaces=["size"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.baseline_size = 0
def reset(self, benchmark: str, observation_view):
del benchmark # unused
        self.baseline_size = observation_view["size"]
def update(self, action, observations, observation_view):
del action # unused
del observation_view # unused
return float(self.baseline_size - observations[0]) / self.baseline_size
class UnrollingDataset(Dataset):
def __init__(self, *args, **kwargs):
super().__init__(
name="benchmark://unrolling-v2",
license="MIT",
description="Unrolling example dataset",
)
self._benchmarks = {
"/offsets1": Benchmark.from_file_contents(
"benchmark://unrolling-v2/offsets1",
self.preprocess(BENCHMARKS_PATH / "offsets1.c"),
),
"/conv2d": Benchmark.from_file_contents(
"benchmark://unrolling-v2/conv2d",
self.preprocess(BENCHMARKS_PATH / "conv2d.c"),
),
}
@staticmethod
def preprocess(src: Path) -> bytes:
"""Front a C source through the compiler frontend."""
# TODO(github.com/facebookresearch/CompilerGym/issues/325): We can skip
# this pre-processing, or do it on the service side, once support for
# multi-file benchmarks lands.
cmd = [
str(llvm.clang_path()),
"-E",
"-o",
"-",
"-I",
str(NEURO_VECTORIZER_HEADER.parent),
src,
]
cmd += get_system_library_flags()
return subprocess.check_output(
cmd,
timeout=300,
)
def benchmark_uris(self) -> Iterable[str]:
yield from (f"benchmark://unrolling-v2{k}" for k in self._benchmarks.keys())
def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark:
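        # For example, the URI "benchmark://unrolling-v2/offsets1" parses to
        # path "/offsets1", which is a key of self._benchmarks above.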
if uri.path in self._benchmarks:
return self._benchmarks[uri.path]
else:
raise LookupError("Unknown program name")
# Register the unrolling example service on module import. After importing this module,
# the unrolling-py-v2 environment will be available to gym.make(...).
register(
id="unrolling-py-v2",
entry_point="compiler_gym.service.client_service_compiler_env:ClientServiceCompilerEnv",
kwargs={
"service": UNROLLING_PY_SERVICE_BINARY,
"rewards": [RuntimeReward(), SizeReward()],
"datasets": [UnrollingDataset()],
},
)
@pytest.fixture(scope="function")
def env() -> CompilerEnv:
"""Text fixture that yields an environment."""
with gym.make("unrolling-py-v2") as env_:
yield env_
@pytest.fixture(scope="module")
def bin() -> Path:
return UNROLLING_PY_SERVICE_BINARY
def test_invalid_arguments(bin: Path):
"""Test that running the binary with unrecognized arguments is an error."""
def run(cmd):
with Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
) as p:
stdout, stderr = p.communicate(timeout=10)
return p.returncode, stdout, stderr
returncode, _, stderr = run([str(bin), "foobar"])
assert "ERROR:" in stderr
assert "'foobar'" in stderr
assert returncode == 1
returncode, _, stderr = run([str(bin), "--foobar"])
# C++ and python flag parsing library emit slightly different error
# messages.
assert "ERROR:" in stderr or "FATAL" in stderr
assert "'foobar'" in stderr
assert returncode == 1
def test_versions(env: CompilerEnv):
"""Tests the GetVersion() RPC endpoint."""
assert env.version == compiler_gym.__version__
assert env.compiler_version == "1.0.0"
def test_action_space(env: CompilerEnv):
"""Test that the environment reports the service's action spaces."""
assert env.action_spaces == [
NamedDiscrete(
name="unrolling",
items=[
"-loop-unroll -unroll-count=2",
"-loop-unroll -unroll-count=4",
"-loop-unroll -unroll-count=8",
],
)
]
def test_observation_spaces(env: CompilerEnv):
"""Test that the environment reports the service's observation spaces."""
env.reset()
assert env.observation.spaces.keys() == {"ir", "features", "runtime", "size"}
assert env.observation.spaces["ir"].space == Sequence(
name="ir",
size_range=(0, np.iinfo(np.int64).max),
dtype=str,
opaque_data_format=None,
)
assert env.observation.spaces["features"].space == Box(
name="features", shape=(3,), low=0, high=100000, dtype=int
)
assert env.observation.spaces["runtime"].space == Scalar(
name="runtime", min=0, max=np.inf, dtype=float
)
assert env.observation.spaces["size"].space == Scalar(
name="size", min=0, max=np.inf, dtype=float
)
def test_reward_spaces(env: CompilerEnv):
"""Test that the environment reports the service's reward spaces."""
env.reset()
assert env.reward.spaces.keys() == {"runtime", "size"}
def test_step_before_reset(env: CompilerEnv):
"""Taking a step() before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
env.step(0)
def test_observation_before_reset(env: CompilerEnv):
"""Taking an observation before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
_ = env.observation["ir"]
def test_reward_before_reset(env: CompilerEnv):
"""Taking a reward before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
_ = env.reward["runtime"]
def test_reset_invalid_benchmark(env: CompilerEnv):
"""Test requesting a specific benchmark."""
with pytest.raises(LookupError) as ctx:
env.reset(benchmark="unrolling-v2/foobar")
assert str(ctx.value) == "Unknown program name"
def test_invalid_observation_space(env: CompilerEnv):
"""Test error handling with invalid observation space."""
with pytest.raises(LookupError):
env.observation_space = 100
def test_invalid_reward_space(env: CompilerEnv):
"""Test error handling with invalid reward space."""
with pytest.raises(LookupError):
env.reward_space = 100
def test_double_reset(env: CompilerEnv):
"""Test that reset() can be called twice."""
env.reset()
assert env.in_episode
env.reset()
assert env.in_episode
def test_Step_out_of_range(env: CompilerEnv):
"""Test error handling with an invalid action."""
env.reset()
with pytest.raises(ValueError) as ctx:
env.step(100)
assert str(ctx.value) == "Out-of-range"
def test_default_ir_observation(env: CompilerEnv):
"""Test default observation space."""
env.observation_space = "ir"
observation = env.reset()
assert len(observation) > 0
observation, reward, done, info = env.step(0)
assert not done, info
assert len(observation) > 0
assert reward is None
def test_default_features_observation(env: CompilerEnv):
"""Test default observation space."""
env.observation_space = "features"
observation = env.reset()
assert isinstance(observation, np.ndarray)
assert observation.shape == (3,)
assert observation.dtype == np.int64
assert all(obs >= 0 for obs in observation.tolist())
def test_default_reward(env: CompilerEnv):
"""Test default reward space."""
env.reward_space = "runtime"
env.reset()
observation, reward, done, info = env.step(0)
assert not done, info
assert observation is None
assert reward is not None
def test_observations(env: CompilerEnv):
"""Test observation spaces."""
env.reset()
assert len(env.observation["ir"]) > 0
np.testing.assert_array_less([-1, -1, -1], env.observation["features"])
def test_rewards(env: CompilerEnv):
"""Test reward spaces."""
env.reset()
assert env.reward["runtime"] is not None
def test_benchmarks(env: CompilerEnv):
assert list(env.datasets.benchmark_uris()) == [
"benchmark://unrolling-v2/offsets1",
"benchmark://unrolling-v2/conv2d",
]
def test_fork(env: CompilerEnv):
env.reset()
env.step(0)
env.step(1)
other_env = env.fork()
try:
assert env.benchmark == other_env.benchmark
assert other_env.actions == [0, 1]
finally:
other_env.close()
# Copied from CompilerGym/tests/test_main.py because importing it from here raised errors.
def main(extra_pytest_args: Optional[List[str]] = None, debug_level: int = 1):
dbg.set_debug_level(debug_level)
# Keep test data isolated from user data.
os.environ[
"COMPILER_GYM_SITE_DATA"
] = f"/tmp/compiler_gym_{getuser()}/tests/site_data"
os.environ["COMPILER_GYM_CACHE"] = f"/tmp/compiler_gym_{getuser()}/tests/cache"
pytest_args = sys.argv + [
# Run pytest verbosely to print out test names to provide context in
# case of failures.
"-vv",
# Disable "Module already imported" warnings. See:
# https://docs.pytest.org/en/latest/how-to/usage.html#calling-pytest-from-python-code
"-W",
"ignore:Module already imported:pytest.PytestWarning",
# Disable noisy "Flaky tests passed" messages.
"--no-success-flaky-report",
]
# Support for sharding. If a py_test target has the shard_count attribute
# set (in the range [1,50]), then the pytest-shard module is used to divide
# the tests among the shards. See https://pypi.org/project/pytest-shard/
sharded_test = os.environ.get("TEST_TOTAL_SHARDS")
if sharded_test:
num_shards = int(os.environ["TEST_TOTAL_SHARDS"])
shard_index = int(os.environ["TEST_SHARD_INDEX"])
pytest_args += [f"--shard-id={shard_index}", f"--num-shards={num_shards}"]
else:
pytest_args += ["-p", "no:pytest-shard"]
pytest_args += extra_pytest_args or []
returncode = pytest.main(pytest_args)
# By default pytest will fail with an error if no tests are collected.
# Disable that behavior here (with a warning) since there are legitimate
# cases where we may want to run a test file with no tests in it. For
# example, when running on a continuous integration service where all the
# tests are marked with the @skip_on_ci decorator.
if returncode == pytest.ExitCode.NO_TESTS_COLLECTED.value:
print(
"WARNING: The test suite was empty. Is that intended?",
file=sys.stderr,
)
returncode = 0
sys.exit(returncode)
if __name__ == "__main__":
main(
extra_pytest_args=[
"-W",
"ignore::UserWarning",
]
)
|
CompilerGym-development
|
examples/example_unrolling_service/env_without_bazel_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Smoke test for examples/example_unrolling_service/example_without_bazel.py"""
from example_unrolling_service.example_without_bazel import main
from flaky import flaky
@flaky
def test_example_without_bazel():
main()
|
CompilerGym-development
|
examples/example_unrolling_service/example_without_bazel_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This module defines and registers the example gym environments."""
import os
import subprocess
from pathlib import Path
from typing import Iterable
from compiler_gym.datasets import Benchmark, Dataset
from compiler_gym.datasets.uri import BenchmarkUri
from compiler_gym.envs.llvm.llvm_benchmark import get_system_library_flags
from compiler_gym.spaces import Reward
from compiler_gym.third_party import llvm
from compiler_gym.util.registration import register
from compiler_gym.util.runfiles_path import runfiles_path
UNROLLING_PY_SERVICE_BINARY: Path = runfiles_path(
"examples/example_unrolling_service/service_py/example-unrolling-service-py"
)
BENCHMARKS_PATH: Path = runfiles_path("examples/example_unrolling_service/benchmarks")
if not os.path.exists(BENCHMARKS_PATH):
BENCHMARKS_PATH = Path("example_unrolling_service/benchmarks")
NEURO_VECTORIZER_HEADER: Path = runfiles_path(
"compiler_gym/third_party/neuro-vectorizer/header.h"
)
if not os.path.exists(NEURO_VECTORIZER_HEADER):
NEURO_VECTORIZER_HEADER: Path = Path(
"../compiler_gym/third_party/neuro-vectorizer/header.h"
)
class RuntimeReward(Reward):
"""An example reward that uses changes in the "runtime" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
name="runtime",
observation_spaces=["runtime"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.baseline_runtime = 0
def reset(self, benchmark: str, observation_view):
del benchmark # unused
self.baseline_runtime = observation_view["runtime"]
def update(self, action, observations, observation_view):
del action # unused
del observation_view # unused
return float(self.baseline_runtime - observations[0]) / self.baseline_runtime
class SizeReward(Reward):
"""An example reward that uses changes in the "size" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
name="size",
observation_spaces=["size"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.baseline_size = 0
def reset(self, benchmark: str, observation_view):
del benchmark # unused
        self.baseline_size = observation_view["size"]
def update(self, action, observations, observation_view):
del action # unused
del observation_view # unused
return float(self.baseline_size - observations[0]) / self.baseline_size
class UnrollingDataset(Dataset):
def __init__(self, *args, **kwargs):
super().__init__(
name="benchmark://unrolling-v0",
license="MIT",
description="Unrolling example dataset",
)
self._benchmarks = {
"/offsets1": Benchmark.from_file_contents(
"benchmark://unrolling-v0/offsets1",
self.preprocess(BENCHMARKS_PATH / "offsets1.c"),
),
"/conv2d": Benchmark.from_file_contents(
"benchmark://unrolling-v0/conv2d",
self.preprocess(BENCHMARKS_PATH / "conv2d.c"),
),
}
@staticmethod
def preprocess(src: Path) -> bytes:
"""Front a C source through the compiler frontend."""
# TODO(github.com/facebookresearch/CompilerGym/issues/325): We can skip
# this pre-processing, or do it on the service side, once support for
# multi-file benchmarks lands.
cmd = [
str(llvm.clang_path()),
"-E",
"-o",
"-",
"-I",
str(NEURO_VECTORIZER_HEADER.parent),
src,
] + get_system_library_flags()
return subprocess.check_output(
cmd,
timeout=300,
)
def benchmark_uris(self) -> Iterable[str]:
yield from (f"benchmark://unrolling-v0{k}" for k in self._benchmarks.keys())
def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark:
if uri.path in self._benchmarks:
return self._benchmarks[uri.path]
else:
raise LookupError("Unknown program name")
# Register the unrolling example service on module import. After importing this module,
# the unrolling-py-v0 environment will be available to gym.make(...).
register(
id="unrolling-py-v0",
entry_point="compiler_gym.service.client_service_compiler_env:ClientServiceCompilerEnv",
kwargs={
"service": UNROLLING_PY_SERVICE_BINARY,
"rewards": [RuntimeReward(), SizeReward()],
"datasets": [UnrollingDataset()],
},
)
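# A minimal usage sketch (assuming the service binary above is available in
# this build): importing this module registers the environment, after which it
# can be created with gym.make, e.g.
#
#     import gym
#     import examples.example_unrolling_service  # noqa Register environments.
#     with gym.make("unrolling-py-v0") as env:
#         env.reset()
#         env.step(env.action_space.sample())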
|
CompilerGym-development
|
examples/example_unrolling_service/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This script demonstrates how the Python example service without needing
to use the bazel build system.
Prerequisite:
# In the repo's INSTALL.md, follow the 'Building from source using CMake' instructions with `-DCOMPILER_GYM_BUILD_EXAMPLES=ON` added to the `cmake` command
$ cd <path to source directory>/examples
Usage:
    $ python example_unrolling_service/example_without_bazel.py
It is equivalent in behavior to the example.py script in this directory.
"""
import logging
import subprocess
from pathlib import Path
from typing import Iterable
import compiler_gym
from compiler_gym.datasets import Benchmark, Dataset
from compiler_gym.datasets.uri import BenchmarkUri
from compiler_gym.envs.llvm.llvm_benchmark import get_system_library_flags
from compiler_gym.spaces import Reward
from compiler_gym.third_party import llvm
from compiler_gym.util.logging import init_logging
from compiler_gym.util.registration import register
UNROLLING_PY_SERVICE_BINARY: Path = Path(
"example_unrolling_service/service_py/example_service.py"
)
assert UNROLLING_PY_SERVICE_BINARY.is_file(), "Service script not found"
BENCHMARKS_PATH: Path = Path("example_unrolling_service/benchmarks")
NEURO_VECTORIZER_HEADER: Path = Path(
"../compiler_gym/third_party/neuro-vectorizer/header.h"
)
class RuntimeReward(Reward):
"""An example reward that uses changes in the "runtime" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
name="runtime",
observation_spaces=["runtime"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.baseline_runtime = 0
def reset(self, benchmark: str, observation_view):
del benchmark # unused
self.baseline_runtime = observation_view["runtime"]
def update(self, action, observations, observation_view):
del action # unused
del observation_view # unused
return float(self.baseline_runtime - observations[0]) / self.baseline_runtime
class SizeReward(Reward):
"""An example reward that uses changes in the "size" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
name="size",
observation_spaces=["size"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.baseline_size = 0
def reset(self, benchmark: str, observation_view):
del benchmark # unused
        self.baseline_size = observation_view["size"]
def update(self, action, observations, observation_view):
del action # unused
del observation_view # unused
return float(self.baseline_size - observations[0]) / self.baseline_size
class UnrollingDataset(Dataset):
def __init__(self, *args, **kwargs):
super().__init__(
name="benchmark://unrolling-v1",
license="MIT",
description="Unrolling example dataset",
)
self._benchmarks = {
"/offsets1": Benchmark.from_file_contents(
"benchmark://unrolling-v1/offsets1",
self.preprocess(BENCHMARKS_PATH / "offsets1.c"),
),
"/conv2d": Benchmark.from_file_contents(
"benchmark://unrolling-v1/conv2d",
self.preprocess(BENCHMARKS_PATH / "conv2d.c"),
),
}
@staticmethod
def preprocess(src: Path) -> bytes:
"""Front a C source through the compiler frontend."""
# TODO(github.com/facebookresearch/CompilerGym/issues/325): We can skip
# this pre-processing, or do it on the service side, once support for
# multi-file benchmarks lands.
cmd = [
str(llvm.clang_path()),
"-E",
"-o",
"-",
"-I",
str(NEURO_VECTORIZER_HEADER.parent),
src,
]
cmd += get_system_library_flags()
return subprocess.check_output(
cmd,
timeout=300,
)
def benchmark_uris(self) -> Iterable[str]:
yield from (f"benchmark://unrolling-v1{k}" for k in self._benchmarks.keys())
def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark:
if uri.path in self._benchmarks:
return self._benchmarks[uri.path]
else:
raise LookupError("Unknown program name")
# Register the unrolling example service on module import. After importing this module,
# the unrolling-py-v1 environment will be available to gym.make(...).
register(
id="unrolling-py-v1",
entry_point="compiler_gym.service.client_service_compiler_env:ClientServiceCompilerEnv",
kwargs={
"service": UNROLLING_PY_SERVICE_BINARY,
"rewards": [RuntimeReward(), SizeReward()],
"datasets": [UnrollingDataset()],
},
)
def main():
# Use debug verbosity to print out extra logging information.
init_logging(level=logging.DEBUG)
with compiler_gym.make(
"unrolling-py-v1",
benchmark="unrolling-v1/offsets1",
observation_space="features",
reward_space="runtime",
) as env:
compiler_gym.set_debug_level(4) # TODO: check why this has no effect
observation = env.reset()
print("observation: ", observation)
print()
observation, reward, done, info = env.step(env.action_space.sample())
print("observation: ", observation)
print("reward: ", reward)
print("done: ", done)
print("info: ", info)
print()
observation, reward, done, info = env.step(env.action_space.sample())
print("observation: ", observation)
print("reward: ", reward)
print("done: ", done)
print("info: ", info)
# TODO: implement write_bitcode(..) or write_ir(..)
# env.write_bitcode("/tmp/output.bc")
if __name__ == "__main__":
main()
|
CompilerGym-development
|
examples/example_unrolling_service/example_without_bazel.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This script demonstrates how the example services defined in this directory
can be used as gym environments. Usage:
$ bazel run -c opt //examples/example_unrolling_service:example
"""
import compiler_gym
import examples.example_unrolling_service as unrolling_service # noqa Register environments.
with compiler_gym.make(
"unrolling-py-v0",
benchmark="unrolling-v0/offsets1",
observation_space="features",
reward_space="runtime",
) as env:
compiler_gym.set_debug_level(4) # TODO: check why this has no effect
observation = env.reset()
print("observation: ", observation)
print()
observation, reward, done, info = env.step(env.action_space.sample())
print("observation: ", observation)
print("reward: ", reward)
print("done: ", done)
print("info: ", info)
print()
observation, reward, done, info = env.step(env.action_space.sample())
print("observation: ", observation)
print("reward: ", reward)
print("done: ", done)
print("info: ", info)
env.reset()
# TODO: implement write_bitcode(..) or write_ir(..)
# env.write_bitcode("/tmp/output.bc")
|
CompilerGym-development
|
examples/example_unrolling_service/example.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
def extract_statistics_from_ir(ir: str):
stats = {"control_flow": 0, "arithmetic": 0, "memory": 0}
for line in ir.splitlines():
tokens = line.split()
if len(tokens) > 0:
opcode = tokens[0]
if opcode in [
"br",
"call",
"ret",
"switch",
"indirectbr",
"invoke",
"callbr",
"resume",
"catchswitch",
"catchret",
"cleanupret",
"unreachable",
]:
stats["control_flow"] += 1
elif opcode in [
"fneg",
"add",
"fadd",
"sub",
"fsub",
"mul",
"fmul",
"udiv",
"sdiv",
"fdiv",
"urem",
"srem",
"frem",
"shl",
"lshr",
"ashr",
"and",
"or",
"xor",
]:
stats["arithmetic"] += 1
elif opcode in [
"alloca",
"load",
"store",
"fence",
"cmpxchg",
"atomicrmw",
"getelementptr",
]:
stats["memory"] += 1
return stats
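# A minimal smoke test (illustrative only). Note that the opcode is taken from
# the first token of each line, so instructions whose line begins with a
# result register (e.g. "%1 = add ...") are not counted by this helper.
if __name__ == "__main__":
    example_ir = "\n".join(
        [
            "store i32 0, i32* %x",
            "br label %loop",
            "ret void",
        ]
    )
    # Expected output: {'control_flow': 2, 'arithmetic': 0, 'memory': 1}
    print(extract_statistics_from_ir(example_ir))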
|
CompilerGym-development
|
examples/example_unrolling_service/service_py/utils.py
|
#! /usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""An example CompilerGym service in python."""
import logging
import os
import shutil
import subprocess
from pathlib import Path
from typing import Optional, Tuple
import numpy as np
import utils
import compiler_gym.third_party.llvm as llvm
from compiler_gym.envs.llvm.llvm_benchmark import get_system_library_flags
from compiler_gym.service import CompilationSession
from compiler_gym.service.proto import (
ActionSpace,
Benchmark,
DoubleRange,
Event,
Int64Box,
Int64Range,
Int64Tensor,
NamedDiscreteSpace,
ObservationSpace,
Space,
StringSpace,
)
from compiler_gym.service.runtime import create_and_run_compiler_gym_service
from compiler_gym.util.commands import run_command
class UnrollingCompilationSession(CompilationSession):
"""Represents an instance of an interactive compilation session."""
compiler_version: str = "1.0.0"
# The list of actions that are supported by this service.
action_spaces = [
ActionSpace(
name="unrolling",
space=Space(
named_discrete=NamedDiscreteSpace(
name=[
"-loop-unroll -unroll-count=2",
"-loop-unroll -unroll-count=4",
"-loop-unroll -unroll-count=8",
],
),
),
)
]
# A list of observation spaces supported by this service. Each of these
# ObservationSpace protos describes an observation space.
observation_spaces = [
ObservationSpace(
name="ir",
space=Space(
string_value=StringSpace(length_range=Int64Range(min=0)),
),
deterministic=True,
platform_dependent=False,
default_observation=Event(string_value=""),
),
ObservationSpace(
name="features",
space=Space(
int64_box=Int64Box(
low=Int64Tensor(shape=[3], value=[0, 0, 0]),
high=Int64Tensor(shape=[3], value=[100000, 100000, 100000]),
),
),
),
ObservationSpace(
name="runtime",
space=Space(
double_value=DoubleRange(min=0),
),
deterministic=False,
platform_dependent=True,
default_observation=Event(
double_value=0,
),
),
ObservationSpace(
name="size",
space=Space(
double_value=DoubleRange(min=0),
),
deterministic=True,
platform_dependent=True,
default_observation=Event(
double_value=0,
),
),
]
def __init__(
self,
working_directory: Path,
action_space: ActionSpace,
benchmark: Benchmark,
use_custom_opt: bool = True,
):
super().__init__(working_directory, action_space, benchmark)
logging.info("Started a compilation session for %s", benchmark.uri)
self._benchmark = benchmark
self._action_space = action_space
# Resolve the paths to LLVM binaries once now.
self._clang = str(llvm.clang_path())
self._llc = str(llvm.llc_path())
self._llvm_diff = str(llvm.llvm_diff_path())
self._opt = str(llvm.opt_path())
        # LLVM's opt does not always enforce the unrolling options passed as
        # CLI arguments. Hence, we created our own executable with a custom
        # unrolling pass in examples/example_unrolling_service/loop_unroller
        # that enforces the unrolling factors passed on its CLI.
        # If self._use_custom_opt is True, use our custom executable;
        # otherwise use LLVM's opt.
self._use_custom_opt = use_custom_opt
# Dump the benchmark source to disk.
self._src_path = str(self.working_dir / "benchmark.c")
with open(self.working_dir / "benchmark.c", "wb") as f:
f.write(benchmark.program.contents)
self._llvm_path = str(self.working_dir / "benchmark.ll")
self._llvm_before_path = str(self.working_dir / "benchmark.previous.ll")
self._obj_path = str(self.working_dir / "benchmark.o")
self._exe_path = str(self.working_dir / "benchmark.exe")
run_command(
[
self._clang,
"-Xclang",
"-disable-O0-optnone",
"-emit-llvm",
"-S",
self._src_path,
"-o",
self._llvm_path,
]
+ get_system_library_flags(),
timeout=30,
)
def apply_action(self, action: Event) -> Tuple[bool, Optional[ActionSpace], bool]:
num_choices = len(self._action_space.space.named_discrete.name)
        # This is the index into the action space's values that the user
        # selected, e.g. 0 -> "-loop-unroll -unroll-count=2".
choice_index = action.int64_value
if choice_index < 0 or choice_index >= num_choices:
raise ValueError("Out-of-range")
args = self._action_space.space.named_discrete.name[choice_index]
logging.info(
"Applying action %d, equivalent command-line arguments: '%s'",
choice_index,
args,
)
args = args.split()
# make a copy of the LLVM file to compare its contents after applying the action
shutil.copyfile(self._llvm_path, self._llvm_before_path)
# apply action
if self._use_custom_opt:
# our custom unroller has an additional `f` at the beginning of each argument
for i, arg in enumerate(args):
# convert -<argument> to -f<argument>
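                # e.g. "-loop-unroll" becomes "-floop-unroll" and
                # "-unroll-count=2" becomes "-funroll-count=2".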
arg = arg[0] + "f" + arg[1:]
args[i] = arg
run_command(
[
"../loop_unroller/loop_unroller",
self._llvm_path,
*args,
"-S",
"-o",
self._llvm_path,
],
timeout=30,
)
else:
run_command(
[
self._opt,
*args,
self._llvm_path,
"-S",
"-o",
self._llvm_path,
],
timeout=30,
)
# compare the IR files to check if the action had an effect
try:
subprocess.check_call(
[self._llvm_diff, self._llvm_before_path, self._llvm_path],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
timeout=60,
)
action_had_no_effect = True
except subprocess.CalledProcessError:
action_had_no_effect = False
end_of_session = False # TODO: this needs investigation: for how long can we apply loop unrolling? e.g., detect if there are no more loops in the IR?
new_action_space = None
return (end_of_session, new_action_space, action_had_no_effect)
@property
def ir(self) -> str:
with open(self._llvm_path) as f:
return f.read()
def get_observation(self, observation_space: ObservationSpace) -> Event:
logging.info("Computing observation from space %s", observation_space.name)
if observation_space.name == "ir":
return Event(string_value=self.ir)
elif observation_space.name == "features":
stats = utils.extract_statistics_from_ir(self.ir)
observation = Event(
int64_tensor=Int64Tensor(
shape=[len(list(stats.values()))], value=list(stats.values())
)
)
return observation
elif observation_space.name == "runtime":
# compile LLVM to object file
run_command(
[
self._llc,
"-filetype=obj",
self._llvm_path,
"-o",
self._obj_path,
],
timeout=30,
)
# build object file to binary
run_command(
[
self._clang,
self._obj_path,
"-O3",
"-o",
self._exe_path,
]
+ get_system_library_flags(),
timeout=30,
)
            # TODO: add documentation that benchmarks need to print out their
            # execution time to stdout.
            # Run 5 times and take the average of the middle 3 measurements.
exec_times = []
for _ in range(5):
stdout = run_command(
[self._exe_path],
timeout=30,
)
try:
exec_times.append(int(stdout))
except ValueError:
                    raise ValueError(
                        "Error parsing execution time from the command's output.\n"
                        "Please ensure that the benchmark's source code measures "
                        "its execution time and prints it to stdout.\n"
                        f"Stdout of the program: {stdout}"
                    )
exec_times = np.sort(exec_times)
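            # exec_times is sorted, so [1:4] selects the middle three of the
            # five measurements, discarding the fastest and slowest runs.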
avg_exec_time = np.mean(exec_times[1:4])
return Event(double_value=avg_exec_time)
elif observation_space.name == "size":
# compile LLVM to object file
run_command(
[
self._llc,
"-filetype=obj",
self._llvm_path,
"-o",
self._obj_path,
],
timeout=30,
)
# build object file to binary
run_command(
[
self._clang,
self._obj_path,
"-Oz",
"-o",
self._exe_path,
]
+ get_system_library_flags(),
timeout=30,
)
binary_size = os.path.getsize(self._exe_path)
return Event(double_value=binary_size)
else:
raise KeyError(observation_space.name)
if __name__ == "__main__":
create_and_run_compiler_gym_service(UnrollingCompilationSession)
|
CompilerGym-development
|
examples/example_unrolling_service/service_py/example_service.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the unrolling CompilerGym service example."""
import subprocess
from pathlib import Path
import gym
import numpy as np
import pytest
import compiler_gym
import examples.loop_optimizations_service as loop_optimizations_service
from compiler_gym.envs import CompilerEnv
from compiler_gym.errors import SessionNotFound
from compiler_gym.service.client_service_compiler_env import ClientServiceCompilerEnv
from compiler_gym.spaces import ActionSpace, Dict, NamedDiscrete, Scalar, Sequence
from compiler_gym.third_party.autophase import AUTOPHASE_FEATURE_NAMES
from tests.test_main import main
@pytest.fixture(scope="function")
def env() -> CompilerEnv:
"""Text fixture that yields an environment."""
with gym.make("loops-opt-py-v0") as env_:
yield env_
@pytest.fixture(scope="module")
def bin() -> Path:
return loop_optimizations_service.LOOPS_OPT_PY_SERVICE_BINARY
def test_invalid_arguments(bin: Path):
"""Test that running the binary with unrecognized arguments is an error."""
def run(cmd):
        with subprocess.Popen(
            cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
        ) as p:
            stdout, stderr = p.communicate(timeout=10)
            return p.returncode, stdout, stderr
returncode, _, stderr = run([str(bin), "foobar"])
assert "ERROR:" in stderr
assert "'foobar'" in stderr
assert returncode == 1
returncode, _, stderr = run([str(bin), "--foobar"])
# C++ and python flag parsing library emit slightly different error
# messages.
assert "ERROR:" in stderr or "FATAL" in stderr
assert "'foobar'" in stderr
assert returncode == 1
def test_versions(env: ClientServiceCompilerEnv):
"""Tests the GetVersion() RPC endpoint."""
assert env.version == compiler_gym.__version__
assert env.compiler_version == "1.0.0"
def test_action_space(env: CompilerEnv):
"""Test that the environment reports the service's action spaces."""
assert env.action_spaces == [
ActionSpace(
NamedDiscrete(
name="loop-opt",
items=[
"--loop-unroll --unroll-count=2",
"--loop-unroll --unroll-count=4",
"--loop-unroll --unroll-count=8",
"--loop-unroll --unroll-count=16",
"--loop-unroll --unroll-count=32",
"--loop-vectorize -force-vector-width=2",
"--loop-vectorize -force-vector-width=4",
"--loop-vectorize -force-vector-width=8",
"--loop-vectorize -force-vector-width=16",
"--loop-vectorize -force-vector-width=32",
],
)
)
]
def test_observation_spaces(env: CompilerEnv):
"""Test that the environment reports the service's observation spaces."""
env.reset()
assert env.observation.spaces.keys() == {
"ir",
"Inst2vec",
"Autophase",
"AutophaseDict",
"Programl",
"runtime",
"size",
}
assert env.observation.spaces["ir"].space == Sequence(
name="ir",
size_range=(0, np.iinfo(int).max),
dtype=str,
)
assert env.observation.spaces["Inst2vec"].space == Sequence(
name="Inst2vec",
size_range=(0, np.iinfo(int).max),
scalar_range=Scalar(
name=None,
min=np.iinfo(np.int64).min,
max=np.iinfo(np.int64).max,
dtype=np.int64,
),
dtype=int,
)
assert env.observation.spaces["Autophase"].space == Sequence(
name="Autophase",
size_range=(len(AUTOPHASE_FEATURE_NAMES), len(AUTOPHASE_FEATURE_NAMES)),
scalar_range=Scalar(
name=None,
min=np.iinfo(np.int64).min,
max=np.iinfo(np.int64).max,
dtype=np.int64,
),
dtype=int,
)
assert env.observation.spaces["AutophaseDict"].space == Dict(
name="AutophaseDict",
spaces={
name: Scalar(name=None, min=0, max=np.iinfo(np.int64).max, dtype=np.int64)
for name in AUTOPHASE_FEATURE_NAMES
},
)
assert env.observation.spaces["Programl"].space == Sequence(
name="Programl",
size_range=(0, np.iinfo(int).max),
dtype=str,
)
assert env.observation.spaces["runtime"].space == Scalar(
name="runtime", min=0, max=np.inf, dtype=float
)
assert env.observation.spaces["size"].space == Scalar(
name="size", min=0, max=np.inf, dtype=float
)
def test_reward_spaces(env: CompilerEnv):
"""Test that the environment reports the service's reward spaces."""
env.reset()
assert env.reward.spaces.keys() == {"runtime", "size"}
def test_step_before_reset(env: CompilerEnv):
"""Taking a step() before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
env.step(0)
def test_observation_before_reset(env: CompilerEnv):
"""Taking an observation before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
_ = env.observation["ir"]
def test_reward_before_reset(env: CompilerEnv):
"""Taking a reward before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
_ = env.reward["runtime"]
def test_reset_invalid_benchmark(env: CompilerEnv):
"""Test requesting a specific benchmark."""
with pytest.raises(LookupError) as ctx:
env.reset(benchmark="loops-opt-v0/foobar")
assert str(ctx.value) == "Unknown program name"
def test_invalid_observation_space(env: CompilerEnv):
"""Test error handling with invalid observation space."""
with pytest.raises(LookupError):
env.observation_space = 100
def test_invalid_reward_space(env: CompilerEnv):
"""Test error handling with invalid reward space."""
with pytest.raises(LookupError):
env.reward_space = 100
def test_double_reset(env: CompilerEnv):
"""Test that reset() can be called twice."""
env.reset()
assert env.in_episode
env.reset()
assert env.in_episode
def test_Step_out_of_range(env: CompilerEnv):
"""Test error handling with an invalid action."""
env.reset()
with pytest.raises(ValueError) as ctx:
env.step(100)
assert str(ctx.value) == "Out-of-range"
def test_default_ir_observation(env: CompilerEnv):
"""Test default IR observation space."""
env.observation_space = "ir"
observation = env.reset()
assert len(observation) > 0
observation, reward, done, info = env.step(0)
assert not done, info
assert len(observation) > 0
assert reward is None
def test_default_inst2vec_observation(env: CompilerEnv):
"""Test default inst2vec observation space."""
env.observation_space = "Inst2vec"
observation = env.reset()
assert isinstance(observation, np.ndarray)
assert len(observation) >= 0
assert observation.dtype == np.int64
assert all(obs >= 0 for obs in observation.tolist())
def test_default_autophase_observation(env: CompilerEnv):
"""Test default autophase observation space."""
env.observation_space = "Autophase"
observation = env.reset()
assert isinstance(observation, np.ndarray)
assert observation.shape == (len(AUTOPHASE_FEATURE_NAMES),)
assert observation.dtype == np.int64
assert all(obs >= 0 for obs in observation.tolist())
def test_default_autophase_dict_observation(env: CompilerEnv):
"""Test default autophase dict observation space."""
env.observation_space = "AutophaseDict"
observation = env.reset()
assert isinstance(observation, dict)
assert sorted(observation.keys()) == sorted(AUTOPHASE_FEATURE_NAMES)
assert len(observation.values()) == len(AUTOPHASE_FEATURE_NAMES)
assert all(obs >= 0 for obs in observation.values())
def test_default_programl_observation(env: CompilerEnv):
"""Test default observation space."""
env.observation_space = "Programl"
observation = env.reset()
assert len(observation) > 0
observation, reward, done, info = env.step(0)
assert not done, info
assert len(observation) > 0
assert reward is None
def test_default_reward(env: CompilerEnv):
"""Test default reward space."""
env.reward_space = "runtime"
env.reset()
observation, reward, done, info = env.step(0)
assert not done, info
assert observation is None
assert reward is not None
def test_observations(env: CompilerEnv):
"""Test observation spaces."""
env.reset()
assert len(env.observation["ir"]) > 0
assert all(env.observation["Inst2vec"] >= 0)
assert all(env.observation["Autophase"] >= 0)
assert len(env.observation["Programl"]) > 0
def test_rewards(env: CompilerEnv):
"""Test reward spaces."""
env.reset()
assert env.reward["runtime"] is not None
def test_benchmarks(env: CompilerEnv):
assert list(env.datasets.benchmark_uris()) == [
"benchmark://loops-opt-v0/add",
"benchmark://loops-opt-v0/offsets1",
"benchmark://loops-opt-v0/conv2d",
]
def test_fork(env: CompilerEnv):
env.reset()
env.step(0)
env.step(1)
other_env = env.fork()
try:
assert env.benchmark == other_env.benchmark
assert other_env.actions == [0, 1]
finally:
other_env.close()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
examples/loop_optimizations_service/env_tests.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the loop optimizations environment example."""
import os
import subprocess
import sys
from getpass import getuser
from pathlib import Path
from typing import Iterable, List, Optional
import gym
import numpy as np
import pytest
import compiler_gym
import examples.loop_optimizations_service as loop_optimizations_service
from compiler_gym.datasets import Benchmark, Dataset
from compiler_gym.datasets.uri import BenchmarkUri
from compiler_gym.envs import CompilerEnv
from compiler_gym.envs.llvm.llvm_benchmark import get_system_library_flags
from compiler_gym.errors import SessionNotFound
from compiler_gym.service.client_service_compiler_env import ClientServiceCompilerEnv
from compiler_gym.spaces import Dict, NamedDiscrete, Reward, Scalar, Sequence
from compiler_gym.third_party import llvm
from compiler_gym.third_party.autophase import AUTOPHASE_FEATURE_NAMES
from compiler_gym.util import debug_util as dbg
from compiler_gym.util.registration import register
LOOPS_OPT_PY_SERVICE_BINARY: Path = Path(
"loop_optimizations_service/service_py/loops_opt_service.py"
)
assert LOOPS_OPT_PY_SERVICE_BINARY.is_file(), "Service script not found"
BENCHMARKS_PATH: Path = Path("loop_optimizations_service/benchmarks")
NEURO_VECTORIZER_HEADER: Path = Path(
"../compiler_gym/third_party/neuro-vectorizer/header.h"
)
class RuntimeReward(Reward):
"""An example reward that uses changes in the "runtime" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
name="runtime",
observation_spaces=["runtime"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.baseline_runtime = 0
def reset(self, benchmark: str, observation_view):
del benchmark # unused
self.baseline_runtime = observation_view["runtime"]
def update(self, action, observations, observation_view):
del action # unused
del observation_view # unused
return float(self.baseline_runtime - observations[0]) / self.baseline_runtime
class SizeReward(Reward):
"""An example reward that uses changes in the "size" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
name="size",
observation_spaces=["size"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.baseline_size = 0
def reset(self, benchmark: str, observation_view):
del benchmark # unused
        self.baseline_size = observation_view["size"]
def update(self, action, observations, observation_view):
del action # unused
del observation_view # unused
return float(self.baseline_size - observations[0]) / self.baseline_size
class LoopsDataset(Dataset):
def __init__(self, *args, **kwargs):
super().__init__(
name="benchmark://loops-opt-v2",
license="MIT",
description="Loops optimization dataset",
)
self._benchmarks = {
"/add": Benchmark.from_file_contents(
"benchmark://loops-opt-v2/add",
self.preprocess(BENCHMARKS_PATH / "add.c"),
),
"/offsets1": Benchmark.from_file_contents(
"benchmark://loops-opt-v2/offsets1",
self.preprocess(BENCHMARKS_PATH / "offsets1.c"),
),
"/conv2d": Benchmark.from_file_contents(
"benchmark://loops-opt-v2/conv2d",
self.preprocess(BENCHMARKS_PATH / "conv2d.c"),
),
}
@staticmethod
def preprocess(src: Path) -> bytes:
"""Front a C source through the compiler frontend."""
# TODO(github.com/facebookresearch/CompilerGym/issues/325): We can skip
# this pre-processing, or do it on the service side, once support for
# multi-file benchmarks lands.
cmd = [
str(llvm.clang_path()),
"-E",
"-o",
"-",
"-I",
str(NEURO_VECTORIZER_HEADER.parent),
src,
]
cmd += get_system_library_flags()
return subprocess.check_output(
cmd,
timeout=300,
)
def benchmark_uris(self) -> Iterable[str]:
yield from (f"benchmark://loops-opt-v2{k}" for k in self._benchmarks.keys())
def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark:
if uri.path in self._benchmarks:
return self._benchmarks[uri.path]
else:
raise LookupError("Unknown program name")
# Register the loop optimizations example service on module import. After importing this
# module, the loops-opt-py-v2 environment will be available to gym.make(...).
register(
id="loops-opt-py-v2",
entry_point="compiler_gym.service.client_service_compiler_env:ClientServiceCompilerEnv",
kwargs={
"service": LOOPS_OPT_PY_SERVICE_BINARY,
"rewards": [RuntimeReward(), SizeReward()],
"datasets": [LoopsDataset()],
},
)
@pytest.fixture(scope="function")
def env() -> CompilerEnv:
"""Text fixture that yields an environment."""
with gym.make("loops-opt-py-v0") as env_:
yield env_
@pytest.fixture(scope="module")
def bin() -> Path:
return loop_optimizations_service.LOOPS_OPT_PY_SERVICE_BINARY
def test_invalid_arguments(bin: Path):
"""Test that running the binary with unrecognized arguments is an error."""
def run(cmd):
        with subprocess.Popen(
            cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
        ) as p:
            stdout, stderr = p.communicate(timeout=10)
            return p.returncode, stdout, stderr
returncode, _, stderr = run([str(bin), "foobar"])
assert "ERROR:" in stderr
assert "'foobar'" in stderr
assert returncode == 1
returncode, _, stderr = run([str(bin), "--foobar"])
# C++ and python flag parsing library emit slightly different error
# messages.
assert "ERROR:" in stderr or "FATAL" in stderr
assert "'foobar'" in stderr
assert returncode == 1
def test_versions(env: ClientServiceCompilerEnv):
"""Tests the GetVersion() RPC endpoint."""
assert env.version == compiler_gym.__version__
assert env.compiler_version == "1.0.0"
def test_action_space(env: CompilerEnv):
"""Test that the environment reports the service's action spaces."""
assert env.action_spaces == [
NamedDiscrete(
name="loop-opt",
items=[
"--loop-unroll --unroll-count=2",
"--loop-unroll --unroll-count=4",
"--loop-unroll --unroll-count=8",
"--loop-unroll --unroll-count=16",
"--loop-unroll --unroll-count=32",
"--loop-vectorize -force-vector-width=2",
"--loop-vectorize -force-vector-width=4",
"--loop-vectorize -force-vector-width=8",
"--loop-vectorize -force-vector-width=16",
"--loop-vectorize -force-vector-width=32",
],
)
]
def test_observation_spaces(env: CompilerEnv):
"""Test that the environment reports the service's observation spaces."""
env.reset()
assert env.observation.spaces.keys() == {
"ir",
"Inst2vec",
"Autophase",
"AutophaseDict",
"Programl",
"runtime",
"size",
}
assert env.observation.spaces["ir"].space == Sequence(
name="ir",
size_range=(0, np.iinfo(int).max),
dtype=str,
)
assert env.observation.spaces["Inst2vec"].space == Sequence(
name="Inst2vec",
size_range=(0, np.iinfo(int).max),
scalar_range=Scalar(
name=None,
min=np.iinfo(np.int64).min,
max=np.iinfo(np.int64).max,
dtype=np.int64,
),
dtype=int,
)
assert env.observation.spaces["Autophase"].space == Sequence(
name="Autophase",
size_range=(len(AUTOPHASE_FEATURE_NAMES), len(AUTOPHASE_FEATURE_NAMES)),
scalar_range=Scalar(
name=None,
min=np.iinfo(np.int64).min,
max=np.iinfo(np.int64).max,
dtype=np.int64,
),
dtype=int,
)
assert env.observation.spaces["AutophaseDict"].space == Dict(
name="AutophaseDict",
spaces={
name: Scalar(name=None, min=0, max=np.iinfo(np.int64).max, dtype=np.int64)
for name in AUTOPHASE_FEATURE_NAMES
},
)
assert env.observation.spaces["Programl"].space == Sequence(
name="Programl",
size_range=(0, np.iinfo(int).max),
dtype=str,
)
assert env.observation.spaces["runtime"].space == Scalar(
name="runtime", min=0, max=np.inf, dtype=float
)
assert env.observation.spaces["size"].space == Scalar(
name="size", min=0, max=np.inf, dtype=float
)
def test_reward_spaces(env: CompilerEnv):
"""Test that the environment reports the service's reward spaces."""
env.reset()
assert env.reward.spaces.keys() == {"runtime", "size"}
def test_step_before_reset(env: CompilerEnv):
"""Taking a step() before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
env.step(0)
def test_observation_before_reset(env: CompilerEnv):
"""Taking an observation before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
_ = env.observation["ir"]
def test_reward_before_reset(env: CompilerEnv):
"""Taking a reward before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
_ = env.reward["runtime"]
def test_reset_invalid_benchmark(env: CompilerEnv):
"""Test requesting a specific benchmark."""
with pytest.raises(LookupError) as ctx:
env.reset(benchmark="loops-opt-v2/foobar")
assert str(ctx.value) == "Unknown program name"
def test_invalid_observation_space(env: CompilerEnv):
"""Test error handling with invalid observation space."""
with pytest.raises(LookupError):
env.observation_space = 100
def test_invalid_reward_space(env: CompilerEnv):
"""Test error handling with invalid reward space."""
with pytest.raises(LookupError):
env.reward_space = 100
def test_double_reset(env: CompilerEnv):
"""Test that reset() can be called twice."""
env.reset()
assert env.in_episode
env.reset()
assert env.in_episode
def test_step_out_of_range(env: CompilerEnv):
"""Test error handling with an invalid action."""
env.reset()
with pytest.raises(ValueError) as ctx:
env.step(100)
assert str(ctx.value) == "Out-of-range"
def test_default_ir_observation(env: CompilerEnv):
"""Test default IR observation space."""
env.observation_space = "ir"
observation = env.reset()
assert len(observation) > 0
observation, reward, done, info = env.step(0)
assert not done, info
assert len(observation) > 0
assert reward is None
def test_default_inst2vec_observation(env: CompilerEnv):
"""Test default inst2vec observation space."""
env.observation_space = "Inst2vec"
observation = env.reset()
assert isinstance(observation, np.ndarray)
assert len(observation) >= 0
assert observation.dtype == np.int64
assert all(obs >= 0 for obs in observation.tolist())
def test_default_autophase_observation(env: CompilerEnv):
"""Test default autophase observation space."""
env.observation_space = "Autophase"
observation = env.reset()
assert isinstance(observation, np.ndarray)
assert observation.shape == (len(AUTOPHASE_FEATURE_NAMES),)
assert observation.dtype == np.int64
assert all(obs >= 0 for obs in observation.tolist())
def test_default_autophase_dict_observation(env: CompilerEnv):
"""Test default autophase dict observation space."""
env.observation_space = "AutophaseDict"
observation = env.reset()
assert isinstance(observation, dict)
assert sorted(observation.keys()) == sorted(AUTOPHASE_FEATURE_NAMES)
assert len(observation.values()) == len(AUTOPHASE_FEATURE_NAMES)
assert all(obs >= 0 for obs in observation.values())
def test_default_programl_observation(env: CompilerEnv):
"""Test default observation space."""
env.observation_space = "Programl"
observation = env.reset()
assert len(observation) > 0
observation, reward, done, info = env.step(0)
assert not done, info
assert len(observation) > 0
assert reward is None
def test_default_reward(env: CompilerEnv):
"""Test default reward space."""
env.reward_space = "runtime"
env.reset()
observation, reward, done, info = env.step(0)
assert not done, info
assert observation is None
assert reward is not None
def test_observations(env: CompilerEnv):
"""Test observation spaces."""
env.reset()
assert len(env.observation["ir"]) > 0
assert all(env.observation["Inst2vec"] >= 0)
assert all(env.observation["Autophase"] >= 0)
assert len(env.observation["Programl"]) > 0
def test_rewards(env: CompilerEnv):
"""Test reward spaces."""
env.reset()
assert env.reward["runtime"] is not None
def test_benchmarks(env: CompilerEnv):
assert list(env.datasets.benchmark_uris()) == [
"benchmark://loops-opt-v2/add",
"benchmark://loops-opt-v2/offsets1",
"benchmark://loops-opt-v2/conv2d",
]
def test_fork(env: CompilerEnv):
env.reset()
env.step(0)
env.step(1)
other_env = env.fork()
try:
assert env.benchmark == other_env.benchmark
assert other_env.actions == [0, 1]
finally:
other_env.close()
# Copied from CompilerGym/tests/test_main.py because importing it from here raised errors.
def main(extra_pytest_args: Optional[List[str]] = None, debug_level: int = 1):
dbg.set_debug_level(debug_level)
# Keep test data isolated from user data.
os.environ[
"COMPILER_GYM_SITE_DATA"
] = f"/tmp/compiler_gym_{getuser()}/tests/site_data"
os.environ["COMPILER_GYM_CACHE"] = f"/tmp/compiler_gym_{getuser()}/tests/cache"
pytest_args = sys.argv + [
# Run pytest verbosely to print out test names to provide context in
# case of failures.
"-vv",
# Disable "Module already imported" warnings. See:
# https://docs.pytest.org/en/latest/how-to/usage.html#calling-pytest-from-python-code
"-W",
"ignore:Module already imported:pytest.PytestWarning",
# Disable noisy "Flaky tests passed" messages.
"--no-success-flaky-report",
]
# Support for sharding. If a py_test target has the shard_count attribute
# set (in the range [1,50]), then the pytest-shard module is used to divide
# the tests among the shards. See https://pypi.org/project/pytest-shard/
sharded_test = os.environ.get("TEST_TOTAL_SHARDS")
if sharded_test:
num_shards = int(os.environ["TEST_TOTAL_SHARDS"])
shard_index = int(os.environ["TEST_SHARD_INDEX"])
pytest_args += [f"--shard-id={shard_index}", f"--num-shards={num_shards}"]
else:
pytest_args += ["-p", "no:pytest-shard"]
pytest_args += extra_pytest_args or []
returncode = pytest.main(pytest_args)
# By default pytest will fail with an error if no tests are collected.
# Disable that behavior here (with a warning) since there are legitimate
# cases where we may want to run a test file with no tests in it. For
# example, when running on a continuous integration service where all the
# tests are marked with the @skip_on_ci decorator.
if returncode == pytest.ExitCode.NO_TESTS_COLLECTED.value:
print(
"WARNING: The test suite was empty. Is that intended?",
file=sys.stderr,
)
returncode = 0
sys.exit(returncode)
if __name__ == "__main__":
main(
extra_pytest_args=[
"-W",
"ignore::UserWarning",
]
)
|
CompilerGym-development
|
examples/loop_optimizations_service/env_without_bazel_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Smoke test for examples/loop_optimizations_service/example_without_bazel.py"""
from flaky import flaky
from loop_optimizations_service.example_without_bazel import main
@flaky
def test_example_without_bazel():
main()
|
CompilerGym-development
|
examples/loop_optimizations_service/example_without_bazel_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This module registers the Loop Optimizations CompilerGym environment """
import os
import subprocess
from pathlib import Path
from typing import Iterable
from compiler_gym.datasets import Benchmark, Dataset
from compiler_gym.datasets.uri import BenchmarkUri
from compiler_gym.envs.llvm.llvm_benchmark import get_system_library_flags
from compiler_gym.spaces import Reward
from compiler_gym.third_party import llvm
from compiler_gym.util.registration import register
from compiler_gym.util.runfiles_path import runfiles_path
LOOPS_OPT_PY_SERVICE_BINARY: Path = runfiles_path(
"examples/loop_optimizations_service/service_py/loops-opt-service-py"
)
BENCHMARKS_PATH: Path = runfiles_path("examples/loop_optimizations_service/benchmarks")
if not os.path.exists(BENCHMARKS_PATH):
BENCHMARKS_PATH = Path("loop_optimizations_service/benchmarks")
NEURO_VECTORIZER_HEADER: Path = runfiles_path(
"compiler_gym/third_party/neuro-vectorizer/header.h"
)
if not os.path.exists(NEURO_VECTORIZER_HEADER):
NEURO_VECTORIZER_HEADER: Path = Path(
"../compiler_gym/third_party/neuro-vectorizer/header.h"
)
class RuntimeReward(Reward):
"""An example reward that uses changes in the "runtime" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
name="runtime",
observation_spaces=["runtime"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.baseline_runtime = 0
def reset(self, benchmark: str, observation_view):
del benchmark # unused
self.baseline_runtime = observation_view["runtime"]
def update(self, action, observations, observation_view):
del action # unused
del observation_view # unused
return float(self.baseline_runtime - observations[0]) / self.baseline_runtime
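# Example of the incremental reward computed by RuntimeReward.update() above: with a
# baseline runtime of 10.0 recorded at reset() and a new "runtime" observation of 8.0,
# the returned reward is (10.0 - 8.0) / 10.0 = 0.2, i.e. a 20% improvement.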
class SizeReward(Reward):
"""An example reward that uses changes in the "size" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
name="size",
observation_spaces=["size"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.baseline_size = 0
def reset(self, benchmark: str, observation_view):
del benchmark # unused
        self.baseline_size = observation_view["size"]
def update(self, action, observations, observation_view):
del action # unused
del observation_view # unused
return float(self.baseline_size - observations[0]) / self.baseline_size
class LoopsDataset(Dataset):
def __init__(self, *args, **kwargs):
super().__init__(
name="benchmark://loops-opt-v0",
license="MIT",
description="Loops optimization dataset",
)
self._benchmarks = {
"/add": Benchmark.from_file_contents(
"benchmark://loops-opt-v0/add",
self.preprocess(BENCHMARKS_PATH / "add.c"),
),
"/offsets1": Benchmark.from_file_contents(
"benchmark://loops-opt-v0/offsets1",
self.preprocess(BENCHMARKS_PATH / "offsets1.c"),
),
"/conv2d": Benchmark.from_file_contents(
"benchmark://loops-opt-v0/conv2d",
self.preprocess(BENCHMARKS_PATH / "conv2d.c"),
),
}
@staticmethod
def preprocess(src: Path) -> bytes:
"""Front a C source through the compiler frontend."""
# TODO(github.com/facebookresearch/CompilerGym/issues/325): We can skip
# this pre-processing, or do it on the service side, once support for
# multi-file benchmarks lands.
cmd = [
str(llvm.clang_path()),
"-E",
"-o",
"-",
"-I",
str(NEURO_VECTORIZER_HEADER.parent),
src,
]
cmd += get_system_library_flags()
return subprocess.check_output(
cmd,
timeout=300,
)
def benchmark_uris(self) -> Iterable[str]:
yield from (f"benchmark://loops-opt-v0{k}" for k in self._benchmarks.keys())
def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark:
if uri.path in self._benchmarks:
return self._benchmarks[uri.path]
else:
raise LookupError("Unknown program name")
# Register the loop optimizations example service on module import. After importing this
# module, the loops-opt-py-v0 environment will be available to gym.make(...).
register(
id="loops-opt-py-v0",
entry_point="compiler_gym.service.client_service_compiler_env:ClientServiceCompilerEnv",
kwargs={
"service": LOOPS_OPT_PY_SERVICE_BINARY,
"rewards": [RuntimeReward(), SizeReward()],
"datasets": [LoopsDataset()],
},
)
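# A minimal usage sketch (mirroring examples/loop_optimizations_service/example.py):
# after importing this module the registered environment can be constructed and stepped:
#   import compiler_gym
#   import examples.loop_optimizations_service  # noqa: registers loops-opt-py-v0
#   with compiler_gym.make(
#       "loops-opt-py-v0",
#       benchmark="loops-opt-v0/add",
#       observation_space="AutophaseDict",
#       reward_space="runtime",
#   ) as env:
#       env.reset()
#       observation, reward, done, info = env.step(env.action_space.sample())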
|
CompilerGym-development
|
examples/loop_optimizations_service/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This script uses the loop optimizations service without needing
to use the bazel build system.
Prerequisite:
# In the repo's INSTALL.md, follow the 'Building from source using CMake' instructions with `-DCOMPILER_GYM_BUILD_EXAMPLES=ON` added to the `cmake` command
$ cd <path to source directory>/examples
Usage:
    $ python loop_optimizations_service/example_without_bazel.py
It is equivalent in behavior to the example.py script in this directory.
"""
import logging
import subprocess
from pathlib import Path
from typing import Iterable
import compiler_gym
from compiler_gym.datasets import Benchmark, Dataset
from compiler_gym.datasets.uri import BenchmarkUri
from compiler_gym.envs.llvm.llvm_benchmark import get_system_library_flags
from compiler_gym.spaces import Reward
from compiler_gym.third_party import llvm
from compiler_gym.util.logging import init_logging
from compiler_gym.util.registration import register
LOOPS_OPT_PY_SERVICE_BINARY: Path = Path(
"loop_optimizations_service/service_py/loops_opt_service.py"
)
BENCHMARKS_PATH: Path = Path("loop_optimizations_service/benchmarks")
NEURO_VECTORIZER_HEADER: Path = Path(
"../compiler_gym/third_party/neuro-vectorizer/header.h"
)
class RuntimeReward(Reward):
"""An example reward that uses changes in the "runtime" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
name="runtime",
observation_spaces=["runtime"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.baseline_runtime = 0
def reset(self, benchmark: str, observation_view):
del benchmark # unused
self.baseline_runtime = observation_view["runtime"]
def update(self, action, observations, observation_view):
del action # unused
del observation_view # unused
return float(self.baseline_runtime - observations[0]) / self.baseline_runtime
class SizeReward(Reward):
"""An example reward that uses changes in the "size" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
name="size",
observation_spaces=["size"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.baseline_size = 0
def reset(self, benchmark: str, observation_view):
del benchmark # unused
        self.baseline_size = observation_view["size"]
def update(self, action, observations, observation_view):
del action # unused
del observation_view # unused
return float(self.baseline_size - observations[0]) / self.baseline_size
class LoopsDataset(Dataset):
def __init__(self, *args, **kwargs):
super().__init__(
name="benchmark://loops-opt-v1",
license="MIT",
description="Loops optimization dataset",
)
self._benchmarks = {
"/add": Benchmark.from_file_contents(
"benchmark://loops-opt-v1/add",
self.preprocess(BENCHMARKS_PATH / "add.c"),
),
"/offsets1": Benchmark.from_file_contents(
"benchmark://loops-opt-v1/offsets1",
self.preprocess(BENCHMARKS_PATH / "offsets1.c"),
),
"/conv2d": Benchmark.from_file_contents(
"benchmark://loops-opt-v1/conv2d",
self.preprocess(BENCHMARKS_PATH / "conv2d.c"),
),
}
@staticmethod
def preprocess(src: Path) -> bytes:
"""Front a C source through the compiler frontend."""
# TODO(github.com/facebookresearch/CompilerGym/issues/325): We can skip
# this pre-processing, or do it on the service side, once support for
# multi-file benchmarks lands.
cmd = [
str(llvm.clang_path()),
"-E",
"-o",
"-",
"-I",
str(NEURO_VECTORIZER_HEADER.parent),
src,
] + get_system_library_flags()
return subprocess.check_output(
cmd,
timeout=300,
)
def benchmark_uris(self) -> Iterable[str]:
yield from (f"benchmark://loops-opt-v1{k}" for k in self._benchmarks.keys())
def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark:
if uri.path in self._benchmarks:
return self._benchmarks[uri.path]
else:
raise LookupError("Unknown program name")
# Register the loop optimizations example service on module import. After importing this
# module, the loops-opt-py-v1 environment will be available to gym.make(...).
register(
id="loops-opt-py-v1",
entry_point="compiler_gym.service.client_service_compiler_env:ClientServiceCompilerEnv",
kwargs={
"service": LOOPS_OPT_PY_SERVICE_BINARY,
"rewards": [RuntimeReward(), SizeReward()],
"datasets": [LoopsDataset()],
},
)
def main():
# Use debug verbosity to print out extra logging information.
init_logging(level=logging.DEBUG)
with compiler_gym.make(
"loops-opt-py-v1",
benchmark="loops-opt-v1/add",
observation_space="Programl",
reward_space="runtime",
) as env:
compiler_gym.set_debug_level(4) # TODO: check why this has no effect
observation = env.reset()
print("observation: ", observation)
print()
observation, reward, done, info = env.step(env.action_space.sample())
print("observation: ", observation)
print("reward: ", reward)
print("done: ", done)
print("info: ", info)
env.close()
# TODO: implement write_bitcode(..) or write_ir(..)
# env.write_bitcode("/tmp/output.bc")
if __name__ == "__main__":
main()
|
CompilerGym-development
|
examples/loop_optimizations_service/example_without_bazel.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import compiler_gym
import examples.loop_optimizations_service as loop_optimizations_service # noqa Register environments.
with compiler_gym.make(
"loops-opt-py-v0",
benchmark="loops-opt-v0/add",
observation_space="AutophaseDict",
reward_space="runtime",
) as env:
compiler_gym.set_debug_level(4) # TODO: check why this has no effect
observation = env.reset()
print("observation: ", observation)
print()
observation, reward, done, info = env.step(env.action_space.sample())
print("observation: ", observation)
print("reward: ", reward)
print("done: ", done)
print("info: ", info)
env.close()
# TODO: implement write_bitcode(..) or write_ir(..)
# env.write_bitcode("/tmp/output.bc")
|
CompilerGym-development
|
examples/loop_optimizations_service/example.py
|
#! /usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""An example CompilerGym service in python."""
import logging
import os
import shutil
import subprocess
from pathlib import Path
from typing import Optional, Tuple
import numpy as np
import compiler_gym.third_party.llvm as llvm
from compiler_gym.service import CompilationSession
from compiler_gym.service.proto import (
ActionSpace,
Benchmark,
DictEvent,
DictSpace,
DoubleRange,
Event,
Int64Range,
Int64SequenceSpace,
Int64Tensor,
NamedDiscreteSpace,
ObservationSpace,
Space,
StringSpace,
)
from compiler_gym.service.runtime import create_and_run_compiler_gym_service
from compiler_gym.third_party.autophase import AUTOPHASE_FEATURE_NAMES
from compiler_gym.third_party.inst2vec import Inst2vecEncoder
from compiler_gym.util.commands import run_command
from compiler_gym.util.runfiles_path import runfiles_path # noqa
_INST2VEC_ENCODER = Inst2vecEncoder()
class LoopsOptCompilationSession(CompilationSession):
"""Represents an instance of an interactive compilation session."""
compiler_version: str = "1.0.0"
# The list of actions that are supported by this service.
action_spaces = [
ActionSpace(
name="loop-opt",
space=Space(
named_discrete=NamedDiscreteSpace(
name=[
"--loop-unroll --unroll-count=2",
"--loop-unroll --unroll-count=4",
"--loop-unroll --unroll-count=8",
"--loop-unroll --unroll-count=16",
"--loop-unroll --unroll-count=32",
"--loop-vectorize -force-vector-width=2",
"--loop-vectorize -force-vector-width=4",
"--loop-vectorize -force-vector-width=8",
"--loop-vectorize -force-vector-width=16",
"--loop-vectorize -force-vector-width=32",
]
),
),
)
]
# A list of observation spaces supported by this service. Each of these
# ObservationSpace protos describes an observation space.
observation_spaces = [
ObservationSpace(
name="ir",
space=Space(string_value=StringSpace(length_range=Int64Range(min=0))),
deterministic=True,
platform_dependent=False,
default_observation=Event(string_value=""),
),
ObservationSpace(
name="Inst2vec",
space=Space(
int64_sequence=Int64SequenceSpace(length_range=Int64Range(min=0)),
),
),
ObservationSpace(
name="Autophase",
space=Space(
int64_sequence=Int64SequenceSpace(
length_range=Int64Range(
min=len(AUTOPHASE_FEATURE_NAMES),
max=len(AUTOPHASE_FEATURE_NAMES),
)
),
),
deterministic=True,
platform_dependent=False,
),
ObservationSpace(
name="AutophaseDict",
space=Space(
space_dict=DictSpace(
space={
name: Space(int64_value=Int64Range(min=0))
for name in AUTOPHASE_FEATURE_NAMES
}
)
),
deterministic=True,
platform_dependent=False,
),
ObservationSpace(
name="Programl",
space=Space(string_value=StringSpace(length_range=Int64Range(min=0))),
deterministic=True,
platform_dependent=False,
default_observation=Event(string_value=""),
),
ObservationSpace(
name="runtime",
space=Space(double_value=DoubleRange(min=0)),
deterministic=False,
platform_dependent=True,
default_observation=Event(
double_value=0,
),
),
ObservationSpace(
name="size",
space=Space(double_value=DoubleRange(min=0)),
deterministic=True,
platform_dependent=True,
default_observation=Event(
double_value=0,
),
),
]
def __init__(
self,
working_directory: Path,
action_space: ActionSpace,
benchmark: Benchmark,
use_custom_opt: bool = True,
):
super().__init__(working_directory, action_space, benchmark)
logging.info("Started a compilation session for %s", benchmark.uri)
self._benchmark = benchmark
self._action_space = action_space
self.inst2vec = _INST2VEC_ENCODER
# Resolve the paths to LLVM binaries once now.
self._clang = str(llvm.clang_path())
self._llc = str(llvm.llc_path())
self._llvm_diff = str(llvm.llvm_diff_path())
self._opt = str(llvm.opt_path())
        # LLVM's opt does not always enforce the loop optimization options passed as CLI arguments.
        # Hence, we created our own executable with custom unrolling and vectorization passes in
        # examples/loop_optimizations_service/opt_loops that enforces the unrolling and vectorization
        # factors passed on its CLI.
        # If self._use_custom_opt is True, use our custom executable; otherwise use LLVM's opt.
self._use_custom_opt = use_custom_opt
# Dump the benchmark source to disk.
self._src_path = str(self.working_dir / "benchmark.c")
with open(self.working_dir / "benchmark.c", "wb") as f:
f.write(benchmark.program.contents)
self._llvm_path = str(self.working_dir / "benchmark.ll")
self._llvm_before_path = str(self.working_dir / "benchmark.previous.ll")
self._obj_path = str(self.working_dir / "benchmark.o")
self._exe_path = str(self.working_dir / "benchmark.exe")
run_command(
[
self._clang,
"-Xclang",
"-disable-O0-optnone",
"-emit-llvm",
"-S",
self._src_path,
"-o",
self._llvm_path,
],
timeout=30,
)
def apply_action(self, action: Event) -> Tuple[bool, Optional[ActionSpace], bool]:
num_choices = len(self._action_space.space.named_discrete.name)
        # This is the index into the action space's flag strings that the user
        # selected, e.g. 0 -> "--loop-unroll --unroll-count=2".
choice_index = action.int64_value
if choice_index < 0 or choice_index >= num_choices:
raise ValueError("Out-of-range")
args = self._action_space.space.named_discrete.name[choice_index]
logging.info(
"Applying action %d, equivalent command-line arguments: '%s'",
choice_index,
args,
)
args = args.split()
# make a copy of the LLVM file to compare its contents after applying the action
shutil.copyfile(self._llvm_path, self._llvm_before_path)
# apply action
if self._use_custom_opt:
# our custom opt-loops has an additional `f` at the beginning of each argument
for i, arg in enumerate(args):
# convert --<argument> to --f<argument>
arg = arg[0:2] + "f" + arg[2:]
args[i] = arg
run_command(
[
os.path.join(os.path.dirname(__file__), "../opt_loops/opt_loops"),
self._llvm_path,
*args,
"-S",
"-o",
self._llvm_path,
],
timeout=30,
)
else:
run_command(
[
self._opt,
*args,
self._llvm_path,
"-S",
"-o",
self._llvm_path,
],
timeout=30,
)
# compare the IR files to check if the action had an effect
try:
subprocess.check_call(
[self._llvm_diff, self._llvm_before_path, self._llvm_path],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
timeout=60,
)
action_had_no_effect = True
except subprocess.CalledProcessError:
action_had_no_effect = False
end_of_session = False # TODO: this needs investigation: for how long can we apply loop optimizations? e.g., detect if there are no more loops in the IR? or look at the metadata?
new_action_space = None
return (end_of_session, new_action_space, action_had_no_effect)
@property
def ir(self) -> str:
with open(self._llvm_path) as f:
return f.read()
def get_observation(self, observation_space: ObservationSpace) -> Event:
logging.info("Computing observation from space %s", observation_space.name)
if observation_space.name == "ir":
return Event(string_value=self.ir)
elif observation_space.name == "Inst2vec":
Inst2vec_str = self.inst2vec.preprocess(self.ir)
Inst2vec_ids = self.inst2vec.encode(Inst2vec_str)
return Event(
int64_tensor=Int64Tensor(shape=[len(Inst2vec_ids)], value=Inst2vec_ids)
)
elif observation_space.name == "Autophase":
Autophase_str = run_command(
[
os.path.join(
os.path.dirname(__file__),
"../../../compiler_gym/third_party/autophase/compute_autophase-prelinked",
),
self._llvm_path,
],
timeout=30,
)
Autophase_list = list(map(int, list(Autophase_str.split(" "))))
return Event(
int64_tensor=Int64Tensor(
shape=[len(Autophase_list)], value=Autophase_list
)
)
elif observation_space.name == "AutophaseDict":
Autophase_str = run_command(
[
os.path.join(
os.path.dirname(__file__),
"../../../compiler_gym/third_party/autophase/compute_autophase-prelinked",
),
self._llvm_path,
],
timeout=30,
)
Autophase_list = list(map(int, list(Autophase_str.split(" "))))
Autophase_dict = {
name: Event(int64_value=val)
for name, val in zip(AUTOPHASE_FEATURE_NAMES, Autophase_list)
}
return Event(event_dict=DictEvent(event=Autophase_dict))
elif observation_space.name == "Programl":
Programl_str = run_command(
[
os.path.join(
os.path.dirname(__file__),
"../../../compiler_gym/third_party/programl/compute_programl",
),
self._llvm_path,
],
timeout=30,
)
return Event(string_value=Programl_str)
elif observation_space.name == "runtime":
# compile LLVM to object file
run_command(
[
self._llc,
"-filetype=obj",
self._llvm_path,
"-o",
self._obj_path,
],
timeout=30,
)
# build object file to binary
run_command(
[
self._clang,
self._obj_path,
"-O3",
"-o",
self._exe_path,
],
timeout=30,
)
            # TODO: add documentation that benchmarks need to print out their execution time.
            # Run 5 times and take the average of the middle 3 measurements.
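            # For example, if the five measured times were [12, 10, 11, 50, 9], the
            # sorted middle three are [10, 11, 12] and the reported value is 11.0.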
exec_times = []
for _ in range(5):
stdout = run_command(
[self._exe_path],
timeout=30,
)
try:
exec_times.append(int(stdout))
except ValueError:
raise ValueError(
f"Error in parsing execution time from output of command\n"
f"Please ensure that the source code of the benchmark measures execution time and prints to stdout\n"
f"Stdout of the program: {stdout}"
)
exec_times = np.sort(exec_times)
avg_exec_time = np.mean(exec_times[1:4])
return Event(double_value=avg_exec_time)
elif observation_space.name == "size":
# compile LLVM to object file
run_command(
[
self._llc,
"-filetype=obj",
self._llvm_path,
"-o",
self._obj_path,
],
timeout=30,
)
# build object file to binary
run_command(
[
self._clang,
self._obj_path,
"-Oz",
"-o",
self._exe_path,
],
timeout=30,
)
binary_size = os.path.getsize(self._exe_path)
return Event(double_value=binary_size)
else:
raise KeyError(observation_space.name)
if __name__ == "__main__":
create_and_run_compiler_gym_service(LoopsOptCompilationSession)
|
CompilerGym-development
|
examples/loop_optimizations_service/service_py/loops_opt_service.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from enum import Enum
from pathlib import Path
from threading import Lock
from typing import Union
import numpy as np
from llvm_autotuning.just_keep_going_env import JustKeepGoingEnv
import compiler_gym
from compiler_gym.datasets import Benchmark
from compiler_gym.envs import LlvmEnv
from compiler_gym.wrappers import RuntimePointEstimateReward
logger = logging.getLogger(__name__)
_RUNTIME_LOCK = Lock()
class OptimizationTarget(str, Enum):
CODESIZE = "codesize"
BINSIZE = "binsize"
RUNTIME = "runtime"
@property
def optimization_space_enum_name(self) -> str:
return {
OptimizationTarget.CODESIZE: "IrInstructionCount",
OptimizationTarget.BINSIZE: "ObjectTextSizeBytes",
OptimizationTarget.RUNTIME: "Runtime",
}[self.value]
def make_env(self, benchmark: Union[str, Benchmark]) -> LlvmEnv:
env: LlvmEnv = compiler_gym.make("llvm-v0")
# TODO(cummins): This does not work with custom benchmarks, as the URI
# will not be known to the new environment.
if str(benchmark).startswith("file:///"):
benchmark = env.make_benchmark(Path(benchmark[len("file:///") :]))
env.benchmark = benchmark
if self.value == OptimizationTarget.CODESIZE:
env.reward_space = "IrInstructionCountOz"
elif self.value == OptimizationTarget.BINSIZE:
env.reward_space = "ObjectTextSizeOz"
elif self.value == OptimizationTarget.RUNTIME:
env = RuntimePointEstimateReward(env, warmup_count=0, runtime_count=3)
else:
assert False, f"Unknown OptimizationTarget: {self.value}"
# Wrap the env to ignore errors during search.
env = JustKeepGoingEnv(env)
return env
def final_reward(self, env: LlvmEnv, runtime_count: int = 30) -> float:
"""Compute the final reward of the environment.
Note that this may modify the environment state. You should call
:code:`reset()` before continuing to use the environment after this.
"""
# Reapply the environment state in a retry loop.
actions = list(env.actions)
env.reset()
for i in range(1, 5 + 1):
_, _, done, info = env.multistep(actions)
if not done:
break
logger.warning(
"Attempt %d to apply actions during final reward failed: %s",
i,
info.get("error_details"),
)
else:
raise ValueError("Failed to replay environment's actions")
if self.value == OptimizationTarget.CODESIZE:
return env.observation.IrInstructionCountOz() / max(
env.observation.IrInstructionCount(), 1
)
if self.value == OptimizationTarget.BINSIZE:
return env.observation.ObjectTextSizeOz() / max(
env.observation.ObjectTextSizeBytes(), 1
)
if self.value == OptimizationTarget.RUNTIME:
with _RUNTIME_LOCK:
with compiler_gym.make("llvm-v0", benchmark=env.benchmark) as new_env:
new_env.reset()
new_env.runtime_observation_count = runtime_count
new_env.runtime_warmup_count = 0
new_env.apply(env.state)
final_runtimes = new_env.observation.Runtime()
assert len(final_runtimes) == runtime_count
new_env.reset()
new_env.send_param("llvm.apply_baseline_optimizations", "-O3")
o3_runtimes = new_env.observation.Runtime()
assert len(o3_runtimes) == runtime_count
logger.debug("O3 runtimes: %s", o3_runtimes)
logger.debug("Final runtimes: %s", final_runtimes)
speedup = np.median(o3_runtimes) / max(np.median(final_runtimes), 1e-12)
logger.debug("Speedup: %.4f", speedup)
return speedup
assert False, f"Unknown OptimizationTarget: {self.value}"
|
CompilerGym-development
|
examples/llvm_autotuning/optimization_target.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
CompilerGym-development
|
examples/llvm_autotuning/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
from pathlib import Path
from typing import Dict, Iterable, List
import gym
import pandas as pd
import yaml
from llvm_autotuning.autotuners import Autotuner
from llvm_autotuning.benchmarks import Benchmarks
from pydantic import BaseModel, Field
from compiler_gym import CompilerEnvStateWriter
from compiler_gym.util.executor import Executor
logger = logging.getLogger(__name__)
class Experiment(BaseModel):
"""The composition of a full autotuning experiment, comprising autotuner,
executor, and programs to tune.
"""
# === Start of fields list. ===
executor: Executor
"""The execution environment to use for training / testing jobs."""
autotuner: Autotuner
benchmarks: Benchmarks
"""The set of benchmarks to test on."""
working_directory: Path
"""The working directory where logs and other artifacts are written to."""
experiment: str = "unnamed_experiment"
"""A logical name for this experiment. This is used for naming RLlib
trials.
"""
num_replicas: int = Field(default=1, ge=1)
"""The number of duplicate jobs to run. E.g. for training, this will train
:code:`n` independent models in trials that share the same working
directory.
"""
seed: int = 0xCC
"""A numeric random seed."""
# === Start of public API. ===
def run(self) -> None:
"""Run the experiment."""
# The working directory may already have been created by hydra, so we
# will check for the config.json file below to see if this experiment
# has already run.
self.working_directory.mkdir(parents=True, exist_ok=True)
# Dump the parsed config to file.
assert not self.config_path.is_file(), (
f"Refusing to overwrite file: {self.config_path}. "
"Is the working directory clean?"
)
with open(self.config_path, "w") as f:
print(json.dumps(json.loads(self.json()), indent=2), file=f)
logger.info("Wrote %s", self.config_path)
results_num = 0
with self.executor.get_executor(
logs_dir=self.working_directory / "logs"
) as executor:
with gym.make("llvm-v0") as env:
for replica_num in range(self.num_replicas):
for benchmark in self.benchmarks.benchmark_uris_iterator(env):
results_num += 1
results_path = (
self.working_directory / f"results-{results_num:03d}.csv"
)
errors_path = (
self.working_directory / f"errors-{results_num:03d}.json"
)
executor.submit(
_experiment_worker,
autotuner=self.autotuner,
benchmark=benchmark,
results_path=results_path,
errors_path=errors_path,
seed=self.seed + replica_num,
)
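    # Each submitted job writes a results-NNN.csv file on success or an errors-NNN.json
    # file on failure; the dataframe and errors properties below aggregate these files
    # when an experiment is read back from its working directory.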
def yaml(self) -> str:
"""Serialize the model configuration to a YAML string."""
# We can't directly dump the dict() representation because we need to
# simplify the types first, so we go via JSON.
simplified_data = json.loads(self.json())
return yaml.dump(simplified_data)
@property
def config_path(self) -> Path:
return self.working_directory / "config.json"
@property
def results_paths(self) -> Iterable[Path]:
"""Return an iterator over results files."""
for path in self.working_directory.iterdir():
if path.is_file() and path.name.startswith("results-"):
yield path
@property
def errors(self) -> Iterable[Dict[str, str]]:
"""Return an iterator over errors.
An error is a dictionary with keys: "benchmark", "error_type", and
"error_message".
"""
for path in self.working_directory.iterdir():
if path.is_file() and path.name.startswith("errors-"):
with open(path, "r") as f:
yield json.load(f)
@property
def configuration_number(self) -> str:
return self.working_directory.name.split("-")[-1]
@property
def timestamp(self) -> str:
return f"{self.working_directory.parent.parent.name}/{self.working_directory.parent.name}"
@property
def dataframe(self) -> pd.DataFrame:
"""Return the results as a dataframe."""
dfs = []
for path in self.results_paths:
dfs.append(pd.read_csv(path))
if not dfs:
return pd.DataFrame()
return pd.concat(dfs)
@classmethod
def from_logsdir(cls, working_directory: Path) -> List["Experiment"]:
"""Reconstruct experiments by recursively reading from logs directories."""
def find_config_dumps(dir: Path) -> Iterable[Path]:
"""Attempt to locate config files recursively in directories."""
if (dir / "config.json").is_file():
yield dir / "config.json"
return
for entry in dir.iterdir():
if entry.is_dir():
yield from find_config_dumps(entry)
experiments: List[Experiment] = []
for config_path in find_config_dumps(working_directory):
with open(config_path) as f:
try:
config = json.load(f)
config["working_directory"] = config_path.parent
experiments.append(cls(**config))
except json.decoder.JSONDecodeError as e:
logger.warning(
"Failed to parse JSON for model file %s: %s", config, e
)
continue
return experiments
# === Start of implementation details. ===
class Config:
validate_assignment = True
def _experiment_worker(
autotuner: Autotuner,
benchmark: str,
results_path: Path,
errors_path: Path,
seed: int,
) -> None:
try:
with autotuner.optimization_target.make_env(benchmark) as env:
env.seed(seed)
env.action_space.seed(seed)
state = autotuner(env, seed=seed)
except Exception as e: # pylint: disable=broad-except
logger.warning("Autotuner failed on benchmark %s: %s", benchmark, e)
with open(errors_path, "w") as f:
json.dump(
{
"benchmark": benchmark,
"error_type": type(e).__name__,
"error_message": str(e),
},
f,
)
return
logger.info("State %s", state)
with CompilerEnvStateWriter(open(results_path, "w")) as writer:
writer.write_state(state, flush=True)
|
CompilerGym-development
|
examples/llvm_autotuning/experiment.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import hydra
from llvm_autotuning.experiment import Experiment
from omegaconf import DictConfig, OmegaConf
from pydantic import ValidationError
from compiler_gym.util.shell_format import indent
@hydra.main(config_path="config", config_name="default")
def main(config: DictConfig) -> None:
# Parse the config to pydantic models.
OmegaConf.set_readonly(config, True)
try:
model: Experiment = Experiment(working_directory=os.getcwd(), **config)
except ValidationError as e:
print(e, file=sys.stderr)
sys.exit(1)
print("Experiment configuration:")
print()
print(indent(model.yaml()))
print()
model.run()
print()
print("Results written to", model.working_directory)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
examples/llvm_autotuning/tune.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from compiler_gym.wrappers import CompilerEnvWrapper
logger = logging.getLogger(__name__)
# TODO(github.com/facebookresearch/CompilerGym/issues/469): Once step() and
# reset() no longer raise exceptions, this wrapper class can be removed.
class JustKeepGoingEnv(CompilerEnvWrapper):
"""This wrapper class prevents the step() and close() methods from raising
an exception.
Just keep swimming ...
|\\ o
| \\ o
|\\ / .\\ o
| | (
|/\\ /
| /
|/
Usage:
>>> env = compiler_gym.make("llvm-v0")
>>> env = JustKeepGoingEnv(env)
# enjoy ...
"""
def step(self, *args, **kwargs):
try:
return self.env.step(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
logger.warning("step() error: %s", e)
# Return "null" observation / reward.
default_observation = (
self.env.observation_space_spec.default_value
if self.env.observation_space
else None
)
default_reward = (
float(
self.env.reward_space_spec.reward_on_error(self.env.episode_reward)
)
if self.env.reward_space
else None
)
self.close()
return default_observation, default_reward, True, {"error_details": str(e)}
def reset(self, *args, **kwargs):
for _ in range(5):
try:
return super().reset(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
logger.warning("reset() error, retrying: %s", e)
self.close()
return self.reset(*args, **kwargs)
# No more retries.
return super().reset(*args, **kwargs)
def close(self):
try:
self.env.close()
except Exception as e: # pylint: disable=broad-except
logger.warning("Ignoring close() error: %s", e)
|
CompilerGym-development
|
examples/llvm_autotuning/just_keep_going_env.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
from pathlib import Path
from typing import List
import pandas as pd
from llvm_autotuning.experiment import Experiment
from pydantic import ValidationError
from tabulate import tabulate
from typer import Typer
from compiler_gym.util.statistics import geometric_mean
app = Typer()
def experiments_from_paths(log_dirs: List[Path]) -> List[Experiment]:
experiments: List[Experiment] = []
for path in log_dirs:
try:
experiments += Experiment.from_logsdir(Path(path).expanduser())
except ValidationError as e:
print(e, file=sys.stderr)
sys.exit(1)
return experiments
@app.command()
def info(
log_dirs: List[Path] = ["~/logs/compiler_gym/llvm_autotuning"],
all_runs: bool = False,
group_by_working_directory: bool = False,
only_nonzero_reward: bool = False,
):
experiments = experiments_from_paths(log_dirs)
results = []
for experiment in experiments:
df = experiment.dataframe
# Exclude runs where reward was zero, used for pruning false results if
# the environment is flaky or can fail.
if only_nonzero_reward:
df = df[df.reward != 0]
if not len(df):
continue
df.to_csv(experiment.working_directory / "results.csv", index=False)
walltimes = df[["benchmark", "walltime"]].groupby("benchmark").mean()
rewards = df[["benchmark", "reward"]].groupby("benchmark").agg(geometric_mean)
num_results = len(df)
num_benchmarks = len(set(df["benchmark"]))
df = pd.concat((walltimes, rewards), axis=1)
avg_walltime = df["walltime"].mean()
avg_reward = geometric_mean(df["reward"])
df = pd.concat(
(
df,
pd.DataFrame(
[{"walltime": avg_walltime, "reward": avg_reward}],
index=["Average"],
),
)
)
df = df.reset_index()
df.insert(0, "config", experiment.configuration_number)
df.insert(0, "timestamp", experiment.timestamp)
df.insert(0, "experiment", experiment.experiment)
if all_runs:
print(experiment.working_directory)
print(tabulate(df, showindex=False, headers="keys", tablefmt="grid"))
print()
results.append(
{
"working_directory": experiment.working_directory,
"experiment": experiment.experiment,
"timestamp": experiment.timestamp,
"config": experiment.configuration_number,
"num_benchmarks": num_benchmarks,
"num_results": num_results,
"walltime": avg_walltime,
"reward": avg_reward,
}
)
df = pd.DataFrame(results)
if not len(df):
print("No results")
return
print("---------------------------------------")
print("Aggregate over experiments:")
if group_by_working_directory:
df = df.groupby(["working_directory"]).mean()
else:
df = df.groupby(["experiment", "timestamp", "config"]).mean()
# Cast float back to int.
df["num_benchmarks"] = [int(x) for x in df["num_benchmarks"]]
df["num_results"] = [int(x) for x in df["num_results"]]
# Better column names.
df = df.rename(columns={"reward": "geomean_reward", "walltime": "walltime (s)"})
pd.set_option("display.max_rows", None)
print(df)
if __name__ == "__main__":
app()
|
CompilerGym-development
|
examples/llvm_autotuning/info.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from itertools import islice
from typing import Iterable, List, Union
from pydantic import BaseModel, Field, root_validator, validator
from compiler_gym.datasets import Benchmark, BenchmarkUri
from compiler_gym.envs.llvm import LlvmEnv
class BenchmarksEntry(BaseModel):
"""This class represents a single entry in a Benchmarks class."""
# === Start of fields list. ===
dataset: str = Field(default=None, allow_mutation=False)
"""The name of a dataset to iterate over. If set, benchmarks are produced
by iterating over this dataset in order. If not set, the :code:`uris` list
must be provided.
"""
uris: List[str] = Field(default=[], allow_mutation=False)
"""A list of URIs to iterate over."""
max_benchmarks: int = Field(default=0, ge=0, allow_mutation=False)
"""The maximum number of benchmarks to yield from the given dataset or URIs
list.
"""
benchmarks_start_at: int = Field(default=0, ge=0, allow_mutation=False)
"""An offset into the dataset or URIs list to start iterating from.
Note that using very large offsets will slow things down as the
implementation still has to iterate over the excluded benchmarks.
"""
# === Start of public API. ===
def benchmarks_iterator(self, env: LlvmEnv) -> Iterable[Benchmark]:
"""Return an iterator over the benchmarks."""
return self._benchmark_iterator(env)
def benchmark_uris_iterator(self, env: LlvmEnv) -> Iterable[str]:
"""Return an iterator over the URIs of the benchmarks."""
return self._benchmark_iterator(env, uris=True)
# === Start of implementation details. ===
@root_validator
def check_that_either_dataset_or_uris_is_set(cls, values):
assert values.get("dataset") or values.get(
"uris"
), "Neither dataset or uris given"
return values
@validator("uris", pre=True)
def validate_uris(cls, value, *, values, **kwargs):
del kwargs
del values
for uri in value:
uri = BenchmarkUri.from_string(uri)
assert uri.scheme and uri.dataset, f"Invalid benchmark URI: {uri}"
return list(value)
def _benchmark_iterator(
self, env: LlvmEnv, uris: bool = False
) -> Union[Iterable[Benchmark], Iterable[str]]:
return (
self._uris_iterator(env, uris)
if self.uris
else self._dataset_iterator(env, uris)
)
def _uris_iterator(
self, env: LlvmEnv, uris: bool = False
) -> Union[Iterable[Benchmark], Iterable[str]]:
"""Iterate from a URIs list."""
start = self.benchmarks_start_at
n = len(self.uris)
if self.max_benchmarks:
            n = min(self.max_benchmarks, n)
if uris:
# Shortcut in case we already have a list of URIs that we can slice
# rather than iterating over.
return iter(self.uris[start:n])
return islice((env.datasets.benchmark(u) for u in self.uris), start, start + n)
def _dataset_iterator(
self, env: LlvmEnv, uris: bool = False
) -> Union[Iterable[Benchmark], Iterable[str]]:
"""Iterate from a dataset name."""
dataset = env.datasets[self.dataset]
dataset.install()
n = dataset.size or self.max_benchmarks # dataset.size == 0 for inf
if self.max_benchmarks:
n = min(self.max_benchmarks, n)
start = self.benchmarks_start_at
iterator = dataset.benchmark_uris if uris else dataset.benchmarks
return islice(iterator(), start, start + n)
class Config:
validate_assignment = True
class Benchmarks(BaseModel):
"""Represents a set of benchmarks to use for training/validation/testing.
There are two ways of describing benchmarks, either as a list of benchmark
URIs:
benchmarks:
uris:
- benchmark://cbench-v1/adpcm
- benchmark://cbench-v1/ghostscript
Or as a dataset to iterate over:
benchmarks:
dataset: benchmark://cbench-v1
max_benchmarks: 20
"""
benchmarks: List[BenchmarksEntry]
# === Start of public API. ===
def benchmarks_iterator(self, env: LlvmEnv) -> Iterable[Benchmark]:
"""Return an iterator over the benchmarks."""
for bm in self.benchmarks:
yield from bm.benchmarks_iterator(env)
def benchmark_uris_iterator(self, env: LlvmEnv) -> Iterable[str]:
"""Return an iterator over the URIs of the benchmarks."""
for bm in self.benchmarks:
yield from bm.benchmark_uris_iterator(env)
# === Start of implementation details. ===
@validator("benchmarks", pre=True)
def validate_benchmarks(cls, value) -> List[BenchmarksEntry]:
return [BenchmarksEntry(**v) for v in value]
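# A minimal construction sketch mirroring the YAML examples in the Benchmarks docstring
# above (values are illustrative):
#   benchmarks = Benchmarks(
#       benchmarks=[
#           {"uris": ["benchmark://cbench-v1/adpcm"]},
#           {"dataset": "benchmark://cbench-v1", "max_benchmarks": 20},
#       ]
#   )
#   # then, given an LlvmEnv instance `env`:
#   # uris = list(benchmarks.benchmark_uris_iterator(env))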
|
CompilerGym-development
|
examples/llvm_autotuning/benchmarks.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integration tests for the LLVM autotuners."""
from llvm_autotuning.autotuners import Autotuner
import compiler_gym
def test_autotune():
with compiler_gym.make("llvm-v0", reward_space="IrInstructionCount") as env:
env.reset(benchmark="benchmark://cbench-v1/crc32")
autotuner = Autotuner(
algorithm="random",
optimization_target="codesize",
search_time_seconds=3,
)
result = autotuner(env)
print(result)
assert result.benchmark == "benchmark://cbench-v1/crc32"
assert result.walltime >= 3
assert result.commandline == env.action_space.to_string(env.actions)
assert env.episode_reward >= 0
assert env.benchmark == "benchmark://cbench-v1/crc32"
assert env.reward_space == "IrInstructionCount"
|
CompilerGym-development
|
examples/llvm_autotuning/autotuners/random_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integration tests for the LLVM autotuners."""
import pytest
from llvm_autotuning.autotuners import Autotuner
import compiler_gym
@pytest.mark.skip(reason="Workaround from pytest: I/O operation on closed file")
def test_autotune():
with compiler_gym.make("llvm-v0", reward_space="IrInstructionCount") as env:
env.reset(benchmark="benchmark://cbench-v1/crc32")
autotuner = Autotuner(
algorithm="opentuner_ga",
optimization_target="codesize",
search_time_seconds=3,
)
result = autotuner(env)
print(result)
assert result.benchmark == "benchmark://cbench-v1/crc32"
assert result.walltime >= 3
assert result.commandline == env.action_space.to_string(env.actions)
assert env.episode_reward >= 0
assert env.benchmark == "benchmark://cbench-v1/crc32"
assert env.reward_space == "IrInstructionCount"
|
CompilerGym-development
|
examples/llvm_autotuning/autotuners/opentuner_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from time import time
def greedy(env, search_time_seconds: int, **kwargs) -> None:
"""A greedy search policy.
At each step, the policy evaluates all possible actions and selects the
action with the highest reward. The search stops when no action produces a
positive reward.
:param env: The environment to optimize.
"""
def eval_action(env, action: int):
with env.fork() as fkd:
return (fkd.step(action)[1], action)
end_time = time() + search_time_seconds
while time() < end_time:
best = max(eval_action(env, action) for action in range(env.action_space.n))
if best[0] <= 0 or env.step(best[1])[2]:
return
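# A minimal usage sketch via the Autotuner wrapper defined in autotuners/__init__.py
# (the search time below is illustrative):
#   autotuner = Autotuner(
#       algorithm="greedy",
#       optimization_target="codesize",
#       search_time_seconds=60,
#   )
#   with autotuner.optimization_target.make_env("benchmark://cbench-v1/crc32") as env:
#       state = autotuner(env)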
|
CompilerGym-development
|
examples/llvm_autotuning/autotuners/greedy.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This modules defines a class for describing LLVM autotuners."""
import tempfile
from pathlib import Path
from typing import Any, Dict
from llvm_autotuning.autotuners.greedy import greedy # noqa autotuner
from llvm_autotuning.autotuners.nevergrad_ import nevergrad # noqa autotuner
from llvm_autotuning.autotuners.opentuner_ import opentuner_ga # noqa autotuner
from llvm_autotuning.autotuners.random_ import random # noqa autotuner
from llvm_autotuning.optimization_target import OptimizationTarget
from pydantic import BaseModel, validator
from compiler_gym.compiler_env_state import CompilerEnvState
from compiler_gym.envs import CompilerEnv
from compiler_gym.util.capture_output import capture_output
from compiler_gym.util.runfiles_path import transient_cache_path
from compiler_gym.util.temporary_working_directory import temporary_working_directory
from compiler_gym.util.timer import Timer
class Autotuner(BaseModel):
"""This class represents an instance of an autotuning algorithm.
After instantiating from a config dict, instances of this class can be used
to tune CompilerEnv instances:
>>> autotuner = Autotuner(
algorithm="greedy",
optimization_target="codesize",
search_time_seconds=1800,
)
>>> env = compiler_gym.make("llvm-v0")
>>> autotuner(env)
"""
algorithm: str
"""The name of the autotuner algorithm."""
optimization_target: OptimizationTarget
"""The target that the autotuner is optimizing for."""
search_time_seconds: int
"""The search budget of the autotuner."""
algorithm_config: Dict[str, Any] = {}
"""An optional dictionary of keyword arguments for the autotuner function."""
@property
def autotune(self):
"""Return the autotuner function for this algorithm.
An autotuner function takes a single CompilerEnv argument and optional
keyword configuration arguments (determined by algorithm_config) and
tunes the environment, returning nothing.
"""
try:
return globals()[self.algorithm]
except KeyError as e:
raise ValueError(
f"Unknown autotuner: {self.algorithm}.\n"
f"Make sure the {self.algorithm}() function definition is available "
"in the global namespace of {__file__}."
) from e
@property
def autotune_kwargs(self) -> Dict[str, Any]:
"""Get the keyword arguments dictionary for the autotuner."""
kwargs = {
"optimization_target": self.optimization_target,
"search_time_seconds": self.search_time_seconds,
}
kwargs.update(self.algorithm_config)
return kwargs
def __call__(self, env: CompilerEnv, seed: int = 0xCC) -> CompilerEnvState:
"""Autotune the given environment.
:param env: The environment to autotune.
:param seed: The random seed for the autotuner.
:returns: A CompilerEnvState tuple describing the autotuning result.
"""
# Run the autotuner in a temporary working directory and capture the
# stdout/stderr.
with tempfile.TemporaryDirectory(
dir=transient_cache_path("."), prefix="autotune-"
) as tmpdir:
with temporary_working_directory(Path(tmpdir)):
with capture_output():
with Timer() as timer:
self.autotune(env, seed=seed, **self.autotune_kwargs)
return CompilerEnvState(
benchmark=env.benchmark.uri,
commandline=env.action_space.to_string(env.actions),
walltime=timer.time,
reward=self.optimization_target.final_reward(env),
)
# === Start of implementation details. ===
@validator("algorithm_config", pre=True)
def validate_algorithm_config(cls, value) -> Dict[str, Any]:
return value or {}
|
CompilerGym-development
|
examples/llvm_autotuning/autotuners/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from tempfile import TemporaryDirectory
from llvm_autotuning.optimization_target import OptimizationTarget
from compiler_gym.envs import CompilerEnv
from compiler_gym.random_search import random_search as lib_random_search
from compiler_gym.util.runfiles_path import transient_cache_path
def random(
env: CompilerEnv,
optimization_target: OptimizationTarget,
search_time_seconds: int,
patience: int = 350,
**kwargs
) -> None:
"""Run a random search on the environment.
:param env: The environment to optimize.
:param optimization_target: The target to optimize for.
:param search_time_seconds: The total search time.
:param patience: The number of steps to search without an improvement before
resetting to a new trajectory.
"""
with TemporaryDirectory(
dir=transient_cache_path("."), prefix="autotune-"
) as tmpdir:
final_env = lib_random_search(
make_env=lambda: optimization_target.make_env(env.benchmark),
outdir=tmpdir,
total_runtime=search_time_seconds,
patience=patience,
nproc=1,
)
env.apply(final_env.state)
final_env.close()
|
CompilerGym-development
|
examples/llvm_autotuning/autotuners/random_.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from functools import lru_cache
from time import time
from typing import Tuple
import nevergrad as ng
from llvm_autotuning.optimization_target import OptimizationTarget
from compiler_gym.envs import CompilerEnv
from compiler_gym.util.gym_type_hints import ActionType
def nevergrad(
env: CompilerEnv,
optimization_target: OptimizationTarget,
search_time_seconds: int,
seed: int,
episode_length: int = 100,
optimizer: str = "DiscreteLenglerOnePlusOne",
**kwargs
) -> None:
"""Optimize an environment using nevergrad.
Nevergrad is a gradient-free optimization platform that provides
implementations of various black box optimizations techniques:
https://facebookresearch.github.io/nevergrad/
"""
if optimization_target == OptimizationTarget.RUNTIME:
def calculate_negative_reward(actions: Tuple[ActionType]) -> float:
env.reset()
env.multistep(actions)
return -env.episode_reward
else:
# Only cache the deterministic non-runtime rewards.
@lru_cache(maxsize=int(1e4))
def calculate_negative_reward(actions: Tuple[ActionType]) -> float:
env.reset()
env.multistep(actions)
return -env.episode_reward
params = ng.p.Choice(
choices=range(env.action_space.n),
repetitions=episode_length,
deterministic=True,
)
params.random_state.seed(seed)
optimizer_class = getattr(ng.optimizers, optimizer)
optimizer = optimizer_class(parametrization=params, budget=1, num_workers=1)
end_time = time() + search_time_seconds
while time() < end_time:
x = optimizer.ask()
optimizer.tell(x, calculate_negative_reward(x.value))
# Get best solution and replay it.
recommendation = optimizer.provide_recommendation()
env.reset()
env.multistep(recommendation.value)
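# A minimal configuration sketch: when driven through the Autotuner wrapper, extra
# keyword arguments such as episode_length and optimizer are forwarded from
# algorithm_config (the values below are illustrative):
#   Autotuner(
#       algorithm="nevergrad",
#       optimization_target="codesize",
#       search_time_seconds=60,
#       algorithm_config={"episode_length": 50, "optimizer": "NGOpt"},
#   )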
|
CompilerGym-development
|
examples/llvm_autotuning/autotuners/nevergrad_.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integration tests for the LLVM autotuners."""
from llvm_autotuning.autotuners import Autotuner
import compiler_gym
def test_autotune():
with compiler_gym.make("llvm-v0", reward_space="IrInstructionCount") as env:
env.reset(benchmark="benchmark://cbench-v1/crc32")
env.reward_space = "IrInstructionCount"
autotuner = Autotuner(
algorithm="nevergrad",
optimization_target="codesize",
search_time_seconds=3,
)
result = autotuner(env)
print(result)
assert result.benchmark == "benchmark://cbench-v1/crc32"
assert result.walltime >= 3
assert result.commandline == env.action_space.to_string(env.actions)
assert env.episode_reward >= 0
assert env.benchmark == "benchmark://cbench-v1/crc32"
assert env.reward_space == "IrInstructionCount"
|
CompilerGym-development
|
examples/llvm_autotuning/autotuners/nevergrad_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
import subprocess
import tempfile
import warnings
from pathlib import Path
import numpy as np
from llvm_autotuning.optimization_target import OptimizationTarget
from compiler_gym.envs.llvm import compute_observation
from compiler_gym.errors import ServiceError
from compiler_gym.service.client_service_compiler_env import ClientServiceCompilerEnv
from compiler_gym.third_party.llvm import opt_path
from compiler_gym.util.runfiles_path import transient_cache_path
# Ignore import deprecation warnings from opentuner.
warnings.filterwarnings("ignore", category=DeprecationWarning)
import opentuner as ot # noqa: E402
from opentuner import ( # noqa: E402
ConfigurationManipulator,
MeasurementInterface,
PermutationParameter,
Result,
)
from opentuner.search.binaryga import BinaryGA # noqa: E402
from opentuner.search.manipulator import BooleanParameter # noqa: E402
from opentuner.tuningrunmain import TuningRunMain # noqa: E402
def opentuner_ga(
env: ClientServiceCompilerEnv,
optimization_target: OptimizationTarget,
search_time_seconds: int,
seed: int,
max_copies_of_pass: int = 4,
population: int = 200,
tournament: int = 5,
mutate: int = 2,
sharing: int = 1,
**kwargs,
) -> None:
"""Optimize an environment using opentuner.
OpenTuner is an extensible framework for program autotuning:
https://opentuner.org/
"""
cache_dir = transient_cache_path("llvm_autotuning")
cache_dir.mkdir(exist_ok=True, parents=True)
with tempfile.TemporaryDirectory(dir=cache_dir, prefix="opentuner-") as tmpdir:
argparser = ot.default_argparser()
args = argparser.parse_args(
args=[
f"--stop-after={search_time_seconds}",
f"--database={tmpdir}/opentuner.db",
"--no-dups",
"--technique=custom",
f"--seed={seed}",
"--parallelism=1",
]
)
ot.search.technique.register(
BinaryGA(
population=population,
tournament=tournament,
mutate=mutate,
sharing=sharing,
name="custom",
)
)
manipulator = LlvmOptFlagsTuner(
args,
target=optimization_target,
benchmark=env.benchmark,
max_copies_of_pass=max_copies_of_pass,
)
tuner = TuningRunMain(manipulator, args)
tuner.main()
class DesiredResult:
def __init__(self, configuration) -> None:
self.configuration = configuration
class Configuration:
def __init__(self, data) -> None:
self.data = data
wrapped = DesiredResult(Configuration(manipulator.best_config))
manipulator.run(wrapped, None, None)
env.reset()
env.multistep(manipulator.serialize_actions(manipulator.best_config))
class LlvmOptFlagsTuner(MeasurementInterface):
def __init__(
self,
*args,
target: OptimizationTarget,
benchmark=None,
max_copies_of_pass=4,
**kwargs,
):
super().__init__(*args, **kwargs)
self.opt = str(opt_path())
self.env = target.make_env(benchmark)
self.env.reset()
self.target = target
self.observation_space = self.env.observation.spaces[
target.optimization_space_enum_name
]
self.unoptimized_path = str(
self.env.service.connection.cache.path / "opentuner-unoptimized.bc"
)
self.tmp_optimized_path = str(
self.env.service.connection.cache.path / "opentuner-optimized.bc"
)
self.env.write_bitcode(self.unoptimized_path)
self.env.write_bitcode(self.tmp_optimized_path)
self.cost_o0 = self.env.observation["IrInstructionCountO0"]
self.cost_oz = self.env.observation["IrInstructionCountOz"]
self.flags_limit = self.env.action_space.n * max_copies_of_pass
self.run_count = 0
self.best_config = None
def manipulator(self) -> ConfigurationManipulator:
"""Define the search space."""
manipulator = ConfigurationManipulator()
# A permutation parameter to order the passes that are present.
manipulator.add_parameter(
PermutationParameter("flag_order", list(range(self.flags_limit)))
)
# Boolean parameters for whether each pass is present.
for i in range(self.flags_limit):
manipulator.add_parameter(BooleanParameter(f"flag{i}"))
def biased_random():
cfg = ConfigurationManipulator.random(manipulator)
# The search space contains duplicate passes; bias random configs so that
# roughly `n / 2` flags are enabled.
disabled = random.sample(
range(self.flags_limit), k=self.flags_limit - self.env.action_space.n
)
cfg.update({f"flag{x}": False for x in disabled})
return cfg
manipulator.random = biased_random
return manipulator
def serialize_flags(self, config):
"""Convert a point in the search space to an ordered list of opt flags."""
return [self.env.action_space.flags[a] for a in self.serialize_actions(config)]
def serialize_actions(self, config):
"""Convert a point in the search space to an ordered list of opt flags."""
n = len(self.env.action_space.flags)
serialized = []
for i in config["flag_order"]:
if config[f"flag{i}"]:
serialized.append(i % n)
return serialized
def __del__(self):
self.env.close()
def run(self, desired_result, input, limit):
"""Run a single config."""
del input # Unused
del limit # Unused
self.run_count += 1
try:
# Run opt to produce an optimized bitcode file.
cmd = [
self.opt,
self.unoptimized_path,
"-o",
self.tmp_optimized_path,
]
cmd += self.serialize_flags(desired_result.configuration.data)
subprocess.check_call(
cmd, timeout=300, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
)
if not Path(self.tmp_optimized_path).is_file():
return Result(time=float("inf"))
except (subprocess.CalledProcessError, subprocess.TimeoutExpired):
return Result(time=float("inf"))
# We need to jump through a couple of hoops to optimize for runtime
# using OpenTuner. Replace the environment benchmark with the current
# optimized file. Use the same benchmark protocol buffer so that any
# dynamic configuration is preserved.
if self.target == OptimizationTarget.RUNTIME:
try:
new_benchmark = self.env.benchmark
new_benchmark.proto.program.uri = f"file:///{self.tmp_optimized_path}"
self.env.reset(benchmark=new_benchmark)
return Result(time=float(np.median(self.env.observation.Runtime())))
except (ServiceError, TimeoutError):
return Result(time=float("inf"))
try:
return Result(
time=float(
compute_observation(self.observation_space, self.tmp_optimized_path)
)
)
except (ValueError, TimeoutError):
return Result(time=float("inf"))
def save_final_config(self, configuration):
# Save parameter for later.
self.best_config = configuration.data
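# Illustrative sketch of how a point in the OpenTuner search space maps to an
# ordered action list: enabled flags are emitted in "flag_order" order, modulo
# the number of real actions. Here there are 3 actions and 6 flags
# (max_copies_of_pass=2); the concrete values are made up.
def _example_serialize_actions():  # pragma: no cover - illustrative only
    config = {
        "flag_order": [4, 0, 2, 1, 3, 5],
        "flag0": True, "flag1": False, "flag2": True,
        "flag3": False, "flag4": True, "flag5": False,
    }
    n = 3  # number of real actions in the action space
    serialized = [i % n for i in config["flag_order"] if config[f"flag{i}"]]
    assert serialized == [1, 0, 2]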
|
CompilerGym-development
|
examples/llvm_autotuning/autotuners/opentuner_.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integration tests for the LLVM autotuners."""
import pytest
from llvm_autotuning.autotuners import Autotuner
import compiler_gym
@pytest.mark.skip(reason="greedy takes a long time")
def test_autotune():
with compiler_gym.make("llvm-v0", reward_space="IrInstructionCount") as env:
env.reset(benchmark="benchmark://cbench-v1/crc32")
autotuner = Autotuner(
algorithm="greedy",
optimization_target="codesize",
search_time_seconds=3,
)
result = autotuner(env)
print(result)
assert result.benchmark == "benchmark://cbench-v1/crc32"
assert result.walltime >= 3
assert result.commandline == env.action_space.to_string(env.actions)
assert env.episode_reward
assert env.benchmark == "benchmark://cbench-v1/crc32"
assert env.reward_space == "IrInstructionCount"
|
CompilerGym-development
|
examples/llvm_autotuning/autotuners/greedy_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import subprocess
import sys
from pathlib import Path
def test_llvm_autotuner_integration_test(tmp_path: Path):
subprocess.check_call(
[
sys.executable,
"-m",
"llvm_autotuning.tune",
"-m",
"experiment=my-exp",
f"outputs={tmp_path}/llvm_autotuning",
"executor.cpus=1",
"num_replicas=1",
"autotuner=nevergrad",
"autotuner.optimization_target=codesize",
"autotuner.search_time_seconds=3",
"autotuner.algorithm_config.episode_length=5",
"benchmarks=single_benchmark_for_testing",
]
)
assert (Path(tmp_path) / "llvm_autotuning/my-exp").is_dir()
|
CompilerGym-development
|
examples/llvm_autotuning/tests/integration_test.py
|
CompilerGym-development
|
examples/gcc_autotuning/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Autotuning script for GCC command line options."""
import random
from itertools import islice, product
from multiprocessing import Lock
from pathlib import Path
from typing import NamedTuple
import numpy as np
from absl import app, flags
from geneticalgorithm import geneticalgorithm as ga
import compiler_gym
import compiler_gym.util.flags.nproc # noqa Flag definition.
import compiler_gym.util.flags.output_dir # noqa Flag definition.
import compiler_gym.util.flags.seed # noqa Flag definition.
from compiler_gym.envs import CompilerEnv
from compiler_gym.envs.gcc import DEFAULT_GCC
from compiler_gym.errors import ServiceError
from compiler_gym.util.executor import Executor
from compiler_gym.util.runfiles_path import create_user_logs_dir
from .info import info
FLAGS = flags.FLAGS
flags.DEFINE_string(
"gcc_bin", DEFAULT_GCC, "Binary to use for gcc. Use docker:<image> for docker"
)
flags.DEFINE_list(
"gcc_benchmark",
None,
"List of benchmarks to search. Use 'all' for all. "
"Defaults to the 12 CHStone benchmarks.",
)
flags.DEFINE_list(
"search",
["random", "hillclimb", "genetic"],
"Type of search to perform. One of: {random,hillclimb,genetic}",
)
flags.DEFINE_integer(
"timeout", 60, "Timeout for each compilation in seconds", lower_bound=1
)
flags.DEFINE_integer(
"gcc_search_budget",
100,
"Maximum number of compilations per benchmark",
lower_bound=1,
)
flags.DEFINE_integer(
"gcc_search_repetitions", 1, "Number of times to repeat each search", lower_bound=1
)
flags.DEFINE_integer(
"actions_per_step",
10,
"Number of actions per compilation for action based searches",
lower_bound=1,
)
flags.DEFINE_integer("max_range", 256, "Limit space per option", lower_bound=0)
flags.DEFINE_integer("pop_size", 100, "Population size for GA", lower_bound=1)
flags.DEFINE_enum(
"objective", "obj_size", ["asm_size", "obj_size"], "Which objective to use"
)
# Lock to prevent multiple processes all calling compiler_gym.make("gcc-v0")
# simultaneously as this can cause issues with the docker API.
GCC_ENV_CONSTRUCTOR_LOCK = Lock()
def random_search(env: CompilerEnv):
best = float("inf")
for _ in range(FLAGS.gcc_search_budget):
env.reset()
env.choices = [
random.randint(-1, min(FLAGS.max_range, len(opt) - 1))
for opt in env.gcc_spec.options
]
best = min(objective(env), best)
return best
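# Illustrative sketch of the search space used above: env.choices has one
# entry per gcc option, each entry indexing into that option's values. A value
# of -1 is assumed here to mean "leave the option unset"; the option sizes are
# hypothetical.
def _example_choices_vector():  # pragma: no cover - illustrative only
    option_sizes = [3, 7, 2]  # hypothetical sizes of three gcc options
    choices = [random.randint(-1, size - 1) for size in option_sizes]
    assert len(choices) == len(option_sizes)
    assert all(-1 <= c < size for c, size in zip(choices, option_sizes))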
def hill_climb(env: CompilerEnv):
best = float("inf")
for _ in range(FLAGS.gcc_search_budget):
with env.fork() as fkd:
fkd.choices = [
random.randint(
max(-1, x - 5), min(len(env.gcc_spec.options[i]) - 1, x + 5)
)
for i, x in enumerate(env.choices)
]
cost = objective(fkd)
if cost < objective(env):
best = cost
env.choices = fkd.choices
return best
def genetic_algorithm(env: CompilerEnv):
def f(choices):
env.reset()
env.choices = choices = list(map(int, choices))
s = objective(env)
return s if s > 0 else float("inf")
model = ga(
function=f,
dimension=len(env.gcc_spec.options),
variable_type="int",
variable_boundaries=np.array(
[[-1, min(FLAGS.max_range, len(opt) - 1)] for opt in env.gcc_spec.options]
),
function_timeout=FLAGS.timeout,
algorithm_parameters={
"population_size": FLAGS.pop_size,
"max_num_iteration": max(1, int(FLAGS.gcc_search_budget / FLAGS.pop_size)),
"mutation_probability": 0.1,
"elit_ratio": 0.01,
"crossover_probability": 0.5,
"parents_portion": 0.3,
"crossover_type": "uniform",
"max_iteration_without_improv": None,
},
)
model.run()
return model.best_function
def objective(env) -> int:
"""Get the objective from an environment"""
# Retry loop to defend against flaky environment.
for _ in range(5):
try:
return env.observation[FLAGS.objective]
except ServiceError as e:
print(f"Objective function failed: {e}")
env.reset()
return env.observation[FLAGS.objective]
_SEARCH_FUNCTIONS = {
"random": random_search,
"hillclimb": hill_climb,
"genetic": genetic_algorithm,
}
class SearchResult(NamedTuple):
search: str
benchmark: str
best_size: int
baseline_size: int
@property
def scaled_best(self) -> float:
return self.baseline_size / self.best_size
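# Illustrative sketch: scaled_best is the baseline size divided by the best
# size found, so values greater than 1.0 indicate an improvement over the -Os
# baseline. The numbers below are made up.
def _example_scaled_best():  # pragma: no cover - illustrative only
    result = SearchResult(
        search="random", benchmark="bench", best_size=80, baseline_size=100
    )
    assert result.scaled_best == 1.25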
def run_search(search: str, benchmark: str, seed: int) -> SearchResult:
"""Run a search and return the search class instance."""
with GCC_ENV_CONSTRUCTOR_LOCK:
env = compiler_gym.make("gcc-v0", gcc_bin=FLAGS.gcc_bin)
try:
random.seed(seed)
np.random.seed(seed)
env.reset(benchmark=benchmark)
env.step(env.action_space["-Os"])
baseline_size = objective(env)
env.reset(benchmark=benchmark)
best_size = _SEARCH_FUNCTIONS[search](env)
finally:
env.close()
return SearchResult(
search=search,
benchmark=benchmark,
best_size=best_size,
baseline_size=baseline_size,
)
def main(argv):
del argv # Unused.
# Validate the --search values now.
for search in FLAGS.search:
if search not in _SEARCH_FUNCTIONS:
raise app.UsageError(f"Invalid --search value: {search}")
def get_benchmarks():
benchmarks = []
with compiler_gym.make("gcc-v0", gcc_bin=FLAGS.gcc_bin) as env:
env.reset()
if FLAGS.gcc_benchmark == ["all"]:
for dataset in env.datasets:
benchmarks += islice(dataset.benchmark_uris(), 50)
elif FLAGS.gcc_benchmark:
for uri in FLAGS.gcc_benchmark:
benchmarks.append(env.datasets.benchmark(uri).uri)
else:
benchmarks = list(
env.datasets["benchmark://chstone-v0"].benchmark_uris()
)
benchmarks.sort()
return benchmarks
logdir = (
Path(FLAGS.output_dir)
if FLAGS.output_dir
else create_user_logs_dir("gcc_autotuning")
)
logdir.mkdir(exist_ok=True, parents=True)
with open(logdir / "results.csv", "w") as f:
print(
"search",
"benchmark",
"scaled_size",
"size",
"baseline_size",
sep=",",
file=f,
)
print("Logging results to", logdir)
# Parallel execution environment. Use flag --nproc to control the number of
# worker processes.
executor = Executor(type="local", timeout_hours=12, cpus=FLAGS.nproc, block=True)
with executor.get_executor(logs_dir=logdir) as session:
jobs = []
# Submit each search instance as a separate job.
grid = product(
range(FLAGS.gcc_search_repetitions), FLAGS.search, get_benchmarks()
)
for _, search, benchmark in grid:
if not benchmark:
raise app.UsageError("Empty benchmark name not allowed")
jobs.append(
session.submit(
run_search,
search=search,
benchmark=benchmark,
seed=FLAGS.seed + len(jobs),
)
)
for job in jobs:
result = job.result()
print(result.benchmark, f"{result.scaled_best:.3f}x", sep="\t")
with open(logdir / "results.csv", "a") as f:
print(
result.search,
result.benchmark,
result.scaled_best,
result.best_size,
result.baseline_size,
sep=",",
file=f,
)
# Print results aggregates.
info([logdir])
if __name__ == "__main__":
app.run(main)
|
CompilerGym-development
|
examples/gcc_autotuning/tune.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
from pathlib import Path
from typing import List
import pandas as pd
from llvm_autotuning.experiment import Experiment
from pydantic import ValidationError
from typer import Typer
from compiler_gym.util.statistics import geometric_mean
app = Typer()
def experiments_from_paths(log_dirs: List[Path]) -> List[Experiment]:
experiments: List[Experiment] = []
for path in log_dirs:
try:
experiments += Experiment.from_logsdir(Path(path).expanduser())
except ValidationError as e:
print(e, file=sys.stderr)
sys.exit(1)
return experiments
@app.command()
def info(
log_dirs: List[Path] = ["~/logs/compiler_gym/gcc_autotuning"],
):
dfs: List[pd.DataFrame] = []
for path in log_dirs:
path = Path(path).expanduser()
for root, _, files in os.walk(path):
if "results.csv" not in files:
continue
root = Path(root)
df = pd.read_csv(root / "results.csv")
if not df.size:
continue
df["timestamp"] = "-".join([root.parent.name, root.name])
dfs.append(df)
if not dfs:
print("No results")
return
df = pd.concat(dfs)
df = df.groupby(["timestamp", "search"])[["scaled_size"]].agg(geometric_mean)
df = df.rename(columns={"scaled_size": "geomean_reward"})
pd.set_option("display.max_rows", None)
print(df)
if __name__ == "__main__":
app()
|
CompilerGym-development
|
examples/gcc_autotuning/info.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import subprocess
import sys
from functools import lru_cache
from pathlib import Path
from typing import Iterable
import docker
import pytest
from absl.flags import FLAGS
from . import tune
def docker_is_available() -> bool:
"""Return whether docker is available."""
try:
docker.from_env()
return True
except docker.errors.DockerException:
return False
@lru_cache(maxsize=2)
def system_gcc_is_available() -> bool:
"""Return whether there is a system GCC available."""
try:
stdout = subprocess.check_output(
["gcc", "--version"], universal_newlines=True, stderr=subprocess.DEVNULL
)
# On some systems "gcc" may alias to a different compiler, so check for
# the presence of the name "gcc" in the first line of output.
return "gcc" in stdout.split("\n")[0].lower()
except (subprocess.CalledProcessError, FileNotFoundError):
return False
def system_gcc_path() -> str:
"""Return the path of the system GCC as a string."""
return subprocess.check_output(
["which", "gcc"], universal_newlines=True, stderr=subprocess.DEVNULL
).strip()
def gcc_bins() -> Iterable[str]:
"""Return a list of available GCCs."""
if docker_is_available():
yield "docker:gcc:11.2.0"
if system_gcc_is_available():
yield system_gcc_path()
@pytest.fixture(scope="module", params=gcc_bins())
def gcc_bin(request) -> str:
return request.param
@pytest.mark.parametrize("search", ["random", "hillclimb", "genetic"])
def test_tune_smoke_test(search: str, gcc_bin: str, capsys, tmpdir: Path):
tmpdir = Path(tmpdir)
flags = [
"argv0",
"--seed=0",
f"--output_dir={tmpdir}",
f"--gcc_bin={gcc_bin}",
"--gcc_benchmark=benchmark://chstone-v0/aes",
f"--search={search}",
"--pop_size=3",
"--gcc_search_budget=6",
]
sys.argv = flags
FLAGS.unparse_flags()
FLAGS(flags)
tune.main([])
out, _ = capsys.readouterr()
assert "benchmark://chstone-v0/aes" in out
assert (tmpdir / "results.csv").is_file()
|
CompilerGym-development
|
examples/gcc_autotuning/tune_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the example CompilerGym service."""
import socket
import subprocess
from pathlib import Path
from time import sleep
import gym
import numpy as np
import pytest
from flaky import flaky
import examples.example_compiler_gym_service as example
from compiler_gym.envs import CompilerEnv
from compiler_gym.errors import SessionNotFound
from compiler_gym.spaces import ActionSpace, Box, NamedDiscrete, Scalar, Sequence
from compiler_gym.util.commands import Popen
from tests.test_main import main
# Given that the C++ and Python service implementations have identical
# featuresets, we can parameterize the tests and run them against both backends.
EXAMPLE_ENVIRONMENTS = ["example-cc-v0", "example-py-v0"]
@pytest.fixture(scope="function", params=EXAMPLE_ENVIRONMENTS)
def env(request) -> CompilerEnv:
"""Text fixture that yields an environment."""
with gym.make(request.param) as env:
yield env
@pytest.fixture(
scope="module",
params=[example.EXAMPLE_CC_SERVICE_BINARY, example.EXAMPLE_PY_SERVICE_BINARY],
ids=["example-cc-v0", "example-py-v0"],
)
def bin(request) -> Path:
yield request.param
def test_invalid_arguments(bin: Path):
"""Test that running the binary with unrecognized arguments is an error."""
def run(cmd):
with Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
) as p:
stdout, stderr = p.communicate(timeout=10)
return p.returncode, stdout, stderr
returncode, _, stderr = run([str(bin), "foobar"])
assert "ERROR:" in stderr
assert "'foobar'" in stderr
assert returncode == 1
returncode, _, stderr = run([str(bin), "--foobar"])
# The C++ and Python flag parsing libraries emit slightly different error
# messages.
assert "ERROR:" in stderr or "FATAL" in stderr
assert "'foobar'" in stderr
assert returncode == 1
def test_versions(env: CompilerEnv):
"""Tests the GetVersion() RPC endpoint."""
assert env.compiler_version == "1.0.0"
def test_action_space(env: CompilerEnv):
"""Test that the environment reports the service's action spaces."""
assert env.action_spaces == [
ActionSpace(
NamedDiscrete(
name="default",
items=["a", "b", "c"],
)
)
]
def test_observation_spaces(env: CompilerEnv):
"""Test that the environment reports the service's observation spaces."""
env.reset()
assert env.observation.spaces.keys() == {"ir", "features", "runtime"}
assert env.observation.spaces["ir"].space == Sequence(
name="ir",
size_range=(0, np.iinfo(np.int64).max),
dtype=str,
)
assert env.observation.spaces["features"].space == Box(
name="features", shape=(3,), low=-100, high=100, dtype=int
)
assert env.observation.spaces["runtime"].space == Scalar(
name="runtime", min=0, max=np.inf, dtype=float
)
def test_reward_spaces(env: CompilerEnv):
"""Test that the environment reports the service's reward spaces."""
env.reset()
assert env.reward.spaces.keys() == {"runtime"}
def test_step_before_reset(env: CompilerEnv):
"""Taking a step() before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
env.step(0)
def test_observation_before_reset(env: CompilerEnv):
"""Taking an observation before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
_ = env.observation["ir"]
def test_reward_before_reset(env: CompilerEnv):
"""Taking a reward before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
_ = env.reward["runtime"]
def test_reset_invalid_benchmark(env: CompilerEnv):
"""Test requesting a specific benchmark."""
with pytest.raises(LookupError) as ctx:
env.reset(benchmark="example-compiler-v0/foobar")
assert str(ctx.value) == "Unknown program name"
def test_invalid_observation_space(env: CompilerEnv):
"""Test error handling with invalid observation space."""
with pytest.raises(LookupError):
env.observation_space = 100
def test_invalid_reward_space(env: CompilerEnv):
"""Test error handling with invalid reward space."""
with pytest.raises(LookupError):
env.reward_space = 100
def test_double_reset(env: CompilerEnv):
"""Test that reset() can be called twice."""
env.reset()
assert env.in_episode
env.reset()
assert env.in_episode
def test_double_reset_with_step(env: CompilerEnv):
"""Test that reset() can be called twice with a step."""
env.reset()
assert env.in_episode
_, _, done, info = env.step(env.action_space.sample())
assert not done, info
env.reset()
_, _, done, info = env.step(env.action_space.sample())
assert not done, info
assert env.in_episode
def test_Step_out_of_range(env: CompilerEnv):
"""Test error handling with an invalid action."""
env.reset()
with pytest.raises(ValueError) as ctx:
env.step(100)
assert str(ctx.value) == "Out-of-range"
def test_default_ir_observation(env: CompilerEnv):
"""Test default observation space."""
env.observation_space = "ir"
observation = env.reset()
assert observation == "Hello, world!"
observation, reward, done, info = env.step(0)
assert observation == "Hello, world!"
assert reward is None
assert not done
def test_default_features_observation(env: CompilerEnv):
"""Test default observation space."""
env.observation_space = "features"
observation = env.reset()
assert isinstance(observation, np.ndarray)
assert observation.shape == (3,)
assert observation.dtype == np.int64
assert observation.tolist() == [0, 0, 0]
def test_default_reward(env: CompilerEnv):
"""Test default reward space."""
env.reward_space = "runtime"
env.reset()
observation, reward, done, info = env.step(0)
assert observation is None
assert reward == 0
assert not done
def test_observations(env: CompilerEnv):
"""Test observation spaces."""
env.reset()
assert env.observation["ir"] == "Hello, world!"
np.testing.assert_array_equal(env.observation["features"], [0, 0, 0])
def test_rewards(env: CompilerEnv):
"""Test reward spaces."""
env.reset()
assert env.reward["runtime"] == 0
def test_benchmarks(env: CompilerEnv):
assert list(env.datasets.benchmark_uris()) == [
"benchmark://example-compiler-v0/foo",
"benchmark://example-compiler-v0/bar",
]
def test_fork(env: CompilerEnv):
env.reset()
env.step(0)
env.step(1)
other_env = env.fork()
try:
assert env.benchmark == other_env.benchmark
assert other_env.actions == [0, 1]
finally:
other_env.close()
@flaky # Timeout-based test.
def test_force_working_dir(bin: Path, tmpdir):
"""Test that expected files are generated in the working directory."""
tmpdir = Path(tmpdir) / "subdir"
with Popen([str(bin), "--working_dir", str(tmpdir)]):
for _ in range(10):
sleep(0.5)
if (tmpdir / "pid.txt").is_file() and (tmpdir / "port.txt").is_file():
break
else:
pytest.fail(f"PID file not found in {tmpdir}: {list(tmpdir.iterdir())}")
def unsafe_select_unused_port() -> int:
"""Try and select an unused port that on the local system.
There is nothing to prevent the port number returned by this function from
being claimed by another process or thread, so it is liable to race conditions
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("127.0.0.1", 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
def port_is_free(port: int) -> bool:
"""Determine if a port is in use"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("127.0.0.1", port))
return True
except OSError:
return False
finally:
s.close()
@flaky # Unsafe free port allocation
def test_force_port(bin: Path, tmpdir):
"""Test that a forced --port value is respected."""
port = unsafe_select_unused_port()
assert port_is_free(port) # Sanity check
tmpdir = Path(tmpdir)
with Popen([str(bin), "--port", str(port), "--working_dir", str(tmpdir)]):
for _ in range(10):
sleep(0.5)
if (tmpdir / "pid.txt").is_file() and (tmpdir / "port.txt").is_file():
break
else:
pytest.fail(f"PID file not found in {tmpdir}: {list(tmpdir.iterdir())}")
with open(tmpdir / "port.txt") as f:
actual_port = int(f.read())
assert actual_port == port
assert not port_is_free(actual_port)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
examples/example_compiler_gym_service/env_tests.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the example CompilerGym service."""
import os
import socket
import subprocess
import sys
from getpass import getuser
from pathlib import Path
from time import sleep
from typing import Iterable, List, Optional
import gym
import numpy as np
import pytest
from flaky import flaky
from compiler_gym.datasets import Benchmark, Dataset
from compiler_gym.datasets.uri import BenchmarkUri
from compiler_gym.envs import CompilerEnv
from compiler_gym.service import SessionNotFound
from compiler_gym.spaces import Box, NamedDiscrete, Reward, Scalar, Sequence
from compiler_gym.util import debug_util as dbg
from compiler_gym.util.commands import Popen
from compiler_gym.util.registration import register
EXAMPLE_PY_SERVICE_BINARY: Path = Path(
"example_compiler_gym_service/service_py/example_service.py"
)
assert EXAMPLE_PY_SERVICE_BINARY.is_file(), "Service script not found"
class RuntimeReward(Reward):
"""An example reward that uses changes in the "runtime" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
name="runtime",
observation_spaces=["runtime"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.previous_runtime = None
def reset(self, benchmark: str, observation_view):
del benchmark # unused
self.previous_runtime = None
def update(self, action, observations, observation_view):
del action
del observation_view
if self.previous_runtime is None:
self.previous_runtime = observations[0]
reward = float(self.previous_runtime - observations[0])
self.previous_runtime = observations[0]
return reward
class ExampleDataset(Dataset):
def __init__(self, *args, **kwargs):
super().__init__(
name="benchmark://example-compiler-v0",
license="MIT",
description="An example dataset",
)
self._benchmarks = {
"/foo": Benchmark.from_file_contents(
"benchmark://example-compiler-v0/foo", "Ir data".encode("utf-8")
),
"/bar": Benchmark.from_file_contents(
"benchmark://example-compiler-v0/bar", "Ir data".encode("utf-8")
),
}
def benchmark_uris(self) -> Iterable[str]:
yield from (
f"benchmark://example-compiler-v0{k}" for k in self._benchmarks.keys()
)
def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark:
if uri.path in self._benchmarks:
return self._benchmarks[uri.path]
else:
raise LookupError("Unknown program name")
# Register example-compiler-v0 for use with gym.make(...).
register(
id="example-without-bazel-v0",
entry_point="compiler_gym.service.client_service_compiler_env:ClientServiceCompilerEnv",
kwargs={
"service": EXAMPLE_PY_SERVICE_BINARY,
"rewards": [RuntimeReward()],
"datasets": [ExampleDataset()],
},
)
@pytest.fixture(scope="function")
def env() -> CompilerEnv:
"""Text fixture that yields an environment."""
with gym.make("example-without-bazel-v0") as env:
yield env
@pytest.fixture(scope="module")
def bin() -> Path:
return EXAMPLE_PY_SERVICE_BINARY
def test_invalid_arguments(bin: Path):
"""Test that running the binary with unrecognized arguments is an error."""
def run(cmd):
with Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
) as p:
stdout, stderr = p.communicate(timeout=60)
return p.returncode, stdout, stderr
returncode, _, stderr = run([str(bin), "foobar"])
assert "ERROR:" in stderr
assert "'foobar'" in stderr
assert returncode == 1
returncode, _, stderr = run([str(bin), "--foobar"])
# The C++ and Python flag parsing libraries emit slightly different error
# messages.
assert "ERROR:" in stderr or "FATAL" in stderr
assert "'foobar'" in stderr
assert returncode == 1
def test_versions(env: CompilerEnv):
"""Tests the GetVersion() RPC endpoint."""
assert env.compiler_version == "1.0.0"
def test_action_space(env: CompilerEnv):
"""Test that the environment reports the service's action spaces."""
assert env.action_spaces == [
NamedDiscrete(
name="default",
items=["a", "b", "c"],
)
]
def test_observation_spaces(env: CompilerEnv):
"""Test that the environment reports the service's observation spaces."""
env.reset()
assert env.observation.spaces.keys() == {"ir", "features", "runtime"}
ir_space = env.observation.spaces["ir"]
assert isinstance(ir_space.space, Sequence)
assert ir_space.space.dtype == str
assert ir_space.space.size_range == (0, np.iinfo(np.int64).max)
feature_space = env.observation.spaces["features"].space
assert isinstance(feature_space, Box)
assert feature_space.shape == (3,)
assert np.all(feature_space.low == [-100, -100, -100])
assert np.all(feature_space.high == [100, 100, 100])
assert feature_space.dtype == int
runtime_space = env.observation.spaces["runtime"].space
assert isinstance(runtime_space, Scalar)
assert runtime_space.min == 0
assert runtime_space.max == np.inf
assert runtime_space.dtype == float
def test_reward_spaces(env: CompilerEnv):
"""Test that the environment reports the service's reward spaces."""
env.reset()
assert env.reward.spaces.keys() == {"runtime"}
def test_step_before_reset(env: CompilerEnv):
"""Taking a step() before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
env.step(0)
def test_observation_before_reset(env: CompilerEnv):
"""Taking an observation before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
_ = env.observation["ir"]
def test_reward_before_reset(env: CompilerEnv):
"""Taking a reward before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
_ = env.reward["runtime"]
def test_reset_invalid_benchmark(env: CompilerEnv):
"""Test requesting a specific benchmark."""
with pytest.raises(LookupError) as ctx:
env.reset(benchmark="example-compiler-v0/foobar")
assert str(ctx.value) == "Unknown program name"
def test_invalid_observation_space(env: CompilerEnv):
"""Test error handling with invalid observation space."""
with pytest.raises(LookupError):
env.observation_space = 100
def test_invalid_reward_space(env: CompilerEnv):
"""Test error handling with invalid reward space."""
with pytest.raises(LookupError):
env.reward_space = 100
def test_double_reset(env: CompilerEnv):
"""Test that reset() can be called twice."""
env.reset()
assert env.in_episode
env.reset()
assert env.in_episode
def test_double_reset_with_step(env: CompilerEnv):
"""Test that reset() can be called twice with a step."""
env.reset()
assert env.in_episode
_, _, done, info = env.step(env.action_space.sample())
assert not done, info
env.reset()
_, _, done, info = env.step(env.action_space.sample())
assert not done, info
assert env.in_episode
def test_Step_out_of_range(env: CompilerEnv):
"""Test error handling with an invalid action."""
env.reset()
with pytest.raises(ValueError) as ctx:
env.step(100)
assert str(ctx.value) == "Out-of-range"
def test_default_ir_observation(env: CompilerEnv):
"""Test default observation space."""
env.observation_space = "ir"
observation = env.reset()
assert observation == "Hello, world!"
observation, reward, done, info = env.step(0)
assert observation == "Hello, world!"
assert reward is None
assert not done
def test_default_features_observation(env: CompilerEnv):
"""Test default observation space."""
env.observation_space = "features"
observation = env.reset()
assert isinstance(observation, np.ndarray)
assert observation.shape == (3,)
assert observation.dtype == np.int64
assert observation.tolist() == [0, 0, 0]
def test_default_reward(env: CompilerEnv):
"""Test default reward space."""
env.reward_space = "runtime"
env.reset()
observation, reward, done, info = env.step(0)
assert observation is None
assert reward == 0
assert not done
def test_observations(env: CompilerEnv):
"""Test observation spaces."""
env.reset()
assert env.observation["ir"] == "Hello, world!"
np.testing.assert_array_equal(env.observation["features"], [0, 0, 0])
def test_rewards(env: CompilerEnv):
"""Test reward spaces."""
env.reset()
assert env.reward["runtime"] == 0
def test_benchmarks(env: CompilerEnv):
assert list(env.datasets.benchmark_uris()) == [
"benchmark://example-compiler-v0/foo",
"benchmark://example-compiler-v0/bar",
]
def test_fork(env: CompilerEnv):
env.reset()
env.step(0)
env.step(1)
other_env = env.fork()
try:
assert env.benchmark == other_env.benchmark
assert other_env.actions == [0, 1]
finally:
other_env.close()
@flaky # Timeout-based test.
def test_force_working_dir(bin: Path, tmpdir):
"""Test that expected files are generated in the working directory."""
tmpdir = Path(tmpdir) / "subdir"
with Popen([str(bin), "--working_dir", str(tmpdir)]):
for _ in range(10):
sleep(0.5)
if (tmpdir / "pid.txt").is_file() and (tmpdir / "port.txt").is_file():
break
else:
pytest.fail(f"PID file not found in {tmpdir}: {list(tmpdir.iterdir())}")
def unsafe_select_unused_port() -> int:
"""Try and select an unused port that on the local system.
There is nothing to prevent the port number returned by this function from
being claimed by another process or thread, so it is liable to race conditions
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("127.0.0.1", 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
def port_is_free(port: int) -> bool:
"""Determine if a port is in use"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("127.0.0.1", port))
return True
except OSError:
return False
finally:
s.close()
@flaky # Unsafe free port allocation
def test_force_port(bin: Path, tmpdir):
"""Test that a forced --port value is respected."""
port = unsafe_select_unused_port()
assert port_is_free(port) # Sanity check
tmpdir = Path(tmpdir)
with Popen([str(bin), "--port", str(port), "--working_dir", str(tmpdir)]):
for _ in range(10):
sleep(0.5)
if (tmpdir / "pid.txt").is_file() and (tmpdir / "port.txt").is_file():
break
else:
pytest.fail(f"PID file not found in {tmpdir}: {list(tmpdir.iterdir())}")
with open(tmpdir / "port.txt") as f:
actual_port = int(f.read())
assert actual_port == port
assert not port_is_free(actual_port)
# Copied from CompilerGym/tests/test_main.py because there were errors in trying to import it here
def main(extra_pytest_args: Optional[List[str]] = None, debug_level: int = 1):
dbg.set_debug_level(debug_level)
# Keep test data isolated from user data.
os.environ[
"COMPILER_GYM_SITE_DATA"
] = f"/tmp/compiler_gym_{getuser()}/tests/site_data"
os.environ["COMPILER_GYM_CACHE"] = f"/tmp/compiler_gym_{getuser()}/tests/cache"
pytest_args = sys.argv + [
# Run pytest verbosely to print out test names to provide context in
# case of failures.
"-vv",
# Disable "Module already imported" warnings. See:
# https://docs.pytest.org/en/latest/how-to/usage.html#calling-pytest-from-python-code
"-W",
"ignore:Module already imported:pytest.PytestWarning",
# Disable noisy "Flaky tests passed" messages.
"--no-success-flaky-report",
]
# Support for sharding. If a py_test target has the shard_count attribute
# set (in the range [1,50]), then the pytest-shard module is used to divide
# the tests among the shards. See https://pypi.org/project/pytest-shard/
sharded_test = os.environ.get("TEST_TOTAL_SHARDS")
if sharded_test:
num_shards = int(os.environ["TEST_TOTAL_SHARDS"])
shard_index = int(os.environ["TEST_SHARD_INDEX"])
pytest_args += [f"--shard-id={shard_index}", f"--num-shards={num_shards}"]
else:
pytest_args += ["-p", "no:pytest-shard"]
pytest_args += extra_pytest_args or []
returncode = pytest.main(pytest_args)
# By default pytest will fail with an error if no tests are collected.
# Disable that behavior here (with a warning) since there are legitimate
# cases where we may want to run a test file with no tests in it. For
# example, when running on a continuous integration service where all the
# tests are marked with the @skip_on_ci decorator.
if returncode == pytest.ExitCode.NO_TESTS_COLLECTED.value:
print(
"WARNING: The test suite was empty. Is that intended?",
file=sys.stderr,
)
returncode = 0
sys.exit(returncode)
if __name__ == "__main__":
main(
extra_pytest_args=[
"-W",
"ignore::UserWarning",
]
)
|
CompilerGym-development
|
examples/example_compiler_gym_service/env_without_bazel_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This module defines and registers the example gym environments."""
from pathlib import Path
from typing import Iterable
from compiler_gym.datasets import Benchmark, Dataset
from compiler_gym.datasets.uri import BenchmarkUri
from compiler_gym.spaces import Reward
from compiler_gym.util.registration import register
from compiler_gym.util.runfiles_path import runfiles_path
EXAMPLE_CC_SERVICE_BINARY: Path = runfiles_path(
"examples/example_compiler_gym_service/service_cc/compiler_gym-example-service-cc"
)
EXAMPLE_PY_SERVICE_BINARY: Path = runfiles_path(
"examples/example_compiler_gym_service/service_py/compiler_gym-example-service-py"
)
class RuntimeReward(Reward):
"""An example reward that uses changes in the "runtime" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
name="runtime",
observation_spaces=["runtime"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.previous_runtime = None
def reset(self, benchmark: str, observation_view):
del benchmark # unused
self.previous_runtime = None
def update(self, action, observations, observation_view):
del action
del observation_view
if self.previous_runtime is None:
self.previous_runtime = observations[0]
reward = float(self.previous_runtime - observations[0])
self.previous_runtime = observations[0]
return reward
class ExampleDataset(Dataset):
def __init__(self, *args, **kwargs):
super().__init__(
name="benchmark://example-compiler-v0",
license="MIT",
description="An example dataset",
)
self._benchmarks = {
"/foo": Benchmark.from_file_contents(
"benchmark://example-compiler-v0/foo", "Ir data".encode("utf-8")
),
"/bar": Benchmark.from_file_contents(
"benchmark://example-compiler-v0/bar", "Ir data".encode("utf-8")
),
}
def benchmark_uris(self) -> Iterable[str]:
yield from (
f"benchmark://example-compiler-v0{k}" for k in self._benchmarks.keys()
)
def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark:
if uri.path in self._benchmarks:
return self._benchmarks[uri.path]
else:
raise LookupError("Unknown program name")
# Register the example service on module import. After importing this module,
# the example-compiler-v0 environment will be available to gym.make(...).
register(
id="example-cc-v0",
entry_point="compiler_gym.service.client_service_compiler_env:ClientServiceCompilerEnv",
kwargs={
"service": EXAMPLE_CC_SERVICE_BINARY,
"rewards": [RuntimeReward()],
"datasets": [ExampleDataset()],
},
)
register(
id="example-py-v0",
entry_point="compiler_gym.service.client_service_compiler_env:ClientServiceCompilerEnv",
kwargs={
"service": EXAMPLE_PY_SERVICE_BINARY,
"rewards": [RuntimeReward()],
"datasets": [ExampleDataset()],
},
)
|
CompilerGym-development
|
examples/example_compiler_gym_service/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Smoke test for examples/example_compiler_gym_service/demo_without_bazel.py"""
from example_compiler_gym_service.demo_without_bazel import main
from flaky import flaky
@flaky
def test_demo_without_bazel():
main()
|
CompilerGym-development
|
examples/example_compiler_gym_service/demo_without_bazel_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This script demonstrates how the Python example service without needing
to use the bazel build system. Usage:
$ python example_compiler_gym_service/demo_without_bazel.py
It is equivalent in behavior to the demo.py script in this directory.
"""
import logging
from pathlib import Path
from typing import Iterable
import gym
from compiler_gym.datasets import Benchmark, Dataset
from compiler_gym.datasets.uri import BenchmarkUri
from compiler_gym.spaces import Reward
from compiler_gym.util.logging import init_logging
from compiler_gym.util.registration import register
EXAMPLE_PY_SERVICE_BINARY: Path = Path(
"example_compiler_gym_service/service_py/example_service.py"
)
assert EXAMPLE_PY_SERVICE_BINARY.is_file(), "Service script not found"
class RuntimeReward(Reward):
"""An example reward that uses changes in the "runtime" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
name="runtime",
observation_spaces=["runtime"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.previous_runtime = None
def reset(self, benchmark: str, observation_view):
del benchmark # unused
self.previous_runtime = None
def update(self, action, observations, observation_view):
del action
del observation_view
if self.previous_runtime is None:
self.previous_runtime = observations[0]
reward = float(self.previous_runtime - observations[0])
self.previous_runtime = observations[0]
return reward
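# Illustrative sketch of how RuntimeReward turns successive "runtime"
# observations into incremental reward: the first observation seeds
# previous_runtime (reward 0), after which each reward is the decrease in
# runtime since the previous step. The observation values are made up.
def _example_runtime_reward():  # pragma: no cover - illustrative only
    reward = RuntimeReward()
    reward.reset(benchmark="example", observation_view=None)
    assert reward.update(action=0, observations=[10.0], observation_view=None) == 0.0
    assert reward.update(action=0, observations=[7.5], observation_view=None) == 2.5
    assert reward.update(action=0, observations=[8.0], observation_view=None) == -0.5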
class ExampleDataset(Dataset):
def __init__(self, *args, **kwargs):
super().__init__(
name="benchmark://example-compiler-v0",
license="MIT",
description="An example dataset",
)
self._benchmarks = {
"/foo": Benchmark.from_file_contents(
"benchmark://example-compiler-v0/foo", "Ir data".encode("utf-8")
),
"/bar": Benchmark.from_file_contents(
"benchmark://example-compiler-v0/bar", "Ir data".encode("utf-8")
),
}
def benchmark_uris(self) -> Iterable[str]:
yield from (
f"benchmark://example-compiler-v0{k}" for k in self._benchmarks.keys()
)
def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark:
if uri.path in self._benchmarks:
return self._benchmarks[uri.path]
else:
raise LookupError("Unknown program name")
# Register the environment for use with gym.make(...).
register(
id="example-compiler-v0",
entry_point="compiler_gym.service.client_service_compiler_env:ClientServiceCompilerEnv",
kwargs={
"service": EXAMPLE_PY_SERVICE_BINARY,
"rewards": [RuntimeReward()],
"datasets": [ExampleDataset()],
},
)
def main():
# Use debug verbosity to print out extra logging information.
init_logging(level=logging.DEBUG)
# Create the environment using the regular gym.make(...) interface.
with gym.make("example-compiler-v0") as env:
env.reset()
for _ in range(20):
observation, reward, done, info = env.step(env.action_space.sample())
if done:
env.reset()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
examples/example_compiler_gym_service/demo_without_bazel.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This script demonstrates how the example services defined in this directory
can be used as gym environments. Usage:
$ bazel run -c opt //examples/example_compiler_gym_service:demo
"""
import logging
import gym
# To use the example services we simply need to import the module which
# registers the environments.
import examples.example_compiler_gym_service # noqa Register environments
def main():
# Use debug verbosity to print out extra logging information.
logging.basicConfig(level=logging.DEBUG)
# Create the environment using the regular gym.make(...) interface. We could
# use either the C++ service "example-cc-v0" or the Python service
# "example-py-v0".
with gym.make("example-cc-v0") as env:
env.reset()
for _ in range(20):
observation, reward, done, info = env.step(env.action_space.sample())
if done:
env.reset()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
examples/example_compiler_gym_service/demo.py
|
#! /usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""An example CompilerGym service in python."""
import logging
from pathlib import Path
from typing import Optional, Tuple
from compiler_gym.service import CompilationSession
from compiler_gym.service.proto import (
ActionSpace,
Benchmark,
DoubleRange,
Event,
Int64Box,
Int64Range,
Int64Tensor,
NamedDiscreteSpace,
ObservationSpace,
Space,
StringSpace,
)
from compiler_gym.service.runtime import create_and_run_compiler_gym_service
class ExampleCompilationSession(CompilationSession):
"""Represents an instance of an interactive compilation session."""
compiler_version: str = "1.0.0"
# The action spaces supported by this service. Here we will implement a
# single action space, called "default", that represents a command line with
# three options: "a", "b", and "c".
action_spaces = [
ActionSpace(
name="default",
space=Space(
named_discrete=NamedDiscreteSpace(
name=[
"a",
"b",
"c",
],
),
),
)
]
# A list of observation spaces supported by this service. Each of these
# ObservationSpace protos describes an observation space.
observation_spaces = [
ObservationSpace(
name="ir",
space=Space(
string_value=StringSpace(length_range=Int64Range(min=0)),
),
deterministic=True,
platform_dependent=False,
default_observation=Event(string_value=""),
),
ObservationSpace(
name="features",
space=Space(
int64_box=Int64Box(
low=Int64Tensor(shape=[3], value=[-100, -100, -100]),
high=Int64Tensor(shape=[3], value=[100, 100, 100]),
),
),
),
ObservationSpace(
name="runtime",
space=Space(
double_value=DoubleRange(min=0),
),
deterministic=False,
platform_dependent=True,
default_observation=Event(
double_value=0,
),
),
]
def __init__(
self, working_directory: Path, action_space: ActionSpace, benchmark: Benchmark
):
super().__init__(working_directory, action_space, benchmark)
logging.info("Started a compilation session for %s", benchmark.uri)
def apply_action(self, action: Event) -> Tuple[bool, Optional[ActionSpace], bool]:
num_choices = len(self.action_spaces[0].space.named_discrete.name)
# This is the index into the action space's values ("a", "b", "c") that
# the user selected, e.g. 0 -> "a", 1 -> "b", 2 -> "c".
choice_index = action.int64_value
logging.info("Applying action %d", choice_index)
if choice_index < 0 or choice_index >= num_choices:
raise ValueError("Out-of-range")
# Here is where we would run the actual action to update the environment's
# state.
return False, None, False
def get_observation(self, observation_space: ObservationSpace) -> Event:
logging.info("Computing observation from space %s", observation_space)
if observation_space.name == "ir":
return Event(string_value="Hello, world!")
elif observation_space.name == "features":
observation = Event(int64_tensor=Int64Tensor(shape=[3], value=[0, 0, 0]))
return observation
elif observation_space.name == "runtime":
return Event(double_value=0)
else:
raise KeyError(observation_space.name)
if __name__ == "__main__":
create_and_run_compiler_gym_service(ExampleCompilationSession)
|
CompilerGym-development
|
examples/example_compiler_gym_service/service_py/example_service.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pickle
import sqlite3
import zlib
import dgl
import numpy as np
import torch
from dgl.data import DGLDataset
class CompilerGymDataset(DGLDataset):
def __init__(
self,
filepath,
num_workers=64,
max_len_nodes=50000,
input_key="dgl_graph",
output_key="reward",
table_name="Observations",
train_prop=0.8,
vocab=None,
dataset_size=-1,
):
"""
This class loads a CompilerGym database which contains 'States' and 'Observations'
as tables. The tables contain the necessary information for doing supervised learning.
This class handles all of the underlying structure, including differentiating between
the training and dev splits, creating the 'dgl graph', and collating individual graphs
into a larger batched graph, which is used for training.
Inputs:
- filepath: the path to the dataset
- num_workers: number of workers used to fetch the instances
- max_len_nodes: maximum number of nodes in the graph
- input_key: the key that the input observation (dgl graph) is saved under
- output_key: the key that we want to generate the supervised loss from
- table_name: the table name in the database that has the primary keys
- train_prop: proportion of training instances
- vocab: the vocab mapping text to integer indices of an embedding table
- dataset_size: size of the dataset to use; the default of -1 means use the whole database
"""
self.filepath = filepath
self.num_workers = num_workers
self.max_len_nodes = max_len_nodes
self.graph_key = input_key
self.output_key = output_key
self.table_name = table_name
self.train_prop = train_prop
self.vocab = vocab
self.dataset_size = dataset_size
self.distribution_type = "train"
print("using filepath: ", self.filepath)
super().__init__(name="CopmilerGym")
def process(self):
"""
Called during initialization of the class and initializes the underlying
functions needed for supervised learning
"""
self.initialize_database()
def initialize_database(self):
print("using: ", self.filepath, " as dataset")
self.cursor = self.get_cursor()
self.train_size = int(self.train_prop * self.get_full_db_length())
self.dev_size = self.get_full_db_length() - self.train_size
self.select_distribution_indices()
self.get_observation_indices()
print("intialized database: ", self.filepath)
def select_distribution_indices(self):
total_size = self.get_full_db_length()
self.all_indices = set(range(total_size))
self.train_indices = np.random.choice(
total_size, size=self.train_size, replace=False
)
self.dev_indices = list(self.all_indices - set(self.train_indices))
assert len(self.train_indices) == self.train_size
assert len(self.dev_indices) == self.dev_size
def get_observation_indices(self):
self.all_state_indices = get_all_states(self.cursor, self.dataset_size)
def get_full_db_length(self):
if self.dataset_size == -1:
return get_database_size(self.cursor, self.table_name)
else:
return self.dataset_size
def __getitem__(self, i):
return self.get_instance(i)
def get_instance(self, i):
"""
Given an index (i) into the current split ('train' or 'dev'),
return the desired instance.
"""
index = None
if self.distribution_type == "train":
index = self.train_indices[i]
elif self.distribution_type == "dev":
index = self.dev_indices[i]
cursor = self.get_cursor()
cur_state = self.all_state_indices[index]
s = get_observation_from_table(cursor, cur_state[3])
# This reward is currently hardcoded to be the instruction count of the
# given LLVM-IR graph.
reward = s[0][1]
programl = pickle.loads(zlib.decompress(s[0][3]))
dgl_graph = process_networkx_graph(programl, self.vocab)
return {self.output_key: reward, self.graph_key: dgl_graph}
def __len__(self):
if self.distribution_type == "train":
return self.train_size
elif self.distribution_type == "dev":
return self.dev_size
def collate_fn(self, samples):
samples = [sample for sample in samples if sample is not None]
# Takes a list of graphs and makes it into one big graph that dgl operates on
ret = None
if samples:
dgl_graph = dgl.batch([sample[self.graph_key] for sample in samples])
reward = [sample[self.output_key] for sample in samples]
ret = (dgl_graph, reward)
return ret
def set_distribution_type(self, dist_type):
assert dist_type in ["train", "dev"]
self.distribution_type = dist_type
def get_cursor(self):
connection = sqlite3.connect(self.filepath)
return connection.cursor()
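# Illustrative usage sketch: constructing the dataset and a DataLoader with
# its collate_fn. The database path and vocab below are placeholders; a real
# vocab maps each textual feature name (e.g. "text") to a {token: index} dict.
def _example_dataloader():  # pragma: no cover - illustrative only
    from torch.utils.data import DataLoader

    dataset = CompilerGymDataset("/path/to/observations.db", vocab={"text": {}})
    dataset.set_distribution_type("train")
    loader = DataLoader(
        dataset, batch_size=4, collate_fn=dataset.collate_fn, num_workers=0
    )
    for batched_graph, rewards in loader:
        print(batched_graph.num_nodes(), rewards)
        break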
def get_database_size(cursor, table):
return cursor.execute(f"SELECT COUNT(*) FROM {table}").fetchall()[0][0]
def get_all_states(cursor, db_size):
if db_size == -1:
cursor.execute("SELECT * from States")
else:
cursor.execute(f"SELECT * from States LIMIT {db_size}")
return cursor.fetchall()
def get_observation_from_table(cursor, hash):
"""
Gets the observation for a state from a given database.
Inputs:
- cursor: the db cursor
- hash: the state_id we want (primary key in the Observations table)
"""
cursor.execute(f"SELECT * from Observations where state_id = '{hash}'")
return cursor.fetchall()
def process_networkx_graph(
graph,
vocab,
node_feature_list=["text", "type"],
edge_feature_list=["flow", "position"],
):
"""
Handles all of the requirements of taking a networkx graph and converting it into a
dgl graph
Inputs:
- graph: the networkx graph
- vocab: the vocabulary, a mapping from word to index.
- node_feature_list: a list of textual features from the networkx nodes that we want to make sure
are featurizable into a vector.
- edge_feature_list: a list of textual features from the networkx edges that we want to make sure
are featurizable into a vector.
"""
update_graph_with_vocab(graph.nodes, node_feature_list, vocab)
update_graph_with_vocab(graph.edges, edge_feature_list, vocab)
dgl_graph = fast_networkx_to_dgl(graph)
return dgl_graph
def fast_networkx_to_dgl(
graph, node_attrs=["text_idx", "type"], edge_attrs=["flow", "position"]
):
"""
Takes a networkx graph and its given node attributes and edge attributes
and converts it into the corresponding dgl graph.
Inputs:
- graph: the networkx graph
- node_attrs: node attributes to convert
- edge_attrs: edge attributes to convert
"""
edges = [edge for edge in graph.edges()]
dgl_graph = dgl.graph(edges, num_nodes=graph.number_of_nodes())
for feat in edge_attrs:
edge_assigns = torch.tensor(
[val[-1] for val in graph.edges(data=feat)], dtype=torch.int64
)
dgl_graph.edata[feat] = edge_assigns
for feat in node_attrs:
node_assigns = torch.tensor(
[val[-1] for val in graph.nodes(data=feat)], dtype=torch.int64
)
dgl_graph.ndata[feat] = node_assigns
return dgl_graph
def update_graph_with_vocab(graph_fn, features, vocab):
"""
    Given a networkx accessor (nodes or edges) and a list of feature names, map
    each textual feature to its vocabulary index where a vocabulary exists.
    Features without a vocabulary entry must already be numeric.
Inputs:
- graph_fn: a networkx graph function (describing nodes or edges)
- features: the feature from the function that should be updated
- vocab: A dict mapping text to int
"""
for feature_name in features:
curr_vocab = None
if feature_name in vocab:
curr_vocab = vocab[feature_name]
for graph_item in graph_fn(data=feature_name):
feature = graph_item[-1]
idx = graph_item[0]
if feature_name in vocab:
# Lookup vocab item, or map to out-of-vocab index if item is not
# found.
vocab_index = curr_vocab.get(feature, len(curr_vocab))
update_networkx_feature(
graph_fn, idx, f"{feature_name}_idx", vocab_index
)
else:
assert isinstance(
feature, int
), f"{(feature_name, feature)} is not an int"
def update_networkx_feature(graph_fn, idx, feature_name, feature):
graph_fn[idx][feature_name] = feature
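# Minimal usage sketch (illustrative only, not part of the original module).
# It assumes a toy vocabulary keyed by the "text" node attribute and a DGL
# version that accepts the edge-list construction used in fast_networkx_to_dgl
# above.
if __name__ == "__main__":
    import networkx as nx

    toy_graph = nx.DiGraph()
    # Nodes carry a textual "text" feature and an integer "type" feature.
    toy_graph.add_node(0, text="alloca", type=0)
    toy_graph.add_node(1, text="store", type=0)
    # Edges carry integer "flow" and "position" features.
    toy_graph.add_edge(0, 1, flow=0, position=0)
    toy_vocab = {"text": {"alloca": 0, "store": 1}}
    g = process_networkx_graph(toy_graph, toy_vocab)
    print(g.ndata["text_idx"], g.edata["flow"])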
|
CompilerGym-development
|
examples/gnn_cost_model/compiler_gym_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for examples/gnn_cost_model/train_cost_model_test.py"""
import sys
import pytest
from absl import flags
from compiler_gym.util.capture_output import capture_output
from .train import main
FLAGS = flags.FLAGS
@pytest.mark.skip(reason="Need to create a small test set")
def test_run_train_smoke_test():
    argv = [
        "argv0",
        "--dataset_size=64",
        "--batch_size=4",
        "--num_epoch=2",
        "--device=cpu",
    ]
    sys.argv = argv
    FLAGS(argv)
with capture_output() as out:
main(["argv0"])
assert "Epoch num 0 training" in out.stdout
|
CompilerGym-development
|
examples/gnn_cost_model/train_test.py
|
CompilerGym-development
|
examples/gnn_cost_model/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import dgl
import numpy as np
import torch
import torch.nn as nn
class GNNEncoder(nn.Module):
def __init__(
self,
node_vocab_size,
node_hidden_size,
use_node_embedding=True,
n_steps=1,
n_etypes=3,
n_message_passes=0,
reward_dim=1,
gnn_type="GatedGraphConv",
heads=None,
feat_drop=0.0,
concat_intermediate=True,
):
super(GNNEncoder, self).__init__()
self.use_node_embedding = use_node_embedding
self.node_hidden_size = node_hidden_size
self.n_steps = n_steps
self.n_etypes = n_etypes
self.n_message_passes = n_message_passes
self.reward_dim = reward_dim
self.gnn_type = gnn_type
self.heads = heads
self.feat_drop = feat_drop
self.concat_intermediate = concat_intermediate
if self.use_node_embedding:
self.node_embedding = nn.Embedding(node_vocab_size, node_hidden_size)
embed_dim = self.node_hidden_size
if self.gnn_type == "GatedGraphConv":
self.ggcnn = nn.ModuleList(
[
dgl.nn.pytorch.conv.GatedGraphConv(
in_feats=self.node_hidden_size,
out_feats=self.node_hidden_size,
n_steps=self.n_steps,
n_etypes=self.n_etypes,
)
for _ in range(self.n_message_passes)
]
)
if self.concat_intermediate:
embed_dim = (self.n_message_passes + 1) * embed_dim
else:
raise NotImplementedError("")
self.reward_predictor = nn.Sequential(
nn.Linear(embed_dim, self.node_hidden_size),
nn.ReLU(),
nn.Linear(self.node_hidden_size, self.reward_dim),
)
self.mse_loss = nn.MSELoss()
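        # Worked example (illustrative): with node_hidden_size=64 and
        # n_message_passes=2, concat_intermediate pools the node features once
        # before message passing and once after each of the 2 passes, so the
        # reward predictor input is embed_dim = (2 + 1) * 64 = 192.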
def forward(self, g):
with g.local_scope():
self.featurize_nodes(g)
res = g.ndata["feat"]
if self.concat_intermediate:
intermediate = [dgl.mean_nodes(g, "feat")]
if self.gnn_type == "GatedGraphConv":
for i, layer in enumerate(self.ggcnn):
res = layer(g, res, g.edata["flow"])
if self.concat_intermediate:
g.ndata["feat"] = res
intermediate.append(dgl.mean_nodes(g, "feat"))
g.ndata["feat"] = res
if self.concat_intermediate and self.gnn_type == "GatedGraphConv":
graph_agg = torch.cat(intermediate, axis=1)
else:
graph_agg = dgl.mean_nodes(g, "feat")
res = self.reward_predictor(graph_agg)
return res, graph_agg
def get_loss(self, g, labels, eps=0.0):
"""
        Loss function. Rescales the reward with the same value-rescaling
        function used in R2D2 (https://openreview.net/pdf?id=r1lyTjAqYX).
        Returns the scaled MSE, the unscaled MSE, and the mean relative error,
        so the gap between the raw reward and its prediction can be monitored.
"""
preds, _ = self.forward(g)
preds = preds.squeeze(1)
scaled_labels = rescale(labels, eps=eps)
inv_scale_pred = inv_rescale(preds, eps=eps)
return (
self.mse_loss(preds, scaled_labels),
self.mse_loss(inv_scale_pred, labels),
((labels - inv_scale_pred).abs() / labels).mean(),
)
def featurize_nodes(self, g):
        # This is very CompilerGym-specific and can be rewritten for other tasks
features = []
if self.use_node_embedding:
features.append(self.node_embedding(g.ndata["text_idx"]))
g.ndata["feat"] = torch.cat(features)
def get_edge_embedding(self, g):
        # TODO: this could be used for positional embeddings
pass
def rescale(x, eps=1e-3):
sign = get_sign(x)
x_abs = get_abs(x)
if isinstance(x, np.ndarray):
return sign * (np.sqrt(x_abs + 1) - 1) + eps * x
else:
return sign * ((x_abs + 1).sqrt() - 1) + eps * x
def inv_rescale(x, eps=1e-3):
sign = get_sign(x)
x_abs = get_abs(x)
if eps == 0:
return sign * (x * x + 2.0 * x_abs)
else:
return sign * (
(((1.0 + 4.0 * eps * (x_abs + 1.0 + eps)).sqrt() - 1.0) / (2.0 * eps)).pow(
2
)
- 1.0
)
def get_sign(x):
if isinstance(x, np.ndarray):
return np.sign(x)
elif isinstance(x, torch.Tensor):
return x.sign()
else:
raise NotImplementedError(f"Data type: {type(x)} is not implemented")
def get_abs(x):
if isinstance(x, np.ndarray):
return np.abs(x)
elif isinstance(x, torch.Tensor):
return x.abs()
else:
raise NotImplementedError(f"Data type: {type(x)} is not implemented")
|
CompilerGym-development
|
examples/gnn_cost_model/model.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This module trains a GNN cost model on an LLVM-IR transition database to
predict an output reward (the default is instruction count).
Example usage:
$ python train_cost_model.py --num_epoch 10 --batch_size 16 --dataset_size 64
"""
import collections
import io
import logging
import pickle
import sys
import tarfile
import time
from pathlib import Path
from threading import Lock
import numpy as np
import torch
from absl import app, flags
from fasteners import InterProcessLock
from torch.utils.data import DataLoader
import compiler_gym.util.flags.nproc # noqa flag definition
from compiler_gym.util.download import download
from compiler_gym.util.filesystem import extract_tar
from compiler_gym.util.runfiles_path import cache_path, transient_cache_path
from compiler_gym.util.timer import Timer, humanize_duration
from .compiler_gym_dataset import CompilerGymDataset
from .model import GNNEncoder
flags.DEFINE_integer(
"dataset_size", -1, "How large should the dataset be, -1 if no constraint"
)
flags.DEFINE_integer("num_epoch", 100, "Number of epochs for training")
flags.DEFINE_integer("batch_size", 4, "Number of epochs for training")
flags.DEFINE_string(
"db",
"https://dl.fbaipublicfiles.com/compiler_gym/state_transition_dataset/2021-11-15-csmith.tar.bz2",
"URL of the dataset to use.",
)
flags.DEFINE_string(
"db_sha256",
"0b101a17fdbb1851f38ca46cc089b0026eb740e4055a4fe06b4c899ca87256a2",
"SHA256 checksum of the dataset database.",
)
flags.DEFINE_string(
"vocab_db",
"https://dl.fbaipublicfiles.com/compiler_gym/state_transition_dataset/2021-11-15-vocab.tar.bz2",
"URL of the vocabulary database to use.",
)
flags.DEFINE_string(
"vocab_db_sha256",
"af7781f57e6ef430c561afb045fc03693783e668b21826b32234e9c45bd1882c",
"SHA256 checksum of the vocabulary database.",
)
flags.DEFINE_string(
"device", "cuda:0" if torch.cuda.is_available() else "cpu", "The device to run on."
)
FLAGS = flags.FLAGS
logger = logging.getLogger(__name__)
_DB_DOWNLOAD_LOCK = Lock()
def dataset_looper(epoch_num, data_loader, model, device, optimizer=None, train=True):
times = collections.defaultdict(float)
losses = []
unscaled_mse = []
epoch_grad_clip = []
t1 = time.time()
for data in data_loader:
if data is None:
continue
graph, labels = data
times["get_data"] += time.time() - t1
t1 = time.time()
labels = torch.Tensor(labels).to(device)
graph = graph.to(device)
loss, unscaled, _ = model.get_loss(graph, labels)
losses.append(loss.cpu().data.numpy())
unscaled_mse.append(unscaled.cpu().data.numpy())
times["model_forward"] += time.time() - t1
t1 = time.time()
if train:
optimizer.zero_grad()
loss.backward()
grad_clip = torch.nn.utils.clip_grad_norm_(
model.parameters(), max_norm=400.0
)
epoch_grad_clip.append(grad_clip.cpu().data.numpy())
optimizer.step()
times["model_backward"] += time.time() - t1
t1 = time.time()
avg_loss, avg_unscaled = (
np.mean(losses),
np.mean(unscaled_mse),
)
avg_grad_clip = None
if train:
avg_grad_clip = np.mean(epoch_grad_clip)
times = ", ".join(f"{k}: {humanize_duration(v)}" for k, v in times.items())
print(
f" Epoch {epoch_num + 1} {'training' if train else 'validation'} took: "
f"{{ {times} }}, loss: {avg_loss}, unscaled: {avg_unscaled}, "
f"grad_clip {avg_grad_clip}"
)
return avg_loss, avg_unscaled, avg_grad_clip
def train(dataset, data_loader, model, num_epoch, device):
optimizer = torch.optim.Adam(model.parameters())
for epoch in range(num_epoch):
with Timer(f"Epoch {epoch + 1} of {num_epoch} ({(epoch + 1) / num_epoch:.1%})"):
dataset.set_distribution_type("train")
dataset_looper(epoch, data_loader, model, device, optimizer)
dataset.set_distribution_type("dev")
dataset_looper(epoch, data_loader, model, device, train=False)
def download_and_unpack_database(db: str, sha256: str) -> Path:
"""Download the given database, unpack it to the local filesystem, and
return the path.
"""
local_dir = cache_path(f"state_transition_dataset/{sha256}")
with _DB_DOWNLOAD_LOCK, InterProcessLock(
transient_cache_path(".state_transition_database_download.LOCK")
):
if not (local_dir / ".installed").is_file():
tar_data = io.BytesIO(download(db, sha256))
local_dir.mkdir(parents=True, exist_ok=True)
logger.info("Unpacking database to %s ...", local_dir)
with tarfile.open(fileobj=tar_data, mode="r:bz2") as arc:
extract_tar(arc, str(local_dir))
(local_dir / ".installed").touch()
unpacked = [f for f in local_dir.iterdir() if f.name != ".installed"]
    if len(unpacked) != 1:
        print(
            f"fatal: Archive {db} expected to contain one file, contains: {len(unpacked)}",
            file=sys.stderr,
        )
        sys.exit(1)
return unpacked[0]
def main(argv):
"""Main entry point."""
del argv # unused
node_vocab_pth = download_and_unpack_database(
db=FLAGS.vocab_db, sha256=FLAGS.vocab_db_sha256
)
root_pth = download_and_unpack_database(db=FLAGS.db, sha256=FLAGS.db_sha256)
with open(node_vocab_pth, "rb") as f:
vocab = pickle.load(f)
model = GNNEncoder(
        # Add one to the vocab size to accommodate the out-of-vocab element.
node_vocab_size=len(vocab) + 1,
node_hidden_size=64,
)
    # Nest the vocabulary under the "text" feature name, since the vocab maps
    # the "text" attribute of every graph node to an index.
vocab = {"text": vocab}
model.to(FLAGS.device)
print(model)
dataset = CompilerGymDataset(root_pth, vocab=vocab, dataset_size=FLAGS.dataset_size)
dataset_loader = DataLoader(
dataset,
batch_size=FLAGS.batch_size,
shuffle=True,
num_workers=FLAGS.nproc,
collate_fn=dataset.collate_fn,
)
train(dataset, dataset_loader, model, FLAGS.num_epoch, FLAGS.device)
if __name__ == "__main__":
app.run(main)
|
CompilerGym-development
|
examples/gnn_cost_model/train.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
CompilerGym-development
|
examples/llvm_rl/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
from pathlib import Path
from typing import List
from llvm_rl.model import Model
def main(argv):
paths = argv[1:] or ["~/logs/compiler_gym/llvm_rl"]
models: List[Model] = []
for path in paths:
models += Model.from_logsdir(Path(path).expanduser())
for model in models:
model.test()
if __name__ == "__main__":
main(sys.argv)
|
CompilerGym-development
|
examples/llvm_rl/test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Environment wrappers to closer replicate the MLSys'20 Autophase paper."""
from typing import List
import gym
import numpy as np
from compiler_gym.envs import CompilerEnv, LlvmEnv
from compiler_gym.util.gym_type_hints import ActionType
from compiler_gym.wrappers import (
ConstrainedCommandline,
ObservationWrapper,
RewardWrapper,
)
class ClampedReward(RewardWrapper):
"""A wrapper class that clamps reward signal within a bounded range,
optionally with some leaking for out-of-range values.
"""
def __init__(
self,
env: CompilerEnv,
min: float = -1,
max: float = 1,
leakiness_factor: float = 0.001,
):
super().__init__(env)
self.min = min
self.max = max
self.leakiness_factor = leakiness_factor
def convert_reward(self, reward: float) -> float:
if reward > self.max:
return self.max + (reward - self.max) * self.leakiness_factor
elif reward < self.min:
return self.min + (reward - self.min) * self.leakiness_factor
return reward
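    # Worked example (illustrative): with the defaults min=-1, max=1 and
    # leakiness_factor=0.001, a raw reward of 5.0 maps to 1 + 4.0 * 0.001 = 1.004,
    # a raw reward of -3.0 maps to -1 + (-2.0) * 0.001 = -1.002, and anything
    # already inside [-1, 1] passes through unchanged.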
class AutophaseNormalizedFeatures(ObservationWrapper):
"""A wrapper for LLVM environments that use the Autophase observation space
to normalize and clip features to the range [0, 1].
"""
# The index of the "TotalInsts" feature of autophase.
TotalInsts_index = 51
def __init__(self, env: CompilerEnv):
super().__init__(env=env)
# Force Autophase observation space.
self.env.observation_space = self.env.unwrapped.observation.spaces["Autophase"]
# Adjust the bounds to reflect the normalized values.
self.env.observation_space_spec.space = gym.spaces.Box(
low=np.full(
self.env.observation_space_spec.space.shape[0], 0, dtype=np.float32
),
high=np.full(
self.env.observation_space_spec.space.shape[0], 1, dtype=np.float32
),
dtype=np.float32,
)
def convert_observation(self, observation):
if observation[self.TotalInsts_index] <= 0:
return np.zeros(observation.shape, dtype=np.float32)
return np.clip(
observation.astype(np.float32) / observation[self.TotalInsts_index], 0, 1
)
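    # Worked example (illustrative): if the raw Autophase vector reports
    # TotalInsts = 200 at index 51 and some feature has value 50, the
    # normalized feature becomes 50 / 200 = 0.25; a degenerate observation
    # with TotalInsts <= 0 is mapped to the all-zeros vector.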
class ConcatActionsHistogram(ObservationWrapper):
"""A wrapper that concatenates a histogram of previous actions to each
observation.
The actions histogram is concatenated to the end of the existing 1-D box
observation, expanding the space.
    The actions histogram has bounds [0, inf]. If you specify a fixed episode
    length `norm_to_episode_len`, each histogram update is scaled by
    1/norm_to_episode_len, so that the histogram sums to 1 after
    norm_to_episode_len steps.
"""
def __init__(self, env: CompilerEnv, norm_to_episode_len: int = 0):
super().__init__(env=env)
assert isinstance(
self.observation_space, gym.spaces.Box
), f"Can only contatenate actions histogram to box shape, not {self.observation_space}"
assert isinstance(
self.action_space, gym.spaces.Discrete
), "Can only construct histograms from discrete spaces"
assert len(self.observation_space.shape) == 1, "Requires 1-D observation space"
self.increment = 1 / norm_to_episode_len if norm_to_episode_len else 1
# Reshape the observation space.
self.env.observation_space_spec.space = gym.spaces.Box(
low=np.concatenate(
(
self.env.observation_space.low,
np.zeros(
self.action_space.n, dtype=self.env.observation_space.dtype
),
)
),
high=np.concatenate(
(
self.env.observation_space.high,
# The upper bound is 1.0 if we are normalizing to the
# episode length, else infinite for unbounded episode
# lengths.
np.ones(self.action_space.n, dtype=self.env.observation_space.dtype)
* (1.0 if norm_to_episode_len else np.inf),
)
),
dtype=self.env.observation_space.dtype,
)
def reset(self, *args, **kwargs):
self.histogram = np.zeros(
(self.action_space.n,), dtype=self.env.observation_space.dtype
)
return super().reset(*args, **kwargs)
def multistep(
self,
actions: List[ActionType],
observation_spaces=None,
observations=None,
**kwargs,
):
for a in actions:
self.histogram[a] += self.increment
return super().multistep(actions, **kwargs)
def convert_observation(self, observation):
return np.concatenate((observation, self.histogram)).astype(
self.env.observation_space.dtype
)
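    # Worked example (illustrative): with norm_to_episode_len=45, each action
    # adds 1/45 ~= 0.022 to its histogram bin, so after 45 steps the appended
    # histogram sums to 1; with norm_to_episode_len=0 each bin simply counts
    # how many times the action was taken.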
class AutophaseActionSpace(ConstrainedCommandline):
"""An action space wrapper that limits the action space to that of the
Autophase paper.
The actions used in the Autophase work are taken from:
https://github.com/ucb-bar/autophase/blob/2f2e61ad63b50b5d0e2526c915d54063efdc2b92/gym-hls/gym_hls/envs/getcycle.py#L9
Note that 4 of the 46 flags are not included. Those are:
-codegenprepare Excluded from CompilerGym
-scalarrepl Removed from LLVM in https://reviews.llvm.org/D21316
-scalarrepl-ssa Removed from LLVM in https://reviews.llvm.org/D21316
-terminate Not found in LLVM 10.0.0
"""
def __init__(self, env: LlvmEnv):
super().__init__(
env=env,
flags=[
"-adce",
"-break-crit-edges",
"-constmerge",
"-correlated-propagation",
"-deadargelim",
"-dse",
"-early-cse",
"-functionattrs",
"-functionattrs",
"-globaldce",
"-globalopt",
"-gvn",
"-indvars",
"-inline",
"-instcombine",
"-ipsccp",
"-jump-threading",
"-lcssa",
"-licm",
"-loop-deletion",
"-loop-idiom",
"-loop-reduce",
"-loop-rotate",
"-loop-simplify",
"-loop-unroll",
"-loop-unswitch",
"-lower-expect",
"-loweratomic",
"-lowerinvoke",
"-lowerswitch",
"-mem2reg",
"-memcpyopt",
"-partial-inliner",
"-prune-eh",
"-reassociate",
"-sccp",
"-simplifycfg",
"-sink",
"-sroa",
"-strip",
"-strip-nondebug",
"-tailcallelim",
],
)
|
CompilerGym-development
|
examples/llvm_rl/wrappers.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import hydra
from hydra.core.hydra_config import HydraConfig
from llvm_rl.model import Model
from omegaconf import DictConfig, OmegaConf
from omegaconf.errors import MissingMandatoryValue
from pydantic import ValidationError
import compiler_gym
def _get_job_id() -> int:
try:
return HydraConfig.get().job.id
except MissingMandatoryValue:
# The numeric job ID is missing if not in a multirun context. In that
# case, there can only be a single run.
return 0
@hydra.main(config_path="config", config_name="default")
def main(config: DictConfig) -> None:
OmegaConf.set_readonly(config, True)
# Parse the config to pydantic models.
try:
model: Model = Model(
# Hydra changes the working directory.
working_directory=os.getcwd(),
job_id=_get_job_id(),
compiler_gym_version=compiler_gym.__version__,
**config
)
except ValidationError as e:
print(e, file=sys.stderr)
sys.exit(1)
model.train()
model.test()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
examples/llvm_rl/train.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
import sys
from pathlib import Path
from typing import List
import humanize
import pandas as pd
from llvm_rl.model import Model
from pydantic import ValidationError
from tabulate import tabulate
from typer import Typer
from compiler_gym.util.logging import init_logging
from compiler_gym.util.statistics import geometric_mean
app = Typer()
def models_from_paths(log_dirs: List[Path]):
# Read all the inputs first.
models: List[Model] = []
for path in log_dirs:
try:
models += Model.from_logsdir(Path(path).expanduser())
except ValidationError as e:
print(e, file=sys.stderr)
sys.exit(1)
return models
@app.command()
def train(log_dirs: List[Path] = ["~/logs/compiler_gym/llvm_rl"]):
init_logging()
models = models_from_paths(log_dirs)
dfs = []
for model in models:
df = model.dataframe
if not len(df):
continue
# Select only the rows with a checkpoint.
df = df[df["checkpoint"].values]
df = df[
[
"trial_name",
"experiment_timestamp",
"episodes_total",
"episode_reward_geomean",
"episode_reward_mean",
"evaluation/episode_reward_mean",
"evaluation/episode_reward_geomean",
"time_total_s",
"complete",
"cpus",
"gpus",
]
]
sdf = df.groupby(
["experiment", "config", "replica", "experiment_timestamp"]
).max()
test_results = model.test_dataframes
sdf["test_results"] = [
test_results.get(d, pd.DataFrame()) for d in sdf["trial_name"]
]
sdf["test_ic_mean"] = [
sum(d["instruction_count_reduction"]) / len(d)
if not d.empty
else float("nan")
for d in sdf["test_results"]
]
sdf["test_ic_geomean"] = [
geometric_mean(d["instruction_count_reduction"])
if not d.empty
else float("nan")
for d in sdf["test_results"]
]
sdf["test_os_mean"] = [
sum(d["object_size_reduction"]) / len(d) if not d.empty else float("nan")
for d in sdf["test_results"]
]
sdf["test_os_geomean"] = [
geometric_mean(d["object_size_reduction"]) if not d.empty else float("nan")
for d in sdf["test_results"]
]
sdf["test_checkpoint"] = [
int(d["test_checkpoint"].values[0].split("-")[-1]) if not d.empty else ""
for d in sdf["test_results"]
]
dfs.append(sdf.reset_index())
df = pd.concat(dfs)
# Print everything.
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
pd.set_option("display.width", None)
df = df.rename(
columns={
"experiment_timestamp": "timestamp",
"episodes_total": "episodes",
"evaluation/episode_reward_geomean": "val_geomean",
"evaluation/episode_reward_mean": "val_mean",
"episode_reward_mean": "train_mean",
"episode_reward_geomean": "train_geomean",
"time_total_s": "training_time",
"test_reward_mean": "test_mean",
"test_reward_geomean": "test_geomean",
}
)
# Format for printing.
df["complete"] = [f"{x:.1%}" for x in df["complete"]]
df["episodes"] = [f"{int(x):,d}" for x in df["episodes"]]
df["training_time"] = [humanize.naturaldelta(x) for x in df["training_time"]]
for reward in [
"train_mean",
"train_geomean",
"val_mean",
"val_geomean",
"test_ic_geomean",
"test_os_geomean",
"test_ic_mean",
"test_os_mean",
]:
df[reward] = [f"{x:.4f}" for x in df[reward].values]
df = df[
[
"trial_name",
"timestamp",
"complete",
"episodes",
"training_time",
"test_checkpoint",
"train_geomean",
"val_geomean",
]
]
print(tabulate(df, headers="keys", showindex=False, tablefmt="psql"))
@app.command()
def test(
log_dirs: List[Path] = ["~/logs/compiler_gym/llvm_rl"],
format_for_latex: bool = False,
):
models = models_from_paths(log_dirs)
# Print everything.
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
pd.set_option("display.width", None)
dfs = {}
for model in models:
for trial, df in model.test_dataframes.items():
df["test_set"] = [
re.search(r"^((benchmark|generator)://)(.+)-v[012]/", d).group(3)
for d in df["benchmark"]
]
# Prune empty test set.
df = df[df["instruction_count_init"] > 0]
gmean_df = (
df[
[
"test_set",
"instruction_count_reduction",
"object_size_reduction",
]
]
.groupby(["test_set"])
.agg(geometric_mean)
)
mean_df = (
df[
[
"test_set",
"inference_walltime_seconds",
]
]
.groupby(["test_set"])
.mean()
)
df = pd.concat((mean_df, gmean_df), axis=1)
df = df.reset_index()
df.insert(0, "trial", trial)
if format_for_latex:
df["instruction_count_reduction"] = [
f"${float(d):.3f}\\times$"
for d in df["instruction_count_reduction"]
]
df["object_size_reduction"] = [
f"${float(d):.3f}\\times$" for d in df["object_size_reduction"]
]
print()
print(tabulate(df, headers="keys", showindex=False, tablefmt="psql"))
dfs[trial] = df
if __name__ == "__main__":
app()
|
CompilerGym-development
|
examples/llvm_rl/info.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import pytest
from llvm_rl import wrappers
import compiler_gym
@pytest.fixture(scope="function")
def env():
with compiler_gym.make("llvm-v0") as env:
yield env
def test_AutophaseNormalizedFeatures(env):
env = wrappers.AutophaseNormalizedFeatures(env)
assert env.observation_space_spec.id == "Autophase"
assert env.observation_space.shape == (56,)
assert env.observation_space.dtype == np.float32
def test_ConcatActionsHistogram(env):
env.observation_space = "Autophase"
num_features = env.observation_space.shape[0]
num_actions = env.action_space.n
env = wrappers.ConcatActionsHistogram(env)
env.reset()
action = env.action_space.sample()
obs, _, _, _ = env.step(action)
assert env.observation_space.shape == (num_features + num_actions,)
assert obs.shape == (num_features + num_actions,)
def test_AutophaseActionSpace(env):
env = wrappers.AutophaseActionSpace(env)
env.reset()
env.step(env.action_space.sample())
assert env.action_space.n == 42
|
CompilerGym-development
|
examples/llvm_rl/tests/wrappers_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from llvm_rl.model.testing import Testing
from omegaconf import OmegaConf
import compiler_gym
def test_testing_config():
cfg = Testing(
**OmegaConf.create(
"""\
timeout_hours: 12
runs_per_benchmark: 6
benchmarks:
- dataset: benchmark://cbench-v1
max_benchmarks: 5
"""
)
)
assert cfg.timeout_hours == 12
with compiler_gym.make("llvm-v0") as env:
assert len(list(cfg.benchmark_uris_iterator(env))) == 5 * 6
|
CompilerGym-development
|
examples/llvm_rl/tests/testing_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from llvm_rl.model.environment import Environment
def test_basic_environment_config():
model = Environment(id="llvm-ic-v0", max_episode_steps=3)
with model.make_env() as env:
assert env.spec.id == "llvm-ic-v0"
assert env.reward_space == "IrInstructionCountOz"
# Test max episode steps:
env.reset()
_, _, done, _ = env.step(env.action_space.sample()) # step 1
assert not done
_, _, done, _ = env.step(env.action_space.sample()) # step 2
assert not done
_, _, done, _ = env.step(env.action_space.sample()) # step 3
assert done
def test_reward_and_observation_space():
model = Environment(
id="llvm-ic-v0",
max_episode_steps=3,
observation_space="Ir",
reward_space="ObjectTextSizeBytes",
)
with model.make_env() as env:
assert env.reward_space == "ObjectTextSizeBytes"
assert env.observation_space_spec.id == "Ir"
def test_wrappers():
model = Environment(
id="llvm-ic-v0",
max_episode_steps=3,
wrappers=[
{
"wrapper": "ConstrainedCommandline",
"args": {"flags": ["-mem2reg", "-reg2mem"]},
}
],
)
with model.make_env() as env:
assert env.action_space.flags == ["-mem2reg", "-reg2mem"]
assert env.action_space.n == 2
|
CompilerGym-development
|
examples/llvm_rl/tests/environment_test.py
|