python_code | repo_name | file_path
---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for the LLVM CompilerGym environments."""
import sys
import pytest
from flaky import flaky
from compiler_gym.envs import CompilerEnv
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
@pytest.mark.xfail(
sys.platform == "darwin",
reason="github.com/facebookresearch/CompilerGym/issues/459",
)
@flaky # Runtime can timeout
def test_step(env: CompilerEnv, observation_space: str, reward_space: str):
"""Request every combination of observation and reward in a fresh environment."""
env.reward_space = None
env.observation_space = None
env.reset(benchmark="cbench-v1/crc32")
observation = env.observation[observation_space]
assert observation is not None
reward = env.reward[reward_space]
assert reward is not None
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/fresh_environment_observation_reward_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for LLVM session parameter handlers."""
import pytest
from flaky import flaky
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.errors import ServiceError, SessionNotFound
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_send_param_before_reset(env: LlvmEnv):
"""Test that send_params() before reset() raises an error."""
with pytest.raises(
SessionNotFound, match=r"Must call reset\(\) before send_params\(\)"
):
env.send_params(("test", "test"))
def test_send_param_unknown_key(env: LlvmEnv):
"""Test that send_params() raises an error when the key is not recognized."""
env.reset()
with pytest.raises(ValueError, match="Unknown parameter: unknown.key"):
env.send_params(("unknown.key", ""))
def test_benchmarks_cache_parameters(env: LlvmEnv):
env.reset()
assert int(env.send_param("service.benchmark_cache.get_size_in_bytes", "")) > 0
# Get the default max size.
assert env.send_params(("service.benchmark_cache.get_max_size_in_bytes", "")) == [
str(256 * 1024 * 1024)
]
assert env.send_param( # Same again but using singular API endpoint.
"service.benchmark_cache.get_max_size_in_bytes", ""
) == str(256 * 1024 * 1024)
# Set a new max size.
assert env.send_params(
("service.benchmark_cache.set_max_size_in_bytes", "256")
) == ["256"]
assert env.send_params(("service.benchmark_cache.get_max_size_in_bytes", "")) == [
"256"
]
def test_send_param_invalid_reply_count(env: LlvmEnv, mocker):
"""Test that an error is raised when # replies != # params."""
env.reset()
mocker.patch.object(env, "service")
with pytest.raises(
OSError, match="Sent 1 parameter but received 0 responses from the service"
):
env.send_param("param", "")
def test_benchmarks_cache_parameter_invalid_int_type(env: LlvmEnv):
env.reset()
with pytest.raises(ServiceError, match="stoi"):
env.send_params(("service.benchmark_cache.set_max_size_in_bytes", "not an int"))
@flaky # Runtime can timeout.
@pytest.mark.parametrize("n", [1, 3, 10])
def test_runtime_observation_parameters(env: LlvmEnv, n: int):
env.observation_space = "Runtime"
env.reset(benchmark="cbench-v1/qsort")
assert env.send_param("llvm.set_runtimes_per_observation_count", str(n)) == str(n)
assert env.send_param("llvm.get_runtimes_per_observation_count", "") == str(n)
runtimes = env.observation["Runtime"]
assert len(runtimes) == n
assert env.observation_space.contains(runtimes)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/llvm_session_parameters_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from compiler_gym.third_party import llvm
from compiler_gym.util.runfiles_path import site_data_path
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common"]
def test_download_llvm_threaded_load_test(temporary_environ, tmpwd: Path, mocker):
"""A load test for download_llvm_files() that checks that redundant
downloads are not performed when multiple simultaneous calls to
download_llvm_files() are issued.
"""
mocker.spy(llvm, "_download_llvm_files")
mocker.spy(llvm, "download")
# Force the LLVM download function to run.
llvm._LLVM_DOWNLOADED = False
# Force a temporary new site data path and sanity check it.
temporary_environ["COMPILER_GYM_SITE_DATA"] = str(tmpwd)
assert str(site_data_path(".")).endswith(str(tmpwd))
# Perform a bunch of concurrent calls to download_llvm_files().
with ThreadPoolExecutor() as executor:
futures = [executor.submit(llvm.download_llvm_files) for _ in range(100)]
for future in futures:
future.result()
# For debugging in case of error.
print("Downloads:", llvm._download_llvm_files.call_count) # pylint: disable
for root, _, filenames in os.walk(tmpwd):
print(root)
for filename in filenames:
print(Path(root) / filename)
# Check that the files were unpacked.
assert (tmpwd / "llvm-v0" / "LICENSE").is_file()
assert (tmpwd / "llvm-v0" / "bin" / "clang").is_file()
# Check that the underlying download implementation was only called a single
# time.
assert llvm._download_llvm_files.call_count == 1 # pylint: disable
assert llvm.download.call_count == 1
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/download_llvm_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Test that LlvmEnv is compatible with OpenAI gym interface."""
import gym
import pytest
from compiler_gym.envs.llvm import LlvmEnv
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_type_classes(env: LlvmEnv):
env.observation_space = "Autophase"
env.reward_space = "IrInstructionCount"
env.reset()
assert isinstance(env, gym.Env)
assert isinstance(env, LlvmEnv)
assert isinstance(env.unwrapped, LlvmEnv)
assert isinstance(env.action_space, gym.Space)
assert isinstance(env.observation_space, gym.Space)
assert isinstance(env.reward_range[0], float)
assert isinstance(env.reward_range[1], float)
def test_optional_properties(env: LlvmEnv):
assert "render.modes" in env.metadata
assert env.spec
def test_contextmanager(env: LlvmEnv, mocker):
mocker.spy(env, "close")
assert env.close.call_count == 0
with env:
pass
assert env.close.call_count == 1
def test_contextmanager_gym_make(mocker):
with gym.make("llvm-v0") as env:
mocker.spy(env, "close")
assert env.close.call_count == 0
with env:
pass
assert env.close.call_count == 1
def test_observation_wrapper(env: LlvmEnv):
class WrappedEnv(gym.ObservationWrapper):
def observation(self, observation):
return "Hello"
wrapped = WrappedEnv(env)
observation = wrapped.reset()
assert observation == "Hello"
observation, _, _, _ = wrapped.step(0)
assert observation == "Hello"
def test_reward_wrapper(env: LlvmEnv):
class WrappedEnv(gym.RewardWrapper):
def reward(self, reward):
return 1
wrapped = WrappedEnv(env)
wrapped.reset()
_, reward, _, _ = wrapped.step(0)
assert reward == 1
@pytest.mark.xfail(
reason="github.com/facebookresearch/CompilerGym/issues/587", strict=True
)
def test_env_spec_make(env: LlvmEnv):
"""Test that demonstrates a failure in gym compatibility: env.spec does
not encode mutable state like benchmark, reward space, and observation
space.
"""
env.reset(benchmark="cbench-v1/bitcount")
with env.spec.make() as new_env:
assert new_env.benchmark == env.benchmark
def test_env_spec_make_workaround(env: LlvmEnv):
"""Demonstrate how #587 would be fixed, by updating the 'kwargs' dict."""
env.reset(benchmark="cbench-v1/bitcount")
env.spec._kwargs[ # pylint: disable=protected-access
"benchmark"
] = "cbench-v1/bitcount"
with env.spec.make() as new_env:
assert new_env.benchmark == env.benchmark
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/gym_interface_compatability_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for the LLVM CompilerGym environments."""
import re
import tempfile
from pathlib import Path
import pytest
from compiler_gym.datasets import Benchmark
from compiler_gym.envs import CompilerEnv
from compiler_gym.envs.llvm import llvm_benchmark as llvm
from compiler_gym.errors.dataset_errors import BenchmarkInitError
from compiler_gym.service.proto import Benchmark as BenchmarkProto
from compiler_gym.service.proto import File
from tests.pytest_plugins.common import macos_only
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_add_benchmark_invalid_scheme(env: CompilerEnv):
with pytest.raises(ValueError) as ctx:
env.reset(
benchmark=Benchmark(
BenchmarkProto(
uri="benchmark://foo", program=File(uri="https://invalid/scheme")
),
)
)
assert str(ctx.value) == (
"Invalid benchmark data URI. "
'Only the file:/// scheme is supported: "https://invalid/scheme"'
)
def test_add_benchmark_invalid_path(env: CompilerEnv):
with tempfile.TemporaryDirectory() as d:
tmp = Path(d) / "not_a_file"
with pytest.raises(FileNotFoundError) as ctx:
env.reset(benchmark=Benchmark.from_file("benchmark://foo", tmp))
# Use endswith() because on macOS there may be a /private prefix.
assert str(ctx.value).endswith(str(tmp))
def test_get_system_library_flags_not_found():
with pytest.raises(
llvm.HostCompilerFailure, match="Failed to invoke 'not-a-real-binary'"
):
llvm.get_system_library_flags("not-a-real-binary")
def test_get_system_library_flags_nonzero_exit_status():
"""Test that setting the $CXX to an invalid binary raises an error."""
with pytest.raises(llvm.HostCompilerFailure, match="Failed to invoke 'false'"):
llvm.get_system_library_flags("false")
def test_get_system_library_flags_output_parse_failure():
"""Test that setting the $CXX to an invalid binary raises an error."""
with pytest.raises(
llvm.UnableToParseHostCompilerOutput,
match="Failed to parse '#include <...>' search paths from 'echo'",
):
llvm.get_system_library_flags("echo")
def test_get_system_library_flags():
flags = llvm.get_system_library_flags()
assert flags
assert "-isystem" in flags
@macos_only
def test_get_system_library_flags_system_libraries():
flags = llvm.get_system_library_flags()
assert flags
assert flags[-1] == "-L/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/lib"
def test_ClangInvocation_system_libs():
cmd = llvm.ClangInvocation(["foo.c"]).command("a.out")
assert "-isystem" in cmd
def test_ClangInvocation_no_system_libs():
cmd = llvm.ClangInvocation(["foo.c"], system_includes=False).command("a.out")
assert "-isystem" not in cmd
@pytest.mark.parametrize(
"source",
[
"",
"int A() {return 0;}",
"""
int A() {return 0;}
int B() {return A();}
int C() {return 0;}
""",
],
)
@pytest.mark.parametrize("system_includes", [False, True])
def test_make_benchmark_from_source_valid_source(
env: CompilerEnv, source: str, system_includes: bool
):
benchmark = llvm.make_benchmark_from_source(source, system_includes=system_includes)
env.reset(benchmark=benchmark)
@pytest.mark.parametrize(
"source",
[
"@syntax error!!!", # invalid syntax
"int A() {return a;}", # undefined variable
'#include "missing.h"', # missing include
],
)
@pytest.mark.parametrize("system_includes", [False, True])
def test_make_benchmark_from_source_invalid_source(source: str, system_includes: bool):
with pytest.raises(
BenchmarkInitError, match="Failed to make benchmark with compiler error:"
):
llvm.make_benchmark_from_source(source, system_includes=system_includes)
def test_make_benchmark_from_source_invalid_copt():
with pytest.raises(
BenchmarkInitError, match="Failed to make benchmark with compiler error:"
):
llvm.make_benchmark_from_source(
"int A() {return 0;}", copt=["-invalid-argument!"]
)
def test_make_benchmark_from_source_missing_system_includes():
with pytest.raises(
BenchmarkInitError, match="Failed to make benchmark with compiler error:"
):
llvm.make_benchmark_from_source("#include <stdio.h>", system_includes=False)
def test_make_benchmark_from_source_with_system_includes():
assert llvm.make_benchmark_from_source("#include <stdio.h>", system_includes=True)
def test_split_benchmark_by_function_no_functions():
benchmark = llvm.make_benchmark_from_source("")
with pytest.raises(ValueError, match="No functions found"):
llvm.split_benchmark_by_function(benchmark)
def is_defined(signature: str, ir: str):
"""Return whether the function signature is defined in the IR."""
return re.search(f"^define .*{signature}", ir, re.MULTILINE)
def is_declared(signature: str, ir: str):
"""Return whether the function signature is defined in the IR."""
return re.search(f"^declare .*{signature}", ir, re.MULTILINE)
def test_split_benchmark_by_function_repeated_split_single_function(env: CompilerEnv):
benchmark = llvm.make_benchmark_from_source("int A() {return 0;}", lang="c")
for _ in range(10):
benchmarks = llvm.split_benchmark_by_function(benchmark)
assert len(benchmarks) == 2 # global initializers + extracted function
env.reset(benchmark=benchmarks[-1])
assert is_defined("i32 @A()", env.ir)
benchmark = benchmarks[-1]
def test_split_benchmark_by_function_multiple_functions(env: CompilerEnv):
benchmark = llvm.make_benchmark_from_source(
"""
int A() {return 0;}
int B() {return A();}
""",
lang="c",
)
benchmarks = llvm.split_benchmark_by_function(benchmark)
assert len(benchmarks) == 3
_, A, B = benchmarks
env.reset(benchmark=A)
assert is_defined("i32 @A()", env.ir)
assert not is_defined("i32 @B()", env.ir)
assert not is_declared("i32 @A()", env.ir)
assert not is_declared("i32 @B()", env.ir)
env.reset(benchmark=B)
assert not is_defined("i32 @A()", env.ir)
assert is_defined("i32 @B()", env.ir)
assert is_declared("i32 @A()", env.ir)
assert not is_declared("i32 @B()", env.ir)
def test_split_benchmark_by_function_maximum_function_count(env: CompilerEnv):
benchmark = llvm.make_benchmark_from_source(
"""
int A() {return 0;}
int B() {return A();}
""",
lang="c",
)
benchmarks = llvm.split_benchmark_by_function(
benchmark,
maximum_function_count=1,
)
assert len(benchmarks) == 2 # global initializers + extracted function
env.reset(benchmark=benchmarks[1])
assert is_defined("i32 @A()", env.ir)
def test_merge_benchmarks_single_input(env: CompilerEnv):
A = llvm.make_benchmark_from_source("int A() {return 0;}", lang="c")
merged = llvm.merge_benchmarks([A])
env.reset(benchmark=merged)
assert is_defined("i32 @A()", env.ir)
def test_merge_benchmarks_independent(env: CompilerEnv):
A = llvm.make_benchmark_from_source("int A() {return 0;}", lang="c")
B = llvm.make_benchmark_from_source("int B() {return 0;}", lang="c")
merged = llvm.merge_benchmarks([A, B])
env.reset(benchmark=merged)
assert is_defined("i32 @A()", env.ir)
assert is_defined("i32 @B()", env.ir)
def test_merge_benchmarks_multiply_defined():
A = llvm.make_benchmark_from_source("int A() {return 0;}", lang="c")
with pytest.raises(ValueError, match="symbol multiply defined"):
llvm.merge_benchmarks([A, A])
def test_merge_benchmarks_declarations(env: CompilerEnv):
A = llvm.make_benchmark_from_source("int A() {return 0;}", lang="c")
B = llvm.make_benchmark_from_source("int A(); int B() {return A();}", lang="c")
merged = llvm.merge_benchmarks([A, B])
env.reset(benchmark=merged)
assert is_defined("i32 @A()", env.ir)
assert is_defined("i32 @B()", env.ir)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/llvm_benchmark_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Regression tests generated by fuzzing."""
import difflib
import subprocess
import pytest
from tests.test_main import main
pytest_plugins = [
"tests.pytest_plugins.llvm",
"tests.pytest_plugins.common",
]
@pytest.mark.xfail(reason="-separate-const-offset-from-gep", strict=True)
def test_regression_test_const_offset_from_gep(env, tmpwd, llvm_diff, llvm_opt):
env.reset(benchmark="benchmark://cbench-v1/blowfish")
env.write_ir("input.ll")
# FIXME: Removing the -separate-const-offset-from-gep actions from the below
# commandline "fixes" the test.
actions = env.action_space.from_string(
"opt -objc-arc-apelim -separate-const-offset-from-gep -sancov -indvars -loop-reduce -dse -inferattrs -loop-fusion -dce -break-crit-edges -constmerge -indvars -mem2reg -objc-arc-expand -ee-instrument -loop-reroll -break-crit-edges -separate-const-offset-from-gep -loop-idiom -float2int -dce -float2int -ipconstprop -simple-loop-unswitch -coro-cleanup -early-cse-memssa -strip -functionattrs -objc-arc-contract -sink -loop-distribute -loop-reroll -slsr -separate-const-offset-from-gep input.bc -o output.bc"
)
for action in actions:
_, _, done, info = env.step(action)
assert not done, info["error_details"]
env.write_ir("env.ll")
subprocess.check_call(
env.action_space.to_string(env.actions) + " -S -o output.ll",
env={"PATH": str(llvm_opt.parent)},
shell=True,
timeout=60,
)
with open("output.ll") as f1, open("env.ll") as f2:
for line in difflib.unified_diff(f1.readlines()[1:], f2.readlines()[1:]):
subprocess.check_output(
[
llvm_diff,
"output.ll",
]
)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/fuzzing_regression_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Regression tests for LlvmEnv.fork() identified by hand or through fuzzing."""
from typing import List, NamedTuple
import pytest
from flaky import flaky
import compiler_gym
from compiler_gym.envs import LlvmEnv
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
class ForkRegressionTest(NamedTuple):
benchmark: str
pre_fork: str
post_fork: str
reward_space: str = "IrInstructionCount"
# A list of testcases where we have identified the parent and child environment
# states differing after forking and running identical actions on both.
#
# NOTE(cummins): To submit a new testcase, run the
# "minimize_fork_regression_testcase()" function below to produce a minimal
# reproducible example and add it to this list.
MINIMIZED_FORK_REGRESSION_TESTS: List[ForkRegressionTest] = [
ForkRegressionTest(
benchmark="benchmark://cbench-v1/tiff2bw",
pre_fork="-globalopt",
post_fork="-gvn-hoist",
reward_space="IrInstructionCount",
),
ForkRegressionTest(
benchmark="benchmark://cbench-v1/bzip2",
pre_fork="-mem2reg",
post_fork="-loop-guard-widening",
reward_space="IrInstructionCount",
),
ForkRegressionTest(
benchmark="benchmark://cbench-v1/jpeg-d",
pre_fork="-sroa",
post_fork="-loop-rotate",
reward_space="IrInstructionCount",
),
ForkRegressionTest(
benchmark="benchmark://cbench-v1/qsort",
pre_fork="-simplifycfg -newgvn -instcombine -break-crit-edges -gvn -inline",
post_fork="-lcssa",
reward_space="IrInstructionCount",
),
ForkRegressionTest(
benchmark="benchmark://poj104-v1/101/859",
pre_fork="-licm",
post_fork="-loop-rotate",
reward_space="IrInstructionCount",
),
]
@flaky
@pytest.mark.parametrize("test", MINIMIZED_FORK_REGRESSION_TESTS)
def test_fork_regression_test(env: LlvmEnv, test: ForkRegressionTest):
"""Run the fork regression test:
1. Initialize an environment.
2. Apply a "pre_fork" sequence of actions.
3. Create a fork of the environment.
4. Apply a "post_fork" sequence of actions in both the fork and parent.
5. Verify that the environment states have gone out of sync.
"""
env.reward_space = test.reward_space
env.reset(test.benchmark)
pre_fork = [env.action_space[f] for f in test.pre_fork.split()]
post_fork = [env.action_space[f] for f in test.post_fork.split()]
_, _, done, info = env.multistep(pre_fork)
assert not done, info
with env.fork() as fkd:
assert env.state == fkd.state # Sanity check
env.multistep(post_fork)
fkd.multistep(post_fork)
# Verify that the environment states no longer line up.
assert env.state != fkd.state
# Utility function for generating test cases. Copy this code into a standalone
# script and call the function on your test case. It will print a minimized
# version of it.
def minimize_fork_regression_testcase(test: ForkRegressionTest):
def _check_hypothesis(
benchmark: str, pre_fork: List[int], post_fork: List[int]
) -> bool:
with compiler_gym.make("llvm-v0", reward_space="IrInstructionCount") as env:
env.reset(benchmark)
_, _, done, info = env.multistep(pre_fork)
assert not done, info # Sanity check
with env.fork() as fkd:
assert env.state == fkd.state # Sanity check
env.multistep(post_fork)
fkd.multistep(post_fork)
return env.state != fkd.state
with compiler_gym.make("llvm-v0", reward_space=test.reward_space) as env:
pre_fork = [env.action_space[f] for f in test.pre_fork.split()]
post_fork = [env.action_space[f] for f in test.post_fork.split()]
pre_fork_mask = [True] * len(pre_fork)
post_fork_mask = [True] * len(post_fork)
print("Minimizing the pre-fork actions list")
for i in range(len(pre_fork)):
pre_fork_mask[i] = False
masked_pre_fork = [p for p, m in zip(pre_fork, pre_fork_mask) if m]
if _check_hypothesis(test.benchmark, masked_pre_fork, post_fork):
print(
f"Removed pre-fork action {env.action_space.names[pre_fork[i]]}, {sum(pre_fork_mask)} remaining"
)
else:
pre_fork_mask[i] = True
pre_fork = [p for p, m in zip(pre_fork, pre_fork_mask) if m]
print("Minimizing the post-fork actions list")
for i in range(len(post_fork)):
post_fork_mask[i] = False
masked_post_fork = [p for p, m in zip(post_fork, post_fork_mask) if m]
if _check_hypothesis(test.benchmark, pre_fork, masked_post_fork):
print(
f"Removed post-fork action {env.action_space.names[post_fork[i]]}, {sum(post_fork_mask)} remaining"
)
else:
post_fork_mask[i] = True
post_fork = [p for p, m in zip(post_fork, post_fork_mask) if m]
pre_fork = " ".join(env.action_space.names[p] for p in pre_fork)
post_fork = " ".join(env.action_space.names[p] for p in post_fork)
return ForkRegressionTest(
benchmark=test.benchmark,
pre_fork=pre_fork,
post_fork=post_fork,
reward_space=test.reward_space,
)
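# Example usage (a sketch with hypothetical candidate values): copy this module
# into a standalone script and run, e.g.:
#
#     candidate = ForkRegressionTest(
#         benchmark="benchmark://cbench-v1/qsort",
#         pre_fork="-mem2reg -gvn -instcombine",
#         post_fork="-licm",
#         reward_space="IrInstructionCount",
#     )
#     print(minimize_fork_regression_testcase(candidate))
#
# The returned ForkRegressionTest can then be added to
# MINIMIZED_FORK_REGRESSION_TESTS above.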
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/fork_regression_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for LlvmEnv.fork()."""
import subprocess
import sys
import pytest
from compiler_gym.envs import LlvmEnv
from compiler_gym.util.runfiles_path import runfiles_path
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
EXAMPLE_BITCODE_FILE = runfiles_path(
"compiler_gym/third_party/cbench/cbench-v1/crc32.bc"
)
EXAMPLE_BITCODE_IR_INSTRUCTION_COUNT = 196
def test_with_statement(env: LlvmEnv):
"""Test that the `with` statement context manager works on forks."""
env.reset("cbench-v1/crc32")
env.step(0)
with env.fork() as fkd:
assert fkd.in_episode
assert fkd.actions == [0]
assert not fkd.in_episode
assert env.in_episode
def test_fork_child_process_is_not_orphaned(env: LlvmEnv):
env.reset("cbench-v1/crc32")
with env.fork() as fkd:
# Check that both environments share the same service.
assert isinstance(env.service.connection.process, subprocess.Popen)
assert isinstance(fkd.service.connection.process, subprocess.Popen)
assert env.service.connection.process.pid == fkd.service.connection.process.pid
process = env.service.connection.process
# Sanity check that both services are alive.
assert not env.service.connection.process.poll()
assert not fkd.service.connection.process.poll()
# Close the parent service.
env.close()
# The parent's service reference is cleared, but the shared service process
# is kept alive by the fork.
assert not env.service
assert not fkd.service.connection.process.poll()
# Close the forked service.
fkd.close()
# Check that the service has been killed.
assert process.poll() is not None
def test_fork_chain_child_processes_are_not_orphaned(env: LlvmEnv):
env.reset("cbench-v1/crc32")
# Create a chain of forked environments.
a = env.fork()
b = a.fork()
c = b.fork()
d = c.fork()
try:
# Sanity check that they share the same underlying service.
assert (
env.service.connection.process
== a.service.connection.process
== b.service.connection.process
== c.service.connection.process
== d.service.connection.process
)
proc = env.service.connection.process
# Kill the forked environments one by one.
a.close()
assert proc.poll() is None
b.close()
assert proc.poll() is None
c.close()
assert proc.poll() is None
d.close()
assert proc.poll() is None
# Kill the final environment, refcount 0, service is closed.
env.close()
assert proc.poll() is not None
finally:
a.close()
b.close()
c.close()
d.close()
def test_fork_before_reset(env: LlvmEnv):
"""Test that fork() before reset() starts an episode."""
assert not env.in_episode
with env.fork() as fkd:
assert env.in_episode
assert fkd.in_episode
def test_fork_closed_service(env: LlvmEnv):
env.reset(benchmark="cbench-v1/crc32")
_, _, done, _ = env.step(0)
assert not done
assert env.actions == [0]
env.close()
assert not env.service
with env.fork() as fkd:
assert env.actions == [0]
assert fkd.actions == [0]
def test_fork_spaces_are_same(env: LlvmEnv):
env.observation_space = "Autophase"
env.reward_space = "IrInstructionCount"
env.reset(benchmark="cbench-v1/crc32")
with env.fork() as fkd:
assert fkd.observation_space == env.observation_space
assert fkd.reward_space == env.reward_space
assert fkd.benchmark == env.benchmark
def test_fork_state(env: LlvmEnv):
env.reset("cbench-v1/crc32")
env.step(0)
assert env.actions == [0]
with env.fork() as fkd:
assert fkd.benchmark == env.benchmark
assert fkd.actions == env.actions
def test_fork_reset(env: LlvmEnv):
env.reset("cbench-v1/crc32")
env.step(0)
env.step(1)
env.step(2)
with env.fork() as fkd:
fkd.step(3)
assert env.actions == [0, 1, 2]
assert fkd.actions == [0, 1, 2, 3]
fkd.reset()
assert env.actions == [0, 1, 2]
assert fkd.actions == []
def test_fork_custom_benchmark(env: LlvmEnv):
benchmark = env.make_benchmark(EXAMPLE_BITCODE_FILE)
env.reset(benchmark=benchmark)
def ir(env):
"""Strip the ModuleID line from IR."""
return "\n".join(env.ir.split("\n")[1:])
with env.fork() as fkd:
assert ir(env) == ir(fkd)
fkd.reset()
assert ir(env) == ir(fkd)
def test_fork_twice_test(env: LlvmEnv):
"""Test that fork() on a forked environment works."""
env.reset(benchmark="cbench-v1/crc32")
with env.fork() as fork_a:
with fork_a.fork() as fork_b:
assert env.state == fork_a.state
assert fork_a.state == fork_b.state
def test_fork_modified_ir_is_the_same(env: LlvmEnv):
"""Test that the IR of a forked environment is the same."""
env.reset("cbench-v1/crc32")
# Apply an action that modifies the benchmark.
_, _, done, info = env.step(env.action_space.flags.index("-mem2reg"))
assert not done
assert not info["action_had_no_effect"]
with env.fork() as fkd:
assert "\n".join(env.ir.split("\n")[1:]) == "\n".join(fkd.ir.split("\n")[1:])
# Apply another action.
_, _, done, info = env.step(env.action_space.flags.index("-gvn"))
_, _, done, info = fkd.step(fkd.action_space.flags.index("-gvn"))
assert not done
assert not info["action_had_no_effect"]
# Check that IRs are still equivalent.
assert "\n".join(env.ir.split("\n")[1:]) == "\n".join(fkd.ir.split("\n")[1:])
@pytest.mark.xfail(
sys.platform == "darwin",
reason="github.com/facebookresearch/CompilerGym/issues/459",
)
def test_fork_rewards(env: LlvmEnv, reward_space: str):
"""Test that rewards are equal after fork() is called."""
env.reward_space = reward_space
env.reset("cbench-v1/dijkstra")
actions = [env.action_space.flags.index(n) for n in ["-mem2reg", "-simplifycfg"]]
forked = env.fork()
try:
for action in actions:
_, env_reward, env_done, _ = env.step(action)
_, fkd_reward, fkd_done, _ = forked.step(action)
assert env_done is False
assert fkd_done is False
assert env_reward == fkd_reward
finally:
forked.close()
def test_fork_previous_cost_reward_update(env: LlvmEnv):
env.reward_space = "IrInstructionCount"
env.reset("cbench-v1/crc32")
env.step(env.action_space.flags.index("-mem2reg"))
with env.fork() as fkd:
_, a, _, _ = env.step(env.action_space.flags.index("-mem2reg"))
_, b, _, _ = fkd.step(env.action_space.flags.index("-mem2reg"))
assert a == b
def test_fork_previous_cost_lazy_reward_update(env: LlvmEnv):
env.reset("cbench-v1/crc32")
env.step(env.action_space.flags.index("-mem2reg"))
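# Accessing the reward triggers a lazy IrInstructionCount evaluation before the
# fork, so the cached previous cost is populated before fork() copies it.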
env.reward["IrInstructionCount"]
with env.fork() as fkd:
env.step(env.action_space.flags.index("-mem2reg"))
fkd.step(env.action_space.flags.index("-mem2reg"))
assert env.reward["IrInstructionCount"] == fkd.reward["IrInstructionCount"]
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/fork_env_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for LLVM runtime support."""
from pathlib import Path
from typing import List
import numpy as np
import pytest
from flaky import flaky
from compiler_gym.envs.llvm import LlvmEnv, llvm_benchmark
from compiler_gym.spaces.reward import Reward
from compiler_gym.util.gym_type_hints import ActionType, ObservationType
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
@pytest.mark.parametrize("runtime_observation_count", [1, 3, 5])
def test_custom_benchmark_runtime(env: LlvmEnv, tmpdir, runtime_observation_count: int):
env.reset()
env.runtime_observation_count = runtime_observation_count
with open(tmpdir / "program.c", "w") as f:
f.write(
"""
#include <stdio.h>
int main(int argc, char** argv) {
printf("Hello\\n");
for (int i = 0; i < 10; ++i) {
argc += 2;
}
return argc - argc;
}
"""
)
benchmark = env.make_benchmark(Path(tmpdir) / "program.c")
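# Attach build and run commands to the benchmark's dynamic config so that the
# service can compile and execute the program for Runtime observations. The
# "$CC" and "$IN" tokens are placeholders that the service substitutes with the
# host C compiler and the input program, respectively.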
benchmark.proto.dynamic_config.build_cmd.argument.extend(
["$CC", "$IN"] + llvm_benchmark.get_system_library_flags()
)
benchmark.proto.dynamic_config.build_cmd.outfile.extend(["a.out"])
benchmark.proto.dynamic_config.build_cmd.timeout_seconds = 10
benchmark.proto.dynamic_config.run_cmd.argument.extend(["./a.out"])
benchmark.proto.dynamic_config.run_cmd.timeout_seconds = 10
env.reset(benchmark=benchmark)
runtimes = env.observation.Runtime()
assert len(runtimes) == runtime_observation_count
assert np.all(runtimes > 0)
@flaky
def test_custom_benchmark_runtimes_differ(env: LlvmEnv, tmpdir):
"""Same as above, but test that runtimes differ from run to run."""
env.reset()
env.runtime_observation_count = 10
with open(tmpdir / "program.c", "w") as f:
f.write(
"""
#include <stdio.h>
int main(int argc, char** argv) {
printf("Hello\\n");
for (int i = 0; i < 10; ++i) {
argc += 2;
}
return argc - argc;
}
"""
)
benchmark = env.make_benchmark(Path(tmpdir) / "program.c")
benchmark.proto.dynamic_config.build_cmd.argument.extend(
["$CC", "$IN"] + llvm_benchmark.get_system_library_flags()
)
benchmark.proto.dynamic_config.build_cmd.outfile.extend(["a.out"])
benchmark.proto.dynamic_config.build_cmd.timeout_seconds = 10
benchmark.proto.dynamic_config.run_cmd.argument.extend(["./a.out"])
benchmark.proto.dynamic_config.run_cmd.timeout_seconds = 10
env.reset(benchmark=benchmark)
runtimes_a = env.observation.Runtime()
runtimes_b = env.observation.Runtime()
assert not np.all(runtimes_a == runtimes_b)
def test_invalid_runtime_count(env: LlvmEnv):
env.reset()
with pytest.raises(
ValueError, match=r"runtimes_per_observation_count must be >= 1"
):
env.runtime_observation_count = 0
with pytest.raises(
ValueError, match=r"runtimes_per_observation_count must be >= 1"
):
env.runtime_observation_count = -1
def test_runtime_observation_count_before_reset(env: LlvmEnv):
"""Test setting property before reset() is called."""
env.runtime_observation_count = 10
assert env.runtime_observation_count == 10
env.reset()
assert env.runtime_observation_count == 10
def test_runtime_warmup_runs_count_before_reset(env: LlvmEnv):
"""Test setting property before reset() is called."""
env.runtime_warmup_runs_count = 10
assert env.runtime_warmup_runs_count == 10
env.reset()
assert env.runtime_warmup_runs_count == 10
def test_runtime_observation_count_fork(env: LlvmEnv):
"""Test that custom count properties propagate on fork()."""
env.runtime_observation_count = 2
env.runtime_warmup_runs_count = 1
with env.fork() as fkd:
assert fkd.runtime_observation_count == 2
assert fkd.runtime_warmup_runs_count == 1
env.reset()
with env.fork() as fkd:
assert fkd.runtime_observation_count == 2
assert fkd.runtime_warmup_runs_count == 1
def test_default_runtime_observation_count_fork(env: LlvmEnv):
"""Test that default property values propagate on fork()."""
env.reset()
rc = env.runtime_observation_count
wc = env.runtime_warmup_runs_count
with env.fork() as fkd:
assert fkd.runtime_observation_count == rc
assert fkd.runtime_warmup_runs_count == wc
class RewardDerivedFromRuntime(Reward):
"""A custom reward space that is derived from the Runtime observation space."""
def __init__(self):
super().__init__(
name="runtimeseries",
observation_spaces=["Runtime"],
default_value=0,
min=None,
max=None,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.last_runtime_observation: List[float] = None
def reset(self, benchmark, observation_view) -> None:
del benchmark # unused
self.last_runtime_observation = observation_view["Runtime"]
def update(
self,
actions: List[ActionType],
observations: List[ObservationType],
observation_view,
) -> float:
del actions # unused
del observation_view # unused
self.last_runtime_observation = observations[0]
return 0
@flaky # runtime may fail
@pytest.mark.parametrize("runtime_observation_count", [1, 3, 5])
def test_correct_number_of_observations_during_reset(
env: LlvmEnv, runtime_observation_count: int
):
env.reward.add_space(RewardDerivedFromRuntime())
env.runtime_observation_count = runtime_observation_count
env.reset(reward_space="runtimeseries")
assert env.runtime_observation_count == runtime_observation_count
# Check that the number of observations received during reset() matches the
# number that was requested.
assert (
len(env.reward.spaces["runtimeseries"].last_runtime_observation)
== runtime_observation_count
)
# Check that the number of observations received during step() matches the
# number that was requested.
env.reward.spaces["runtimeseries"].last_runtime_observation = None
env.step(0)
assert (
len(env.reward.spaces["runtimeseries"].last_runtime_observation)
== runtime_observation_count
)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/runtime_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for the LLVM CompilerGym environments."""
import numpy as np
from compiler_gym.envs import CompilerEnv
from compiler_gym.errors import ServiceError
from compiler_gym.third_party.autophase import AUTOPHASE_FEATURE_DIM
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_step(env: CompilerEnv, action_name: str):
"""Run each action on a single benchmark."""
env.reward_space = "IrInstructionCount"
env.observation_space = "Autophase"
env.reset(benchmark="cbench-v1/crc32")
action = env.action_space.from_string(action_name)[0]
observation, reward, done, _ = env.step(action)
assert isinstance(observation, np.ndarray)
assert observation.shape == (AUTOPHASE_FEATURE_DIM,)
assert isinstance(reward, float)
assert isinstance(done, bool)
try:
env.close()
except ServiceError as e:
# env.close() will raise an error if the service terminated
# ungracefully. In that case, the "done" flag should have been set.
assert done, f"Service error was raised when 'done' flag not set: {e}"
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/all_actions_single_step_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for serializing LLVM datasets."""
import pickle
from compiler_gym.datasets import Dataset
from compiler_gym.envs.llvm import LlvmEnv
from tests.pytest_plugins.common import ci_only, skip_on_ci
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
# Installing all datasets on CI is expensive. Skip these tests; we define
# smaller versions of them below.
@skip_on_ci
def test_pickle_dataset(dataset: Dataset):
"""Test that datasets can be pickled."""
assert pickle.loads(pickle.dumps(dataset)) == dataset
@skip_on_ci
def test_pickle_benchmark(dataset: Dataset):
"""Test that benchmarks can be pickled."""
benchmark = next(dataset.benchmarks())
assert pickle.loads(pickle.dumps(benchmark))
# Smaller versions of the above tests for CI.
@ci_only
def test_pickle_cbench_dataset(env: LlvmEnv):
"""Test that datasets can be pickled."""
dataset = env.datasets["benchmark://cbench-v1"]
assert pickle.loads(pickle.dumps(dataset)) == dataset
@ci_only
def test_pickle_cbench_benchmark(env: LlvmEnv):
"""Test that benchmarks can be pickled."""
dataset = env.datasets["benchmark://cbench-v1"]
benchmark = next(dataset.benchmarks())
assert pickle.loads(pickle.dumps(benchmark))
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/datasets_pickle_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the LLVM datasets."""
import gym
import compiler_gym.envs.llvm # noqa register environments
from tests.test_main import main
def test_default_dataset_list():
with gym.make("llvm-v0") as env:
assert list(d.name for d in env.datasets) == [
"benchmark://cbench-v1",
"benchmark://anghabench-v1",
"benchmark://blas-v0",
"benchmark://chstone-v0",
"benchmark://clgen-v0",
"benchmark://github-v0",
"benchmark://jotaibench-v0",
"benchmark://linux-v0",
"benchmark://mibench-v1",
"benchmark://npb-v0",
"benchmark://opencv-v0",
"benchmark://poj104-v1",
"benchmark://tensorflow-v0",
"generator://csmith-v0",
"generator://llvm-stress-v0",
]
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/datasets/llvm_datasets_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the POJ104 dataset."""
import sys
from itertools import islice
from pathlib import Path
import gym
import pytest
import compiler_gym.envs.llvm # noqa register environments
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.envs.llvm.datasets import POJ104Dataset
from compiler_gym.errors import BenchmarkInitError
from tests.pytest_plugins.common import skip_on_ci
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common", "tests.pytest_plugins.llvm"]
@pytest.fixture(scope="module")
def poj104_dataset() -> POJ104Dataset:
with gym.make("llvm-v0") as env:
ds = env.datasets["poj104-v1"]
yield ds
def test_poj104_size(poj104_dataset: POJ104Dataset):
if sys.platform == "darwin":
assert poj104_dataset.size == 49815
else:
assert poj104_dataset.size == 49816
@skip_on_ci
@pytest.mark.parametrize("index", range(100))
def test_poj104_random_select(
env: LlvmEnv, poj104_dataset: POJ104Dataset, index: int, tmpwd: Path
):
uri = next(islice(poj104_dataset.benchmark_uris(), index, None))
benchmark = poj104_dataset.benchmark(uri)
env.reset(benchmark=benchmark)
assert benchmark.source
benchmark.write_sources_to_directory(tmpwd)
assert (tmpwd / "source.cc").is_file()
@skip_on_ci
def test_poj104_random_benchmark(env: LlvmEnv, poj104_dataset: POJ104Dataset):
benchmark = poj104_dataset.random_benchmark()
env.reset(benchmark=benchmark)
assert benchmark.source
@pytest.mark.parametrize(
"uri",
[
"benchmark://poj104-v1/1/1486",
],
)
def test_poj104_known_bad_bitcodes(env: LlvmEnv, uri: str):
# This test is intentionally structured in a way that if the benchmark does
# not raise an error, it still passes.
try:
env.reset(benchmark=uri)
except BenchmarkInitError as e:
assert "Failed to parse LLVM bitcode" in str(e)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/datasets/poj104_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the GitHub dataset."""
import sys
from itertools import islice
import gym
import pytest
import compiler_gym.envs.llvm # noqa register environments
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.envs.llvm.datasets import GitHubDataset
from tests.pytest_plugins.common import skip_on_ci
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common", "tests.pytest_plugins.llvm"]
@pytest.fixture(scope="module")
def github_dataset() -> GitHubDataset:
with gym.make("llvm-v0") as env:
ds = env.datasets["github-v0"]
yield ds
def test_github_size(github_dataset: GitHubDataset):
if sys.platform == "linux":
assert github_dataset.size == 49738
else:
assert github_dataset.size == 47806
@skip_on_ci
@pytest.mark.parametrize("index", range(250))
def test_github_random_select(env: LlvmEnv, github_dataset: GitHubDataset, index: int):
uri = next(islice(github_dataset.benchmark_uris(), index, None))
benchmark = github_dataset.benchmark(uri)
env.reset(benchmark=benchmark)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/datasets/github_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the AnghaBench dataset."""
import gym
import pytest
import compiler_gym.envs.llvm # noqa register environments
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.envs.llvm.datasets import CHStoneDataset, chstone
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common", "tests.pytest_plugins.llvm"]
@pytest.fixture(scope="module")
def chstone_dataset() -> CHStoneDataset:
with gym.make("llvm-v0") as env:
ds = env.datasets["chstone-v0"]
yield ds
def test_chstone_size(chstone_dataset: CHStoneDataset):
assert chstone_dataset.size == 12
def test_missing_benchmark_name(chstone_dataset: CHStoneDataset, mocker):
# Mock install() so that on CI it doesn't download and unpack the tarfile.
mocker.patch.object(chstone_dataset, "install")
with pytest.raises(
LookupError, match=r"^No benchmark specified: benchmark://chstone-v0$"
):
chstone_dataset.benchmark("benchmark://chstone-v0")
chstone_dataset.install.assert_called_once()
with pytest.raises(
LookupError, match=r"^No benchmark specified: benchmark://chstone-v0/$"
):
chstone_dataset.benchmark("benchmark://chstone-v0/")
assert chstone_dataset.install.call_count == 2
@pytest.mark.parametrize("uri", chstone.URIS)
def test_chstone_benchmark_reset(
env: LlvmEnv, chstone_dataset: CHStoneDataset, uri: str
):
env.reset(chstone_dataset.benchmark(uri))
assert env.benchmark == uri
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/datasets/chstone_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Test for cBench semantics validation."""
import pytest
from compiler_gym import ValidationResult
from compiler_gym.envs.llvm import LlvmEnv
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
@pytest.mark.timeout(600)
def test_validate_benchmark_semantics(env: LlvmEnv, validatable_cbench_uri: str):
"""Run the validation routine on all benchmarks."""
env.reward_space = "IrInstructionCount"
env.reset(benchmark=validatable_cbench_uri)
# Run a single step.
env.step(env.action_space.flags.index("-mem2reg"))
# Validate the environment state.
result: ValidationResult = env.validate()
assert not result.error_details
assert result.reward_validated
assert not result.actions_replay_failed
assert not result.reward_validation_failed
assert result.benchmark_semantics_validated
assert not result.benchmark_semantics_validation_failed
assert result.okay()
@pytest.mark.timeout(600)
def test_non_validatable_benchmark_validate(
env: LlvmEnv, non_validatable_cbench_uri: str
):
"""Run the validation routine on all benchmarks."""
env.reward_space = "IrInstructionCount"
env.reset(benchmark=non_validatable_cbench_uri)
# Run a single step.
env.step(env.action_space.flags.index("-mem2reg"))
# Validate the environment state.
result: ValidationResult = env.validate()
assert not result.error_details
assert result.reward_validated
assert not result.actions_replay_failed
assert not result.reward_validation_failed
assert not result.benchmark_semantics_validated
assert not result.benchmark_semantics_validation_failed
assert result.okay()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/datasets/cbench_validate_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/llvm/datasets/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the AnghaBench dataset."""
import sys
from itertools import islice
import gym
import numpy as np
import pytest
import compiler_gym.envs.llvm # noqa register environments
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.envs.llvm.datasets import LlvmStressDataset
from compiler_gym.errors import BenchmarkInitError
from tests.pytest_plugins.common import is_ci
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common", "tests.pytest_plugins.llvm"]
@pytest.fixture(scope="module")
def llvm_stress_dataset() -> LlvmStressDataset:
with gym.make("llvm-v0") as env:
ds = env.datasets["generator://llvm-stress-v0"]
yield ds
def test_llvm_stress_size(llvm_stress_dataset: LlvmStressDataset):
assert llvm_stress_dataset.size == 0
assert len(llvm_stress_dataset) == 0
@pytest.mark.parametrize("index", range(3) if is_ci() else range(250))
def test_llvm_stress_random_select(
env: LlvmEnv, llvm_stress_dataset: LlvmStressDataset, index: int
):
env.observation_space = "InstCountDict"
uri = next(islice(llvm_stress_dataset.benchmark_uris(), index, None))
benchmark = llvm_stress_dataset.benchmark(uri)
# As of the current version (LLVM 10.0.0), programs generated with the
# following seeds emit an error when compiled: "Cannot emit physreg copy
# instruction".
FAILING_SEEDS = {"linux": {173, 239}, "darwin": {173}}[sys.platform]
if index in FAILING_SEEDS:
with pytest.raises(
BenchmarkInitError, match="Cannot emit physreg copy instruction"
):
env.reset(benchmark=benchmark)
else:
instcount = env.reset(benchmark=benchmark)
print(env.ir) # For debugging in case of error.
assert instcount["TotalInstsCount"] > 0
def test_random_benchmark(llvm_stress_dataset: LlvmStressDataset):
num_benchmarks = 5
rng = np.random.default_rng(0)
random_benchmarks = {
b.uri
for b in (
llvm_stress_dataset.random_benchmark(rng) for _ in range(num_benchmarks)
)
}
assert len(random_benchmarks) == num_benchmarks
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/datasets/llvm_stress_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the JotaiBench dataset."""
from itertools import islice
from pathlib import Path
import pytest
import compiler_gym
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.envs.llvm.datasets import JotaiBenchDataset
from tests.pytest_plugins.common import skip_on_ci
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common", "tests.pytest_plugins.llvm"]
@pytest.fixture(scope="module")
def jotaibench_dataset() -> JotaiBenchDataset:
with compiler_gym.make("llvm-v0") as env:
ds = env.datasets["jotaibench-v0"]
yield ds
def test_jotaibench_size(jotaibench_dataset: JotaiBenchDataset):
assert jotaibench_dataset.size == 18761
def test_missing_benchmark_name(jotaibench_dataset: JotaiBenchDataset, mocker):
# Mock install() so that on CI it doesn't download and unpack the tarfile.
mocker.patch.object(jotaibench_dataset, "install")
with pytest.raises(
LookupError, match=r"^No benchmark specified: benchmark://jotaibench-v0$"
):
jotaibench_dataset.benchmark("benchmark://jotaibench-v0")
jotaibench_dataset.install.assert_called_once()
with pytest.raises(
LookupError, match=r"^No benchmark specified: benchmark://jotaibench-v0/$"
):
jotaibench_dataset.benchmark("benchmark://jotaibench-v0/")
assert jotaibench_dataset.install.call_count == 2
@skip_on_ci
@pytest.mark.parametrize("index", range(250))
def test_jotaibench_random_select(
env: LlvmEnv, jotaibench_dataset: JotaiBenchDataset, index: int, tmpwd: Path
):
uri = next(islice(jotaibench_dataset.benchmark_uris(), index, None))
benchmark = jotaibench_dataset.benchmark(uri)
env.reset(benchmark=benchmark)
assert benchmark.source
benchmark.write_sources_to_directory(tmpwd)
assert (tmpwd / "function.c").is_file()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/datasets/jotaibench_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the CLgen dataset."""
from itertools import islice
from pathlib import Path
import gym
import pytest
import compiler_gym.envs.llvm # noqa register environments
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.envs.llvm.datasets import CLgenDataset
from tests.pytest_plugins.common import is_ci
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common", "tests.pytest_plugins.llvm"]
@pytest.fixture(scope="module")
def clgen_dataset() -> CLgenDataset:
with gym.make("llvm-v0") as env:
ds = env.datasets["benchmark://clgen-v0"]
yield ds
def test_clgen_size(clgen_dataset: CLgenDataset):
assert clgen_dataset.size == 996
def test_missing_benchmark_name(clgen_dataset: CLgenDataset, mocker):
# Mock install() so that on CI it doesn't download and unpack the tarfile.
mocker.patch.object(clgen_dataset, "install")
with pytest.raises(
LookupError, match=r"^No benchmark specified: benchmark://clgen-v0$"
):
clgen_dataset.benchmark("benchmark://clgen-v0")
clgen_dataset.install.assert_called_once()
with pytest.raises(
LookupError, match=r"^No benchmark specified: benchmark://clgen-v0/$"
):
clgen_dataset.benchmark("benchmark://clgen-v0/")
assert clgen_dataset.install.call_count == 2
@pytest.mark.parametrize("index", range(3) if is_ci() else range(250))
def test_clgen_random_select(
env: LlvmEnv, clgen_dataset: CLgenDataset, index: int, tmpwd: Path
):
uri = next(islice(clgen_dataset.benchmark_uris(), index, None))
benchmark = clgen_dataset.benchmark(uri)
env.reset(benchmark=benchmark)
assert benchmark.source
benchmark.write_sources_to_directory(tmpwd)
assert (tmpwd / "kernel.cl").is_file()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/datasets/clgen_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the Csmith dataset."""
from itertools import islice
from pathlib import Path
import gym
import numpy as np
import pytest
from flaky import flaky
import compiler_gym.envs.llvm # noqa register environments
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.envs.llvm.datasets import CsmithBenchmark, CsmithDataset
from compiler_gym.errors import ServiceError
from tests.pytest_plugins.common import is_ci
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common", "tests.pytest_plugins.llvm"]
@pytest.fixture(scope="module")
def csmith_dataset() -> CsmithDataset:
with gym.make("llvm-v0") as env:
ds = env.datasets["generator://csmith-v0"]
yield ds
def test_csmith_size(csmith_dataset: CsmithDataset):
assert csmith_dataset.size == 0
assert len(csmith_dataset) == 0
@pytest.mark.parametrize("index", range(3) if is_ci() else range(250))
def test_csmith_random_select(
env: LlvmEnv, csmith_dataset: CsmithDataset, index: int, tmpwd: Path
):
uri = next(islice(csmith_dataset.benchmark_uris(), index, None))
benchmark = csmith_dataset.benchmark(uri)
assert isinstance(benchmark, CsmithBenchmark)
env.reset(benchmark=benchmark)
assert benchmark.source
benchmark.write_sources_to_directory(tmpwd)
assert (tmpwd / "source.c").is_file()
def test_random_benchmark(csmith_dataset: CsmithDataset):
num_benchmarks = 5
rng = np.random.default_rng(0)
random_benchmarks = {
b.uri
for b in (csmith_dataset.random_benchmark(rng) for _ in range(num_benchmarks))
}
assert len(random_benchmarks) == num_benchmarks
def test_csmith_from_seed_retry_count_exceeded(csmith_dataset: CsmithDataset):
with pytest.raises(OSError, match="Csmith failed after 5 attempts with seed 1"):
csmith_dataset.benchmark_from_seed(seed=1, max_retries=3, retry_count=5)
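# Retry decorator for the runtime tests below: flaky passes the exc_info tuple
# of the failed run to rerun_filter, so err[0] is the exception class. The test
# is rerun only when the failure was a ServiceError or TimeoutError.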
csmith_runtime_flaky = flaky(
max_runs=5,
rerun_filter=lambda err, *args: issubclass(err[0], ServiceError)
or issubclass(err[0], TimeoutError),
)
@csmith_runtime_flaky
def test_csmith_positive_runtimes(env: LlvmEnv, csmith_dataset: CsmithDataset):
benchmark = next(csmith_dataset.benchmarks())
env.reset(benchmark=benchmark)
val = env.observation["Runtime"]
print(val.tolist())
assert np.all(np.greater(val, 0))
@csmith_runtime_flaky
def test_csmith_positive_buildtimes(env: LlvmEnv, csmith_dataset: CsmithDataset):
benchmark = next(csmith_dataset.benchmarks())
env.reset(benchmark=benchmark)
val = env.observation["Buildtime"]
print(val.tolist())
assert np.all(np.greater(val, 0))
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/datasets/csmith_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the AnghaBench dataset."""
import sys
from itertools import islice
from pathlib import Path
import gym
import pytest
import compiler_gym.envs.llvm # noqa register environments
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.envs.llvm.datasets import AnghaBenchDataset
from tests.pytest_plugins.common import skip_on_ci
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common", "tests.pytest_plugins.llvm"]
@pytest.fixture(scope="module")
def anghabench_dataset() -> AnghaBenchDataset:
with gym.make("llvm-v0") as env:
ds = env.datasets["anghabench-v1"]
yield ds
def test_anghabench_size(anghabench_dataset: AnghaBenchDataset):
if sys.platform == "darwin":
assert anghabench_dataset.size == 1041265
else:
assert anghabench_dataset.size == 1041333
def test_missing_benchmark_name(anghabench_dataset: AnghaBenchDataset, mocker):
# Mock install() so that on CI it doesn't download and unpack the tarfile.
mocker.patch.object(anghabench_dataset, "install")
with pytest.raises(
LookupError, match=r"^No benchmark specified: benchmark://anghabench-v1$"
):
anghabench_dataset.benchmark("benchmark://anghabench-v1")
anghabench_dataset.install.assert_called_once()
with pytest.raises(
LookupError, match=r"^No benchmark specified: benchmark://anghabench-v1/$"
):
anghabench_dataset.benchmark("benchmark://anghabench-v1/")
assert anghabench_dataset.install.call_count == 2
@skip_on_ci
@pytest.mark.parametrize("index", range(250))
def test_anghabench_random_select(
env: LlvmEnv, anghabench_dataset: AnghaBenchDataset, index: int, tmpwd: Path
):
uri = next(islice(anghabench_dataset.benchmark_uris(), index, None))
benchmark = anghabench_dataset.benchmark(uri)
env.reset(benchmark=benchmark)
assert benchmark.source
benchmark.write_sources_to_directory(tmpwd)
assert (tmpwd / "function.c").is_file()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/datasets/anghabench_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the cbench dataset."""
import tempfile
from pathlib import Path
import pytest
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.envs.llvm.datasets import CBenchDataset, cbench
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common", "tests.pytest_plugins.llvm"]
@pytest.fixture(scope="module")
def cbench_dataset() -> CBenchDataset:
with tempfile.TemporaryDirectory() as d:
yield CBenchDataset(site_data_base=Path(d))
def test_cbench_size(cbench_dataset: CBenchDataset):
assert cbench_dataset.size == 23
def test_cbench_uris(cbench_dataset: CBenchDataset):
assert list(cbench_dataset.benchmark_uris()) == [
"benchmark://cbench-v1/adpcm",
"benchmark://cbench-v1/bitcount",
"benchmark://cbench-v1/blowfish",
"benchmark://cbench-v1/bzip2",
"benchmark://cbench-v1/crc32",
"benchmark://cbench-v1/dijkstra",
"benchmark://cbench-v1/ghostscript",
"benchmark://cbench-v1/gsm",
"benchmark://cbench-v1/ispell",
"benchmark://cbench-v1/jpeg-c",
"benchmark://cbench-v1/jpeg-d",
"benchmark://cbench-v1/lame",
"benchmark://cbench-v1/patricia",
"benchmark://cbench-v1/qsort",
"benchmark://cbench-v1/rijndael",
"benchmark://cbench-v1/sha",
"benchmark://cbench-v1/stringsearch",
"benchmark://cbench-v1/stringsearch2",
"benchmark://cbench-v1/susan",
"benchmark://cbench-v1/tiff2bw",
"benchmark://cbench-v1/tiff2rgba",
"benchmark://cbench-v1/tiffdither",
"benchmark://cbench-v1/tiffmedian",
]
def test_validate_sha_output_okay():
output = cbench.BenchmarkExecutionResult(
walltime_seconds=0,
output="1234567890abcdef 1234567890abcd 1234567890abc 1234567890 12345",
)
assert cbench.validate_sha_output(output) is None
def test_validate_sha_output_invalid():
output = cbench.BenchmarkExecutionResult(walltime_seconds=0, output="abcd")
assert cbench.validate_sha_output(output)
def test_cbench_v0_deprecation(env: LlvmEnv):
"""Test that cBench-v0 emits a deprecation warning when used."""
with pytest.deprecated_call(match="Please use 'benchmark://cbench-v1'"):
env.datasets["cBench-v0"].install()
with pytest.deprecated_call(match="Please use 'benchmark://cbench-v1'"):
env.datasets.benchmark("benchmark://cBench-v0/crc32")
def test_cbench_v1_deprecation(env: LlvmEnv):
"""Test that cBench-v1 emits a deprecation warning when used."""
with pytest.deprecated_call(match="Please use 'benchmark://cbench-v1'"):
env.datasets["cBench-v1"].install()
with pytest.deprecated_call(match="Please use 'benchmark://cbench-v1'"):
env.datasets.benchmark("benchmark://cBench-v1/crc32")
def test_cbench_v1_dataset_param(env: LlvmEnv):
a = env.datasets.benchmark("cbench-v1/qsort?dataset=0")
b = env.datasets.benchmark("cbench-v1/qsort?dataset=0") # same as a
c = env.datasets.benchmark("cbench-v1/qsort?dataset=1")
assert a.proto.dynamic_config == b.proto.dynamic_config # sanity check
assert a.proto.dynamic_config != c.proto.dynamic_config # sanity check
def test_cbench_v1_dataset_out_of_range(env: LlvmEnv):
with pytest.raises(ValueError, match="Invalid dataset: 50"):
env.datasets.benchmark("cbench-v1/qsort?dataset=50")
with pytest.raises(ValueError, match="Invalid dataset: abc"):
env.datasets.benchmark("cbench-v1/qsort?dataset=abc")
def test_cbench_v1_init_close_test(env: LlvmEnv, benchmark_name: str):
"""Create an environment for each benchmark and close it."""
env.reset(benchmark=benchmark_name)
assert env.benchmark == benchmark_name
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/datasets/cbench_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the GCC CompilerGym service."""
import re
import gym
import numpy as np
import pytest
import compiler_gym.envs.gcc # noqa register environments
from compiler_gym.errors import ServiceError, SessionNotFound
from compiler_gym.spaces import Scalar, Sequence
from tests.pytest_plugins.common import with_docker, without_docker
from tests.pytest_plugins.gcc import docker_is_available, with_gcc_support
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.gcc"]
@without_docker
def test_gcc_env_fails_without_gcc_support():
with pytest.raises(ServiceError):
gym.make("gcc-v0")
@with_docker
def test_docker_default_action_space():
"""Test that the environment reports the service's action spaces."""
with gym.make("gcc-v0") as env:
assert env.action_spaces[0].name == "default"
assert len(env.action_spaces[0].names) == 2280
assert env.action_spaces[0].names[0] == "-O0"
@pytest.mark.xfail(
not docker_is_available(),
reason="github.com/facebookresearch/CompilerGym/issues/459",
)
def test_gcc_bin(gcc_bin: str):
"""Test that the environment reports the service's reward spaces."""
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
env.reset()
assert env.gcc_spec.gcc.bin == gcc_bin
@pytest.mark.xfail(
not docker_is_available(),
reason="github.com/facebookresearch/CompilerGym/issues/459",
)
def test_observation_spaces_failing_because_of_bug(gcc_bin: str):
"""Test that the environment reports the service's observation spaces."""
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
env.reset()
assert env.observation.spaces.keys() == {
"asm_hash",
"asm_size",
"asm",
"choices",
"command_line",
"instruction_counts",
"obj_hash",
"obj_size",
"obj",
"rtl",
"source",
}
assert env.observation.spaces["obj_size"].space == Scalar(
name="obj_size", min=-1, max=np.iinfo(np.int64).max, dtype=int
)
assert env.observation.spaces["asm"].space == Sequence(
name="asm", size_range=(0, np.iinfo(np.int64).max), dtype=str
)
def test_reward_spaces(gcc_bin: str):
"""Test that the environment reports the service's reward spaces."""
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
env.reset()
assert env.reward.spaces.keys() == {"asm_size", "obj_size"}
@with_gcc_support
def test_step_before_reset(gcc_bin: str):
"""Taking a step() before reset() is illegal."""
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
with pytest.raises(
SessionNotFound, match=r"Must call reset\(\) before step\(\)"
):
env.step(0)
@with_gcc_support
def test_observation_before_reset(gcc_bin: str):
"""Taking an observation before reset() is illegal."""
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
with pytest.raises(
SessionNotFound, match=r"Must call reset\(\) before step\(\)"
):
_ = env.observation["asm"]
@with_gcc_support
def test_reward_before_reset(gcc_bin: str):
"""Taking a reward before reset() is illegal."""
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
with pytest.raises(
SessionNotFound, match=r"Must call reset\(\) before step\(\)"
):
_ = env.reward["obj_size"]
@with_gcc_support
def test_reset_invalid_benchmark(gcc_bin: str):
"""Test requesting a specific benchmark."""
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
with pytest.raises(
LookupError, match=r"Dataset not found: benchmark://chstone-v1"
):
env.reset(benchmark="chstone-v1/flubbedydubfishface")
@with_gcc_support
def test_invalid_observation_space(gcc_bin: str):
"""Test error handling with invalid observation space."""
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
with pytest.raises(LookupError):
env.observation_space = 100
@with_gcc_support
def test_invalid_reward_space(gcc_bin: str):
"""Test error handling with invalid reward space."""
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
with pytest.raises(LookupError):
env.reward_space = 100
@with_gcc_support
def test_double_reset(gcc_bin: str):
"""Test that reset() can be called twice."""
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
env.reset()
assert env.in_episode
env.step(env.action_space.sample())
env.reset()
_, _, done, info = env.step(env.action_space.sample())
assert not done, info
assert env.in_episode
@with_gcc_support
def test_step_out_of_range(gcc_bin: str):
"""Test error handling with an invalid action."""
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
env.reset()
with pytest.raises(ValueError, match="Out-of-range"):
env.step(10000)
@with_gcc_support
def test_default_benchmark(gcc_bin: str):
"""Test that we are working with the expected default benchmark."""
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
assert env.benchmark.proto.uri == "benchmark://chstone-v0/adpcm"
@with_gcc_support
def test_default_reward(gcc_bin: str):
"""Test default reward space."""
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
env.reward_space = "obj_size"
env.reset()
observation, reward, done, info = env.step(0)
assert observation is None
assert reward == 0
assert not done, info
@with_gcc_support
def test_source_observation(gcc_bin: str):
"""Test observation spaces."""
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
env.reset()
lines = env.source.split("\n")
assert re.match(r"# \d+ \"adpcm.c\"", lines[0])
@with_docker
def test_rtl_observation():
"""Test observation spaces."""
with gym.make("gcc-v0") as env:
env.reset()
assert env.rtl.startswith(
"""
;; Function abs (abs, funcdef_no=0, decl_uid=1084, cgraph_uid=1, symbol_order=90)
(note 1 0 4 NOTE_INSN_DELETED)
(note 4 1 38 2 [bb 2] NOTE_INSN_BASIC_BLOCK)"""
)
@with_docker
def test_asm_observation():
"""Test observation spaces."""
with gym.make("gcc-v0") as env:
env.reset()
assert env.asm.startswith('\t.file\t"src.c"\n\t')
@with_docker
def test_asm_size_observation():
"""Test observation spaces."""
with gym.make("gcc-v0") as env:
env.reset()
assert env.asm_size == 39876
@with_docker
def test_asm_hash_observation():
"""Test observation spaces."""
with gym.make("gcc-v0") as env:
env.reset()
assert env.asm_hash == "f4921de395b026a55eab3844c8fe43dd"
@with_docker
def test_instruction_counts_observation():
"""Test observation spaces."""
with gym.make("gcc-v0") as env:
env.reset()
assert env.instruction_counts == {
".align": 95,
".bss": 8,
".cfi": 91,
".file": 1,
".globl": 110,
".ident": 1,
".long": 502,
".section": 10,
".size": 110,
".string": 1,
".text": 4,
".type": 110,
".zero": 83,
"addl": 44,
"addq": 17,
"andl": 2,
"call": 34,
"cltq": 67,
"cmovns": 2,
"cmpl": 30,
"cmpq": 1,
"imulq": 27,
"je": 2,
"jge": 3,
"jle": 21,
"jmp": 24,
"jne": 1,
"jns": 2,
"js": 7,
"leaq": 40,
"leave": 4,
"movl": 575,
"movq": 150,
"movslq": 31,
"negl": 5,
"negq": 1,
"nop": 7,
"orl": 1,
"popq": 11,
"pushq": 16,
"ret": 15,
"sall": 2,
"salq": 7,
"sarl": 9,
"sarq": 20,
"shrl": 2,
"subl": 7,
"subq": 15,
"testl": 1,
"testq": 4,
}
@with_docker
def test_obj_observation():
"""Test observation spaces."""
with gym.make("gcc-v0") as env:
env.reset()
assert env.obj[:5].tobytes() == b"\x7fELF\x02"
@with_docker
def test_obj_size_observation():
"""Test observation spaces."""
with gym.make("gcc-v0") as env:
env.reset()
assert env.obj_size == 21192
@with_docker
def test_obj_hash_observation():
"""Test observation spaces."""
with gym.make("gcc-v0") as env:
env.reset()
assert env.obj_hash == "65937217c3758faf655df98741fe1d52"
@with_docker
def test_choices_observation():
"""Test observation spaces."""
with gym.make("gcc-v0") as env:
env.reset()
choices = env.choices
assert len(choices) == 502
assert all(map(lambda x: x == -1, choices))
@with_docker
def test_action_space_string():
"""Test observation spaces."""
with gym.make("gcc-v0") as env:
env.reset()
assert (
env.action_space.to_string(env.actions)
== "docker:gcc:11.2.0 -w -c src.c -o obj.o"
)
@with_docker
def test_gcc_spec():
"""Test gcc_spec param."""
with gym.make("gcc-v0") as env:
env.reset()
assert env.gcc_spec.gcc.bin == "docker:gcc:11.2.0"
assert min(map(len, env.gcc_spec.options)) > 0
@with_docker
def test_set_choices():
"""Test that we can set the command line parameters"""
with gym.make("gcc-v0") as env:
env.reset()
env.choices = [-1] * len(env.gcc_spec.options)
assert env.action_space.to_string(env.actions).startswith(
"docker:gcc:11.2.0 -w -c src.c -o obj.o"
)
env.choices = [0] * len(env.gcc_spec.options)
assert env.action_space.to_string(env.actions).startswith(
"docker:gcc:11.2.0 -O0 -faggressive-loop-optimizations -falign-functions -falign-jumps -falign-labels"
)
@with_docker
def test_rewards():
"""Test reward spaces."""
with gym.make("gcc-v0") as env:
env.reset()
assert env.reward["asm_size"] == 0
assert env.reward["obj_size"] == 0
env.step(env.action_space.names.index("-O3"))
assert env.reward["asm_size"] == -19235.0
assert env.reward["obj_size"] == -6520.0
@with_gcc_support
def test_timeout(gcc_bin: str):
"""Test that the timeout can be set."""
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
env.reset()
env.timeout = 20
assert env.timeout == 20
env.reset()
assert env.timeout == 20
@with_docker
def test_compile():
with gym.make("gcc-v0") as env:
env.observation_space = "obj_size"
observation = env.reset()
assert observation == 21192
observation, _, _, _ = env.step(env.action_space.names.index("-O0"))
assert observation == 21192
observation, _, _, _ = env.step(env.action_space.names.index("-O3"))
assert observation == 27712
observation, _, _, _ = env.step(env.action_space.names.index("-finline"))
assert observation == 27712
@with_gcc_support
def test_fork(gcc_bin: str):
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
env.reset()
env.step(0)
env.step(1)
fkd = env.fork()
try:
assert env.benchmark == fkd.benchmark
assert fkd.actions == [0, 1]
fkd.step(0)
assert fkd.actions == [0, 1, 0]
assert env.actions == [0, 1]
finally:
fkd.close()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/gcc/gcc_env_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the GCC CompilerGym service."""
import gym
import pytest
import compiler_gym.envs.gcc # noqa register environments
from compiler_gym.errors import ServiceError
from tests.pytest_plugins.common import skip_on_ci, with_docker
from tests.test_main import main
@with_docker
def test_invalid_docker_image():
with pytest.raises(ServiceError):
gym.make("gcc-v0", gcc_bin="docker:not-a-valid-image")
@with_docker
def test_version_11():
with gym.make("gcc-v0", gcc_bin="docker:gcc:11.2.0") as env:
assert env.compiler_version == "gcc (GCC) 11.2.0"
@skip_on_ci
@with_docker
def test_version_10():
with gym.make("gcc-v0", gcc_bin="docker:gcc:10.3.0") as env:
assert env.compiler_version == "gcc (GCC) 10.3.0"
with gym.make("gcc-v0", gcc_bin="docker:gcc:10.3") as env:
assert env.compiler_version == "gcc (GCC) 10.3.0"
with gym.make("gcc-v0", gcc_bin="docker:gcc:10") as env:
assert env.compiler_version == "gcc (GCC) 10.3.0"
@skip_on_ci
@with_docker
def test_version_9():
with gym.make("gcc-v0", gcc_bin="docker:gcc:9.4.0") as env:
assert env.compiler_version == "gcc (GCC) 9.4.0"
with gym.make("gcc-v0", gcc_bin="docker:gcc:9.4") as env:
assert env.compiler_version == "gcc (GCC) 9.4.0"
with gym.make("gcc-v0", gcc_bin="docker:gcc:9") as env:
assert env.compiler_version == "gcc (GCC) 9.4.0"
@skip_on_ci
@with_docker
def test_version_8():
with gym.make("gcc-v0", gcc_bin="docker:gcc:8.5.0") as env:
assert env.compiler_version == "gcc (GCC) 8.5.0"
with gym.make("gcc-v0", gcc_bin="docker:gcc:8.5") as env:
assert env.compiler_version == "gcc (GCC) 8.5.0"
with gym.make("gcc-v0", gcc_bin="docker:gcc:8") as env:
assert env.compiler_version == "gcc (GCC) 8.5.0"
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/gcc/gcc_docker_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/gcc/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the GCC CompilerGym service."""
import gym
import pytest
import compiler_gym.envs.gcc # noqa register environments
from compiler_gym.errors import ServiceError
from tests.pytest_plugins.gcc import with_system_gcc, without_system_gcc
from tests.test_main import main
def test_missing_gcc_bin():
with pytest.raises(ServiceError):
gym.make("gcc-v0", gcc_bin="not-a-real-file")
def test_invalid_gcc_bin():
with pytest.raises(ServiceError):
gym.make("gcc-v0", gcc_bin="false")
@with_system_gcc
def test_system_gcc():
with gym.make("gcc-v0", gcc_bin="gcc") as env:
assert "gcc" in env.compiler_version
@without_system_gcc
def test_missing_system_gcc():
with pytest.raises(ServiceError):
gym.make("gcc-v0", gcc_bin="gcc")
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/gcc/gcc_bin_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/gcc/datasets/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the Csmith dataset."""
from itertools import islice
from pathlib import Path
import gym
import numpy as np
import pytest
from compiler_gym.envs.gcc.datasets import CsmithBenchmark
from tests.pytest_plugins.common import is_ci
from tests.pytest_plugins.gcc import with_gcc_support
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common", "tests.pytest_plugins.gcc"]
@pytest.mark.xfail(
reason="github.com/facebookresearch/CompilerGym/issues/459",
)
@with_gcc_support
def test_csmith_size(gcc_bin: str):
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
csmith_dataset = env.datasets["generator://csmith-v0"]
assert csmith_dataset.size == 0
assert len(csmith_dataset) == 0
@pytest.mark.xfail(
reason="github.com/facebookresearch/CompilerGym/issues/459",
)
@with_gcc_support
@pytest.mark.parametrize("index", range(3) if is_ci() else range(10))
def test_csmith_random_select(gcc_bin: str, index: int, tmpwd: Path):
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
csmith_dataset = env.datasets["generator://csmith-v0"]
uri = next(islice(csmith_dataset.benchmark_uris(), index, None))
benchmark = csmith_dataset.benchmark(uri)
assert isinstance(benchmark, CsmithBenchmark)
env.reset(benchmark=benchmark)
assert benchmark.source
benchmark.write_sources_to_directory(tmpwd)
assert (tmpwd / "source.c").is_file()
@pytest.mark.xfail(
reason="github.com/facebookresearch/CompilerGym/issues/459",
)
@with_gcc_support
def test_random_benchmark(gcc_bin: str):
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
csmith_dataset = env.datasets["generator://csmith-v0"]
num_benchmarks = 5
rng = np.random.default_rng(0)
random_benchmarks = {
b.uri
for b in (
csmith_dataset.random_benchmark(rng) for _ in range(num_benchmarks)
)
}
assert len(random_benchmarks) == num_benchmarks
@pytest.mark.xfail(
reason="github.com/facebookresearch/CompilerGym/issues/459",
)
@with_gcc_support
def test_csmith_from_seed_retry_count_exceeded(gcc_bin: str):
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
csmith_dataset = env.datasets["generator://csmith-v0"]
with pytest.raises(OSError, match="Csmith failed after 5 attempts with seed 1"):
csmith_dataset.benchmark_from_seed(seed=1, max_retries=3, retry_count=5)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/gcc/datasets/csmith_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the AnghaBench dataset."""
import sys
from itertools import islice
import gym
import pytest
import compiler_gym.envs.gcc # noqa register environments
from tests.pytest_plugins.common import skip_on_ci
from tests.pytest_plugins.gcc import with_gcc_support
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common", "tests.pytest_plugins.gcc"]
@with_gcc_support
def test_anghabench_size(gcc_bin: str):
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
anghabench_dataset = env.datasets["anghabench-v1"]
if sys.platform == "darwin":
assert anghabench_dataset.size == 1041265
else:
assert anghabench_dataset.size == 1041333
@with_gcc_support
def test_missing_benchmark_name(gcc_bin: str, mocker):
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
anghabench_dataset = env.datasets["anghabench-v1"]
# Mock install() so that on CI it doesn't download and unpack the tarfile.
mocker.patch.object(anghabench_dataset, "install")
with pytest.raises(
LookupError, match=r"^Benchmark not found: benchmark://anghabench-v1"
):
anghabench_dataset.benchmark("benchmark://anghabench-v1")
anghabench_dataset.install.assert_called_once()
with pytest.raises(
LookupError, match=r"^Benchmark not found: benchmark://anghabench-v1/"
):
anghabench_dataset.benchmark("benchmark://anghabench-v1/")
assert anghabench_dataset.install.call_count == 2
@with_gcc_support
@skip_on_ci
@pytest.mark.parametrize("index", range(10))
def test_anghabench_random_select(gcc_bin: str, index: int):
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
anghabench_dataset = env.datasets["anghabench-v1"]
uri = next(islice(anghabench_dataset.benchmark_uris(), index, None))
benchmark = anghabench_dataset.benchmark(uri)
env.reset(benchmark=benchmark)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/gcc/datasets/anghabench_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for //compiler_gym/leaderboard:llvm_instcount."""
from pathlib import Path
import pytest
from absl import flags
from compiler_gym.leaderboard.llvm_instcount import eval_llvm_instcount_policy
from tests.pytest_plugins.common import set_command_line_flags
from tests.test_main import main
FLAGS = flags.FLAGS
pytest_plugins = ["tests.pytest_plugins.common"]
def null_policy(env) -> None:
"""A policy that does nothing."""
pass
def test_eval_llvm_instcount_policy():
set_command_line_flags(["argv0", "--n=1", "--max_benchmarks=1", "--novalidate"])
with pytest.raises(SystemExit):
eval_llvm_instcount_policy(null_policy)
def test_eval_llvm_instcount_policy_resume(tmpwd):
# Run eval on a single benchmark.
set_command_line_flags(
[
"argv0",
"--n=1",
"--max_benchmarks=1",
"--novalidate",
"--resume",
"--leaderboard_results=test.csv",
]
)
with pytest.raises(SystemExit):
eval_llvm_instcount_policy(null_policy)
    # Check that the log has a single entry (and a header row).
assert Path("test.csv").is_file()
with open("test.csv") as f:
log = f.read()
assert len(log.rstrip().split("\n")) == 2
init_logfile = log
# Repeat, but for two benchmarks.
set_command_line_flags(
[
"argv0",
"--n=1",
"--max_benchmarks=2",
"--novalidate",
"--resume",
"--leaderboard_results=test.csv",
]
)
with pytest.raises(SystemExit):
eval_llvm_instcount_policy(null_policy)
# Check that the log extends the original.
assert Path("test.csv").is_file()
with open("test.csv") as f:
log = f.read()
assert log.startswith(init_logfile)
assert len(log.rstrip().split("\n")) == 3
init_logfile = log
# Repeat, but for two runs of each benchmark.
set_command_line_flags(
[
"argv0",
"--n=2",
"--max_benchmarks=2",
"--novalidate",
"--resume",
"--leaderboard_results=test.csv",
]
)
with pytest.raises(SystemExit):
eval_llvm_instcount_policy(null_policy)
# Check that the log extends the original.
assert Path("test.csv").is_file()
with open("test.csv") as f:
log = f.read()
assert log.startswith(init_logfile)
assert len(log.rstrip().split("\n")) == 5
def test_eval_llvm_instcount_policy_invalid_flag():
set_command_line_flags(["argv0", "--n=-1"])
with pytest.raises(AssertionError):
eval_llvm_instcount_policy(null_policy)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/leaderboard/llvm_instcount_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/leaderboard/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integration tests for the MLIR CompilerGym environments."""
from numbers import Real
import gym
import numpy as np
import pytest
import compiler_gym
from compiler_gym.envs import CompilerEnv, mlir
from compiler_gym.envs.mlir import MlirEnv
from compiler_gym.service.connection import CompilerGymServiceConnection
from compiler_gym.spaces import (
ActionSpace,
Box,
Dict,
Discrete,
NamedDiscrete,
Permutation,
Scalar,
SpaceSequence,
)
from compiler_gym.spaces import Tuple as TupleSpace
from compiler_gym.wrappers.mlir import convert_action, make_mlir_rl_wrapper_env
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common", "tests.pytest_plugins.mlir"]
@pytest.fixture(scope="function", params=["local", "service"])
def env(request) -> CompilerEnv:
"""Create an MLIR environment."""
if request.param == "local":
with gym.make("mlir-v0") as env:
yield env
else:
service = CompilerGymServiceConnection(mlir.MLIR_SERVICE_BINARY)
try:
with MlirEnv(service=service.connection.url) as env:
yield env
finally:
service.close()
def test_service_version(env: MlirEnv):
assert env.version == compiler_gym.__version__
def test_compiler_version(env: MlirEnv):
assert env.compiler_version.startswith("LLVM 14.")
def test_action_spaces_names(env: MlirEnv):
assert {a.name for a in env.action_spaces} == {"MatrixMultiplication"}
def test_action_space(env: MlirEnv):
expected_action_space = ActionSpace(
SpaceSequence(
name="MatrixMultiplication",
size_range=[1, 4],
space=Dict(
name=None,
spaces={
"tile_options": Dict(
name=None,
spaces={
"interchange_vector": Permutation(
name=None,
scalar_range=Scalar(name=None, min=0, max=2, dtype=int),
),
"tile_sizes": Box(
name=None,
low=np.array([1] * 3, dtype=int),
high=np.array([2**32] * 3, dtype=int),
dtype=np.int64,
),
"promote": Scalar(
name=None, min=False, max=True, dtype=bool
),
"promote_full_tile": Scalar(
name=None, min=False, max=True, dtype=bool
),
"loop_type": NamedDiscrete(
name=None,
items=["loops", "affine_loops"],
),
},
),
"vectorize_options": Dict(
name=None,
spaces={
"vectorize_to": NamedDiscrete(
name=None,
items=["dot", "matmul", "outer_product"],
),
"vector_transfer_split": NamedDiscrete(
name=None,
items=["none", "linalg_copy", "vector_transfer"],
),
"unroll_vector_transfers": Scalar(
name=None,
min=False,
max=True,
dtype=bool,
),
},
),
},
),
)
)
assert expected_action_space == env.action_space
def test_set_observation_space_from_spec(env: MlirEnv):
env.observation_space = env.observation.spaces["Runtime"]
obs = env.observation_space
env.observation_space = "Runtime"
assert env.observation_space == obs
def test_set_reward_space_from_spec(env: MlirEnv):
env.reward_space = env.reward.spaces["runtime"]
reward = env.reward_space
env.reward_space = "runtime"
assert env.reward_space == reward
def test_mlir_rl_wrapper_env_action_space(env: MlirEnv):
wrapper_env = make_mlir_rl_wrapper_env(env)
action_space = wrapper_env.action_space
tile_size = NamedDiscrete(
name=None,
items=["1", "2", "4", "8", "16", "32", "64", "128", "256", "512", "1024"],
)
expected_subspace = Dict(
name=None,
spaces={
"tile_options": Dict(
name=None,
spaces={
"interchange_vector": Discrete(name=None, n=6),
"tile_sizes": TupleSpace(
name=None, spaces=[tile_size, tile_size, tile_size]
),
"promote": NamedDiscrete(name=None, items=["False", "True"]),
"promote_full_tile": NamedDiscrete(
name=None, items=["False", "True"]
),
"loop_type": NamedDiscrete(
name=None,
items=["loops", "affine_loops"],
),
},
),
"vectorize_options": Dict(
name=None,
spaces={
"vectorize_to": NamedDiscrete(
name=None, items=["dot", "matmul", "outer_product"]
),
"vector_transfer_split": NamedDiscrete(
name=None,
items=["none", "linalg_copy", "vector_transfer"],
),
"unroll_vector_transfers": NamedDiscrete(
name=None, items=["False", "True"]
),
},
),
},
)
assert action_space[0] == expected_subspace
for i in range(1, 4):
assert action_space[i]["is_present"] == NamedDiscrete(
name=None, items=["False", "True"]
)
assert action_space[i]["space"] == expected_subspace
def test_convert_action():
action = [
{
"tile_options": {
"interchange_vector": 5,
"tile_sizes": [1, 3, 9],
"promote": 1,
"promote_full_tile": 0,
"loop_type": 1,
},
"vectorize_options": {
"vectorize_to": 2,
"vector_transfer_split": 1,
"unroll_vector_transfers": 1,
},
},
{"is_present": 0},
]
converted_action = convert_action(action)
expected_action = [
{
"tile_options": {
"interchange_vector": np.array([2, 1, 0], dtype=int),
"tile_sizes": [2, 8, 512],
"promote": True,
"promote_full_tile": False,
"loop_type": 1,
},
"vectorize_options": {
"vectorize_to": 2,
"vector_transfer_split": 1,
"unroll_vector_transfers": True,
},
}
]
assert len(converted_action) == len(expected_action)
assert len(converted_action[0]) == len(expected_action[0])
assert len(converted_action[0]["tile_options"]) == len(
expected_action[0]["tile_options"]
)
assert len(converted_action[0]["vectorize_options"]) == len(
expected_action[0]["vectorize_options"]
)
def test_mlir_rl_wrapper_env_observation_space(env: MlirEnv):
wrapper_env = make_mlir_rl_wrapper_env(env)
observation_space = wrapper_env.observation_space
assert observation_space == Box(
name="Runtime", shape=[1], low=0, high=np.inf, dtype=float
)
def test_mlir_rl_wrapper_env_step(env: MlirEnv):
wrapper_env = make_mlir_rl_wrapper_env(env)
action_space = wrapper_env.action_space
action_space.seed(123)
action = action_space.sample()
print(action)
observation, reward, done, _ = wrapper_env.step(action)
assert isinstance(observation, np.ndarray)
assert np.array_equal(observation.shape, [1])
assert observation[0] > 0
assert isinstance(reward, Real)
assert observation[0] == -reward
assert isinstance(done, bool)
assert done
def test_mlir_rl_wrapper_env_reset(env: MlirEnv):
wrapper_env = make_mlir_rl_wrapper_env(env)
action_space = wrapper_env.action_space
action_space.seed(123)
observation = wrapper_env.reset()
assert isinstance(observation, np.ndarray)
assert np.array_equal(observation.shape, [1])
assert observation[0] == 0
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/mlir/mlir_env_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/mlir/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
import warnings
import gym
import numpy as np
import pytest
import torch
from flaky import flaky
from ray.rllib.agents.ppo import PPOTrainer
from ray.tune.registry import register_env
from compiler_gym.wrappers.mlir import make_mlir_rl_wrapper_env
from tests.test_main import main
# Ignore import deprecation warnings from ray.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import ray
@flaky(max_runs=3, min_passes=1)
@pytest.mark.filterwarnings(
"ignore:`np\\.bool` is a deprecated alias for the builtin `bool`\\.",
"ignore:Mean of empty slice",
"ignore::ResourceWarning",
"ignore:using `dtype=` in comparisons is only useful for `dtype=object`",
)
def test_rllib_ppo_smoke():
ray.shutdown()
seed = 123
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
ray.init(local_mode=True) # Runs PPO training in the same process
register_env(
"mlir_rl_env-v0",
lambda env_config: make_mlir_rl_wrapper_env(env=gym.make("mlir-v0")),
)
config = {
"env": "mlir_rl_env-v0",
"framework": "torch",
"model": {
"fcnet_hiddens": [2, 2],
"fcnet_activation": "relu",
},
"num_workers": 0, # local worker only
"train_batch_size": 2,
"sgd_minibatch_size": 1,
"num_sgd_iter": 1,
"rollout_fragment_length": 2,
}
trainer = PPOTrainer(config=config)
with warnings.catch_warnings():
# Ignore deprecation warnings from internal rllib implementation.
warnings.filterwarnings("ignore", category=DeprecationWarning)
trainer.train()
ray.shutdown()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/mlir/rllib_ppo_smoke_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/mlir/datasets/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the matmul dataset."""
import re
from copy import deepcopy
from itertools import islice
from pathlib import Path
import gym
import numpy as np
import pytest
import compiler_gym.envs.mlir # noqa register environments
from compiler_gym.envs.mlir import MlirEnv
from compiler_gym.envs.mlir.datasets import MatmulBenchmark, MatmulDataset
from tests.pytest_plugins.common import is_ci
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common", "tests.pytest_plugins.mlir"]
@pytest.fixture(scope="module")
def matmul_dataset() -> MatmulDataset:
with gym.make("mlir-v0") as env:
ds = env.datasets["generator://matmul-v0"]
yield ds
def test_matmul_size(matmul_dataset: MatmulDataset):
assert matmul_dataset.size == 1
assert len(matmul_dataset) == 1
@pytest.mark.parametrize("index", range(1) if is_ci() else range(1))
def test_matmul_random_select(
env: MlirEnv, matmul_dataset: MatmulDataset, index: int, tmpwd: Path
):
uri = next(islice(matmul_dataset.benchmark_uris(), index, None))
benchmark = matmul_dataset.benchmark(uri)
assert isinstance(benchmark, MatmulBenchmark)
env.reset(benchmark=benchmark)
assert benchmark.source
benchmark.write_sources_to_directory(tmpwd)
assert (tmpwd / "source.mlir").is_file()
def test_matmul_from_seed_retry_count_exceeded(matmul_dataset: MatmulDataset):
with pytest.raises(
OSError, match=re.escape("matmul failed after 5 attempts with size (4, 4, 4)")
):
matmul_dataset.benchmark_from_size(mnk=(4, 4, 4), max_retries=3, retry_count=5)
def test_matmul_positive_runtimes(env: MlirEnv, matmul_dataset: MatmulDataset):
benchmark = next(matmul_dataset.benchmarks())
env.reset(benchmark=benchmark)
action_space = deepcopy(env.action_space)
action_space.seed(123)
env.step(action_space.sample())
val = env.observation["Runtime"]
assert np.all(np.greater(val, 0))
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/mlir/datasets/matmul_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Utilities for random testing."""
import random
from time import time
from typing import List, Tuple
from compiler_gym.envs import CompilerEnv
from compiler_gym.util.gym_type_hints import ObservationType
def apply_random_trajectory(
env: CompilerEnv,
random_trajectory_length_range=(1, 50),
timeout: int = 0,
) -> List[Tuple[int, ObservationType, float, bool]]:
"""Evaluate and return a random trajectory."""
end_time = time() + timeout
num_actions = random.randint(*random_trajectory_length_range)
trajectory = []
for _ in range(num_actions):
action = env.action_space.sample()
observation, reward, done, _ = env.step(action)
if done:
break # Broken trajectory.
trajectory.append((action, observation, reward, done))
if timeout and time() > end_time:
break
return trajectory
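# A minimal usage sketch (an editorial addition, not part of the original helper):
# it assumes the "llvm-v0" environment is installed and reachable via gym.make(),
# applies a short random trajectory, and reports how many actions were taken.
if __name__ == "__main__":
    import gym

    import compiler_gym.envs.llvm  # noqa register environments

    with gym.make("llvm-v0") as example_env:
        example_env.reset(benchmark="cbench-v1/crc32")
        example_trajectory = apply_random_trajectory(example_env, timeout=60)
        print(f"Applied {len(example_trajectory)} random actions")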
|
CompilerGym-development
|
tests/pytest_plugins/random_util.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/pytest_plugins/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Pytest fixtures for the LLVM CompilerGym environments."""
import os
from pathlib import Path
from typing import Iterable, List
import gym
import pytest
from compiler_gym.datasets import Dataset
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.envs.llvm.datasets.cbench import VALIDATORS
from compiler_gym.third_party import llvm
from compiler_gym.util.runfiles_path import runfiles_path
BENCHMARKS_LIST = Path(runfiles_path("compiler_gym/third_party/cbench/benchmarks.txt"))
def _read_list_file(path: Path) -> Iterable[str]:
with open(str(path)) as f:
for action in f:
if action.strip():
yield action.strip()
BENCHMARK_NAMES = list(_read_list_file(BENCHMARKS_LIST))
# Skip ghostscript on CI as it is just too heavy.
if bool(os.environ.get("CI")):
BENCHMARK_NAMES = [
b for b in BENCHMARK_NAMES if b != "benchmark://cbench-v1/ghostscript"
]
with gym.make("llvm-v0") as env:
ACTION_NAMES = list(env.action_space.names)
OBSERVATION_SPACE_NAMES = sorted(env.observation.spaces.keys())
REWARD_SPACE_NAMES = sorted(env.reward.spaces.keys())
DATASET_NAMES = sorted(d.name for d in env.datasets)
@pytest.fixture(scope="module")
def action_names() -> List[str]:
"""A list of every action."""
return ACTION_NAMES
@pytest.fixture(scope="module", params=OBSERVATION_SPACE_NAMES)
def observation_space(request) -> str:
return request.param
@pytest.fixture(scope="module", params=REWARD_SPACE_NAMES)
def reward_space(request) -> str:
return request.param
@pytest.fixture(scope="module")
def benchmark_names() -> List[str]:
"""A list of every benchmarks."""
return BENCHMARK_NAMES
@pytest.fixture(scope="module", params=ACTION_NAMES)
def action_name(request) -> str:
"""Enumerate the names of actions."""
yield request.param
@pytest.fixture(scope="module", params=BENCHMARK_NAMES)
def benchmark_name(request) -> str:
"""Enumerate the names of benchmarks."""
yield request.param
VALIDATABLE_CBENCH_URIS = [b for b in BENCHMARK_NAMES if b in VALIDATORS]
NON_VALIDATABLE_CBENCH_URIS = [b for b in BENCHMARK_NAMES if b not in VALIDATORS]
@pytest.fixture(scope="module", params=VALIDATABLE_CBENCH_URIS)
def validatable_cbench_uri(request) -> str:
"""Enumerate the names of benchmarks whose semantics can be validated."""
yield request.param
@pytest.fixture(scope="module", params=NON_VALIDATABLE_CBENCH_URIS)
def non_validatable_cbench_uri(request) -> str:
"""Enumerate the names of benchmarks whose semantics cannot be validated."""
yield request.param
@pytest.fixture(scope="function")
def env() -> LlvmEnv:
"""Create an LLVM environment."""
with gym.make("llvm-v0") as env_:
yield env_
@pytest.fixture(scope="module")
def llvm_opt() -> Path:
"""Test fixture that yields the path of opt."""
return llvm.opt_path()
@pytest.fixture(scope="module")
def llvm_diff() -> Path:
"""Test fixture that yields the path of llvm-diff."""
return llvm.llvm_diff_path()
@pytest.fixture(scope="module")
def clang() -> Path:
"""Test fixture that yields the path of clang."""
return llvm.clang_path()
@pytest.fixture(scope="module", params=DATASET_NAMES)
def dataset_name(request) -> str:
return request.param
@pytest.fixture(scope="module", params=DATASET_NAMES)
def dataset(request) -> Dataset:
with gym.make("llvm-v0") as env:
return env.datasets[request.param]
|
CompilerGym-development
|
tests/pytest_plugins/llvm.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Pytest fixtures for CompilerGym tests."""
import os
import sys
import tempfile
from pathlib import Path
from typing import List
import docker
import pytest
from absl import flags as absl_flags
from compiler_gym.util.runfiles_path import transient_cache_path
FLAGS = absl_flags.FLAGS
def is_ci() -> bool:
"""Return whether running in CI environment."""
return os.environ.get("CI", "") != ""
def in_bazel() -> bool:
"""Return whether running under bazel."""
return os.environ.get("TEST_WORKSPACE", "") != ""
def docker_is_available() -> bool:
"""Return whether docker is available."""
try:
docker.from_env()
return True
except docker.errors.DockerException:
return False
# Decorator to skip a test in the CI environment.
skip_on_ci = pytest.mark.skipif(is_ci(), reason="Skip on CI")
# Decorator to run a test only in the CI environment.
ci_only = pytest.mark.skipif(not is_ci(), reason="Runs only on CI")
# Decorator to mark a test as skipped if not on Linux.
linux_only = pytest.mark.skipif(
not sys.platform.lower().startswith("linux"), reason="Linux only"
)
# Decorator to mark a test as skipped if not on macOS.
macos_only = pytest.mark.skipif(
not sys.platform.lower().startswith("darwin"), reason="macOS only"
)
# Decorator to mark a test as skipped if not running under bazel.
bazel_only = pytest.mark.skipif(not in_bazel(), reason="bazel only")
# Decorator to mark a test as skipped if not running in the `make test`
# environment.
install_test_only = pytest.mark.skipif(in_bazel(), reason="test only")
# Decorator to skip a test if docker is not available.
with_docker = pytest.mark.skipif(
not docker_is_available(), reason="Docker is not available"
)
# Decorator to skip a test if docker is available.
without_docker = pytest.mark.skipif(
    docker_is_available(), reason="Docker is available"
)
@pytest.fixture(scope="function")
def tmpwd() -> Path:
"""A fixture that creates a temporary directory, changes to it, and yields the path."""
tmpdir_root = transient_cache_path("tests")
tmpdir_root.mkdir(exist_ok=True, parents=True)
with tempfile.TemporaryDirectory(dir=tmpdir_root, prefix="tmpwd-") as d:
pwd = os.getcwd()
try:
os.chdir(d)
yield Path(d)
finally:
os.chdir(pwd)
@pytest.fixture(scope="function")
def temporary_environ():
"""A fixture that allows you to modify os.environ without affecting other tests."""
old_env = os.environ.copy()
try:
yield os.environ
finally:
os.environ.clear()
os.environ.update(old_env)
def set_command_line_flags(flags: List[str]):
"""Set the command line flags."""
sys.argv = flags
FLAGS.unparse_flags()
FLAGS(flags)
|
CompilerGym-development
|
tests/pytest_plugins/common.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Pytest fixtures for the GCC CompilerGym environments."""
import subprocess
from functools import lru_cache
from typing import Iterable
import pytest
from tests.pytest_plugins.common import docker_is_available
@lru_cache(maxsize=2)
def system_gcc_is_available() -> bool:
"""Return whether there is a system GCC available."""
try:
stdout = subprocess.check_output(
["gcc", "--version"], universal_newlines=True, stderr=subprocess.DEVNULL
)
# On some systems "gcc" may alias to a different compiler, so check for
# the presence of the name "gcc" in the first line of output.
return "gcc" in stdout.split("\n")[0].lower()
except (subprocess.CalledProcessError, FileNotFoundError):
return False
def system_gcc_path() -> str:
"""Return the path of the system GCC as a string."""
return subprocess.check_output(
["which", "gcc"], universal_newlines=True, stderr=subprocess.DEVNULL
).strip()
def gcc_environment_is_supported() -> bool:
"""Return whether the requirements for the GCC environment are met."""
return docker_is_available() or system_gcc_is_available()
def gcc_bins() -> Iterable[str]:
"""Return a list of available GCCs."""
if docker_is_available():
yield "docker:gcc:11.2.0"
if system_gcc_is_available():
yield system_gcc_path()
@pytest.fixture(scope="module", params=gcc_bins())
def gcc_bin(request) -> str:
return request.param
# Decorator to skip a test if GCC environment is not supported.
with_gcc_support = pytest.mark.skipif(
    not gcc_environment_is_supported(), reason="GCC environment is not supported"
)
# Decorator to skip a test if GCC environment is supported.
without_gcc_support = pytest.mark.skipif(
    gcc_environment_is_supported(), reason="GCC environment is supported"
)
# Decorator to skip a test if system GCC is not available.
with_system_gcc = pytest.mark.skipif(
not system_gcc_is_available(), reason="GCC is not available"
)
# Decorator to skip a test if system GCC is available.
without_system_gcc = pytest.mark.skipif(
system_gcc_is_available(), reason="GCC is available"
)
|
CompilerGym-development
|
tests/pytest_plugins/gcc.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Pytest fixtures for the MLIR CompilerGym environments."""
from pathlib import Path
from typing import Iterable
import gym
import pytest
from compiler_gym.datasets import Dataset
from compiler_gym.envs.mlir import MlirEnv
def _read_list_file(path: Path) -> Iterable[str]:
with open(str(path)) as f:
for action in f:
if action.strip():
yield action.strip()
with gym.make("mlir-v0") as env:
OBSERVATION_SPACE_NAMES = sorted(env.observation.spaces.keys())
REWARD_SPACE_NAMES = sorted(env.reward.spaces.keys())
DATASET_NAMES = sorted(d.name for d in env.datasets)
@pytest.fixture(scope="module", params=OBSERVATION_SPACE_NAMES)
def observation_space(request) -> str:
return request.param
@pytest.fixture(scope="module", params=REWARD_SPACE_NAMES)
def reward_space(request) -> str:
return request.param
@pytest.fixture(scope="function")
def env() -> MlirEnv:
"""Create an LLVM environment."""
with gym.make("mlir-v0") as env_:
yield env_
@pytest.fixture(scope="module", params=DATASET_NAMES)
def dataset_name(request) -> str:
return request.param
@pytest.fixture(scope="module", params=DATASET_NAMES)
def dataset(request) -> Dataset:
with gym.make("mlir-v0") as env:
return env.datasets[request.param]
|
CompilerGym-development
|
tests/pytest_plugins/mlir.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/third_party/__init__.py
|
# Adapted from David Malcolm's gcc invocation library.
#
# Copyright 2013 David Malcolm <dmalcolm@redhat.com>
# Copyright 2013 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
from compiler_gym.third_party.gccinvocation.gccinvocation import (
GccInvocation,
cmdline_to_argv,
)
from tests.test_main import main
def test_cmdline_to_argv_simple():
argstr = (
"gcc -o scripts/genksyms/genksyms"
" scripts/genksyms/genksyms.o"
" scripts/genksyms/parse.tab.o"
" scripts/genksyms/lex.lex.o"
)
assert cmdline_to_argv(argstr) == [
"gcc",
"-o",
"scripts/genksyms/genksyms",
"scripts/genksyms/genksyms.o",
"scripts/genksyms/parse.tab.o",
"scripts/genksyms/lex.lex.o",
]
def test_cmdline_to_argv_quoted():
# (heavily edited from a kernel build)
argstr = (
"cc1 -quiet"
" -DCONFIG_AS_CFI_SIGNAL_FRAME=1"
# Here's the awkward argument:
' -DIPATH_IDSTR="QLogic kernel.org driver"'
" -DIPATH_KERN_TYPE=0 -DKBUILD_STR(s)=#s"
" -fprofile-arcs -"
)
assert cmdline_to_argv(argstr) == [
"cc1",
"-quiet",
"-DCONFIG_AS_CFI_SIGNAL_FRAME=1",
'-DIPATH_IDSTR="QLogic kernel.org driver"',
"-DIPATH_KERN_TYPE=0",
"-DKBUILD_STR(s)=#s",
"-fprofile-arcs",
"-",
]
def test_parse_compile():
args = (
"gcc -pthread -fno-strict-aliasing -O2 -g -pipe -Wall"
" -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector"
" --param=ssp-buffer-size=4 -m64 -mtune=generic -D_GNU_SOURCE"
" -fPIC -fwrapv -DNDEBUG -O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2"
" -fexceptions -fstack-protector --param=ssp-buffer-size=4 -m64"
' -mtune=generic -D_GNU_SOURCE -fPIC -fwrapv -fPIC -DVERSION="0.7"'
" -I/usr/include/python2.7 -c python-ethtool/ethtool.c"
" -o build/temp.linux-x86_64-2.7/python-ethtool/ethtool.o"
).split()
gccinv = GccInvocation(args)
assert gccinv.argv == args
assert gccinv.executable == "gcc"
assert gccinv.is_driver
assert gccinv.sources == ["python-ethtool/ethtool.c"]
assert gccinv.defines == ["_GNU_SOURCE", "NDEBUG", "_GNU_SOURCE", 'VERSION="0.7"']
assert gccinv.includepaths == ["/usr/include/python2.7"]
assert gccinv.otherargs == [
"-pthread",
"-fno-strict-aliasing",
"-O2",
"-g",
"-pipe",
"-Wall",
"-Wp,-D_FORTIFY_SOURCE=2",
"-fexceptions",
"-fstack-protector",
"--param=ssp-buffer-size=4",
"-m64",
"-mtune=generic",
"-fPIC",
"-fwrapv",
"-O2",
"-g",
"-pipe",
"-Wall",
"-Wp,-D_FORTIFY_SOURCE=2",
"-fexceptions",
"-fstack-protector",
"--param=ssp-buffer-size=4",
"-m64",
"-mtune=generic",
"-fPIC",
"-fwrapv",
"-fPIC",
"-c",
]
def test_parse_link():
args = (
"gcc -pthread -shared -Wl,-z,relro"
" build/temp.linux-x86_64-2.7/python-ethtool/ethtool.o"
" build/temp.linux-x86_64-2.7/python-ethtool/etherinfo.o"
" build/temp.linux-x86_64-2.7/python-ethtool/etherinfo_obj.o"
" build/temp.linux-x86_64-2.7/python-ethtool/etherinfo_ipv6_obj.o"
" -L/usr/lib64 -lnl -lpython2.7"
" -o build/lib.linux-x86_64-2.7/ethtool.so"
).split()
gccinv = GccInvocation(args)
assert gccinv.argv == args
assert gccinv.executable == "gcc"
assert gccinv.sources == [
"build/temp.linux-x86_64-2.7/python-ethtool/ethtool.o",
"build/temp.linux-x86_64-2.7/python-ethtool/etherinfo.o",
"build/temp.linux-x86_64-2.7/python-ethtool/etherinfo_obj.o",
"build/temp.linux-x86_64-2.7/python-ethtool/etherinfo_ipv6_obj.o",
]
assert gccinv.defines == []
assert gccinv.includepaths == []
def test_parse_cplusplus():
args = (
"/usr/bin/c++ -DPYSIDE_EXPORTS -DQT_GUI_LIB -DQT_CORE_LIB"
" -DQT_NO_DEBUG -O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2"
" -fexceptions -fstack-protector --param=ssp-buffer-size=4"
" -m64 -mtune=generic -Wall -fvisibility=hidden"
" -Wno-strict-aliasing -O3 -DNDEBUG -fPIC"
" -I/usr/include/QtGui -I/usr/include/QtCore"
" -I/builddir/build/BUILD/pyside-qt4.7+1.1.0/libpyside"
" -I/usr/include/shiboken -I/usr/include/python2.7"
" -o CMakeFiles/pyside.dir/dynamicqmetaobject.cpp.o"
" -c /builddir/build/BUILD/pyside-qt4.7+1.1.0/libpyside/dynamicqmetaobject.cpp"
)
gccinv = GccInvocation(args.split())
assert gccinv.executable == "/usr/bin/c++"
assert gccinv.progname == "c++"
assert gccinv.is_driver
assert gccinv.sources == [
"/builddir/build/BUILD/pyside-qt4.7+1.1.0/libpyside/dynamicqmetaobject.cpp"
]
assert "PYSIDE_EXPORTS" in gccinv.defines
assert "NDEBUG" in gccinv.defines
assert "/builddir/build/BUILD/pyside-qt4.7+1.1.0/libpyside" in gccinv.includepaths
assert "--param=ssp-buffer-size=4" in gccinv.otherargs
def test_complex_invocation():
# A command line taken from libreoffice/3.5.0.3/5.fc17/x86_64/build.log was:
# R=/builddir/build/BUILD && S=$R/libreoffice-3.5.0.3 && O=$S/solver/unxlngx6.pro && W=$S/workdir/unxlngx6.pro && mkdir -p $W/CxxObject/xml2cmp/source/support/ $W/Dep/CxxObject/xml2cmp/source/support/ && g++ -DCPPU_ENV=gcc3 -DENABLE_GRAPHITE -DENABLE_GTK -DENABLE_KDE4 -DGCC -DGXX_INCLUDE_PATH=/usr/include/c++/4.7.2 -DHAVE_GCC_VISIBILITY_FEATURE -DHAVE_THREADSAFE_STATICS -DLINUX -DNDEBUG -DOPTIMIZE -DOSL_DEBUG_LEVEL=0 -DPRODUCT -DSOLAR_JAVA -DSUPD=350 -DUNIX -DUNX -DVCL -DX86_64 -D_PTHREADS -D_REENTRANT -Wall -Wendif-labels -Wextra -fmessage-length=0 -fno-common -pipe -fPIC -Wshadow -Wsign-promo -Woverloaded-virtual -Wno-non-virtual-dtor -fvisibility=hidden -fvisibility-inlines-hidden -std=c++0x -ggdb2 -Wp,-D_FORTIFY_SOURCE=2 -fstack-protector --param=ssp-buffer-size=4 -m64 -mtune=generic -DEXCEPTIONS_ON -fexceptions -fno-enforce-eh-specs -Wp,-D_FORTIFY_SOURCE=2 -fstack-protector --param=ssp-buffer-size=4 -m64 -mtune=generic -c $S/xml2cmp/source/support/cmdline.cxx -o $W/CxxObject/xml2cmp/source/support/cmdline.o -MMD -MT $W/CxxObject/xml2cmp/source/support/cmdline.o -MP -MF $W/Dep/CxxObject/xml2cmp/source/support/cmdline.d -I$S/xml2cmp/source/support/ -I$O/inc/stl -I$O/inc/external -I$O/inc -I$S/solenv/inc/unxlngx6 -I$S/solenv/inc -I$S/res -I/usr/lib/jvm/java-1.7.0-openjdk.x86_64/include -I/usr/lib/jvm/java-1.7.0-openjdk.x86_64/include/linux -I/usr/lib/jvm/java-1.7.0-openjdk.x86_64/include/native_threads/include
args = (
"g++ -DCPPU_ENV=gcc3 -DENABLE_GRAPHITE -DENABLE_GTK"
" -DENABLE_KDE4 -DGCC -DGXX_INCLUDE_PATH=/usr/include/c++/4.7.2"
" -DHAVE_GCC_VISIBILITY_FEATURE -DHAVE_THREADSAFE_STATICS"
" -DLINUX -DNDEBUG -DOPTIMIZE -DOSL_DEBUG_LEVEL=0 -DPRODUCT"
" -DSOLAR_JAVA -DSUPD=350 -DUNIX -DUNX -DVCL -DX86_64"
" -D_PTHREADS -D_REENTRANT -Wall -Wendif-labels -Wextra"
" -fmessage-length=0 -fno-common -pipe -fPIC -Wshadow"
" -Wsign-promo -Woverloaded-virtual -Wno-non-virtual-dtor"
" -fvisibility=hidden -fvisibility-inlines-hidden"
" -std=c++0x -ggdb2 -Wp,-D_FORTIFY_SOURCE=2"
" -fstack-protector --param=ssp-buffer-size=4 -m64"
" -mtune=generic -DEXCEPTIONS_ON -fexceptions"
" -fno-enforce-eh-specs -Wp,-D_FORTIFY_SOURCE=2"
" -fstack-protector --param=ssp-buffer-size=4 -m64"
" -mtune=generic -c $S/xml2cmp/source/support/cmdline.cxx"
" -o $W/CxxObject/xml2cmp/source/support/cmdline.o -MMD"
" -MT $W/CxxObject/xml2cmp/source/support/cmdline.o -MP"
" -MF $W/Dep/CxxObject/xml2cmp/source/support/cmdline.d"
" -I$S/xml2cmp/source/support/ -I$O/inc/stl"
" -I$O/inc/external -I$O/inc -I$S/solenv/inc/unxlngx6"
" -I$S/solenv/inc -I$S/res"
" -I/usr/lib/jvm/java-1.7.0-openjdk.x86_64/include"
" -I/usr/lib/jvm/java-1.7.0-openjdk.x86_64/include/linux"
" -I/usr/lib/jvm/java-1.7.0-openjdk.x86_64/include/native_threads/include"
)
# Expand the shell vars in the arguments:
args = args.replace("$W", "$S/workdir/unxlngx6.pro")
args = args.replace("$O", "$S/solver/unxlngx6.pro")
args = args.replace("$S", "$R/libreoffice-3.5.0.3")
args = args.replace("$R", "/builddir/build/BUILD")
assert "$" not in args
gccinv = GccInvocation(args.split())
assert gccinv.executable == "g++"
assert gccinv.sources == [
"/builddir/build/BUILD/libreoffice-3.5.0.3/xml2cmp/source/support/cmdline.cxx"
]
assert "CPPU_ENV=gcc3" in gccinv.defines
assert "EXCEPTIONS_ON" in gccinv.defines
assert (
"/builddir/build/BUILD/libreoffice-3.5.0.3/solver/unxlngx6.pro/inc/stl"
in gccinv.includepaths
)
assert (
"/usr/lib/jvm/java-1.7.0-openjdk.x86_64/include/native_threads/include"
in gccinv.includepaths
)
assert "-Wall" in gccinv.otherargs
def test_restrict_to_one_source():
args = (
"gcc -fPIC -shared -flto -flto-partition=none"
" -Isomepath -DFOO"
" -o output.o input-f.c input-g.c input-h.c"
)
gccinv = GccInvocation(args.split())
assert gccinv.sources == ["input-f.c", "input-g.c", "input-h.c"]
gccinv2 = gccinv.restrict_to_one_source("input-g.c")
assert gccinv2.sources == ["input-g.c"]
assert gccinv2.argv == [
"gcc",
"-DFOO",
"-Isomepath",
"-fPIC",
"-shared",
"-flto",
"-flto-partition=none",
"input-g.c",
]
def test_kernel_build():
argstr = (
"gcc -Wp,-MD,drivers/media/pci/mantis/.mantis_uart.o.d"
" -nostdinc -isystem /usr/lib/gcc/x86_64-redhat-linux/4.4.7/include"
" -I/home/david/linux-3.9.1/arch/x86/include"
" -Iarch/x86/include/generated -Iinclude"
" -I/home/david/linux-3.9.1/arch/x86/include/uapi"
" -Iarch/x86/include/generated/uapi"
" -I/home/david/linux-3.9.1/include/uapi"
" -Iinclude/generated/uapi"
" -include /home/david/linux-3.9.1/include/linux/kconfig.h"
" -D__KERNEL__ -Wall -Wundef -Wstrict-prototypes"
" -Wno-trigraphs -fno-strict-aliasing -fno-common"
" -Werror-implicit-function-declaration"
" -Wno-format-security -fno-delete-null-pointer-checks"
" -Os -m64 -mtune=generic -mno-red-zone -mcmodel=kernel"
" -funit-at-a-time -maccumulate-outgoing-args"
" -fstack-protector -DCONFIG_AS_CFI=1"
" -DCONFIG_AS_CFI_SIGNAL_FRAME=1"
" -DCONFIG_AS_CFI_SECTIONS=1 -DCONFIG_AS_FXSAVEQ=1"
" -DCONFIG_AS_AVX=1 -pipe -Wno-sign-compare"
" -fno-asynchronous-unwind-tables -mno-sse -mno-mmx"
" -mno-sse2 -mno-3dnow -mno-avx -fno-reorder-blocks"
" -fno-ipa-cp-clone -Wframe-larger-than=2048"
" -Wno-unused-but-set-variable -fno-omit-frame-pointer"
" -fno-optimize-sibling-calls -g"
" -femit-struct-debug-baseonly -fno-var-tracking -pg"
" -fno-inline-functions-called-once"
" -Wdeclaration-after-statement -Wno-pointer-sign"
" -fno-strict-overflow -fconserve-stack"
" -DCC_HAVE_ASM_GOTO -Idrivers/media/dvb-core/"
" -Idrivers/media/dvb-frontends/ -fprofile-arcs"
" -ftest-coverage -DKBUILD_STR(s)=#s"
" -DKBUILD_BASENAME=KBUILD_STR(mantis_uart)"
" -DKBUILD_MODNAME=KBUILD_STR(mantis_core) -c"
" -o drivers/media/pci/mantis/.tmp_mantis_uart.o"
" drivers/media/pci/mantis/mantis_uart.c"
)
gccinv = GccInvocation(argstr.split())
assert gccinv.executable == "gcc"
assert gccinv.progname == "gcc"
assert gccinv.sources == ["drivers/media/pci/mantis/mantis_uart.c"]
assert "__KERNEL__" in gccinv.defines
assert "KBUILD_STR(s)=#s" in gccinv.defines
def test_kernel_cc1():
argstr = (
"/usr/libexec/gcc/x86_64-redhat-linux/4.4.7/cc1 -quiet"
" -nostdinc"
" -I/home/david/linux-3.9.1/arch/x86/include"
" -Iarch/x86/include/generated -Iinclude"
" -I/home/david/linux-3.9.1/arch/x86/include/uapi"
" -Iarch/x86/include/generated/uapi"
" -I/home/david/linux-3.9.1/include/uapi"
" -Iinclude/generated/uapi -Idrivers/media/dvb-core/"
" -Idrivers/media/dvb-frontends/ -D__KERNEL__"
" -DCONFIG_AS_CFI=1 -DCONFIG_AS_CFI_SIGNAL_FRAME=1"
" -DCONFIG_AS_CFI_SECTIONS=1 -DCONFIG_AS_FXSAVEQ=1"
" -DCONFIG_AS_AVX=1 -DCC_HAVE_ASM_GOTO -DKBUILD_STR(s)=#s"
" -DKBUILD_BASENAME=KBUILD_STR(mantis_uart)"
" -DKBUILD_MODNAME=KBUILD_STR(mantis_core)"
" -isystem /usr/lib/gcc/x86_64-redhat-linux/4.4.7/include"
" -include /home/david/linux-3.9.1/include/linux/kconfig.h"
" -MD drivers/media/pci/mantis/.mantis_uart.o.d"
" drivers/media/pci/mantis/mantis_uart.c -quiet"
" -dumpbase mantis_uart.c -m64 -mtune=generic"
" -mno-red-zone -mcmodel=kernel -maccumulate-outgoing-args"
" -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx"
" -auxbase-strip drivers/media/pci/mantis/.tmp_mantis_uart.o"
" -g -Os -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs"
" -Werror-implicit-function-declaration -Wno-format-security"
" -Wno-sign-compare -Wframe-larger-than=2048"
" -Wno-unused-but-set-variable -Wdeclaration-after-statement"
" -Wno-pointer-sign -p -fno-strict-aliasing -fno-common"
" -fno-delete-null-pointer-checks -funit-at-a-time"
" -fstack-protector -fno-asynchronous-unwind-tables"
" -fno-reorder-blocks -fno-ipa-cp-clone"
" -fno-omit-frame-pointer -fno-optimize-sibling-calls"
" -femit-struct-debug-baseonly -fno-var-tracking"
" -fno-inline-functions-called-once -fno-strict-overflow"
" -fconserve-stack -fprofile-arcs -ftest-coverage -o -"
)
gccinv = GccInvocation(argstr.split())
assert gccinv.executable == "/usr/libexec/gcc/x86_64-redhat-linux/4.4.7/cc1"
assert gccinv.progname == "cc1"
assert not gccinv.is_driver
assert gccinv.sources == ["drivers/media/pci/mantis/mantis_uart.c"]
def test_not_gcc():
argstr = "objdump -h drivers/media/pci/mantis/.tmp_mantis_uart.o"
gccinv = GccInvocation(argstr.split())
assert gccinv.executable == "objdump"
assert gccinv.progname == "objdump"
assert not gccinv.is_driver
def test_dash_x():
argstr = (
"gcc -D__KERNEL__ -Wall -Wundef -Wstrict-prototypes"
" -Wno-trigraphs -fno-strict-aliasing -fno-common"
" -Werror-implicit-function-declaration"
" -Wno-format-security -fno-delete-null-pointer-checks"
" -Os -m64 -mno-sse -mpreferred-stack-boundary=3"
" -c -x c /dev/null -o .20355.tmp"
)
gccinv = GccInvocation(argstr.split())
assert gccinv.executable == "gcc"
assert gccinv.sources == ["/dev/null"]
def test_pipes():
argstr = (
"gcc -D__KERNEL__ -S -x c -c -O0 -mcmodel=kernel" " -fstack-protector" " - -o -"
)
gccinv = GccInvocation(argstr.split())
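    # A lone "-" tells gcc to read the source from stdin (and "-o -" writes to
    # stdout), so it is still reported as a source.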
assert gccinv.sources == ["-"]
def test_print_file_name():
argstr = "gcc -print-file-name=include"
gccinv = GccInvocation(argstr.split())
assert gccinv.sources == []
assert "-print-file-name=include" in gccinv.otherargs
def test_collect2():
# From a kernel build:
argstr = (
"/usr/libexec/gcc/x86_64-redhat-linux/4.4.7/collect2"
" --eh-frame-hdr --build-id -m elf_x86_64"
" --hash-style=gnu -dynamic-linker"
" /lib64/ld-linux-x86-64.so.2 -o .20501.tmp"
" -L/usr/lib/gcc/x86_64-redhat-linux/4.4.7"
" -L/usr/lib/gcc/x86_64-redhat-linux/4.4.7"
" -L/usr/lib/gcc/x86_64-redhat-linux/4.4.7/../../../../lib64"
" -L/lib/../lib64 -L/usr/lib/../lib64"
" -L/usr/lib/gcc/x86_64-redhat-linux/4.4.7/../../.."
" --build-id /tmp/cckRREmI.o"
)
gccinv = GccInvocation(argstr.split())
assert gccinv.progname == "collect2"
assert not gccinv.is_driver
assert gccinv.sources == []
def test_link():
# From a kernel build:
argstr = (
"gcc -o scripts/genksyms/genksyms"
" scripts/genksyms/genksyms.o"
" scripts/genksyms/parse.tab.o"
" scripts/genksyms/lex.lex.o"
)
gccinv = GccInvocation(argstr.split())
assert gccinv.progname == "gcc"
assert gccinv.sources == [
"scripts/genksyms/genksyms.o",
"scripts/genksyms/parse.tab.o",
"scripts/genksyms/lex.lex.o",
]
def test_quoted_spaces():
# Ensure we can handle spaces within a quoted argument
argstr = (
"/usr/libexec/gcc/x86_64-redhat-linux/4.4.7/cc1 -quiet"
" -nostdinc"
" -I/home/david/linux-3.9.1/arch/x86/include"
" -Iarch/x86/include/generated -Iinclude"
" -I/home/david/linux-3.9.1/arch/x86/include/uapi"
" -Iarch/x86/include/generated/uapi"
" -I/home/david/linux-3.9.1/include/uapi"
" -Iinclude/generated/uapi -D__KERNEL__ -DCONFIG_AS_CFI=1"
" -DCONFIG_AS_CFI_SIGNAL_FRAME=1"
" -DCONFIG_AS_CFI_SECTIONS=1 -DCONFIG_AS_FXSAVEQ=1"
" -DCONFIG_AS_AVX=1 -DCC_HAVE_ASM_GOTO"
# Here's the awkward argument:
' -DIPATH_IDSTR="QLogic kernel.org driver"'
" -DIPATH_KERN_TYPE=0 -DKBUILD_STR(s)=#s"
" -DKBUILD_BASENAME=KBUILD_STR(ipath_cq)"
" -DKBUILD_MODNAME=KBUILD_STR(ib_ipath)"
" -isystem /usr/lib/gcc/x86_64-redhat-linux/4.4.7/include"
" -include /home/david/linux-3.9.1/include/linux/kconfig.h"
" -MD drivers/infiniband/hw/ipath/.ipath_cq.o.d"
" drivers/infiniband/hw/ipath/ipath_cq.c"
" -quiet -dumpbase ipath_cq.c -m64 -mtune=generic"
" -mno-red-zone -mcmodel=kernel"
" -maccumulate-outgoing-args -mno-sse -mno-mmx -mno-sse2"
" -mno-3dnow -mno-avx -auxbase-strip"
" drivers/infiniband/hw/ipath/.tmp_ipath_cq.o"
" -g -Os -Wall -Wundef -Wstrict-prototypes"
" -Wno-trigraphs -Werror-implicit-function-declaration"
" -Wno-format-security -Wno-sign-compare"
" -Wframe-larger-than=2048 -Wno-unused-but-set-variable"
" -Wdeclaration-after-statement -Wno-pointer-sign -p"
" -fno-strict-aliasing -fno-common"
" -fno-delete-null-pointer-checks -funit-at-a-time"
" -fstack-protector -fno-asynchronous-unwind-tables"
" -fno-reorder-blocks -fno-ipa-cp-clone"
" -fno-omit-frame-pointer -fno-optimize-sibling-calls"
" -femit-struct-debug-baseonly -fno-var-tracking"
" -fno-inline-functions-called-once"
" -fno-strict-overflow -fconserve-stack"
" -fprofile-arcs -ftest-coverage -o -"
)
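    # from_cmdline() is used instead of a plain str.split() so that the quoted
    # argument with embedded spaces survives as a single token.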
gccinv = GccInvocation.from_cmdline(argstr)
assert gccinv.sources == ["drivers/infiniband/hw/ipath/ipath_cq.c"]
assert 'IPATH_IDSTR="QLogic kernel.org driver"' in gccinv.defines
assert "KBUILD_STR(s)=#s" in gccinv.defines
assert "KBUILD_BASENAME=KBUILD_STR(ipath_cq)" in gccinv.defines
def test_space_after_dash_D():
# Note the space between the -D and its argument:
argstr = (
"gcc -c -x c -D __KERNEL__ -D SOME_OTHER_DEFINE /dev/null -o /tmp/ccqbm5As.s"
)
gccinv = GccInvocation.from_cmdline(argstr)
assert gccinv.defines == ["__KERNEL__", "SOME_OTHER_DEFINE"]
assert gccinv.sources == ["/dev/null"]
def test_space_after_dash_I():
argstr = (
"./install/libexec/gcc/x86_64-unknown-linux-gnu/4.9.0/cc1 -quiet"
" -nostdinc"
" -I somedir"
" -I some/other/dir"
" -D __KERNEL__"
" -D CONFIG_AS_CFI=1"
" -D CONFIG_AS_CFI_SIGNAL_FRAME=1"
" -D KBUILD_STR(s)=#s"
" -D KBUILD_BASENAME=KBUILD_STR(empty)"
" -D KBUILD_MODNAME=KBUILD_STR(empty)"
" scripts/mod/empty.c"
" -o -"
)
gccinv = GccInvocation.from_cmdline(argstr)
assert gccinv.defines == [
"__KERNEL__",
"CONFIG_AS_CFI=1",
"CONFIG_AS_CFI_SIGNAL_FRAME=1",
"KBUILD_STR(s)=#s",
"KBUILD_BASENAME=KBUILD_STR(empty)",
"KBUILD_MODNAME=KBUILD_STR(empty)",
]
assert gccinv.sources == ["scripts/mod/empty.c"]
def test_space_after_dash_U():
argstr = (
"./install/libexec/gcc/x86_64-unknown-linux-gnu/4.9.0/cc1"
" -E -lang-asm -quiet -nostdinc -C -C"
"-P -P"
" -U x86"
" -isystem /some/dir"
" -include /some/path/to/kconfig.h"
" -MD arch/x86/vdso/.vdso.lds.d"
" arch/x86/vdso/vdso.lds.S"
" -o arch/x86/vdso/vdso.lds"
" -mtune=generic -march=x86-64 -fno-directives-only"
)
gccinv = GccInvocation.from_cmdline(argstr)
assert gccinv.sources == ["arch/x86/vdso/vdso.lds.S"]
def test_MD_without_arg():
argstr = (
"/usr/bin/gcc"
" -Wp,-MD,arch/x86/purgatory/.purgatory.o.d"
" -nostdinc"
" -isystem"
" /usr/lib/gcc/x86_64-redhat-linux/5.1.1/include"
" -I./arch/x86/include"
" -Iarch/x86/include/generated/uapi"
" -Iarch/x86/include/generated"
" -Iinclude"
" -I./arch/x86/include/uapi"
" -Iarch/x86/include/generated/uapi"
" -I./include/uapi"
" -Iinclude/generated/uapi"
" -include"
" ./include/linux/kconfig.h"
" -D__KERNEL__"
" -fno-strict-aliasing"
" -Wall"
" -Wstrict-prototypes"
" -fno-zero-initialized-in-bss"
" -fno-builtin"
" -ffreestanding"
" -c"
" -MD"
" -Os"
" -mcmodel=large"
" -m64"
" -DKBUILD_STR(s)=#s"
" -DKBUILD_BASENAME=KBUILD_STR(purgatory)"
" -DKBUILD_MODNAME=KBUILD_STR(purgatory)"
" -c"
" -o"
" arch/x86/purgatory/purgatory.o"
" arch/x86/purgatory/purgatory.c"
)
gccinv = GccInvocation.from_cmdline(argstr)
assert gccinv.sources == ["arch/x86/purgatory/purgatory.c"]
def test_openssl_invocation():
argstr = (
"/usr/bin/gcc"
" -Werror"
" -D"
" OPENSSL_DOING_MAKEDEPEND"
" -M"
" -fPIC"
" -DOPENSSL_PIC"
" -DZLIB"
" -DOPENSSL_THREADS"
" -D_REENTRANT"
" -DDSO_DLFCN"
" -DHAVE_DLFCN_H"
" -DKRB5_MIT"
" -m64"
" -DL_ENDIAN"
" -DTERMIO"
" -Wall"
" -O2"
" -g"
" -pipe"
" -Wall"
" -Werror=format-security"
" -Wp,-D_FORTIFY_SOURCE=2"
" -fexceptions"
" -fstack-protector-strong"
" --param=ssp-buffer-size=4"
" -grecord-gcc-switches"
" -m64"
" -mtune=generic"
" -Wa,--noexecstack"
" -DPURIFY"
" -DOPENSSL_IA32_SSE2"
" -DOPENSSL_BN_ASM_MONT"
" -DOPENSSL_BN_ASM_MONT5"
" -DOPENSSL_BN_ASM_GF2m"
" -DSHA1_ASM"
" -DSHA256_ASM"
" -DSHA512_ASM"
" -DMD5_ASM"
" -DAES_ASM"
" -DVPAES_ASM"
" -DBSAES_ASM"
" -DWHIRLPOOL_ASM"
" -DGHASH_ASM"
" -I."
" -I.."
" -I../include"
" -DOPENSSL_NO_DEPRECATED"
" -DOPENSSL_NO_EC2M"
" -DOPENSSL_NO_EC_NISTP_64_GCC_128"
" -DOPENSSL_NO_GMP"
" -DOPENSSL_NO_GOST"
" -DOPENSSL_NO_JPAKE"
" -DOPENSSL_NO_MDC2"
" -DOPENSSL_NO_RC5"
" -DOPENSSL_NO_RSAX"
" -DOPENSSL_NO_SCTP"
" -DOPENSSL_NO_SRP"
" -DOPENSSL_NO_STORE"
" -DOPENSSL_NO_UNIT_TEST"
" cryptlib.c"
" mem.c"
" mem_clr.c"
" mem_dbg.c"
" cversion.c"
" ex_data.c"
" cpt_err.c"
" ebcdic.c"
" uid.c"
" o_time.c"
" o_str.c"
" o_dir.c"
" o_fips.c"
" o_init.c"
" fips_ers.c"
)
gccinv = GccInvocation.from_cmdline(argstr)
assert gccinv.sources == [
"cryptlib.c",
"mem.c",
"mem_clr.c",
"mem_dbg.c",
"cversion.c",
"ex_data.c",
"cpt_err.c",
"ebcdic.c",
"uid.c",
"o_time.c",
"o_str.c",
"o_dir.c",
"o_fips.c",
"o_init.c",
"fips_ers.c",
]
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/third_party/gccinvocation/gccinvocation_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/third_party/gccinvocation/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for compiler_gym/service/service_cache.py."""
from compiler_gym.service.service_cache import ServiceCache
from tests.test_main import main
def test_service_cache():
cache = ServiceCache()
try:
# Test that expected files exist.
assert cache.path.is_dir()
assert (cache / "logs").is_dir()
assert (cache / "disk").exists()
# Test permissions by creating some empty files.
(cache / "foo.txt").touch()
(cache / "logs" / "foo.txt").touch()
(cache / "disk" / "foo.txt").touch()
finally:
cache.close()
assert not cache.path.is_dir()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/service/service_cache_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/service:connection."""
import gym
import pytest
import compiler_gym.envs # noqa Register LLVM environments.
from compiler_gym.errors import ServiceError
from compiler_gym.service import CompilerGymServiceConnection, ConnectionOpts
from compiler_gym.service.proto import GetSpacesRequest
from tests.test_main import main
@pytest.fixture(scope="function")
def connection() -> CompilerGymServiceConnection:
"""Yields a connection to a local service."""
with gym.make("llvm-v0") as env:
yield env.service
@pytest.fixture(scope="function")
def dead_connection() -> CompilerGymServiceConnection:
"""Yields a connection to a dead local service service."""
with gym.make("llvm-v0") as env:
# Kill the service.
env.service.connection.process.terminate()
env.service.connection.process.communicate()
yield env.service
def test_create_invalid_options():
with pytest.raises(TypeError, match="No endpoint provided for service connection"):
CompilerGymServiceConnection("")
def test_create_channel_failed_subprocess(
dead_connection: CompilerGymServiceConnection,
):
with pytest.raises(
(ServiceError, TimeoutError), match="Failed to create connection to localhost:"
):
CompilerGymServiceConnection(
f"{dead_connection.connection.url}",
ConnectionOpts(
init_max_seconds=1,
init_max_attempts=2,
rpc_init_max_seconds=0.1,
),
)
def test_create_channel_failed_subprocess_rpc_timeout(
dead_connection: CompilerGymServiceConnection,
):
"""Same as the above test, but RPC timeout is long enough that only a single
attempt can be made.
"""
with pytest.raises(
OSError,
match=(
r"Failed to create connection to localhost:\d+ after "
r"[\d\.]+ seconds \(1 attempt made\)"
),
):
CompilerGymServiceConnection(
f"{dead_connection.connection.url}",
ConnectionOpts(
init_max_seconds=0.1,
init_max_attempts=2,
rpc_init_max_seconds=1,
),
)
def test_call_stub_invalid_type(connection: CompilerGymServiceConnection):
with pytest.raises(
TypeError, match="Exception serializing request! Request type: type"
):
connection(connection.stub.GetSpaces, int)
def test_call_stub_negative_timeout(connection: CompilerGymServiceConnection):
with pytest.raises(TimeoutError, match=r"Deadline Exceeded \(-10.0 seconds\)"):
connection(connection.stub.GetSpaces, GetSpacesRequest(), timeout=-10)
def test_ManagedConnection_repr(connection: CompilerGymServiceConnection):
cnx = connection.connection
assert (
repr(cnx)
== f"Connection to service at {cnx.url} running on PID {cnx.process.pid}"
)
# Kill the service.
cnx.process.terminate()
cnx.process.communicate()
assert repr(cnx) == f"Connection to dead service at {cnx.url}"
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/service/connection_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/service/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym:validate."""
from collections.abc import Collection, Mapping
import google.protobuf.any_pb2 as any_pb2
import numpy as np
import pytest
from compiler_gym.service.proto import (
BooleanBox,
BooleanRange,
BooleanSequenceSpace,
BooleanTensor,
ByteBox,
ByteSequenceSpace,
BytesSequenceSpace,
ByteTensor,
CommandlineSpace,
DictEvent,
DictSpace,
DiscreteSpace,
DoubleBox,
DoubleRange,
DoubleSequenceSpace,
DoubleTensor,
Event,
FloatBox,
FloatRange,
FloatSequenceSpace,
FloatTensor,
Int64Box,
Int64Range,
Int64SequenceSpace,
Int64Tensor,
ListEvent,
ListSpace,
NamedDiscreteSpace,
Opaque,
Space,
SpaceSequenceSpace,
StringSpace,
StringTensor,
py_converters,
)
from compiler_gym.spaces import (
Box,
Commandline,
Dict,
Discrete,
NamedDiscrete,
Permutation,
Scalar,
Sequence,
SpaceSequence,
Tuple,
)
from tests.test_main import main
def test_convert_boolean_tensor_message_to_numpy():
shape = [1, 2, 3]
values = [True, False, True, True, False, False]
tensor_message = BooleanTensor(shape=shape, value=values)
np_array = py_converters.convert_tensor_message_to_numpy(tensor_message)
assert np_array.dtype == bool
assert np.array_equal(np_array.shape, shape)
flat_np_array = np_array.flatten()
assert np.array_equal(flat_np_array, values)
def test_convert_numpy_to_boolean_tensor_message():
tensor = np.array([[True], [False]], dtype=bool)
tensor_message = py_converters.convert_numpy_to_boolean_tensor_message(tensor)
assert isinstance(tensor_message, BooleanTensor)
assert np.array_equal(tensor.shape, tensor_message.shape)
assert np.array_equal(tensor.flatten(), tensor_message.value)
def test_convert_byte_tensor_message_to_numpy():
shape = [1, 2, 3]
values = [1, 2, 3, 4, 5, 6]
tensor_message = ByteTensor(shape=shape, value=bytes(values))
np_array = py_converters.convert_tensor_message_to_numpy(tensor_message)
assert np_array.dtype == np.byte
assert np.array_equal(np_array.shape, shape)
flat_np_array = np_array.flatten()
assert np.array_equal(flat_np_array, values)
def test_convert_numpy_to_byte_tensor_message():
tensor = np.array([[1], [2]], dtype=np.int8)
tensor_message = py_converters.convert_numpy_to_byte_tensor_message(tensor)
assert isinstance(tensor_message, ByteTensor)
assert np.array_equal(tensor.shape, tensor_message.shape)
assert tensor.tobytes() == tensor_message.value
def test_convert_int64_tensor_message_to_numpy():
shape = [1, 2, 3]
values = [1, 2, 3, 4, 5, 6]
tensor_message = Int64Tensor(shape=shape, value=values)
np_array = py_converters.convert_tensor_message_to_numpy(tensor_message)
assert np_array.dtype == np.int64
assert np.array_equal(np_array.shape, shape)
flat_np_array = np_array.flatten()
assert np.array_equal(flat_np_array, values)
def test_convert_numpy_to_int64_tensor_message():
tensor = np.array([[1], [2]], dtype=np.int64)
tensor_message = py_converters.convert_numpy_to_int64_tensor_message(tensor)
assert isinstance(tensor_message, Int64Tensor)
assert np.array_equal(tensor.shape, tensor_message.shape)
assert np.array_equal(tensor.flatten(), tensor_message.value)
def test_convert_float_tensor_message_to_numpy():
shape = [1, 2, 3]
values = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
tensor_message = FloatTensor(shape=shape, value=values)
np_array = py_converters.convert_tensor_message_to_numpy(tensor_message)
assert np_array.dtype == np.float32
assert np.array_equal(np_array.shape, shape)
flat_np_array = np_array.flatten()
assert np.array_equal(flat_np_array, values)
def test_convert_numpy_to_float_tensor_message():
tensor = np.array([[1], [2]], dtype=np.float32)
tensor_message = py_converters.convert_numpy_to_float_tensor_message(tensor)
assert isinstance(tensor_message, FloatTensor)
assert np.array_equal(tensor.shape, tensor_message.shape)
assert np.array_equal(tensor.flatten(), tensor_message.value)
def test_convert_double_tensor_message_to_numpy():
shape = [1, 2, 3]
values = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
tensor_message = DoubleTensor(shape=shape, value=values)
np_array = py_converters.convert_tensor_message_to_numpy(tensor_message)
assert np_array.dtype == np.float64
assert np.array_equal(np_array.shape, shape)
flat_np_array = np_array.flatten()
assert np.array_equal(flat_np_array, values)
def test_convert_numpy_to_double_tensor_message():
tensor = np.array([[1], [2]], dtype=float)
tensor_message = py_converters.convert_numpy_to_double_tensor_message(tensor)
assert isinstance(tensor_message, DoubleTensor)
assert np.array_equal(tensor.shape, tensor_message.shape)
assert np.array_equal(tensor.flatten(), tensor_message.value)
def test_convert_string_tensor_message_to_numpy():
shape = [1, 2]
values = ["a", "b"]
tensor_message = StringTensor(shape=shape, value=values)
np_array = py_converters.convert_tensor_message_to_numpy(tensor_message)
assert np_array.dtype == object
assert np.array_equal(np_array.shape, shape)
flat_np_array = np_array.flatten()
assert np.array_equal(flat_np_array, values)
def test_convert_numpy_to_string_tensor_message():
tensor = np.array([["a"], ["b"]], dtype=object)
tensor_message = py_converters.convert_numpy_to_string_tensor_message(tensor)
assert isinstance(tensor_message, StringTensor)
assert np.array_equal(tensor.shape, tensor_message.shape)
assert np.array_equal(tensor.flatten(), tensor_message.value)
def test_numpy_to_tensor_message_converter():
converter = py_converters.NumpyToTensorMessageConverter()
tensor = np.array([[1], [2]], dtype=float)
tensor_message = converter(tensor)
assert isinstance(tensor_message, DoubleTensor)
assert np.array_equal(tensor.shape, tensor_message.shape)
assert np.array_equal(tensor.flatten(), tensor_message.value)
def test_type_based_converter():
converter = py_converters.TypeBasedConverter(
conversion_map={FloatTensor: py_converters.convert_tensor_message_to_numpy}
)
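    # TypeBasedConverter dispatches on the runtime type of its argument, so the
    # FloatTensor message below is routed to convert_tensor_message_to_numpy.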
tensor_message = FloatTensor(shape=[1], value=[1])
numpy_array = converter(tensor_message)
assert isinstance(numpy_array, np.ndarray)
def test_event_message_default_converter():
message_converter = py_converters.TypeBasedConverter(
conversion_map={FloatTensor: py_converters.convert_tensor_message_to_numpy}
)
event_converter = py_converters.EventMessageDefaultConverter(message_converter)
tensor_message = FloatTensor(shape=[1], value=[1])
event_message = Event(float_tensor=tensor_message)
numpy_array = event_converter(event_message)
assert isinstance(numpy_array, np.ndarray)
def test_list_event_message_converter():
message_converter = py_converters.TypeBasedConverter(
conversion_map={FloatTensor: py_converters.convert_tensor_message_to_numpy}
)
event_converter = py_converters.EventMessageDefaultConverter(message_converter)
list_converter = py_converters.ListEventMessageConverter(event_converter)
tensor_message = FloatTensor(shape=[1], value=[1])
event_message = Event(float_tensor=tensor_message)
list_message = ListEvent(event=[event_message])
converted_list = list_converter(list_message)
assert isinstance(converted_list, Collection)
assert len(converted_list) == 1
assert isinstance(converted_list[0], np.ndarray)
def test_to_list_event_message_converter():
converter = py_converters.TypeBasedConverter(
conversion_map={int: lambda x: Event(int64_value=x)}
)
list_converter = py_converters.ToListEventMessageConverter(converter)
original_list = [1, 2]
converted_list = list_converter(original_list)
assert isinstance(converted_list, ListEvent)
assert len(converted_list.event) == len(original_list)
assert converted_list.event[0].int64_value == original_list[0]
assert converted_list.event[1].int64_value == original_list[1]
def test_to_dict_event_message_converter():
converter = py_converters.TypeBasedConverter(
conversion_map={int: lambda x: Event(int64_value=x)}
)
dict_converter = py_converters.ToDictEventMessageConverter(converter)
original_dict = {"a": 1}
converted_dict = dict_converter(original_dict)
assert isinstance(converted_dict, DictEvent)
assert len(converted_dict.event) == len(original_dict)
assert converted_dict.event["a"].int64_value == original_dict["a"]
def test_dict_event_message_converter():
message_converter = py_converters.TypeBasedConverter(
conversion_map={FloatTensor: py_converters.convert_tensor_message_to_numpy}
)
event_converter = py_converters.EventMessageDefaultConverter(message_converter)
dict_converter = py_converters.DictEventMessageConverter(event_converter)
tensor_message = FloatTensor(shape=[1], value=[1])
event_message = Event(float_tensor=tensor_message)
dict_message = DictEvent(event={"event_message_key": event_message})
converted_list = dict_converter(dict_message)
assert isinstance(converted_list, Mapping)
assert len(converted_list) == 1
assert "event_message_key" in converted_list
assert isinstance(converted_list["event_message_key"], np.ndarray)
def test_protobuf_any_unpacker():
unpacker = py_converters.ProtobufAnyUnpacker(
{"compiler_gym.FloatTensor": FloatTensor}
)
any_msg = any_pb2.Any()
tensor_message = FloatTensor(shape=[1], value=[1])
any_msg.Pack(tensor_message)
unpacked_tensor_message = unpacker(any_msg)
assert tensor_message == unpacked_tensor_message
def test_protobuf_any_unpacker_value_error():
unpacker = py_converters.ProtobufAnyUnpacker(
{"IntentionallyWrongType": FloatTensor}
)
any_msg = any_pb2.Any()
tensor_message = FloatTensor(shape=[1], value=[1])
any_msg.Pack(tensor_message)
any_msg.type_url = "IntentionallyWrongType"
with pytest.raises(ValueError):
unpacker(any_msg)
def test_protobuf_any_converter():
unpacker = py_converters.ProtobufAnyUnpacker(
{"compiler_gym.FloatTensor": FloatTensor}
)
type_based_converter = py_converters.TypeBasedConverter(
conversion_map={FloatTensor: py_converters.convert_tensor_message_to_numpy}
)
converter = py_converters.ProtobufAnyConverter(
unpacker=unpacker, message_converter=type_based_converter
)
any_msg = any_pb2.Any()
tensor_message = FloatTensor(shape=[1], value=[1])
any_msg.Pack(tensor_message)
tensor = converter(any_msg)
assert isinstance(tensor, np.ndarray)
def test_message_default_converter():
value = 5
converter = py_converters.make_message_default_converter()
message = Event(int64_value=value)
converted = converter(message)
assert type(converted) == int
assert value == converted
def test_to_event_message_default_converter():
converter = py_converters.to_event_message_default_converter()
val = [{"a": 1}]
converted_val = converter(val)
assert isinstance(converted_val, Event)
assert isinstance(converted_val.event_list, ListEvent)
assert len(converted_val.event_list.event) == 1
assert isinstance(converted_val.event_list.event[0], Event)
assert isinstance(converted_val.event_list.event[0].event_dict, DictEvent)
assert (
converted_val.event_list.event[0].event_dict.event["a"].int64_value
== val[0]["a"]
)
def test_convert_boolean_range_message():
range = BooleanRange(min=False, max=True)
converted_range = py_converters.convert_range_message(range)
assert converted_range.dtype == bool
assert converted_range.min == range.min
assert converted_range.max == range.max
range_default = BooleanRange()
converted_range_default = py_converters.convert_range_message(range_default)
assert converted_range_default.min == False # noqa: E712
assert converted_range_default.max == True # noqa: E712
def test_convert_to_boolean_range_message():
scalar = Scalar(min=False, max=True, dtype=bool, name=None)
range = py_converters.convert_to_range_message(scalar)
assert isinstance(range, BooleanRange)
assert range.min == scalar.min
assert range.max == scalar.max
def test_convert_int64_range_message():
range = Int64Range(min=2, max=3)
converted_range = py_converters.convert_range_message(range)
assert converted_range.dtype == np.int64
assert converted_range.min == range.min
assert converted_range.max == range.max
range_default = Int64Range()
converted_range_default = py_converters.convert_range_message(range_default)
assert converted_range_default.min == np.iinfo(np.int64).min
assert converted_range_default.max == np.iinfo(np.int64).max
def test_convert_to_int64_range_message():
scalar = Scalar(min=2, max=3, dtype=np.int64, name=None)
range = py_converters.convert_to_range_message(scalar)
assert isinstance(range, Int64Range)
assert range.min == 2
assert range.max == 3
def test_convert_float_range_message():
range = FloatRange(min=2, max=3)
converted_range = py_converters.convert_range_message(range)
assert converted_range.dtype == np.float32
assert converted_range.min == range.min
assert converted_range.max == range.max
range_default = DoubleRange()
converted_range_default = py_converters.convert_range_message(range_default)
assert np.isneginf(converted_range_default.min)
assert np.isposinf(converted_range_default.max)
def test_convert_to_float_range_message():
scalar = Scalar(min=2, max=3, dtype=np.float32, name=None)
range = py_converters.convert_to_range_message(scalar)
assert isinstance(range, FloatRange)
assert range.min == 2
assert range.max == 3
def test_convert_double_range_message():
range = DoubleRange(min=2, max=3)
converted_range = py_converters.convert_range_message(range)
assert converted_range.dtype == float
assert converted_range.min == range.min
assert converted_range.max == range.max
range_default = DoubleRange()
converted_range_default = py_converters.convert_range_message(range_default)
assert np.isneginf(converted_range_default.min)
assert np.isposinf(converted_range_default.max)
def test_convert_to_double_range_message():
scalar = Scalar(min=2, max=3, dtype=np.float64, name=None)
range = py_converters.convert_to_range_message(scalar)
assert isinstance(range, DoubleRange)
assert range.min == 2
assert range.max == 3
def test_convert_boolean_box_message():
box = BooleanBox(
low=BooleanTensor(value=[1, 2], shape=[1, 2]),
high=BooleanTensor(value=[2, 3], shape=[1, 2]),
)
converted_box = py_converters.convert_box_message(box)
assert isinstance(converted_box, Box)
assert converted_box.dtype == bool
assert np.array_equal(box.low.shape, converted_box.shape)
assert np.array_equal(box.high.shape, converted_box.shape)
assert np.array_equal(box.low.value, converted_box.low.flatten())
assert np.array_equal(box.high.value, converted_box.high.flatten())
def test_convert_to_boolean_box_message():
box = Box(
low=np.array([[False], [True]]),
high=np.array([[False], [True]]),
name=None,
dtype=bool,
)
converted_box = py_converters.convert_to_box_message(box)
assert isinstance(converted_box, BooleanBox)
assert isinstance(converted_box.low, BooleanTensor)
assert np.array_equal(converted_box.low.shape, box.shape)
assert np.array_equal(converted_box.low.value, box.low.flatten())
assert isinstance(converted_box.high, BooleanTensor)
assert np.array_equal(converted_box.high.shape, box.shape)
assert np.array_equal(converted_box.high.value, box.high.flatten())
def test_convert_byte_box_message():
box = ByteBox(
low=ByteTensor(value=bytes([1, 2]), shape=[1, 2]),
high=ByteTensor(value=bytes([2, 3]), shape=[1, 2]),
)
converted_box = py_converters.convert_box_message(box)
assert isinstance(converted_box, Box)
assert converted_box.dtype == np.int8
assert np.array_equal(box.low.shape, converted_box.shape)
assert np.array_equal(box.high.shape, converted_box.shape)
assert np.array_equal(box.low.value, bytes(converted_box.low.flatten()))
assert np.array_equal(box.high.value, bytes(converted_box.high.flatten()))
def test_convert_to_byte_box_message():
box = Box(
low=np.array([[1], [2]]), high=np.array([[3], [4]]), name=None, dtype=np.int8
)
converted_box = py_converters.convert_to_box_message(box)
assert isinstance(converted_box, ByteBox)
assert isinstance(converted_box.low, ByteTensor)
assert np.array_equal(converted_box.low.shape, box.shape)
assert np.array_equal(
np.frombuffer(converted_box.low.value, dtype=np.int8), box.low.flatten()
)
assert isinstance(converted_box.high, ByteTensor)
assert np.array_equal(converted_box.high.shape, box.shape)
assert np.array_equal(
np.frombuffer(converted_box.high.value, dtype=np.int8), box.high.flatten()
)
def test_convert_int64_box_message():
box = Int64Box(
low=Int64Tensor(value=[1, 2], shape=[1, 2]),
high=Int64Tensor(value=[2, 3], shape=[1, 2]),
)
converted_box = py_converters.convert_box_message(box)
assert isinstance(converted_box, Box)
assert converted_box.dtype == np.int64
assert np.array_equal(box.low.shape, converted_box.shape)
assert np.array_equal(box.high.shape, converted_box.shape)
assert np.array_equal(box.low.value, converted_box.low.flatten())
assert np.array_equal(box.high.value, converted_box.high.flatten())
def test_convert_to_int64_box_message():
box = Box(
low=np.array([[1], [2]]), high=np.array([[3], [4]]), name=None, dtype=np.int64
)
converted_box = py_converters.convert_to_box_message(box)
assert isinstance(converted_box, Int64Box)
assert isinstance(converted_box.low, Int64Tensor)
assert np.array_equal(converted_box.low.shape, box.shape)
assert np.array_equal(converted_box.low.value, box.low.flatten())
assert isinstance(converted_box.high, Int64Tensor)
assert np.array_equal(converted_box.high.shape, box.shape)
assert np.array_equal(converted_box.high.value, box.high.flatten())
def test_convert_float_box_message():
box = FloatBox(
low=FloatTensor(value=[1, 2], shape=[1, 2]),
high=FloatTensor(value=[2, 3], shape=[1, 2]),
)
converted_box = py_converters.convert_box_message(box)
assert isinstance(converted_box, Box)
assert converted_box.dtype == np.float32
assert np.array_equal(box.low.shape, converted_box.shape)
assert np.array_equal(box.high.shape, converted_box.shape)
assert np.array_equal(box.low.value, converted_box.low.flatten())
assert np.array_equal(box.high.value, converted_box.high.flatten())
def test_convert_to_float_box_message():
box = Box(
low=np.array([[1], [2]], dtype=np.float32),
high=np.array([[3], [4]], dtype=np.float32),
name=None,
dtype=np.float32,
)
converted_box = py_converters.convert_to_box_message(box)
assert isinstance(converted_box, FloatBox)
assert isinstance(converted_box.low, FloatTensor)
assert np.array_equal(converted_box.low.shape, box.shape)
assert np.array_equal(converted_box.low.value, box.low.flatten())
assert isinstance(converted_box.high, FloatTensor)
assert np.array_equal(converted_box.high.shape, box.shape)
assert np.array_equal(converted_box.high.value, box.high.flatten())
def test_convert_double_box_message():
box = DoubleBox(
low=DoubleTensor(value=[1, 2], shape=[1, 2]),
high=DoubleTensor(value=[2, 3], shape=[1, 2]),
)
converted_box = py_converters.convert_box_message(box)
assert isinstance(converted_box, Box)
assert converted_box.dtype == float
assert np.array_equal(box.low.shape, converted_box.shape)
assert np.array_equal(box.high.shape, converted_box.shape)
assert np.array_equal(box.low.value, converted_box.low.flatten())
assert np.array_equal(box.high.value, converted_box.high.flatten())
def test_convert_to_double_box_message():
box = Box(
low=np.array([[1.0], [2.0]]),
high=np.array([[3.0], [4.0]]),
name=None,
dtype=np.float64,
)
converted_box = py_converters.convert_to_box_message(box)
assert isinstance(converted_box, DoubleBox)
assert isinstance(converted_box.low, DoubleTensor)
assert np.array_equal(converted_box.low.shape, box.shape)
assert np.array_equal(converted_box.low.value, box.low.flatten())
assert isinstance(converted_box.high, DoubleTensor)
assert np.array_equal(converted_box.high.shape, box.shape)
assert np.array_equal(converted_box.high.value, box.high.flatten())
def test_convert_discrete_space_message():
message = DiscreteSpace(n=5)
converted_message = py_converters.convert_discrete_space_message(message)
assert message.n == converted_message.n
def test_convert_to_discrete_space_message():
space = Discrete(name=None, n=5)
converted_space = py_converters.convert_to_discrete_space_message(space)
assert isinstance(converted_space, DiscreteSpace)
assert converted_space.n == 5
def test_convert_to_named_discrete_space_message():
space = NamedDiscrete(name=None, items=["a", "b"])
converted_space = py_converters.convert_to_named_discrete_space_message(space)
assert isinstance(converted_space, NamedDiscreteSpace)
assert np.array_equal(space.names, converted_space.name)
def test_convert_named_discrete_space_message():
message = NamedDiscreteSpace(name=["a", "b", "c"])
converted_message = py_converters.convert_named_discrete_space_message(message)
assert isinstance(converted_message, NamedDiscrete)
assert np.array_equal(message.name, converted_message.names)
def test_convert_commandline_space_message():
message = CommandlineSpace(name=["a", "b", "c"])
converted_message = py_converters.convert_commandline_space_message(message)
assert isinstance(converted_message, Commandline)
assert np.array_equal(message.name, converted_message.names)
def test_convert_boolean_sequence_space():
seq = BooleanSequenceSpace(
length_range=Int64Range(min=1, max=2),
scalar_range=BooleanRange(min=True, max=False),
)
converted_seq = py_converters.convert_sequence_space(seq)
assert isinstance(converted_seq, Sequence)
assert converted_seq.dtype == bool
assert converted_seq.size_range[0] == 1
assert converted_seq.size_range[1] == 2
assert isinstance(converted_seq.scalar_range, Scalar)
assert converted_seq.scalar_range.min == True # noqa: E712
assert converted_seq.scalar_range.max == False # noqa: E712
def test_convert_to_boolean_sequence_space():
seq = Sequence(
name=None,
dtype=bool,
size_range=(1, 2),
scalar_range=Scalar(name=None, min=True, max=False, dtype=bool),
)
converted_seq = py_converters.convert_to_ranged_sequence_space(seq)
assert isinstance(converted_seq, BooleanSequenceSpace)
assert converted_seq.length_range.min == 1
assert converted_seq.length_range.max == 2
assert isinstance(converted_seq.scalar_range, BooleanRange)
assert converted_seq.scalar_range.min == True # noqa: E712
assert converted_seq.scalar_range.max == False # noqa: E712
def test_convert_bytes_sequence_space():
seq = BytesSequenceSpace(length_range=Int64Range(min=1, max=2))
converted_seq = py_converters.convert_sequence_space(seq)
assert isinstance(converted_seq, Sequence)
assert converted_seq.dtype == bytes
assert converted_seq.size_range[0] == 1
assert converted_seq.size_range[1] == 2
def test_convert_to_bytes_sequence_space():
seq = Sequence(name=None, dtype=bytes, size_range=(1, 2))
converted_seq = py_converters.convert_to_bytes_sequence_space(seq)
assert isinstance(converted_seq, BytesSequenceSpace)
assert converted_seq.length_range.min == 1
assert converted_seq.length_range.max == 2
def test_convert_byte_sequence_space():
seq = ByteSequenceSpace(
length_range=Int64Range(min=1, max=2), scalar_range=Int64Range(min=3, max=4)
)
converted_seq = py_converters.convert_sequence_space(seq)
assert isinstance(converted_seq, Sequence)
assert converted_seq.dtype == np.int8
assert converted_seq.size_range[0] == 1
assert converted_seq.size_range[1] == 2
assert isinstance(converted_seq.scalar_range, Scalar)
assert converted_seq.scalar_range.min == 3
assert converted_seq.scalar_range.max == 4
def test_convert_to_byte_sequence_space():
seq = Sequence(
name=None,
dtype=np.int8,
size_range=(1, 2),
scalar_range=Scalar(name=None, min=4, max=5, dtype=np.int8),
)
converted_seq = py_converters.convert_to_ranged_sequence_space(seq)
assert isinstance(converted_seq, ByteSequenceSpace)
assert converted_seq.length_range.min == 1
assert converted_seq.length_range.max == 2
assert isinstance(converted_seq.scalar_range, Int64Range)
assert converted_seq.scalar_range.min == 4
assert converted_seq.scalar_range.max == 5
def test_convert_int64_sequence_space():
seq = Int64SequenceSpace(
length_range=Int64Range(min=1, max=2), scalar_range=Int64Range(min=3, max=4)
)
converted_seq = py_converters.convert_sequence_space(seq)
assert isinstance(converted_seq, Sequence)
assert converted_seq.dtype == np.int64
assert converted_seq.size_range[0] == 1
assert converted_seq.size_range[1] == 2
assert isinstance(converted_seq.scalar_range, Scalar)
assert converted_seq.scalar_range.min == 3
assert converted_seq.scalar_range.max == 4
def test_convert_to_int64_sequence_space():
seq = Sequence(
name=None,
dtype=np.int64,
size_range=(1, 2),
scalar_range=Scalar(name=None, min=4, max=5, dtype=np.int64),
)
converted_seq = py_converters.convert_to_ranged_sequence_space(seq)
assert isinstance(converted_seq, Int64SequenceSpace)
assert converted_seq.length_range.min == 1
assert converted_seq.length_range.max == 2
assert isinstance(converted_seq.scalar_range, Int64Range)
assert converted_seq.scalar_range.min == 4
assert converted_seq.scalar_range.max == 5
def test_convert_float_sequence_space():
seq = FloatSequenceSpace(
length_range=Int64Range(min=1, max=2), scalar_range=FloatRange(min=3.1, max=4)
)
converted_seq = py_converters.convert_sequence_space(seq)
assert isinstance(converted_seq, Sequence)
assert converted_seq.dtype == np.float32
assert converted_seq.size_range[0] == 1
assert converted_seq.size_range[1] == 2
assert isinstance(converted_seq.scalar_range, Scalar)
assert np.isclose(converted_seq.scalar_range.min, 3.1)
assert converted_seq.scalar_range.max == 4
def test_convert_to_float_sequence_space():
seq = Sequence(
name=None,
dtype=np.float32,
size_range=(1, 2),
scalar_range=Scalar(name=None, min=4, max=5, dtype=np.float32),
)
converted_seq = py_converters.convert_to_ranged_sequence_space(seq)
assert isinstance(converted_seq, FloatSequenceSpace)
assert converted_seq.length_range.min == 1
assert converted_seq.length_range.max == 2
assert isinstance(converted_seq.scalar_range, FloatRange)
assert np.isclose(converted_seq.scalar_range.min, 4)
assert np.isclose(converted_seq.scalar_range.max, 5)
def test_convert_double_sequence_space():
seq = DoubleSequenceSpace(
length_range=Int64Range(min=1, max=2), scalar_range=DoubleRange(min=3.1, max=4)
)
converted_seq = py_converters.convert_sequence_space(seq)
assert isinstance(converted_seq, Sequence)
assert converted_seq.dtype == float
assert converted_seq.size_range[0] == 1
assert converted_seq.size_range[1] == 2
assert isinstance(converted_seq.scalar_range, Scalar)
assert converted_seq.scalar_range.min == 3.1
assert converted_seq.scalar_range.max == 4
def test_convert_to_double_sequence_space():
seq = Sequence(
name=None,
dtype=np.float64,
size_range=(1, 2),
scalar_range=Scalar(name=None, min=4.0, max=5.0, dtype=np.float64),
)
converted_seq = py_converters.convert_to_ranged_sequence_space(seq)
assert isinstance(converted_seq, DoubleSequenceSpace)
assert converted_seq.length_range.min == 1
assert converted_seq.length_range.max == 2
assert isinstance(converted_seq.scalar_range, DoubleRange)
assert converted_seq.scalar_range.min == 4.0
assert converted_seq.scalar_range.max == 5.0
def test_convert_string_space():
space = StringSpace(length_range=Int64Range(min=1, max=2))
converted_space = py_converters.convert_sequence_space(space)
assert isinstance(converted_space, Sequence)
assert converted_space.dtype == str
assert converted_space.size_range[0] == 1
assert converted_space.size_range[1] == 2
def test_convert_to_string_space():
space = Sequence(name=None, size_range=(1, 2), dtype=str)
converted_space = py_converters.convert_to_string_space(space)
assert isinstance(converted_space, StringSpace)
assert converted_space.length_range.min == 1
assert converted_space.length_range.max == 2
def test_convert_space_sequence_space():
space = Space(
space_sequence=SpaceSequenceSpace(
length_range=Int64Range(min=0, max=2),
space=Space(int64_value=Int64Range(min=-1, max=1)),
),
)
converted_space = py_converters.message_default_converter(space)
assert isinstance(converted_space, SpaceSequence)
assert converted_space.size_range[0] == space.space_sequence.length_range.min
assert converted_space.size_range[1] == space.space_sequence.length_range.max
assert isinstance(converted_space.space, Scalar)
assert np.dtype(converted_space.space.dtype) == np.int64
assert converted_space.space.min == space.space_sequence.space.int64_value.min
assert converted_space.space.max == space.space_sequence.space.int64_value.max
def test_space_message_default_converter():
message_converter = py_converters.TypeBasedConverter(
conversion_map={StringSpace: py_converters.convert_sequence_space}
)
space_converter = py_converters.SpaceMessageDefaultConverter(message_converter)
val = StringSpace(length_range=Int64Range(min=1, max=2))
space_message = Space(string_value=val)
converted_space = space_converter(space_message)
assert isinstance(converted_space, Sequence)
assert converted_space.dtype == str
assert converted_space.size_range[0] == 1
assert converted_space.size_range[1] == 2
def test_list_space_message_converter():
message_converter = py_converters.TypeBasedConverter(
conversion_map={StringSpace: py_converters.convert_sequence_space}
)
space_converter = py_converters.SpaceMessageDefaultConverter(message_converter)
list_converter = py_converters.ListSpaceMessageConverter(space_converter)
space_message = ListSpace(
space=[
Space(
string_value=StringSpace(length_range=Int64Range(min=1, max=2)),
)
]
)
converted_space = list_converter(space_message)
assert isinstance(converted_space, Tuple)
assert len(converted_space.spaces) == 1
assert converted_space.spaces[0].dtype == str
assert converted_space.spaces[0].size_range[0] == 1
assert converted_space.spaces[0].size_range[1] == 2
def test_tuple_to_list_space_message_converter():
to_message_converter = py_converters.TypeBasedConverter(
conversion_map={Discrete: py_converters.convert_to_discrete_space_message}
)
to_space_converter = py_converters.ToSpaceMessageConverter(to_message_converter)
to_list_converter = py_converters.ToListSpaceMessageConverter(to_space_converter)
space = Tuple(name=None, spaces=[Discrete(name=None, n=5)])
converted_space = to_list_converter(space)
assert isinstance(converted_space, ListSpace)
assert len(converted_space.space) == 1
assert isinstance(converted_space.space[0], Space)
assert hasattr(converted_space.space[0], "discrete")
assert converted_space.space[0].discrete.n == 5
def test_to_list_space_message_converter():
to_message_converter = py_converters.TypeBasedConverter(
conversion_map={Discrete: py_converters.convert_to_discrete_space_message}
)
to_space_converter = py_converters.ToSpaceMessageConverter(to_message_converter)
to_list_converter = py_converters.ToListSpaceMessageConverter(to_space_converter)
space = Tuple(name=None, spaces=[Discrete(name=None, n=5)])
converted_space = to_list_converter(space)
assert isinstance(converted_space, ListSpace)
assert len(converted_space.space) == 1
assert isinstance(converted_space.space[0], Space)
assert hasattr(converted_space.space[0], "discrete")
assert converted_space.space[0].discrete.n == 5
def test_dict_space_message_converter():
message_converter = py_converters.TypeBasedConverter(
conversion_map={StringSpace: py_converters.convert_sequence_space}
)
space_converter = py_converters.SpaceMessageDefaultConverter(message_converter)
dict_converter = py_converters.DictSpaceMessageConverter(space_converter)
space_message = DictSpace(
space={
"key": Space(
string_value=StringSpace(length_range=Int64Range(min=1, max=2)),
)
}
)
converted_space = dict_converter(space_message)
assert isinstance(converted_space, Dict)
assert len(converted_space.spaces) == 1
assert "key" in converted_space.spaces
assert converted_space.spaces["key"].dtype == str
assert converted_space.spaces["key"].size_range[0] == 1
assert converted_space.spaces["key"].size_range[1] == 2
def test_to_dict_space_message_converter():
to_message_converter = py_converters.TypeBasedConverter(
conversion_map={Discrete: py_converters.convert_to_discrete_space_message}
)
to_space_converter = py_converters.ToSpaceMessageConverter(to_message_converter)
to_dict_converter = py_converters.ToDictSpaceMessageConverter(to_space_converter)
space = Dict(name=None, spaces={"key": Discrete(name=None, n=5)})
converted_space = to_dict_converter(space)
assert isinstance(converted_space, DictSpace)
assert len(converted_space.space) == 1
assert "key" in converted_space.space
assert isinstance(converted_space.space["key"], Space)
assert hasattr(converted_space.space["key"], "discrete")
assert converted_space.space["key"].discrete.n == 5
def test_to_space_message_default_converter():
space = Tuple(
name=None,
spaces=[
Dict(
name=None,
spaces={"key": Box(name=None, low=0, high=1, shape=[1, 2])},
)
],
)
converted_space = py_converters.to_space_message_default_converter()(space)
assert isinstance(converted_space, Space)
assert isinstance(
converted_space.space_list.space[0].space_dict.space["key"].float_box,
FloatBox,
)
def test_opaque_json_message_converter():
message = Opaque(format="json://", data='{"key": "val"}'.encode("utf-8"))
converted_message = py_converters.message_default_converter(message)
assert isinstance(converted_message, Mapping)
assert len(converted_message) == 1
assert "key" in converted_message
assert converted_message["key"] == "val"
def test_type_id_dispatch_converter():
def default_converter(msg):
return msg.string_value + "_default"
conversion_map = {
"type_1": lambda msg: msg.string_value + "_type_1",
"type_2": lambda msg: msg.string_value + "_type_2",
}
type_id_converter = py_converters.TypeIdDispatchConverter(
default_converter=default_converter, conversion_map=conversion_map
)
assert type_id_converter(Event(string_value="msg_val")) == "msg_val_default"
assert (
type_id_converter(Event(string_value="msg_val", type_id="type_1"))
== "msg_val_type_1"
)
assert (
type_id_converter(Event(string_value="msg_val", type_id="type_2"))
== "msg_val_type_2"
)
def test_convert_permutation_space_message():
msg = Space(
type_id="permutation",
int64_sequence=Int64SequenceSpace(
length_range=Int64Range(min=5, max=5), scalar_range=Int64Range(min=0, max=4)
),
)
permutation = py_converters.message_default_converter(msg)
assert isinstance(permutation, Permutation)
assert permutation.scalar_range.min == 0
assert permutation.scalar_range.max == 4
assert permutation.size_range[0] == 5
assert permutation.size_range[1] == 5
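    # A permutation space needs a fixed length (length_range.min == max); the
    # variable-length message below should therefore be rejected.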
invalid_permutation_space_msg = Space(
type_id="permutation",
int64_sequence=Int64SequenceSpace(
length_range=Int64Range(min=3, max=5), scalar_range=Int64Range(min=0, max=4)
),
)
with pytest.raises(ValueError, match="Invalid permutation space message"):
py_converters.message_default_converter(invalid_permutation_space_msg)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/service/proto/py_converters_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/service/proto/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/service/runtime/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/service/runtime:benchmark_cache."""
import pytest
from compiler_gym.service.proto import Benchmark, File
from compiler_gym.service.runtime.benchmark_cache import BenchmarkCache, logger
from tests.test_main import main
def make_benchmark_of_size(size_in_bytes: int, target: int = 0) -> Benchmark:
"""Test helper. Generate a benchmark of the given size in bytes."""
target = target or size_in_bytes
bm = Benchmark(program=File(contents=("." * target).encode("utf-8")))
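    # The protobuf wrapping adds a few bytes of framing overhead, so if the
    # first attempt misses the target, recurse with an adjusted payload size
    # until ByteSize() matches exactly.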
size_offset = bm.ByteSize() - size_in_bytes
if size_offset:
return make_benchmark_of_size(size_in_bytes, size_in_bytes - size_offset)
return bm
@pytest.mark.parametrize("size", [5, 10, 100, 1024])
def test_make_benchmark_of_size(size: int):
"""Sanity check for test helper function."""
assert make_benchmark_of_size(size).ByteSize() == size
def test_oversized_benchmark_triggers_evict_to_capacity(mocker):
cache = BenchmarkCache(max_size_in_bytes=10)
mocker.spy(cache, "evict_to_capacity")
cache["test"] = make_benchmark_of_size(50)
assert cache.size == 1
assert cache.size_in_bytes == 50
cache.evict_to_capacity.assert_called_once()
def test_replace_existing_item():
cache = BenchmarkCache()
cache["a"] = make_benchmark_of_size(30)
assert cache.size == 1
assert cache.size_in_bytes == 30
cache["a"] = make_benchmark_of_size(50)
assert cache.size == 1
assert cache.size_in_bytes == 50
def test_evict_to_capacity_on_max_size_reached(mocker):
"""Test that cache is evict_to_capacityd when the maximum size is exceeded."""
cache = BenchmarkCache(max_size_in_bytes=100)
mocker.spy(cache, "evict_to_capacity")
mocker.spy(logger, "info")
cache["a"] = make_benchmark_of_size(30)
cache["b"] = make_benchmark_of_size(30)
cache["c"] = make_benchmark_of_size(30)
assert cache.evict_to_capacity.call_count == 0
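    # A fourth 30-byte benchmark would exceed the 100-byte limit, so two
    # existing entries are evicted before the insert (leaving one 30-byte item,
    # as the log assertion below records).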
cache["d"] = make_benchmark_of_size(30)
assert cache.evict_to_capacity.call_count == 1
assert cache.size == 2
assert cache.size_in_bytes == 60
logger.info.assert_called_once_with(
"Evicted %d benchmarks from cache. Benchmark cache size now %d bytes, "
"%d items",
2,
30,
1,
)
def test_oversized_benchmark_emits_warning(mocker):
"""Test that a warning is emitted when a single item is larger than the
entire target cache size.
"""
cache = BenchmarkCache(max_size_in_bytes=10)
mocker.spy(logger, "warning")
cache["test"] = make_benchmark_of_size(50)
logger.warning.assert_called_once_with(
"Adding new benchmark with size %d bytes exceeds total target cache "
"size of %d bytes",
50,
10,
)
def test_contains():
cache = BenchmarkCache(max_size_in_bytes=100)
cache["a"] = make_benchmark_of_size(30)
assert "a" in cache
assert "b" not in cache
def test_getter():
cache = BenchmarkCache(max_size_in_bytes=100)
a = make_benchmark_of_size(30)
b = make_benchmark_of_size(40)
cache["a"] = a
cache["b"] = b
assert cache["a"] == a
assert cache["a"] != b
assert cache["b"] == b
with pytest.raises(KeyError, match="c"):
cache["c"]
def test_evict_to_capacity_on_maximum_size_update(mocker):
"""Test that cache is evict_to_capacityd when the maximum size is exceeded."""
cache = BenchmarkCache(max_size_in_bytes=100)
mocker.spy(cache, "evict_to_capacity")
mocker.spy(logger, "info")
cache["a"] = make_benchmark_of_size(30)
cache["b"] = make_benchmark_of_size(30)
cache["c"] = make_benchmark_of_size(30)
assert cache.evict_to_capacity.call_count == 0
cache.max_size_in_bytes = 50
assert cache.evict_to_capacity.call_count == 1
assert cache.size_in_bytes == 30
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/service/runtime/benchmark_cache_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/views/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/views."""
import pytest
from compiler_gym.views import RewardView
from tests.test_main import main
class MockReward:
def __init__(self, name, ret=None):
self.name = name
self.ret = list(reversed(ret or []))
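        # Stored reversed so update() can pop the next value off the end while
        # still yielding them in the order originally given.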
self.observation_spaces = []
def update(self, *args, **kwargs):
ret = self.ret[-1]
del self.ret[-1]
return ret
class MockObservationView:
pass
def test_empty_space():
reward = RewardView([], MockObservationView())
with pytest.raises(ValueError) as ctx:
_ = reward["foo"]
assert str(ctx.value) == "No reward spaces"
def test_invalid_reward_name():
reward = RewardView([MockReward(name="foo")], MockObservationView())
with pytest.raises(KeyError):
_ = reward["invalid"]
def test_reward_values():
spaces = [
MockReward(name="codesize", ret=[-5]),
MockReward(name="runtime", ret=[10]),
]
reward = RewardView(spaces, MockObservationView())
value = reward["codesize"]
assert value == -5
value = reward["runtime"]
assert value == 10
def test_reward_values_bound_methods():
spaces = [
MockReward(name="codesize", ret=[-5]),
MockReward(name="runtime", ret=[10]),
]
reward = RewardView(spaces, MockObservationView())
value = reward.codesize()
assert value == -5
value = reward.runtime()
assert value == 10
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/views/reward_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/views."""
import numpy as np
import pytest
from compiler_gym.errors import ServiceError
from compiler_gym.service.proto import (
DoubleBox,
DoubleTensor,
Int64Box,
Int64Range,
Int64Tensor,
ObservationSpace,
Space,
StringSpace,
)
from compiler_gym.views import ObservationView
from tests.test_main import main
class MockRawStep:
"""Mock for the raw_step callack of ObservationView."""
def __init__(self, ret=None):
self.called_observation_spaces = []
self.ret = list(reversed(ret or [None]))
def __call__(self, actions, observation_spaces, reward_spaces):
assert not actions
assert len(observation_spaces) == 1
assert not reward_spaces
self.called_observation_spaces.append(observation_spaces[0].id)
ret = self.ret[-1]
del self.ret[-1]
return [ret], [], False, {}
def test_empty_space():
with pytest.raises(ValueError) as ctx:
ObservationView(MockRawStep(), [])
assert str(ctx.value) == "No observation spaces"
def test_observed_value_types():
spaces = [
ObservationSpace(
name="ir",
space=Space(string_value=StringSpace(length_range=Int64Range(min=0))),
),
ObservationSpace(
name="features",
space=Space(
int64_box=Int64Box(
low=Int64Tensor(shape=[2], value=[-100, -100]),
high=Int64Tensor(shape=[2], value=[100, 100]),
),
),
),
ObservationSpace(
name="dfeat",
space=Space(
double_box=DoubleBox(
low=DoubleTensor(shape=[1], value=[0.5]),
high=DoubleTensor(shape=[1], value=[2.5]),
),
),
),
ObservationSpace(
name="binary",
space=Space(int64_value=Int64Range(min=5, max=5)),
),
]
mock = MockRawStep(
ret=[
"Hello, IR",
[1.0, 2.0],
[-5, 15],
b"Hello, bytes\0",
"Hello, IR",
[1.0, 2.0],
[-5, 15],
b"Hello, bytes\0",
]
)
observation = ObservationView(mock, spaces)
value = observation["ir"]
assert isinstance(value, str)
assert value == "Hello, IR"
value = observation["dfeat"]
np.testing.assert_array_almost_equal(value, [1.0, 2.0])
value = observation["features"]
np.testing.assert_array_equal(value, [-5, 15])
value = observation["binary"]
assert value == b"Hello, bytes\0"
# Check that the correct observation_space_list indices were used.
assert mock.called_observation_spaces == ["ir", "dfeat", "features", "binary"]
mock.called_observation_spaces = []
# Repeat the above tests using the generated bound methods.
value = observation.ir()
assert isinstance(value, str)
assert value == "Hello, IR"
value = observation.dfeat()
np.testing.assert_array_almost_equal(value, [1.0, 2.0])
value = observation.features()
np.testing.assert_array_equal(value, [-5, 15])
value = observation.binary()
assert value == b"Hello, bytes\0"
# Check that the correct observation_space_list indices were used.
assert mock.called_observation_spaces == ["ir", "dfeat", "features", "binary"]
def test_observation_when_raw_step_returns_incorrect_no_of_observations():
"""Test that a ServiceError is propagated when raw_step() returns unexpected
number of observations."""
def make_failing_raw_step(n: int):
def failing_raw_step(*args, **kwargs):
"""A callback that returns done=True."""
del args # Unused
del kwargs # Unused
return ["ir"] * n, None, False, {}
return failing_raw_step
spaces = [
ObservationSpace(
name="ir",
space=Space(int64_value=Int64Range(min=0)),
)
]
observation = ObservationView(make_failing_raw_step(0), spaces)
with pytest.raises(
ServiceError, match=r"^Expected 1 'ir' observation but the service returned 0$"
):
observation["ir"]
observation = ObservationView(make_failing_raw_step(3), spaces)
with pytest.raises(
ServiceError, match=r"^Expected 1 'ir' observation but the service returned 3$"
):
observation["ir"]
def test_observation_when_raw_step_returns_done():
"""Test that a SessionNotFoundError from the raw_step() callback propagates as a"""
def make_failing_raw_step(error_msg=None):
def failing_raw_step(*args, **kwargs):
"""A callback that returns done=True."""
info = {}
if error_msg:
info["error_details"] = error_msg
return [], None, True, info
return failing_raw_step
spaces = [
ObservationSpace(
name="ir",
space=Space(int64_value=Int64Range(min=0)),
)
]
observation = ObservationView(make_failing_raw_step(), spaces)
with pytest.raises(ServiceError, match=r"^Failed to compute observation 'ir'$"):
observation["ir"] # pylint: disable=pointless-statement
observation = ObservationView(make_failing_raw_step("Oh no!"), spaces)
with pytest.raises(
ServiceError, match=r"^Failed to compute observation 'ir': Oh no!$"
):
observation["ir"] # pylint: disable=pointless-statement
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/views/observation_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Evaluate tabular_q policy for leaderboard."""
import os
import sys
from typing import Dict
from absl import app, flags
from compiler_gym.envs import LlvmEnv
from compiler_gym.leaderboard.llvm_instcount import eval_llvm_instcount_policy
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + "/../../../examples")
from tabular_q import ( # noqa pylint: disable=wrong-import-position
StateActionTuple,
rollout,
train,
)
FLAGS = flags.FLAGS
def train_and_run(env: LlvmEnv) -> None:
"""Run tabular Q learning on an environment"""
FLAGS.log_every = 0 # Disable printing to stdout
q_table: Dict[StateActionTuple, float] = {}
env.observation_space = "Autophase"
training_env = env.fork()
train(q_table, training_env)
training_env.close()
rollout(q_table, env, printout=False)
if __name__ == "__main__":
app.run(eval_llvm_instcount_policy(train_and_run))
|
CompilerGym-development
|
leaderboard/llvm_instcount/tabular_q/tabular_q_eval.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for //leaderboard/llvm_instcount/tabular_q_eval."""
import pytest
from absl import flags
from compiler_gym.leaderboard.llvm_instcount import eval_llvm_instcount_policy
from leaderboard.llvm_instcount.tabular_q.tabular_q_eval import train_and_run
from tests.test_main import main as _test_main
FLAGS = flags.FLAGS
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_tabular_q():
FLAGS.unparse_flags()
FLAGS(
[
"argv0",
"--n=1",
"--max_benchmarks=1",
"--nproc=1",
"--novalidate",
]
)
with pytest.raises(SystemExit):
eval_llvm_instcount_policy(train_and_run)
if __name__ == "__main__":
_test_main()
|
CompilerGym-development
|
leaderboard/llvm_instcount/tabular_q/tabular_q_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""An implementation of a random search policy for the LLVM codesize task.
The search is the same as the included compiler_gym.bin.random_search. See
README.md in this directory for a detailed description.
"""
from time import sleep
import gym
from absl import flags
from compiler_gym.envs import LlvmEnv
from compiler_gym.leaderboard.llvm_instcount import eval_llvm_instcount_policy
from compiler_gym.random_search import RandomAgentWorker
flags.DEFINE_float(
"patience_ratio",
1.0,
"The ratio of patience to the size of the action space. "
"Patience = patience_ratio * action_space_size",
)
flags.DEFINE_integer(
"search_time",
60,
"The minimum number of seconds to run the random search for. After this "
"many seconds have elapsed the best results are aggregated from the "
"search threads and the search is terminated.",
)
FLAGS = flags.FLAGS
def random_search(env: LlvmEnv) -> None:
"""Run a random search on the given environment."""
patience = int(env.action_space.n * FLAGS.patience_ratio)
# Start parallel random search workers.
workers = [
RandomAgentWorker(
make_env=lambda: gym.make("llvm-ic-v0", benchmark=env.benchmark),
patience=patience,
)
for _ in range(FLAGS.nproc)
]
for worker in workers:
worker.start()
sleep(FLAGS.search_time)
# Stop the workers.
for worker in workers:
worker.alive = False
for worker in workers:
worker.join()
# Aggregate the best results.
best_actions = []
best_reward = -float("inf")
for worker in workers:
if worker.best_returns > best_reward:
best_reward, best_actions = worker.best_returns, list(worker.best_actions)
# Replay the best sequence of actions to produce the final environment
# state.
for action in best_actions:
_, _, done, _ = env.step(action)
assert not done
if __name__ == "__main__":
eval_llvm_instcount_policy(random_search)
|
CompilerGym-development
|
leaderboard/llvm_instcount/random_search/random_search.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for //leaderboard/llvm_instcount/random_search."""
import pytest
from leaderboard.llvm_instcount.random_search.random_search import (
eval_llvm_instcount_policy,
random_search,
)
from tests.pytest_plugins.common import set_command_line_flags
from tests.test_main import main as _test_main
def test_random_search():
set_command_line_flags(
[
"argv0",
"--n=1",
"--max_benchmarks=1",
"--search_time=1",
"--nproc=1",
"--patience_ratio=0.1",
"--novalidate",
]
)
with pytest.raises(SystemExit):
eval_llvm_instcount_policy(random_search)
if __name__ == "__main__":
_test_main()
|
CompilerGym-development
|
leaderboard/llvm_instcount/random_search/random_search_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for //leaderboard/llvm_instcount/e_greedy."""
import sys
from concurrent.futures import ThreadPoolExecutor
import pytest
from absl import flags
from compiler_gym.envs import LlvmEnv
from compiler_gym.leaderboard.llvm_instcount import eval_llvm_instcount_policy
from leaderboard.llvm_instcount.e_greedy.e_greedy import (
e_greedy_search,
select_best_action,
)
from tests.test_main import main as _test_main
FLAGS = flags.FLAGS
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_random_search():
sys.argv = [
"argv0",
"--n=1",
"--max_benchmarks=1",
"--nproc=1",
"--novalidate",
]
with pytest.raises(SystemExit):
eval_llvm_instcount_policy(e_greedy_search)
def test_select_best_action_closed_environment(env: LlvmEnv):
"""Test that select_best_action() recovers from an environment whose service
has closed."""
env.reward_space = "IrInstructionCount"
env.reset(benchmark="cbench-v1/crc32")
with ThreadPoolExecutor() as executor:
best_a = select_best_action(env, executor)
env.close()
best_b = select_best_action(env, executor)
assert best_a == best_b
if __name__ == "__main__":
_test_main()
|
CompilerGym-development
|
leaderboard/llvm_instcount/e_greedy/e_greedy_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""ϵ-greedy policy for LLVM codesize."""
import logging
import random
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import NamedTuple
from absl import flags
from compiler_gym.envs import CompilerEnv, LlvmEnv
from compiler_gym.leaderboard.llvm_instcount import eval_llvm_instcount_policy
flags.DEFINE_float(
"epsilon", 0, "The ratio of patience to the size of the action space. "
)
FLAGS = flags.FLAGS
class RewardAction(NamedTuple):
"""An action -> reward tuple for a single step()."""
    # Use reward as the first element in the tuple as the reward is used for ordering.
reward: float
action: int
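# A minimal illustration of why reward is the first field: NamedTuple comparison
# is lexicographic, so a higher reward always wins and the action index only
# breaks ties. The values and the helper name below are made up for
# demonstration and are not used by the search.
def _example_reward_action_ordering() -> bool:
    return RewardAction(reward=0.5, action=0) > RewardAction(reward=0.1, action=9)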
def select_best_action(env: CompilerEnv, executor: ThreadPoolExecutor) -> RewardAction:
"""Determine the best action by trying all possible options and ranking them."""
def eval_action(fkd: CompilerEnv, action: int) -> RewardAction:
"""Evaluate the given action."""
try:
_, reward, _, _ = fkd.step(action)
finally:
fkd.close()
return RewardAction(reward=reward, action=action)
# Select the best action using the reward that the action produces, then
# action index as a tie-breaker. Do this by creating n forks of the
    # environment, one for every action, and evaluating the actions in parallel
# threads. Note that calls to fork() occur in the main thread for thread
# safety in case of environment restart.
futures = (
executor.submit(eval_action, env.fork(), action)
for action in range(env.action_space.n)
)
best_reward_action = RewardAction(reward=-float("inf"), action=0)
for future in as_completed(futures):
reward_action: RewardAction = future.result()
if reward_action > best_reward_action:
best_reward_action = reward_action
return best_reward_action
def e_greedy_search(env: LlvmEnv) -> None:
"""Run an ϵ-greedy search on an environment."""
step_count = 0
with ThreadPoolExecutor(max_workers=FLAGS.nproc) as executor:
while True:
step_count += 1
if random.random() < FLAGS.epsilon:
# Exploratory step. Randomly select and apply an action.
action = env.action_space.sample()
_, reward, done, _ = env.step(action)
logging.debug(
"Step %d, exploratory action %s, reward %.4f, cumulative %.4f",
step_count,
env.action_space.flags[action],
reward,
env.episode_reward,
)
else:
# Select the best reward and apply it, or terminate the search
# if no positive reward is attainable.
best = select_best_action(env, executor)
if best.reward <= 0:
logging.debug(
"Greedy search terminated after %d steps, "
"no further reward attainable",
step_count,
)
done = True
else:
_, reward, done, _ = env.step(best.action)
logging.debug(
"Step %d, greedy action %s, reward %.4f, cumulative %.4f",
step_count,
env.action_space.flags[best.action],
reward,
env.episode_reward,
)
if env.reward_space.deterministic and reward != best.reward:
logging.warning(
"Action %s produced different reward on replay, %.4f != %.4f",
env.action_space.flags[best.action],
best.reward,
reward,
)
# Stop the search if we have reached a terminal state.
if done:
return
if __name__ == "__main__":
eval_llvm_instcount_policy(e_greedy_search)
|
CompilerGym-development
|
leaderboard/llvm_instcount/e_greedy/e_greedy.py
|
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""A script to auto-populate RST files from the CompilerGym header files.
Usage:
$ python generate_cc_rst.py
"""
import os
from pathlib import Path
from typing import List
SOURCES = Path("../compiler_gym")
OUTPUT_DIR = Path("source/cc")
def header(message, underline="="):
underline = underline * (len(str(message)) // len(underline))
return f"{message}\n{underline}"
def main():
valid_files: List[Path] = []
for root, _, files in os.walk(SOURCES):
if "third_party" in root:
continue
headers = [
f
for f in files
if (f.endswith(".h") or f.endswith(".proto")) and not f.endswith("Impl.h")
]
if not headers:
continue
while root.startswith("../"):
root = root[len("../") :]
root = Path(root)
(OUTPUT_DIR / root).parent.mkdir(parents=True, exist_ok=True)
output_path = Path(f"{OUTPUT_DIR / root}.rst")
valid_files.append(output_path)
print("Generating", output_path)
with open(output_path, "w") as f:
print(header(str(root)), file=f)
print(file=f)
print(".. contents::", file=f)
print(" :local:", file=f)
for header_name in headers:
print(file=f)
print(header(header_name, "-"), file=f)
print(file=f)
print(f':code:`#include "{root}/{header_name}"`', file=f)
print(file=f)
print(f".. doxygenfile:: {root}/{header_name}", file=f)
for root, _, files in os.walk(OUTPUT_DIR):
for file in files:
path = Path(root) / file
if path not in valid_files:
print("rm", path)
path.unlink()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
docs/generate_cc_rst.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Configuration file for the Sphinx documentation builder.
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import sphinx.errors
sphinx.application.ExtensionError = sphinx.errors.ExtensionError
# -- Project information -----------------------------------------------------
project = "CompilerGym"
copyright = "Meta Platforms, Inc"
author = "Meta Platforms, Inc"
# Read the version from the //:VERSION file.
with open("../../VERSION") as f:
version = f.read().strip()
release = version
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
"sphinx_rtd_theme",
"sphinx.ext.autosectionlabel",
"sphinxemoji.sphinxemoji",
"breathe",
"sphinx_reredirects",
]
autosectionlabel_prefix_document = True
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
html_theme = "sphinx_rtd_theme"
html_theme_options = {
"analytics_id": "G-WJN2CKJJKH",
"collapse_navigation": True,
"display_version": True,
"logo_only": True,
}
html_css_files = [
"css/custom.css",
]
html_static_path = ["_static"]
html_logo = "_static/img/logo.png"
html_favicon = "_static/img/favicon.png"
# -- Breathe configuration ---------------------------------------------------
breathe_default_project = "CompilerGym"
breathe_projects = {"CompilerGym": "../doxygen/xml"}
redirects = {
"explorer/index.html": "https://compilergym.metademolab.com/",
}
|
CompilerGym-development
|
docs/source/conf.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""A CompilerGym API and web frontend.
This exposes an API with two operations:
1. /api/v4/describe
Describe the CompilerGym interface. This generates a list of action
names and their numeric values, a list of benchmark datasets and the
benchmarks within them, and a list of reward spaces.
Example usage:
$ curl localhost:5000/api/v4/describe
{
"actions": {
"-adce": 1,
...
"-tailcallelim": 122
},
"benchmarks": {
"benchmark://anghabench-v1": [
"8cc/extr_buffer.c_buf_append",
...
"8cc/extr_buffer.c_quote_cstring_len"
],
"benchmark://blas-v0": [
...
],
"benchmark://cbench-v1": [
"adpcm",
...
"jpeg-c"
],
...
},
"rewards": [
"IrInstructionCount",
...
"ObjectTextSizeOz"
]
}
    2. /api/v4/step
Compute the state from the given environment description. Query
arguments:
benchmark: The name of the benchmark. If "benchmark_source" is set
(see below), this is the name of the local file that the user
selected.
benchmark_source: An inline string of code to use as the benchmark.
reward: The name of the reward signal to use.
        actions: An optional, comma-separated list of actions to run.
        all_states: An optional flag. If set to "1", a list of states is
            returned, one for each action. Otherwise, only the state for
            the final action is returned.
Example usage:
$ curl 'localhost:5000/api/v4/step?benchmark=benchmark://cbench-v1/adpcm&reward=IrInstructionCountOz&actions=1,2,3'
{
"commandline": "opt - ...",
"rewards": [0.003],
"done": false,
"ir": "...",
"states": [
{
"instcount": {...},
"autophase": {...},
"reward": 0.003
},
]
}
"""
import logging
import os
import sys
import tempfile
from functools import lru_cache
from itertools import islice
from pathlib import Path
from threading import Lock
from typing import Any, Dict, List, Optional
from flask import Flask, jsonify, request, send_file
from flask_cors import CORS
from pydantic import BaseModel
import compiler_gym
from compiler_gym.datasets.benchmark import Benchmark
from compiler_gym.envs import LlvmEnv
from compiler_gym.envs.llvm import make_benchmark
from compiler_gym.util.truncate import truncate
app = Flask("compiler_gym")
CORS(app)
resource_dir: Path = (Path(__file__).parent / "frontends/compiler_gym/build").absolute()
logger = logging.getLogger(__name__)
# A single compiler environment that is used to serve all endpoints.
env: LlvmEnv = compiler_gym.make("llvm-v0")
env_lock = Lock()
class StateToVisualize(BaseModel):
"""Encapsulates the state to visualize in the frontend."""
instcount: Dict[str, int]
autophase: Dict[str, int]
# The reward signal measures how "good" the previous action was. Over time
# the sequence of actions that produces the highest cumulative reward is the
# best:
reward: float
class StepRequest(BaseModel):
"""User arguments to /api/v4/step."""
# The name of the benchmark.
benchmark: str
# The inline source code for a benchmark.
benchmark_source: Optional[str]
# The reward space to use.
reward: str
# A comma-separated list of actions to perform.
actions: List[int]
# Whether to return a state for every action, or only the final action. See
# StepReply.states.
all_states: bool
@classmethod
def from_request(cls):
"""Parse the arguments from Flask's request arguments."""
def required_arg(name: str) -> str:
value = request.args.get(name)
if not value:
raise ValueError(f"Missing requirement argument: {name}")
return value
actions_str: str = request.args.get("actions")
actions: List[int] = (
[int(x) for x in actions_str.split(",")] if actions_str else []
)
return cls(
benchmark=required_arg("benchmark"),
benchmark_source=request.args.get("benchmark_source"),
reward=required_arg("reward"),
actions=actions,
all_states=request.args.get("all_states", "0") == "1",
)
class StepReply(BaseModel):
"""The data returned by a call to /api/v4/step."""
# This summarizes the sequence of actions that the user has selected so far:
commandline: str
# If the compiler environment dies, crashes, or encounters some
# unrecoverable error, this "done" flag is set. At this point the user
# should start a new session.
done: bool
# The current LLVM-IR:
ir: str
# A list of states to visualize, ordered from first to last.
states: List[StateToVisualize]
@app.route("/api/v4/describe")
def describe():
with env_lock:
env.reset()
return jsonify(
{
# A mapping from dataset name to benchmark name. To generate a full
# benchmark URI, join the two values with a '/'. E.g. given a benchmark
# "qsort" in the dataset "benchmark://cbench-v1", the full URI is
# "benchmark://cbench-v1/qsort".
"benchmarks": {
dataset.name: list(
islice(
(
x[len(dataset.name) + 1 :]
for x in dataset.benchmark_uris()
),
10,
)
)
for dataset in env.datasets
},
# A mapping from the name of an action to the numeric value. This
# numeric value is what is passed as argument to the step() function.
"actions": {k: v for v, k in enumerate(env.action_space.flags)},
# A list of reward space names. You select the reward space to use
# during start().
"rewards": sorted(list(env.reward.spaces.keys())),
}
)
@lru_cache(maxsize=16)
def _make_benchmark(name: str, source: str) -> Benchmark:
"""Construct a benchmark from a file name and contents."""
with tempfile.TemporaryDirectory() as d:
tmpfile = Path(d) / Path(name).name
with open(tmpfile, "w") as f:
f.write(source)
try:
return make_benchmark(tmpfile, timeout=60)
except Exception as e:
raise ValueError(f"Failed to compiler benchmark {name}: {e}")
def _step(request: StepRequest) -> StepReply:
"""Run the actual step with parsed arguments."""
states: List[StateToVisualize] = []
with env_lock:
env.reward_space = request.reward
# Create a benchmark from user-supplied code, or just look up the
# benchmark by name.
if request.benchmark_source:
benchmark = _make_benchmark(request.benchmark, request.benchmark_source)
else:
benchmark = request.benchmark
env.reset(benchmark=benchmark)
# Replay all actions except the last one.
if request.all_states:
# Replay actions one at a time to receive incremental rewards. The
# first item represents the state prior to any actions.
(instcount, autophase), _, done, info = env.multistep(
actions=[],
observation_spaces=[
env.observation.spaces["InstCountDict"],
env.observation.spaces["AutophaseDict"],
],
)
if done:
raise ValueError(
f"Failed to compute initial state: {info['error_details']}"
)
states.append(
StateToVisualize(
instcount=instcount,
autophase=autophase,
reward=0,
)
)
for action in request.actions[:-1]:
(instcount, autophase), reward, done, info = env.step(
action,
observation_spaces=[
env.observation.spaces["InstCountDict"],
env.observation.spaces["AutophaseDict"],
],
)
states.append(
StateToVisualize(
instcount=instcount,
autophase=autophase,
reward=reward,
)
)
if done:
raise ValueError(
f"Failed to apply action {action}: {info['error_details']}"
)
else:
# Replay actions in a single batch.
_, _, done, info = env.step(request.actions[:-1])
if done:
raise ValueError(
f"Failed to apply actions {request.actions}: {info['error_details']}"
)
# Perform the final action.
(ir, instcount, autophase), (reward,), done, _ = env.multistep(
actions=request.actions[-1:],
observation_spaces=[
env.observation.spaces["Ir"],
env.observation.spaces["InstCountDict"],
env.observation.spaces["AutophaseDict"],
],
reward_spaces=[env.reward_space],
)
states.append(
StateToVisualize(
instcount=instcount,
autophase=autophase,
reward=reward,
)
)
return StepReply(
commandline=env.action_space.to_string(env.actions),
done=done,
ir=truncate(ir, max_line_len=250, max_lines=1024),
states=states,
)
@app.route("/api/v4/step")
def step() -> Dict[str, Any]:
try:
request = StepRequest.from_request()
except ValueError as e:
return jsonify({"error": f"Invalid actions: {e}"}), 400
try:
return jsonify(_step(request).dict())
except Exception as e:
return jsonify({"error": str(e)}), 400
# Web endpoints.
@app.route("/")
def index_resource():
return send_file(resource_dir / "index.html")
@app.route("/<path>")
def root_resource(path: str):
return send_file(resource_dir / path)
@app.route("/static/css/<path>")
def css_resource(path: str):
return send_file(resource_dir / "static/css/" / path)
@app.route("/static/js/<path>")
def js_resource(path: str):
return send_file(resource_dir / "static/js/" / path)
if __name__ == "__main__":
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stderr)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.info("Serving from %s", resource_dir)
app.run(port=int(os.environ.get("PORT", "5000")), host="0.0.0.0")
|
CompilerGym-development
|
www/www.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/bin:random_walk."""
import re
from absl.flags import FLAGS
from random_walk import run_random_walk
import compiler_gym
from compiler_gym.util.capture_output import capture_output
def test_run_random_walk_smoke_test():
FLAGS.unparse_flags()
FLAGS(["argv0"])
with capture_output() as out:
with compiler_gym.make("llvm-autophase-ic-v0") as env:
env.benchmark = "cbench-v1/crc32"
run_random_walk(env=env, step_count=5)
print(out.stdout)
# Note the ".*" before and after the step count to ignore the shell
# formatting.
assert re.search(r"Completed .*5.* steps in ", out.stdout)
|
CompilerGym-development
|
examples/random_walk_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import explore
def test_run_explore_smoke_test(capsys):
explore.main(
[
"explore",
"--env=llvm-ic-v0",
"--benchmark=cbench-v1/dijkstra",
"--episode_length=2",
"--explore_actions=-newgvn,-instcombine,-mem2reg",
"--nproc=2",
]
)
out, _ = capsys.readouterr()
assert "depth 2 of 2" in out
|
CompilerGym-development
|
examples/explore_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for //compiler_gym/bin:actor_critic."""
import sys
from absl import flags
from actor_critic import main
from compiler_gym.util.capture_output import capture_output
FLAGS = flags.FLAGS
def test_run_actor_critic_smoke_test():
flags = [
"argv0",
"--seed=0",
"--episode_len=2",
"--episodes=10",
"--log_interval=5",
"--benchmark=cbench-v1/crc32",
]
sys.argv = flags
FLAGS.unparse_flags()
FLAGS(flags)
with capture_output() as out:
main(["argv0"])
assert "Final performance (avg reward)" in out.stdout
|
CompilerGym-development
|
examples/actor_critic_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Simple compiler gym tabular q learning example.
Usage: python tabular_q.py --benchmark=<benchmark>
Using selected features from the Autophase observation space, and a specific
training program as the gym environment, find the best action sequence using
online Q-learning.
"""
import random
from typing import Dict, NamedTuple
import gym
from absl import app, flags
import compiler_gym.util.flags.episode_length # noqa Flag definition.
import compiler_gym.util.flags.episodes # noqa Flag definition.
import compiler_gym.util.flags.learning_rate # noqa Flag definition.
from compiler_gym.util.flags.benchmark_from_flags import benchmark_from_flags
from compiler_gym.util.timer import Timer
flags.DEFINE_list(
"tabular_q_actions",
[
"-break-crit-edges",
"-early-cse-memssa",
"-gvn-hoist",
"-gvn",
"-instcombine",
"-instsimplify",
"-jump-threading",
"-loop-reduce",
"-loop-rotate",
"-loop-versioning",
"-mem2reg",
"-newgvn",
"-reg2mem",
"-simplifycfg",
"-sroa",
],
"A list of action names to explore from.",
)
flags.DEFINE_float("discount", 1.0, "The discount factor.")
flags.DEFINE_list(
"features_indices",
[19, 22, 51],
"Indices of Alphaphase features that are used to construct a state",
)
flags.DEFINE_integer(
"log_every", 50, "number of episode interval where progress is reported."
)
flags.DEFINE_float("epsilon", 0.2, "Epsilon rate of exploration. ")
FLAGS = flags.FLAGS
class StateActionTuple(NamedTuple):
"""An state action tuple used as q-table keys"""
autophase0: int
autophase1: int
autophase2: int
cur_step: int
action_index: int
def make_q_table_key(autophase_feature, action, step):
"""Create a hashable Q-table key.
For tabular learning we will be constructing a Q-table which maps a
(state, action) pair to an expected (remaining) reward. The purpose of this
function is to convert the (state, action) properties into a hashable tuple
that can be used as a key for a Q-table dictionary.
    In the CompilerGym setup, encoding the true state of the program is not
    obvious, so this solution uses the Autophase observation features instead.
    The default arguments hand-pick 3 indices of the Autophase features that
    appear to change a lot during optimization.
In addition, the current step in the episode is added to the state representation
as well. In the current fixed-episode-length setup, we need to differentiate
reaching a state at different steps, as they can lead to different final rewards,
depending on the remaining optimization steps.
Finally, we add the action index to the key.
"""
return StateActionTuple(
*autophase_feature[FLAGS.features_indices],
step,
FLAGS.tabular_q_actions.index(action),
)
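# A made-up illustration of the resulting key shape: three Autophase counters
# selected by --features_indices, plus the step index and the action index.
# The numbers and the helper name below are hypothetical and not used in
# training.
def _example_q_table_key() -> StateActionTuple:
    return StateActionTuple(
        autophase0=12, autophase1=3, autophase2=47, cur_step=2, action_index=5
    )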
def select_action(q_table, ob, step, epsilon=0.0):
qs = [
q_table.get(make_q_table_key(ob, act, step), -1)
for act in FLAGS.tabular_q_actions
]
if random.random() < epsilon:
return random.choice(FLAGS.tabular_q_actions)
max_indices = [i for i, x in enumerate(qs) if x == max(qs)]
# Breaking ties at random by selecting any of the indices.
return FLAGS.tabular_q_actions[random.choice(max_indices)]
def get_max_q_value(q_table, ob, step):
max_q = 0
for act in FLAGS.tabular_q_actions:
hashed = make_q_table_key(ob, act, step)
max_q = max(q_table.get(hashed, 0), max_q)
return max_q
def rollout(qtable, env, printout=False):
    # Roll out the policy greedily using the given Q-table.
observation = env.reset()
action_seq, rewards = [], []
for i in range(FLAGS.episode_length):
a = select_action(qtable, observation, i)
action_seq.append(a)
observation, reward, done, info = env.step(env.action_space.flags.index(a))
rewards.append(reward)
if done:
break
if printout:
print(
"Resulting sequence: ", ",".join(action_seq), f"total reward {sum(rewards)}"
)
return sum(rewards)
def train(q_table, env):
# Buffer an old version of q table to inspect training progress.
prev_q = {}
# Run the training process "online", where the policy evaluation and
    # policy improvement happen directly after one another.
for i in range(1, FLAGS.episodes + 1):
current_length = 0
observation = env.reset()
while current_length < FLAGS.episode_length:
# Run epsilon greedy policy to allow exploration.
a = select_action(q_table, observation, current_length, FLAGS.epsilon)
hashed = make_q_table_key(observation, a, current_length)
if hashed not in q_table:
q_table[hashed] = 0
            # Take a step in the environment, record the reward and state transition.
# Effectively we are evaluating the policy by taking a step in the
# environment.
observation, reward, done, info = env.step(env.action_space.flags.index(a))
if done:
break
current_length += 1
# Compute the target value of the current state, by using the current
# step-reward and bootstrapping from the next state. In Q-learning,
# a greedy policy is implied by the Q-table, thus we can approximate
# the expected reward at the next state as the maximum value of
# all the associated state-action pair rewards (Q values). A discount
            # can be used to emphasize immediate rewards and encourage
            # the agent to achieve higher rewards sooner rather than later.
target = reward + FLAGS.discount * get_max_q_value(
q_table, observation, current_length
)
# Update Q value. Instead of replacing the Q value at the current
# state action pair directly, a learning rate is introduced to interpolate
# between the current value and target value, effectively damping the
# changes. By updating the Q-table, we effectively updated the policy.
q_table[hashed] = (
FLAGS.learning_rate * target
+ (1 - FLAGS.learning_rate) * q_table[hashed]
)
if FLAGS.log_every and i % FLAGS.log_every == 0:
def compare_qs(q_old, q_new):
diff = [q_new[k] - v for k, v in q_old.items()]
return sum(diff) / len(diff) if diff else 0.0
difference = compare_qs(prev_q, q_table)
# Evaluate the current policy
cur_rewards = rollout(q_table, env)
print(
f"episode={i:4d}, cur_reward={cur_rewards:.5f}, Q-table_entries={len(q_table):5d}, Q-table_diff={difference:.7f}"
)
prev_q = q_table.copy()
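# A minimal sketch of the tabular update performed in train() above, on made-up
# numbers and independent of FLAGS. With learning_rate=0.5 and discount=1.0 the
# stored Q value moves halfway from its old value towards reward + max future Q.
# The helper name is hypothetical.
def _example_q_update() -> float:
    learning_rate, discount = 0.5, 1.0
    old_q, reward, max_next_q = 0.0, 0.2, 0.6
    target = reward + discount * max_next_q  # 0.8
    return learning_rate * target + (1 - learning_rate) * old_q  # 0.4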
def main(argv):
# Initialize a Q table.
q_table: Dict[StateActionTuple, float] = {}
benchmark = benchmark_from_flags()
assert benchmark, "You must specify a benchmark using the --benchmark flag"
with gym.make("llvm-ic-v0", benchmark=benchmark) as env:
env.observation_space = "Autophase"
# Train a Q-table.
with Timer("Constructing Q-table"):
train(q_table, env)
# Rollout resulting policy.
rollout(q_table, env, printout=True)
if __name__ == "__main__":
app.run(main)
|
CompilerGym-development
|
examples/tabular_q.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Simple PT compiler gym actor-critic RL example.
Usage: python actor_critic.py
Use --help to list the configurable options.
The objective is to minimize the size of a benchmark (program) using
LLVM compiler passes. At each step there is a choice of which pass to
pick next and an episode consists of a sequence of such choices,
yielding the number of saved instructions as the overall reward.
For simplification of the learning task, only a (configurable) subset
of LLVM passes are considered and every episode has the same
(configurable) length.
Based on the PT actor-critic example:
https://github.com/pytorch/examples/blob/master/reinforcement_learning/actor_critic.py
"""
import random
import statistics
from collections import namedtuple
from typing import List
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from absl import app, flags
from torch.distributions import Categorical
import compiler_gym.util.flags.episodes # noqa Flag definition.
import compiler_gym.util.flags.learning_rate # noqa Flag definition.
import compiler_gym.util.flags.seed # noqa Flag definition.
from compiler_gym.util.flags.benchmark_from_flags import benchmark_from_flags
from compiler_gym.util.flags.env_from_flags import env_from_flags
from compiler_gym.wrappers import ConstrainedCommandline, TimeLimit
flags.DEFINE_list(
"flags",
[
"-break-crit-edges",
"-early-cse-memssa",
"-gvn-hoist",
"-gvn",
"-instcombine",
"-instsimplify",
"-jump-threading",
"-loop-reduce",
"-loop-rotate",
"-loop-versioning",
"-mem2reg",
"-newgvn",
"-reg2mem",
"-simplifycfg",
"-sroa",
],
"List of optimizatins to explore.",
)
flags.DEFINE_integer("episode_len", 5, "Number of transitions per episode.")
flags.DEFINE_integer("hidden_size", 64, "Latent vector size.")
flags.DEFINE_integer("log_interval", 100, "Episodes per log output.")
flags.DEFINE_integer("iterations", 1, "Times to redo entire training.")
flags.DEFINE_float("exploration", 0.0, "Rate to explore random transitions.")
flags.DEFINE_float("mean_smoothing", 0.95, "Smoothing factor for mean normalization.")
flags.DEFINE_float("std_smoothing", 0.4, "Smoothing factor for std dev normalization.")
eps = np.finfo(np.float32).eps.item()
SavedAction = namedtuple("SavedAction", ["log_prob", "value"])
FLAGS = flags.FLAGS
class MovingExponentialAverage:
"""Simple class to calculate exponential moving averages."""
def __init__(self, smoothing_factor):
self.smoothing_factor = smoothing_factor
self.value = None
def next(self, entry):
assert entry is not None
if self.value is None:
self.value = entry
else:
self.value = (
entry * (1 - self.smoothing_factor) + self.value * self.smoothing_factor
)
return self.value
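# A small worked example of MovingExponentialAverage, with made-up numbers: the
# first entry initializes the average, and each later entry moves it by
# (1 - smoothing_factor) of the gap. The helper name below is hypothetical and
# the class is only exercised, not modified.
def _example_moving_average() -> float:
    ema = MovingExponentialAverage(smoothing_factor=0.9)
    ema.next(10.0)  # The first entry initializes the average to 10.0.
    return ema.next(20.0)  # 20 * 0.1 + 10 * 0.9 == 11.0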
class HistoryObservation(gym.ObservationWrapper):
"""For the input representation (state), if there are N possible
actions, then an action x is represented by a one-hot vector V(x)
with N entries. A sequence of M actions (x, y, ...) is represented
by an MxN matrix of 1-hot vectors (V(x), V(y), ...). Actions that
have not been taken yet are represented as the zero vector. This
way the input does not have a variable size since each episode has
a fixed number of actions.
"""
def __init__(self, env):
super().__init__(env=env)
self.observation_space = gym.spaces.Box(
low=np.full(len(FLAGS.flags), 0, dtype=np.float32),
high=np.full(len(FLAGS.flags), float("inf"), dtype=np.float32),
dtype=np.float32,
)
def reset(self, *args, **kwargs):
self._steps_taken = 0
self._state = np.zeros(
(FLAGS.episode_len - 1, self.action_space.n), dtype=np.int32
)
return super().reset(*args, **kwargs)
def step(self, action: int):
assert self._steps_taken < FLAGS.episode_len
if self._steps_taken < FLAGS.episode_len - 1:
# Don't need to record the last action since there are no
# further decisions to be made at that point, so that
# information need never be presented to the model.
self._state[self._steps_taken][action] = 1
self._steps_taken += 1
return super().step(action)
def observation(self, observation):
return self._state
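# An illustrative sketch of the state encoding described in the class docstring
# above: for a 4-step episode over 3 actions, taking actions 2 then 0 yields a
# (3, 3) matrix whose first two rows are one-hot and whose remaining row is
# zero. The sizes are made up and independent of the FLAGS defaults.
def _example_history_state() -> np.ndarray:
    episode_len, n_actions = 4, 3
    state = np.zeros((episode_len - 1, n_actions), dtype=np.int32)
    for step, action in enumerate([2, 0]):
        state[step][action] = 1
    return state  # [[0, 0, 1], [1, 0, 0], [0, 0, 0]]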
class Policy(nn.Module):
"""A very simple actor critic policy model."""
def __init__(self):
super().__init__()
self.affine1 = nn.Linear(
(FLAGS.episode_len - 1) * len(FLAGS.flags), FLAGS.hidden_size
)
self.affine2 = nn.Linear(FLAGS.hidden_size, FLAGS.hidden_size)
self.affine3 = nn.Linear(FLAGS.hidden_size, FLAGS.hidden_size)
self.affine4 = nn.Linear(FLAGS.hidden_size, FLAGS.hidden_size)
# Actor's layer
self.action_head = nn.Linear(FLAGS.hidden_size, len(FLAGS.flags))
# Critic's layer
self.value_head = nn.Linear(FLAGS.hidden_size, 1)
# Action & reward buffer
self.saved_actions: List[SavedAction] = []
self.rewards: List[float] = []
# Keep exponential moving average of mean and standard
# deviation for use in normalization of the input.
self.moving_mean = MovingExponentialAverage(FLAGS.mean_smoothing)
self.moving_std = MovingExponentialAverage(FLAGS.std_smoothing)
def forward(self, x):
"""Forward of both actor and critic"""
        # The initial layer maps the sequence of one-hot vectors into a
        # vector of the hidden size. Subsequent layers keep the same
        # size and use residual connections.
x = F.relu(self.affine1(x))
x = x.add(F.relu(self.affine2(x)))
x = x.add(F.relu(self.affine3(x)))
x = x.add(F.relu(self.affine4(x)))
        # actor: chooses the action to take from state s_t
# by returning probability of each action
action_prob = F.softmax(self.action_head(x), dim=-1)
# critic: evaluates being in the state s_t
state_values = self.value_head(x)
# return values for both actor and critic as a tuple of 2 values:
# 1. a list with the probability of each action over the action space
# 2. the value from state s_t
return action_prob, state_values
def select_action(model, state, exploration_rate=0.0):
"""Selects an action and registers it with the action buffer."""
state = torch.from_numpy(state.flatten()).float()
probs, state_value = model(state)
# Create a probability distribution where the probability of
# action i is probs[i].
m = Categorical(probs)
# Sample an action using the distribution, or pick an action
# uniformly at random if in an exploration mode.
if random.random() < exploration_rate:
action = torch.tensor(random.randrange(0, len(probs)))
else:
action = m.sample()
# Save to action buffer. The drawing of a sample above simply
# returns a constant integer that we cannot back-propagate
# through, so it is important here that log_prob() is symbolic.
model.saved_actions.append(SavedAction(m.log_prob(action), state_value))
# The action to take.
return action.item()
def finish_episode(model, optimizer) -> float:
"""The training code. Calculates actor and critic loss and performs backprop."""
R = 0
saved_actions = model.saved_actions
policy_losses = [] # list to save actor (policy) loss
value_losses = [] # list to save critic (value) loss
returns = [] # list to save the true values
# Calculate the true value using rewards returned from the
# environment. We are iterating in reverse order while inserting
# at each step to the front of the returns list, which implies
# that returns[i] is the sum of rewards[j] for j >= i. We do not
# use a discount factor as the episode length is fixed and not
# very long, but if we had used one, it would appear here.
for r in model.rewards[::-1]:
R += r
returns.insert(0, R)
# Update the moving averages for mean and standard deviation and
# use that to normalize the input.
returns = torch.tensor(returns)
model.moving_mean.next(returns.mean())
model.moving_std.next(returns.std())
returns = (returns - model.moving_mean.value) / (model.moving_std.value + eps)
for (log_prob, value), R in zip(saved_actions, returns):
# The advantage is how much better a situation turned out in
# this case than the critic expected it to.
advantage = R - value.item()
# Calculate the actor (policy) loss. Because log_prob is
# symbolic, back propagation will increase the probability of
# taking the action that was taken if advantage is positive
# and will decrease it if advantage is negative. In this way
# we are learning a probability distribution without directly
# being able to back propagate through the drawing of the
# sample from that distribution.
#
# It may seem that once the critic becomes accurate, so that
# the advantage is always 0, then the policy can no longer
# learn because multiplication by 0 impedes back
# propagation. However, the critic does not know which action
# will be taken, so as long as there are worse-than-average or
# better-than-average policies with a non-zero probability,
# then the critic has to be wrong sometimes because it can
# only make one prediction across all actions, so learning
# will proceed.
policy_losses.append(-log_prob * advantage)
# Calculate critic (value) loss using L1 smooth loss.
value_losses.append(F.smooth_l1_loss(value, torch.tensor([R])))
# Reset gradients.
optimizer.zero_grad()
# Sum up all the values of policy_losses and value_losses.
loss = torch.stack(policy_losses).sum() + torch.stack(value_losses).sum()
loss_value = loss.item()
# Perform backprop.
loss.backward()
optimizer.step()
# Reset rewards and action buffer.
del model.rewards[:]
del model.saved_actions[:]
return loss_value
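# A minimal sketch of the undiscounted returns computed at the top of
# finish_episode(): returns[i] is the sum of all rewards from step i onwards.
# The rewards and the helper name below are made up for illustration.
def _example_returns() -> List[float]:
    rewards = [1.0, 2.0, 3.0]
    returns, running = [], 0.0
    for r in rewards[::-1]:
        running += r
        returns.insert(0, running)
    return returns  # [6.0, 5.0, 3.0]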
def TrainActorCritic(env):
model = Policy()
optimizer = optim.Adam(model.parameters(), lr=FLAGS.learning_rate)
# These statistics are just for logging.
max_ep_reward = -float("inf")
avg_reward = MovingExponentialAverage(0.95)
avg_loss = MovingExponentialAverage(0.95)
for episode in range(1, FLAGS.episodes + 1):
# Reset environment and episode reward.
state = env.reset()
ep_reward = 0
# The environment keeps track of when the episode is done, so
# we can loop infinitely here.
while True:
# Select action from policy.
action = select_action(model, state, FLAGS.exploration)
# Take the action
state, reward, done, _ = env.step(action)
model.rewards.append(reward)
ep_reward += reward
if done:
break
# Perform back propagation.
loss = finish_episode(model, optimizer)
# Update statistics.
max_ep_reward = max(max_ep_reward, ep_reward)
avg_reward.next(ep_reward)
avg_loss.next(loss)
# Log statistics.
if (
episode == 1
or episode % FLAGS.log_interval == 0
or episode == FLAGS.episodes
):
print(
f"Episode {episode}\t"
f"Last reward: {ep_reward:.2f}\t"
f"Avg reward: {avg_reward.value:.2f}\t"
f"Best reward: {max_ep_reward:.2f}\t"
f"Last loss: {loss:.6f}\t"
f"Avg loss: {avg_loss.value:.6f}\t",
flush=True,
)
print(f"\nFinal performance (avg reward): {avg_reward.value:.2f}")
print(f"Final avg reward versus own best: {avg_reward.value - max_ep_reward:.2f}")
# One could also return the best found solution here, though that
# is more random and noisy, while the average reward indicates how
# well the model is working on a consistent basis.
return avg_reward.value
def make_env():
FLAGS.env = "llvm-v0"
if not FLAGS.reward:
FLAGS.reward = "IrInstructionCountOz"
env = env_from_flags(benchmark=benchmark_from_flags())
env = ConstrainedCommandline(env, flags=FLAGS.flags)
env = TimeLimit(env, max_episode_steps=FLAGS.episode_len)
env = HistoryObservation(env)
return env
def main(argv):
"""Main entry point."""
del argv # unused
torch.manual_seed(FLAGS.seed)
random.seed(FLAGS.seed)
with make_env() as env:
print(f"Seed: {FLAGS.seed}")
print(f"Episode length: {FLAGS.episode_len}")
print(f"Exploration: {FLAGS.exploration:.2%}")
print(f"Learning rate: {FLAGS.learning_rate}")
print(f"Reward: {FLAGS.reward}")
print(f"Benchmark: {FLAGS.benchmark}")
print(f"Action space: {env.action_space}")
if FLAGS.iterations == 1:
TrainActorCritic(env)
return
# Performance varies greatly with random initialization and
# other random choices, so run the process multiple times to
# determine the distribution of outcomes.
performances = []
for i in range(1, FLAGS.iterations + 1):
print(f"\n*** Iteration {i} of {FLAGS.iterations}")
performances.append(TrainActorCritic(env))
print("\n*** Summary")
print(f"Final performances: {performances}\n")
print(f" Best performance: {max(performances):.2f}")
print(f"Median performance: {statistics.median(performances):.2f}")
print(f" Avg performance: {statistics.mean(performances):.2f}")
print(f" Worst performance: {min(performances):.2f}")
if __name__ == "__main__":
app.run(main)
|
CompilerGym-development
|
examples/actor_critic.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/bin:brute_force."""
import tempfile
from pathlib import Path
import gym
from brute_force import run_brute_force
def test_run_brute_force_smoke_test():
with tempfile.TemporaryDirectory() as tmp:
outdir = Path(tmp)
run_brute_force(
make_env=lambda: gym.make("llvm-ic-v0", benchmark="cbench-v1/crc32"),
action_names=["-sroa", "-mem2reg"],
episode_length=2,
outdir=outdir,
nproc=1,
chunksize=2,
)
assert (outdir / "meta.json").is_file()
assert (outdir / "results.csv").is_file()
|
CompilerGym-development
|
examples/brute_force_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Sweep the inner loop size of CUDA loop nests."""
import logging
from itertools import product
from pathlib import Path
from typing import List, Optional
from typer import Typer
import compiler_gym
from compiler_gym.util.runfiles_path import create_user_logs_dir
logger = logging.getLogger(__name__)
app = Typer()
def wrapped_step(env, action):
done = True
while done:
observation, reward, done, info = env.step(action)
if done:
logger.warning("Step failed: %s", info["error_details"])
env.reset()
return observation, reward, done, info
def flops_after_steps(env, num_steps):
wrapped_step(env, [1] * (num_steps - 1))
env.observation_space = "flops"
observation, _, _, _ = wrapped_step(env, 1)
env.observation_space = None
return observation
def run_one_sweep(
device: str,
k: int,
vectorize: int = 1,
linear: bool = False,
logdir: Optional[Path] = None,
):
"""Run a single sweep."""
logdir = logdir or create_user_logs_dir("loop_tool_sweep")
logfile = logdir / f"k{k}-v{vectorize}-{device}-{'linear' if linear else 'log'}.txt"
print("Logging results to", logfile)
print()
print("Device", "K", "Inner", "Vec.", "FLOPS", sep="\t")
with open(logfile, "w") as f:
print("device", "k", "inner", "vectorize", "flops", sep=",", file=f)
def log(k, inner, vectorize, flops):
print(device.upper(), k, inner, vectorize, flops, sep="\t", flush=True)
with open(logfile, "a") as f:
print(device, k, inner, vectorize, flops, sep=",", file=f)
actions = [3, 0, 1, 3, 0]
k *= 1024 # raw number of elements
with compiler_gym.make("loop_tool-v0") as env:
env.reset(
benchmark=env.datasets.benchmark(
uri=f"benchmark://loop_tool-{device}-v0/{k}"
),
action_space="simple",
)
if vectorize - 1:
vs = [1] * (vectorize - 1)
actions += vs + [0, 1, 0] + vs + [0, 2, 0]
for a in actions:
wrapped_step(env, a)
if linear:
for i in range(k // (vectorize * 1024)):
step_count = 1022 if i == 0 else 1023
flops = flops_after_steps(env, step_count)
log(k, (i + 1) * 1024, vectorize, flops)
else: # linear=False (log)
inner = 1
step = 512
wrapped_step(env, [1] * (step - 1))
inner += step - 1
while inner * vectorize <= k:
flops = flops_after_steps(env, step)
inner += step
log(k, inner, vectorize, flops)
step *= 2
@app.command()
def sweep(
device: List[str] = ["cuda"],
k: List[int] = [512, 1024, 2048, 4096, 8192],
vectorize: List[int] = [1],
linear: List[bool] = [False],
logdir: Optional[Path] = None,
):
logdir = logdir or create_user_logs_dir("loop_tool_sweep")
for device_, k_, vectorize_, linear_ in product(device, k, vectorize, linear):
run_one_sweep(
device=device_, k=k_, vectorize=vectorize_, linear=linear_, logdir=logdir
)
if __name__ == "__main__":
app()
|
CompilerGym-development
|
examples/loop_tool_sweep.py
|
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import distutils.util
import setuptools
with open("../VERSION") as f:
version = f.read().strip()
with open("requirements.txt") as f:
requirements = [ln.split("#")[0].rstrip() for ln in f.readlines()]
with open("../tests/requirements.txt") as f:
requirements += [ln.split("#")[0].rstrip() for ln in f.readlines()]
setuptools.setup(
name="compiler_gym_examples",
version=version,
description="Example code for CompilerGym",
author="Facebook AI Research",
url="https://github.com/facebookresearch/CompilerGym",
license="MIT",
install_requires=requirements,
packages=[
"llvm_autotuning",
"llvm_autotuning.autotuners",
"llvm_rl",
"llvm_rl.model",
],
package_data={
"llvm_autotuning": [
"config/*.yaml",
"config/**/*.yaml",
],
"llvm_rl": [
"config/*.yaml",
"config/**/*.yaml",
],
},
python_requires=">=3.8",
platforms=[distutils.util.get_platform()],
zip_safe=False,
)
|
CompilerGym-development
|
examples/setup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for //compiler_gym/bin:tabular_q."""
from absl import flags
from tabular_q import main
from compiler_gym.util.capture_output import capture_output
FLAGS = flags.FLAGS
def test_run_tabular_q_smoke_test():
FLAGS.unparse_flags()
FLAGS(
[
"argv0",
"--episode_length=5",
"--episodes=10",
"--log_every=2",
"--benchmark=cbench-v1/crc32",
]
)
with capture_output() as out:
main(["argv0"])
assert "Resulting sequence" in out.stdout
|
CompilerGym-development
|
examples/tabular_q_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Run a parallelized brute force of an action space.
This script enumerates all possible combinations of actions up to a finite
length and evaluates them, logging the incremental rewards of each.
Example usage:
$ python brute_force.py --env=llvm-ic-v0 --benchmark=cbench-v1/dijkstra \
--episode_length=8 --brute_force_action_list=-sroa,-mem2reg,-newgvn
Enumerating all episodes of 3 actions x 8 steps
Started 24 brute force workers for benchmark benchmark://cbench-v1/dijkstra using reward IrInstructionCountOz.
=== Running 6,561 trials ===
Runtime: 8 seconds. Progress: 100.00%. Best reward found: 0.8571428571428572.
Ending jobs ... I1014 12:04:51.671775 3245811 CreateAndRunCompilerGymServiceImpl.h:128] Service "/dev/shm/compiler_gym_cec/s/1014T120451-646797-5770" listening on 37505, PID = 3245811
completed 6,561 of 6,561 trials (100.000%), best sequence -mem2reg -mem2reg -sroa -sroa -mem2reg -sroa -sroa -newgvn
Use --help to list the configurable options.
"""
import itertools
import json
import logging
import math
import os
import sys
from pathlib import Path
from queue import Queue
from threading import Thread
from time import time
from typing import List
import humanize
from absl import app, flags
import compiler_gym.util.flags.episode_length # noqa Flag definition.
import compiler_gym.util.flags.nproc # noqa Flag definition.
import compiler_gym.util.flags.output_dir # noqa Flag definition.
from compiler_gym.envs import CompilerEnv
from compiler_gym.util.flags.benchmark_from_flags import benchmark_from_flags
from compiler_gym.util.flags.env_from_flags import env_from_flags
from compiler_gym.util.gym_type_hints import ActionType
from compiler_gym.util.runfiles_path import create_user_logs_dir
flags.DEFINE_list(
"brute_force_action_list",
[],
"A list of action names to enumerate. If not provided, all actions are used "
"(warning: this might make a long time!)",
)
FLAGS = flags.FLAGS
def grouper(iterable, n):
"""Split an iterable into chunks of length `n`, padded if required."""
args = [iter(iterable)] * n
return itertools.zip_longest(*args, fillvalue=None)
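# An illustrative sketch of grouper(): the final chunk is padded with None so
# that every chunk has exactly `n` entries, which the workers below treat as an
# end-of-work marker. The values and the helper name are made up.
def _example_grouper():
    return list(grouper([1, 2, 3, 4, 5], 2))  # [(1, 2), (3, 4), (5, None)]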
class BruteForceProducer(Thread):
"""A thread which enumerates all possible combinations of actions up to
length episode_length and writes chunks of these combinations to a queue.
"""
def __init__(
self,
in_q: Queue,
actions: List[ActionType],
episode_length: int,
nproc: int,
chunksize: int = 128,
):
super().__init__()
self.in_q = in_q
self.actions = actions
self.episode_length = episode_length
self.nproc = nproc
self.chunksize = chunksize
self.alive = True # Set this to False to signal the thread to stop.
def run(self):
for chunk in grouper(
itertools.product(*[self.actions] * self.episode_length), self.chunksize
):
if not self.alive:
break
self.in_q.put(chunk)
# Signal for each worker to end.
for _ in range(self.nproc):
self.in_q.put(None)
class BruteForceWorker(Thread):
"""Worker thread which reads chunks of action lists and evaluates them.
Chunks of action lists are read from in_q and written to out_q, along with
the incremental reward of each action.
"""
def __init__(
self,
worker_id: int,
in_q: Queue,
out_q: Queue,
env: CompilerEnv,
):
super().__init__()
self.id = worker_id
self.in_q = in_q
self.out_q = out_q
self.env = env
# Incremental progress.
self.num_trials = 0
self.alive = True # Set this to False to signal the thread to stop.
    def log(self, *args, **kwargs):
        # Note: logging.debug() does not accept print()'s flush keyword.
        logging.debug(
            f"Worker {self.id} ({self.num_trials} trials):", *args, **kwargs
        )
def run(self) -> None:
"""Grab chunks of work from in_q and write results to out_q."""
chunk = self.in_q.get()
while chunk and self.alive:
results = []
self.log("Processing chunk")
for actions in chunk:
# A "None" value is used to pad an incomplete chunk. There will
# be no more work to do after this.
if not actions:
break
self.num_trials += 1
rewards = self.run_one_episode(actions)
results.append((actions, rewards))
self.out_q.put(results)
chunk = self.in_q.get()
# Signal that we're done.
self.out_q.put(None)
self.env.close()
self.log("Worker is done")
def run_one_episode(self, actions: List[int]) -> List[float]:
"""Evaluate the reward of every action in a list."""
self.env.reset()
rewards = []
for action in actions:
_, reward, done, _ = self.env.step(action)
rewards.append(reward)
if done:
break
return rewards
def run_brute_force(
make_env,
action_names: List[str],
episode_length: int,
outdir: Path,
nproc: int,
chunksize: int = 128,
):
"""Run a brute force job."""
meta_path = outdir / "meta.json"
results_path = outdir / "results.csv"
with make_env() as env:
env.reset()
action_names = action_names or env.action_space.names
if not env.reward_space:
raise ValueError("A reward space must be specified for random search")
reward_space_name = env.reward_space.name
actions = [env.action_space.names.index(a) for a in action_names]
benchmark_uri = str(env.benchmark)
meta = {
"env": env.spec.id,
"action_names": action_names,
"benchmark": benchmark_uri,
"reward": reward_space_name,
"init_reward": env.reward[reward_space_name],
"episode_length": episode_length,
"nproc": nproc,
"chunksize": chunksize,
}
with open(str(meta_path), "w") as f:
json.dump(meta, f)
print(f"Wrote {meta_path}")
print(f"Writing results to {results_path}")
# A queue for communicating action sequences to workers, and a queue for
# workers to report <action_sequence, reward_sequence> results.
in_q = Queue(maxsize=32)
out_q = Queue(maxsize=128)
# Generate the action sequences to run.
producer = BruteForceProducer(
in_q=in_q,
nproc=nproc,
actions=actions,
episode_length=episode_length,
chunksize=chunksize,
)
producer.start()
# Worker threads that will consume the action sequences and produce rewards.
workers = [
BruteForceWorker(worker_id=i, env=make_env(), in_q=in_q, out_q=out_q)
for i in range(1, nproc + 1)
]
for worker in workers:
worker.start()
# The consumer loop. Read results from workers as they come in and write
# them to file.
started = time()
expected_trial_count = len(actions) ** episode_length
expected_chunk_count = math.ceil(expected_trial_count / chunksize)
chunk_count = 0
best_reward = -float("inf")
best_action_sequence = []
print(
f"Enumerating all episodes of {len(actions)} actions x {episode_length} steps"
)
print(
f"Started {len(workers)} brute force workers for benchmark "
f"{benchmark_uri} using reward {reward_space_name}."
)
print(f"=== Running {humanize.intcomma(expected_trial_count)} trials ===")
try:
with open(str(results_path), "w") as f:
print(
*[f"action_{i}" for i in range(1, episode_length + 1)],
*[f"reward_{i}" for i in range(1, episode_length + 1)],
sep=",",
file=f,
flush=True,
)
nproc_completed = 0
while nproc_completed < nproc:
chunk = out_q.get()
if not chunk:
nproc_completed += 1
continue
chunk_count += 1
print(
f"\r\033[KRuntime: {humanize.naturaldelta(time() - started)}. "
f"Progress: {chunk_count/expected_chunk_count:.2%}. "
f"Best reward found: {best_reward}.",
file=sys.stderr,
flush=True,
end="",
)
for actions, rewards in chunk:
print(*actions, *rewards, sep=",", file=f, flush=True)
if rewards and rewards[-1] is not None:
if sum(rewards) > best_reward:
best_reward = sum(rewards)
best_action_sequence = actions
except KeyboardInterrupt:
print("\nkeyboard interrupt", end="", flush=True)
print(file=sys.stderr, flush=True)
print("Ending jobs ... ", end="", flush=True)
# In case of early exit, signal to the threads to terminate.
producer.alive = False
for worker in workers:
worker.alive = False
# Wait for everyone to finish.
producer.join()
for worker in workers:
worker.join()
num_trials = sum(worker.num_trials for worker in workers)
with make_env() as env:
print(
f"completed {humanize.intcomma(num_trials)} of "
f"{humanize.intcomma(expected_trial_count)} trials "
f"({num_trials / expected_trial_count:.3%}), best sequence",
" ".join([env.action_space.flags[i] for i in best_action_sequence]),
)
def main(argv):
"""Main entry point."""
argv = FLAGS(argv)
if len(argv) != 1:
raise app.UsageError(f"Unknown command line arguments: {argv[1:]}")
# Use default logdir of <base>/brute_force/<benchmark> unless told
# otherwise.
benchmark = benchmark_from_flags()
if not benchmark:
raise app.UsageError("No benchmark specified.")
with env_from_flags(benchmark) as env:
env.reset()
logs_dir = Path(
FLAGS.output_dir
or create_user_logs_dir(
f'brute_force/{os.path.normpath(f"random/{env.benchmark.uri.scheme}/{env.benchmark.uri.path}")}'
)
)
run_brute_force(
make_env=lambda: env_from_flags(benchmark_from_flags()),
action_names=FLAGS.brute_force_action_list,
episode_length=FLAGS.episode_length,
outdir=logs_dir,
nproc=FLAGS.nproc,
)
if __name__ == "__main__":
app.run(main)
|
CompilerGym-development
|
examples/brute_force.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This script runs microbenchmarks of CompilerGym environment operations.
To collect new measurements, run one of the following commands:
$ python -m op_benchmarks {run,init,reset,step,observations} --env=llvm-v0 --n=100
To aggregate results from prior runs:
$ python -m op_benchmarks info
"""
import logging
import os
import re
from collections import defaultdict
from itertools import islice
from math import ceil
from multiprocessing import cpu_count
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional
import numpy as np
import pandas as pd
import typer
from tabulate import tabulate
import compiler_gym
from compiler_gym import CompilerEnv
from compiler_gym.errors import BenchmarkInitError
from compiler_gym.util.executor import Executor
from compiler_gym.util.logging import init_logging
from compiler_gym.util.runfiles_path import create_user_logs_dir
from compiler_gym.util.timer import Timer
app = typer.Typer()
logger = logging.getLogger(__name__)
def get_runtimes(op: Callable[[], Any], n: int):
"""Run `n` reptitions of function `op`, ignoring any errors."""
runtimes = []
for _ in range(n):
try:
with Timer() as timer:
op()
runtimes.append(timer.time)
except Exception as e: # pylint: disable=broad-except
logger.warning("Op failed: %s", e)
return runtimes
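# Illustrative usage (assumed, not part of the original script):
#   runtimes = get_runtimes(lambda: compiler_gym.make("llvm-v0").close(), 10)
# returns up to 10 wall-clock timings in seconds; repetitions that raise are
# logged as warnings and skipped rather than aborting the benchmark.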
@app.command()
def init(
n: int = int(1e6),
j: int = cpu_count(),
env: str = "llvm-autophase-ic-v0",
outdir: Optional[Path] = None,
):
"""Benchmark the environment startup time."""
executor = Executor(type="local", cpus=j)
outdir = Path(outdir or create_user_logs_dir("op_benchmarks"))
with executor.get_executor(logs_dir=outdir) as session:
_init(n=n, outdir=outdir, j=j, env_name=env, session=session)
def _init(n: int, outdir: Path, j: int, env_name: str, session: Executor):
outdir.mkdir(exist_ok=True, parents=True)
for i in range(1, j + 1):
session.submit(
_init_worker,
env_name=env_name,
n=int(ceil(n / j)),
outfile=outdir / f".op:1:startup-shard-{i:02d}.txt",
)
def _init_worker(env_name: str, n: int, outfile: Path):
with open(outfile, "w") as f:
for _ in range(0, n, min(100, n)):
runtimes = get_runtimes(
lambda: compiler_gym.make(env_name).close(), min(100, n)
)
print("\n".join(f"{x:.8f}" for x in runtimes), file=f, flush=True)
def get_benchmarks(env_name: str, n: int, seed: int, outdir: Path) -> List[str]:
"""Get `n` benchmarks from all datasets.
If the dataset is smaller than `n`, benchmarks are repeated. If the dataset
is larger than `n`, `n` random unique programs are sampled.
"""
benchmarks = []
with compiler_gym.make(env_name) as env:
datasets = sorted(list(env.datasets))
benchmarks_per_dataset = int(ceil(n / len(datasets)))
for ds in datasets:
logger.info(
"Enumerating %s benchmarks from dataset from %s ...",
benchmarks_per_dataset,
ds,
)
if ds.size == 0 or ds.size > benchmarks_per_dataset:
rng = np.random.default_rng(seed)
uniq_bm_uris = set()
benchmarks_from_dataset = []
while len(benchmarks_from_dataset) < benchmarks_per_dataset:
bm = ds.random_benchmark(rng)
if bm.uri in uniq_bm_uris:
continue
uniq_bm_uris.add(bm.uri)
# Start an environment to check that the benchmark can be
# initialized.
try:
env.reset(benchmark=bm)
except (BenchmarkInitError, ValueError, TimeoutError):
continue
benchmarks_from_dataset.append(bm.uri)
benchmarks += benchmarks_from_dataset
else:
bms = list(ds.benchmark_uris())
bms *= int(ceil(benchmarks_per_dataset / len(bms)))
benchmarks += bms[:benchmarks_per_dataset]
benchmarks = sorted(benchmarks)
with open(outdir / "benchmarks.txt", "w") as f:
for bm in benchmarks:
print(bm, file=f)
return benchmarks
def chunkify(iterable, n):
iterable = iter(iterable)
chunk = list(islice(iterable, n))
while chunk:
yield chunk
chunk = list(islice(iterable, n))
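# Illustrative usage (assumed, not part of the original script):
#   >>> list(chunkify(range(5), 2))
#   [[0, 1], [2, 3], [4]]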
@app.command()
def reset(
n: int = int(1e6),
num_benchmarks: int = int(1e3),
env: str = "llvm-autophase-ic-v0",
j: int = cpu_count(),
seed: int = 0xCC,
outdir: Optional[Path] = None,
):
"""Benchmark the env.reset() operator."""
executor = Executor(type="local", cpus=j)
outdir = Path(outdir or create_user_logs_dir("op_benchmarks"))
benchmarks = get_benchmarks(
env_name=env, n=min(n, num_benchmarks), seed=seed, outdir=outdir
)
with executor.get_executor(logs_dir=outdir) as session:
_reset(
benchmarks=benchmarks,
n=n,
outdir=outdir,
j=j,
env_name=env,
session=session,
)
def _reset(
benchmarks: List[str],
n: int,
outdir: Path,
env_name: str,
j: int,
session: Executor,
):
outdir.mkdir(exist_ok=True, parents=True)
num_measurements_per_benchmark = int(ceil(n / len(benchmarks)))
for i, benchmarks_chunk in enumerate(chunkify(benchmarks, j), start=1):
session.submit(
_reset_worker,
num_measurements_per_benchmark=num_measurements_per_benchmark,
benchmarks=benchmarks_chunk,
env_name=env_name,
outfile=outdir / f".op:2:reset-shard-{i:02d}.txt",
)
def _reset_worker(
num_measurements_per_benchmark: int,
benchmarks: List[str],
env_name: str,
outfile: Path,
):
with compiler_gym.make(env_name) as env:
with open(outfile, "w") as f:
for benchmark in benchmarks:
env.reset(benchmark=benchmark)
runtimes = get_runtimes(
lambda: env.reset(benchmark=benchmark),
num_measurements_per_benchmark,
)
print("\n".join(f"{x:.8f} {benchmark}" for x in runtimes), file=f)
@app.command()
def step(
n: int = int(1e6),
num_benchmarks: int = int(1e3),
env: str = "llvm-autophase-ic-v0",
j: int = cpu_count(),
seed: int = 0xCC,
outdir: Optional[Path] = None,
):
"""Benchmark the env.step() operator."""
executor = Executor(type="local", cpus=j)
outdir = Path(outdir or create_user_logs_dir("op_benchmarks"))
benchmarks = get_benchmarks(
env_name=env, n=min(n, num_benchmarks), seed=seed, outdir=outdir
)
with executor.get_executor(logs_dir=outdir) as session:
_step(
session=session,
outdir=outdir,
benchmarks=benchmarks,
n=n,
j=j,
env_name=env,
seed=seed,
)
def _step(
n: int,
benchmarks: List[str],
env_name: str,
seed: int,
j: int,
outdir: Path,
session: Executor,
):
outdir.mkdir(exist_ok=True, parents=True)
num_measurements_per_benchmark = int(ceil(n / len(benchmarks)))
for i, benchmarks_chunk in enumerate(chunkify(benchmarks, j), start=1):
session.submit(
_step_worker,
num_measurements_per_benchmark=num_measurements_per_benchmark,
seed=seed + (i * len(benchmarks_chunk)),
benchmarks=benchmarks_chunk,
env_name=env_name,
step_outfile=outdir / f".op:3:step-shard-{i:02d}.txt",
batched_outfile=outdir / f".op:3:step-batched-shard-{i:02d}.txt",
)
def _step_worker(
num_measurements_per_benchmark: int,
benchmarks: List[str],
env_name: str,
    seed: int,
step_outfile: Path,
batched_outfile: Path,
):
def get_step_times(env: CompilerEnv, num_steps: int, batched=False):
while batched:
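            # Note (added for clarity): if the batched episode terminates
            # early (done=True), reset and retry until num_steps actions
            # complete in one episode; the elapsed time is then divided
            # evenly across the steps.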
# Run all actions in a single step().
steps = [env.action_space.sample() for _ in range(num_steps)]
with Timer() as timer:
_, _, done, _ = env.multistep(steps)
if not done:
return [timer.time / num_steps] * num_steps
env.reset()
# Run each action as a step().
runtimes = []
while len(runtimes) < num_steps:
with Timer() as timer:
_, _, done, _ = env.step(env.action_space.sample())
if done:
env.reset()
else:
runtimes.append(timer.time)
return runtimes
with compiler_gym.make(env_name) as env:
with open(step_outfile, "w") as f:
for i, benchmark in enumerate(benchmarks, start=seed):
env.reset(benchmark=benchmark)
env.seed(i)
runtimes = get_step_times(env, num_measurements_per_benchmark)
print("\n".join(f"{x:.8f} {benchmark}" for x in runtimes), file=f)
with open(batched_outfile, "w") as f:
for i, benchmark in enumerate(benchmarks, start=seed):
env.reset(benchmark=benchmark)
env.seed(i)
runtimes = get_step_times(
env, num_measurements_per_benchmark, batched=True
)
print("\n".join(f"{x:.8f} {benchmark}" for x in runtimes), file=f)
@app.command()
def observations(
env: str = "llvm-autophase-ic-v0",
observation_spaces: List[str] = [
"Ir",
"InstCount",
"Autophase",
"Inst2vec",
"Programl",
"IrInstructionCount",
"ObjectTextSizeBytes",
"Runtime",
],
n: int = int(1e6),
num_benchmarks: int = int(1e3),
j: int = cpu_count(),
seed: int = 0xCC,
outdir: Optional[Path] = None,
) -> List[float]:
"""Benchmark the environment observation spaces."""
executor = Executor(type="local", cpus=j)
outdir = Path(outdir or create_user_logs_dir("op_benchmarks"))
benchmarks = get_benchmarks(
env_name=env, n=min(n, num_benchmarks), seed=seed, outdir=outdir
)
with executor.get_executor(logs_dir=outdir) as session:
_observations(
session=session,
env_name=env,
benchmarks=benchmarks,
j=j,
outdir=outdir,
observation_spaces=observation_spaces,
n=n,
)
def _observations(
observation_spaces: List[str],
benchmarks: List[str],
n: int,
j: int,
session: Executor,
outdir: Path,
env_name: str,
):
outdir.mkdir(exist_ok=True, parents=True)
num_measurements_per_benchmark = int(ceil(n / len(benchmarks)))
for i, benchmarks_chunk in enumerate(chunkify(benchmarks, j), start=1):
for observation_space in observation_spaces:
session.submit(
_observations_worker,
observation_space=observation_space,
num_measurements_per_benchmark=num_measurements_per_benchmark,
benchmarks=benchmarks_chunk,
env_name=env_name,
outfile=outdir / f".observation:{observation_space}-shard-{i:02d}.txt",
)
def _observations_worker(
observation_space: str,
num_measurements_per_benchmark: int,
benchmarks: List[str],
env_name: str,
outfile: Path,
):
with compiler_gym.make(env_name) as env:
with open(outfile, "w") as f:
for benchmark in benchmarks:
env.reset(benchmark=benchmark)
if "llvm-" in env_name and observation_space == "Runtime":
if not env.observation.IsRunnable():
return []
env.runtime_observation_count = 1
env.runtime_warmups_count = 0
runtimes = get_runtimes(
lambda: env.observation[observation_space],
num_measurements_per_benchmark,
)
print("\n".join(f"{x:.8f}" for x in runtimes), file=f, flush=True)
@app.command()
def run(
env: str = "llvm-autophase-ic-v0",
observation_spaces: List[str] = [
"Ir",
"InstCount",
"Autophase",
"Inst2vec",
"Programl",
"IrInstructionCount",
"ObjectTextSizeBytes",
"Runtime",
],
n: int = int(1e6),
num_benchmarks: int = int(1e3),
j: int = cpu_count(),
outdir: Optional[Path] = None,
seed: int = 0xCC,
):
"""Run all of the environment benchmarks."""
executor = Executor(type="local", cpus=j)
outdir = Path(outdir or create_user_logs_dir("op_benchmarks"))
benchmarks = get_benchmarks(
env_name=env, n=min(n, num_benchmarks), seed=seed, outdir=outdir
)
with executor.get_executor(logs_dir=outdir) as session:
_init(env_name=env, session=session, j=j, n=n, outdir=outdir)
_reset(
benchmarks=benchmarks,
n=n,
outdir=outdir,
j=j,
env_name=env,
session=session,
)
_step(
n=n,
j=j,
benchmarks=benchmarks,
env_name=env,
seed=seed,
outdir=outdir,
session=session,
)
_observations(
n=n,
j=j,
benchmarks=benchmarks,
env_name=env,
outdir=outdir,
session=session,
observation_spaces=observation_spaces,
)
info([outdir])
def _aggregate(
root: Path, files: List[str], outfile: Path
) -> Optional[Dict[str, float]]:
if not files:
return
    if not outfile.is_file():
runtimes = []
for file in files:
with open(root / file) as f:
runtimes += [float(x.split()[0]) for x in f if x.strip()]
if not runtimes:
return
runtimes = np.sort(runtimes)
with open(outfile, "w") as f:
print("\n".join(map(str, sorted(runtimes))), file=f)
else:
with open(outfile) as f:
runtimes = np.array(list(map(float, f)))
return {
"n": len(runtimes),
"p50": np.median(runtimes),
"p99": np.percentile(runtimes, 99),
"mean": np.mean(runtimes),
}
@app.command()
def info(outdirs: List[Path] = []):
"""Aggregate logs from previous runs."""
outdirs = outdirs or ["~/logs/compiler_gym/op_benchmarks"]
rows = []
for outdir in outdirs:
for root, _, files in os.walk(Path(outdir).expanduser()):
root = Path(root)
timestamp = "-".join([root.parent.name, root.name])
shards = defaultdict(list)
for file in files:
match = re.match(r"\.([:\w-]+)-shard-\d+\.txt", file)
if match:
shards[match.group(1)].append(file)
for shard, files in shards.items():
agg = _aggregate(root, files, root / f"{shard}.txt")
if agg:
rows.append(
{
"timestamp": timestamp,
"op": shard,
**agg,
}
)
df = pd.DataFrame(rows)
df.sort_values(["op", "timestamp"], inplace=True)
# Scale to milliseconds.
df["p50"] *= 1000
df["p99"] *= 1000
df["mean"] *= 1000
df = df.rename(columns={"p50": "p50 (ms)", "p99": "p99 (ms)", "mean": "mean (ms)"})
print(tabulate(df, headers="keys", showindex=False, tablefmt="psql"))
if __name__ == "__main__":
init_logging()
app()
|
CompilerGym-development
|
examples/op_benchmarks.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Run a parallelized exhaustive search of an action space.
All possible combinations of actions up to a finite limit are
evaluated, but partial sequences of actions that end up in the same
state are deduplicated, sometimes dramatically reducing the size of
the search space. Can also be configured to do a beam search.
Example usage:
$ python explore.py --env=llvm-ic-v0 --benchmark=cbench-v1/dijkstra \
--episode_length=10 --actions=-simplifycfg,-instcombine,-mem2reg,-newgvn
Use --help to list the configurable options.
"""
import hashlib
import math
from enum import IntEnum
from heapq import nlargest
from multiprocessing.pool import ThreadPool
from queue import Queue
from threading import Lock
from time import time
import humanize
from absl import app, flags
import compiler_gym.util.flags.episode_length # noqa Flag definition.
import compiler_gym.util.flags.nproc # noqa Flag definition.
from compiler_gym.util.flags.benchmark_from_flags import benchmark_from_flags
from compiler_gym.util.flags.env_from_flags import env_from_flags
from compiler_gym.wrappers import ConstrainedCommandline
flags.DEFINE_list(
"explore_actions",
[],
"A list of flag names to enumerate. If not provided, all actions are used.",
)
flags.DEFINE_integer(
"topn",
0,
"If positive, explore only the top n states for each sequence length. "
"This is in effect the width of a beam search.",
)
flags.DEFINE_integer(
"show_topn", 3, "Show this many top sequences " "at each sequence length."
)
FLAGS = flags.FLAGS
def make_env():
env = env_from_flags(benchmark=benchmark_from_flags())
if FLAGS.explore_actions:
env = ConstrainedCommandline(env, flags=FLAGS.explore_actions)
return env
# Used to determine if two rewards are equal up to a small
# tolerance. Cannot use math.isclose with default parameters as it
# sets abs_tol to 0, which means that a zero reward will compare
# unequal with e.g. 1e-100, leading to bugs.
def rewards_close(a, b):
return math.isclose(a, b, rel_tol=1e-5, abs_tol=1e-10)
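# Illustrative note (assumed): math.isclose(0.0, 1e-100) is False with the
# default abs_tol=0, whereas rewards_close(0.0, 1e-100) is True because the
# 1e-10 absolute tolerance treats the two rewards as equal.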
NO_EDGE = -1
class Node:
def __init__(self, reward_sum, edge_count):
self.reward_sum = reward_sum
self.edges = [NO_EDGE] * edge_count
self.back_edge = None
# Represents env states as nodes and actions as edges.
class StateGraph:
def __init__(self, edges_per_node):
self._edges_per_node = edges_per_node
self._nodes = []
self._fingerprint_to_index = dict()
def add_or_find_node(self, fingerprint, reward_sum):
if fingerprint in self._fingerprint_to_index:
node_index = self._fingerprint_to_index[fingerprint]
assert rewards_close(
self._nodes[node_index].reward_sum, reward_sum
), f"{self._nodes[node_index].reward_sum} != {reward_sum}"
return (node_index, False)
node_index = self.node_count()
self._fingerprint_to_index[fingerprint] = node_index
node = Node(reward_sum, self._edges_per_node)
self._nodes.append(node)
return (node_index, True)
def add_edge(self, from_node_index, edge_index, to_node_index):
assert edge_index in range(self._edges_per_node)
assert from_node_index in range(self.node_count())
assert to_node_index in range(self.node_count())
assert self.get_edge(from_node_index, edge_index) == NO_EDGE
from_node = self._nodes[from_node_index]
from_node.edges[edge_index] = to_node_index
to_node = self._nodes[to_node_index]
if to_node.back_edge is None:
to_node.back_edge = (from_node_index, edge_index)
def get_edge(self, from_node_index, edge_index):
assert edge_index < self._edges_per_node
assert from_node_index < self.node_count()
return self._nodes[from_node_index].edges[edge_index]
# Returns a path back to node 0. For this to work, edges have to
# be added in a order so that the subgraph consisting of the first
# in-coming edge to each node defines a tree with node 0 as the
# root.
def node_path(self, node_index):
assert node_index < self.node_count()
path = []
while node_index != 0:
back_edge = self._nodes[node_index].back_edge
assert back_edge is not None
(prior_node_index, edge_index) = back_edge
node_index = prior_node_index
path.append(edge_index)
path.reverse()
return path
def reward_sum(self, node_index):
return self._nodes[node_index].reward_sum
def node_count(self):
return len(self._nodes)
def env_to_fingerprint(env):
# TODO: There is some sort of state in the env that is not
# captured by this. Figure out what it is and fix it. Also
# consider adding a fingerprint observation to env.
if False:
# BitcodeFile is slower, so using Ir instead.
path = env.observation["BitcodeFile"]
with open(path, "rb") as f:
data = f.read()
else:
data = env.observation["Ir"].encode()
return hashlib.sha256(data).digest()
def compute_edges(env, sequence):
    edges = []
    for action in range(env.action_space.n):
        env.reset()
        reward_sum = 0.0
        # Replay the existing sequence and then apply the candidate action.
        for step_action in sequence + [action]:
            _, reward, _, _ = env.step(step_action)
            reward_sum += reward
        edges.append((env_to_fingerprint(env), reward_sum))
    return edges
class NodeTypeStats:
"""Keeps statistics on the exploration."""
class EdgeType(IntEnum):
unpruned = 0
self_pruned = 1
cross_pruned = 2
back_pruned = 3
dropped = 4
def __init__(self, action_count):
self._action_count = action_count
self._depth = 0
self._depth_start_time_in_seconds = time()
# Nodes added at this depth.
self._depth_stats = [0] * len(self.EdgeType)
# Nodes added across all depths.
self._all_stats = [0] * len(self.EdgeType)
# The full number of nodes that is theoretically in the graph
# at this depth if no nodes had been pruned anywhere.
self._full_depth_stats = [0] * len(self.EdgeType)
# The full number of nodes across depths if no nodes had been
# pruned anywhere.
self._full_all_stats = [0] * len(self.EdgeType)
def start_depth_and_print(self, episode_length):
self._depth += 1
print(
f"*** Processing depth {self._depth} of {episode_length} with",
f"{self._depth_stats[self.EdgeType.unpruned]} states and",
f"{self._action_count} actions.\n",
)
self._depth_start_time_in_seconds = time()
self._full_depth_stats[self.EdgeType.unpruned] = 0
for e in self.EdgeType:
self._depth_stats[e] = 0
if e != self.EdgeType.unpruned:
# The pruned nodes at the prior depth would have
# turned into this many more nodes at the next depth.
self._full_depth_stats[e] *= self._action_count
self._full_all_stats[e] += self._full_depth_stats[e]
# At a certain point these large numbers just clutter up
# the display.
if self._full_all_stats[e] > 1e9:
self._full_all_stats[e] = float("inf")
if self._full_depth_stats[e] > 1e9:
self._full_depth_stats[e] = float("inf")
def note_edge(self, edge_type):
self._adjust_edges(edge_type, 1)
def drop_unpruned_edge(self):
self._adjust_edges(self.EdgeType.unpruned, -1)
self._adjust_edges(self.EdgeType.dropped, 1)
def _adjust_edges(self, edge_type, adjustment):
self._depth_stats[edge_type] += adjustment
self._all_stats[edge_type] += adjustment
self._full_depth_stats[edge_type] += adjustment
self._full_all_stats[edge_type] += adjustment
def end_depth_and_print(self, env, graph, best_node):
align = 16
def number_list(stats):
return "".join(
[humanize.intcomma(n).rjust(align) for n in stats + [sum(stats)]]
)
legend = [e.name for e in self.EdgeType] + ["sum"]
print(
" ",
"".join([header.rjust(align) for header in legend]),
)
print(" added this depth", number_list(self._depth_stats))
print(" full nodes this depth", number_list(self._full_depth_stats))
print(" added across depths", number_list(self._all_stats))
print("full added across depths", number_list(self._full_all_stats))
# If this does not match then something was over or under
# counted. Based on x^0 + x^1 ... + x^n = (x^(n+1) - 1) / (x -
# 1), which is the number of nodes in a complete tree where
# every interior node has x children. If the numbers are too
# large then there may not be equality due to rounding, so do
# not check this in that case.
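        # Worked example (illustrative): with 3 actions and depth 2, the full
        # tree has 1 + 3 + 9 = 13 nodes and (3**3 - 1) / (3 - 1) = 13.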
full_all_sum = sum(self._full_all_stats)
assert full_all_sum > 1e9 or full_all_sum == (
pow(env.action_space.n, self._depth + 1) - 1
) / (env.action_space.n - 1)
depth_time_in_seconds = time() - self._depth_start_time_in_seconds
print()
print(f"Time taken for depth: {depth_time_in_seconds:0.2f} s")
if FLAGS.show_topn >= 1:
print(f"Top {FLAGS.show_topn} sequence(s):")
for n in nlargest(
FLAGS.show_topn,
range(graph.node_count()),
key=lambda n: graph.reward_sum(n),
):
print(
f" {graph.reward_sum(n):0.4f} ",
", ".join(env.action_space.flags[f] for f in graph.node_path(n)),
)
print("\n")
# Compute an action graph and use it to find the optimal sequence
# within episode_length actions. Uses as many threads as there are
# elements in envs.
def compute_action_graph(pool, envs, episode_length):
assert len(envs) >= 1
env_queue = Queue()
for env in envs:
env_queue.put(env)
stats = NodeTypeStats(action_count=env.action_space.n)
graph = StateGraph(edges_per_node=env.action_space.n)
# Add the empty sequence of actions as the starting state.
envs[0].reset()
best_node, _ = graph.add_or_find_node(env_to_fingerprint(envs[0]), 0.0)
stats.note_edge(NodeTypeStats.EdgeType.unpruned)
# A node is defined by a sequence of actions that end up in that
# node. Nodes are deduplicated based on a hash (fingerprint) of
# their state, so that if two sequences of actions end up with the
    # same state then they will also converge on the same node in the
# graph.
#
# The outer loop goes through sequences by the depth/length of the
# sequence, first all sequences of one element, then all sequences
# of two elements and so on. This partition of the nodes creates
# multiple kinds of edges:
#
# Back edges. Edges pointing to the same or lower depth. These
# edges represent sequences that are equivalent to a shorter
# sequence. These edges are pruned as no new nodes can be
# discovered from them and they cannot participate in a minimal
# best sequence as they are not minimal. Self edges are excluded
# from this definition.
#
# Self edges. Loops, i.e. edges that go from a node to
# itself. This represents actions that do not change the
# state. These are pruned for the same reason as back edges and
# have their own category as they are a very common case.
#
# Cross edges. These are edges that go forward to the next depth
# but there is already another edge that goes to the same
# node. The edge itself is not pruned from the graph, as it can
# be part of a minimal optimal sequence, but since the
# destination node already exists there is no new node introduced
# by a cross edge, so you could consider that the hypothetical
# distinct node that this edge might have created is pruned
# through deduplication.
#
# Unpruned edges. These are edges that go forward to the next
# depth and there is not yet any other edge that goes to that
# node. This kind of edge causes a new node to be created that
# will be expanded at the next depth.
#
# Dropped. These are otherwise unpruned edges that end up
# getting dropped due to a limit on how many states to explore
# per depth.
#
# If there are N nodes, then they are indexed as [0, N) in order
# of insertion. New nodes are added to the graph when an unpruned
# edge is found that points to them. A node is expanded when its
# edges are computed and added to the graph, potentially causing
# new nodes to be added.
#
# The nodes are partitioned into 3 ranges:
#
# [0; depth_start) These nodes are already expanded and done with.
#
# [depth_start; next_depth_start) These are the nodes at the
# current depth that will be expanded to create nodes at the next
# depth.
#
# [next_depth_start, N) These are the nodes that have been added
# at this iteration of the loop to be expanded at the next
# iteration of the loop.
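    # Illustrative example (assumed): applying -mem2reg twice usually leaves
    # the IR unchanged after the first application, so the second application
    # forms a self edge and is pruned; two different orderings of -mem2reg and
    # -sroa that happen to produce identical IR collapse into one node, with
    # the later-discovered edge counted as a cross edge.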
dropped = set()
next_depth_start = 0
for depth in range(episode_length):
stats.start_depth_and_print(episode_length)
depth_start = next_depth_start
next_depth_start = graph.node_count()
if depth_start == next_depth_start:
print("There are no more states to process, stopping early.")
break
lock = Lock()
def expand_node(node_index):
with lock:
if node_index in dropped:
return node_index, ()
path = graph.node_path(node_index)
# ThreadPool.map doesn't support giving each thread its
# own env, so we use a queue instead. Each thread gets
# some env and has exclusive use of it while it has it.
local_env = env_queue.get()
edges = compute_edges(local_env, path)
env_queue.put(local_env)
return node_index, edges
undropped = [
n for n in range(depth_start, next_depth_start) if n not in dropped
]
computed_edges = pool.map(expand_node, undropped)
# This could easily be done also with a lock as above, saving
# the memory for computed_edges, and when done that way, the
# lock is not at all contended. However, there is currently an
# issue with non-determinism with multithreading and so it's
# preferable for right now to make the node ordering
# deterministic, so as to not add to the non-determinism, even
# though the node ordering shouldn't matter.
for node_index, edges in computed_edges:
            for i, (fingerprint, reward_sum) in enumerate(edges):
target_node_index, inserted = graph.add_or_find_node(
fingerprint, reward_sum
)
if target_node_index == node_index: # self edge
assert not inserted
stats.note_edge(NodeTypeStats.EdgeType.self_pruned)
continue
if target_node_index < next_depth_start: # back edge
assert not inserted
stats.note_edge(NodeTypeStats.EdgeType.back_pruned)
continue
if not inserted: # cross edge
stats.note_edge(NodeTypeStats.EdgeType.cross_pruned)
else: # unpruned - node was added
stats.note_edge(NodeTypeStats.EdgeType.unpruned)
graph.add_edge(node_index, i, target_node_index)
best_reward = graph.reward_sum(best_node)
if reward_sum > best_reward and not rewards_close(
best_reward, reward_sum
):
best_node = target_node_index
if FLAGS.topn > 0:
top_nodes = list(range(next_depth_start, graph.node_count()))
top_nodes.sort(key=lambda n: graph.reward_sum(n), reverse=True)
for n in top_nodes[FLAGS.topn :]:
dropped.add(n)
stats.drop_unpruned_edge()
stats.end_depth_and_print(envs[0], graph, best_node)
def main(argv):
"""Main entry point."""
argv = FLAGS(argv)
if len(argv) != 1:
raise app.UsageError(f"Unknown command line arguments: {argv[1:]}")
print(f"Running with {FLAGS.nproc} threads.")
assert FLAGS.nproc >= 1
envs = []
try:
for _ in range(FLAGS.nproc):
envs.append(make_env())
with ThreadPool(len(envs)) as pool:
compute_action_graph(pool, envs, episode_length=FLAGS.episode_length)
finally:
for env in envs:
env.close()
if __name__ == "__main__":
app.run(main)
|
CompilerGym-development
|
examples/explore.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Perform a random walk of the action space of a CompilerGym environment.
Example usage:
# Run a random walk on cBench example program using instruction count reward.
$ python3 random_walk.py --env=llvm-v0 --step_min=100 --step_max=100 \
--benchmark=cbench-v1/dijkstra --reward=IrInstructionCount
"""
import random
import humanize
from absl import app, flags
from compiler_gym.envs import CompilerEnv
from compiler_gym.util.flags.benchmark_from_flags import benchmark_from_flags
from compiler_gym.util.flags.env_from_flags import env_from_flags
from compiler_gym.util.shell_format import emph
from compiler_gym.util.timer import Timer
flags.DEFINE_integer(
"step_min",
12,
"The minimum number of steps. Fewer steps may be performed if the "
"environment ends the episode early.",
)
flags.DEFINE_integer("step_max", 256, "The maximum number of steps.")
FLAGS = flags.FLAGS
def run_random_walk(env: CompilerEnv, step_count: int) -> None:
"""Perform a random walk of the action space.
:param env: The environment to use.
:param step_count: The number of steps to run. This value is an upper bound -
fewer steps will be performed if any of the actions lead the
environment to end the episode.
"""
rewards = []
step_num = 0
with Timer() as episode_time:
env.reset()
for step_num in range(1, step_count + 1):
action_index = env.action_space.sample()
with Timer() as step_time:
observation, reward, done, info = env.step(action_index)
print(
f"\n=== Step {humanize.intcomma(step_num)} ===\n"
f"Action: {env.action_space.names[action_index]} "
f"(changed={not info.get('action_had_no_effect')})\n"
f"Reward: {reward}"
)
rewards.append(reward)
if env.observation_space:
print(f"Observation:\n{observation}")
print(f"Step time: {step_time}")
if done:
print("Episode ended by environment")
break
def reward_percentage(reward, rewards):
if sum(rewards) == 0:
return 0
percentage = reward / sum(rewards)
return emph(f"{'+' if percentage >= 0 else ''}{percentage:.2%}")
print(
f"\nCompleted {emph(humanize.intcomma(step_num))} steps in {episode_time} "
f"({step_num / episode_time.time:.1f} steps / sec).\n"
f"Total reward: {sum(rewards)}\n"
f"Max reward: {max(rewards)} ({reward_percentage(max(rewards), rewards)} "
f"at step {humanize.intcomma(rewards.index(max(rewards)) + 1)})"
)
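# Illustrative usage (assumed, not part of the original script):
#   with env_from_flags(benchmark=benchmark_from_flags()) as env:
#       run_random_walk(env=env, step_count=100)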
def main(argv):
"""Main entry point."""
assert len(argv) == 1, f"Unrecognized flags: {argv[1:]}"
with env_from_flags(benchmark=benchmark_from_flags()) as env:
step_min = min(FLAGS.step_min, FLAGS.step_max)
step_max = max(FLAGS.step_min, FLAGS.step_max)
run_random_walk(env=env, step_count=random.randint(step_min, step_max))
if __name__ == "__main__":
app.run(main)
|
CompilerGym-development
|
examples/random_walk.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the unrolling CompilerGym service example."""
import subprocess
from pathlib import Path
import gym
import numpy as np
import pytest
import compiler_gym
import examples.example_unrolling_service as unrolling_service
from compiler_gym.envs import CompilerEnv
from compiler_gym.errors import SessionNotFound
from compiler_gym.spaces import Box, NamedDiscrete, Scalar, Sequence
from compiler_gym.util.commands import Popen
from tests.test_main import main
@pytest.fixture(scope="function")
def env() -> CompilerEnv:
"""Text fixture that yields an environment."""
with gym.make("unrolling-py-v0") as env_:
yield env_
@pytest.fixture(scope="module")
def bin() -> Path:
return unrolling_service.UNROLLING_PY_SERVICE_BINARY
def test_invalid_arguments(bin: Path):
"""Test that running the binary with unrecognized arguments is an error."""
def run(cmd):
with Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
) as p:
stdout, stderr = p.communicate(timeout=10)
return p.returncode, stdout, stderr
returncode, _, stderr = run([str(bin), "foobar"])
assert "ERROR:" in stderr
assert "'foobar'" in stderr
assert returncode == 1
returncode, _, stderr = run([str(bin), "--foobar"])
# C++ and python flag parsing library emit slightly different error
# messages.
assert "ERROR:" in stderr or "FATAL" in stderr
assert "'foobar'" in stderr
assert returncode == 1
def test_versions(env: CompilerEnv):
"""Tests the GetVersion() RPC endpoint."""
assert env.version == compiler_gym.__version__
assert env.compiler_version == "1.0.0"
def test_action_space(env: CompilerEnv):
"""Test that the environment reports the service's action spaces."""
assert env.action_spaces == [
NamedDiscrete(
name="unrolling",
items=[
"-loop-unroll -unroll-count=2",
"-loop-unroll -unroll-count=4",
"-loop-unroll -unroll-count=8",
],
)
]
def test_observation_spaces(env: CompilerEnv):
"""Test that the environment reports the service's observation spaces."""
env.reset()
assert env.observation.spaces.keys() == {"ir", "features", "runtime", "size"}
assert env.observation.spaces["ir"].space == Sequence(
name="ir",
size_range=(0, np.iinfo(np.int64).max),
dtype=str,
opaque_data_format=None,
)
assert env.observation.spaces["features"].space == Box(
name="features", shape=(3,), low=0, high=100000, dtype=int
)
assert env.observation.spaces["runtime"].space == Scalar(
name="runtime", min=0, max=np.inf, dtype=float
)
assert env.observation.spaces["size"].space == Scalar(
name="size", min=0, max=np.inf, dtype=float
)
def test_reward_spaces(env: CompilerEnv):
"""Test that the environment reports the service's reward spaces."""
env.reset()
assert env.reward.spaces.keys() == {"runtime", "size"}
def test_step_before_reset(env: CompilerEnv):
"""Taking a step() before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
env.step(0)
def test_observation_before_reset(env: CompilerEnv):
"""Taking an observation before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
_ = env.observation["ir"]
def test_reward_before_reset(env: CompilerEnv):
"""Taking a reward before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
_ = env.reward["runtime"]
def test_reset_invalid_benchmark(env: CompilerEnv):
"""Test requesting a specific benchmark."""
with pytest.raises(LookupError) as ctx:
env.reset(benchmark="unrolling-v0/foobar")
assert str(ctx.value) == "Unknown program name"
def test_invalid_observation_space(env: CompilerEnv):
"""Test error handling with invalid observation space."""
with pytest.raises(LookupError):
env.observation_space = 100
def test_invalid_reward_space(env: CompilerEnv):
"""Test error handling with invalid reward space."""
with pytest.raises(LookupError):
env.reward_space = 100
def test_double_reset(env: CompilerEnv):
"""Test that reset() can be called twice."""
env.reset()
assert env.in_episode
env.reset()
assert env.in_episode
def test_Step_out_of_range(env: CompilerEnv):
"""Test error handling with an invalid action."""
env.reset()
with pytest.raises(ValueError) as ctx:
env.step(100)
assert str(ctx.value) == "Out-of-range"
def test_default_ir_observation(env: CompilerEnv):
"""Test default observation space."""
env.observation_space = "ir"
observation = env.reset()
assert len(observation) > 0
observation, reward, done, info = env.step(0)
assert not done, info
assert len(observation) > 0
assert reward is None
def test_default_features_observation(env: CompilerEnv):
"""Test default observation space."""
env.observation_space = "features"
observation = env.reset()
assert isinstance(observation, np.ndarray)
assert observation.shape == (3,)
assert observation.dtype == np.int64
assert all(obs >= 0 for obs in observation.tolist())
def test_default_reward(env: CompilerEnv):
"""Test default reward space."""
env.reward_space = "runtime"
env.reset()
observation, reward, done, info = env.step(0)
assert not done, info
assert observation is None
assert reward is not None
def test_observations(env: CompilerEnv):
"""Test observation spaces."""
env.reset()
assert len(env.observation["ir"]) > 0
np.testing.assert_array_less([-1, -1, -1], env.observation["features"])
def test_rewards(env: CompilerEnv):
"""Test reward spaces."""
env.reset()
assert env.reward["runtime"] is not None
def test_benchmarks(env: CompilerEnv):
assert list(env.datasets.benchmark_uris()) == [
"benchmark://unrolling-v0/offsets1",
"benchmark://unrolling-v0/conv2d",
]
def test_fork(env: CompilerEnv):
env.reset()
env.step(0)
env.step(1)
other_env = env.fork()
try:
assert env.benchmark == other_env.benchmark
assert other_env.actions == [0, 1]
finally:
other_env.close()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
examples/example_unrolling_service/env_tests.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the unrolling CompilerGym service example."""
import os
import subprocess
import sys
from getpass import getuser
from pathlib import Path
from typing import Iterable, List, Optional
import gym
import numpy as np
import pytest
import compiler_gym
from compiler_gym.datasets import Benchmark, Dataset
from compiler_gym.datasets.uri import BenchmarkUri
from compiler_gym.envs import CompilerEnv
from compiler_gym.envs.llvm.llvm_benchmark import get_system_library_flags
from compiler_gym.errors import SessionNotFound
from compiler_gym.spaces import Box, NamedDiscrete, Reward, Scalar, Sequence
from compiler_gym.third_party import llvm
from compiler_gym.util import debug_util as dbg
from compiler_gym.util.commands import Popen
from compiler_gym.util.registration import register
UNROLLING_PY_SERVICE_BINARY: Path = Path(
"example_unrolling_service/service_py/example_service.py"
)
assert UNROLLING_PY_SERVICE_BINARY.is_file(), "Service script not found"
BENCHMARKS_PATH: Path = Path("example_unrolling_service/benchmarks")
NEURO_VECTORIZER_HEADER: Path = Path(
"../compiler_gym/third_party/neuro-vectorizer/header.h"
)
class RuntimeReward(Reward):
"""An example reward that uses changes in the "runtime" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
name="runtime",
observation_spaces=["runtime"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.baseline_runtime = 0
def reset(self, benchmark: str, observation_view):
del benchmark # unused
self.baseline_runtime = observation_view["runtime"]
def update(self, action, observations, observation_view):
del action # unused
del observation_view # unused
return float(self.baseline_runtime - observations[0]) / self.baseline_runtime
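    # Worked example (illustrative): with a baseline runtime of 10.0 seconds
    # and a new "runtime" observation of 8.0 seconds, update() returns
    # (10.0 - 8.0) / 10.0 = 0.2, i.e. a 20% improvement over the baseline.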
class SizeReward(Reward):
"""An example reward that uses changes in the "size" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
name="size",
observation_spaces=["size"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.baseline_size = 0
def reset(self, benchmark: str, observation_view):
del benchmark # unused
self.baseline_runtime = observation_view["size"]
def update(self, action, observations, observation_view):
del action # unused
del observation_view # unused
return float(self.baseline_size - observations[0]) / self.baseline_size
class UnrollingDataset(Dataset):
def __init__(self, *args, **kwargs):
super().__init__(
name="benchmark://unrolling-v2",
license="MIT",
description="Unrolling example dataset",
)
self._benchmarks = {
"/offsets1": Benchmark.from_file_contents(
"benchmark://unrolling-v2/offsets1",
self.preprocess(BENCHMARKS_PATH / "offsets1.c"),
),
"/conv2d": Benchmark.from_file_contents(
"benchmark://unrolling-v2/conv2d",
self.preprocess(BENCHMARKS_PATH / "conv2d.c"),
),
}
@staticmethod
def preprocess(src: Path) -> bytes:
"""Front a C source through the compiler frontend."""
# TODO(github.com/facebookresearch/CompilerGym/issues/325): We can skip
# this pre-processing, or do it on the service side, once support for
# multi-file benchmarks lands.
cmd = [
str(llvm.clang_path()),
"-E",
"-o",
"-",
"-I",
str(NEURO_VECTORIZER_HEADER.parent),
src,
]
cmd += get_system_library_flags()
return subprocess.check_output(
cmd,
timeout=300,
)
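        # Illustrative note (assumed): the command is roughly
        #   clang -E -o - -I <dir containing header.h> <src> <system flags>
        # so only the preprocessor runs and the expanded source is read from
        # stdout.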
def benchmark_uris(self) -> Iterable[str]:
yield from (f"benchmark://unrolling-v2{k}" for k in self._benchmarks.keys())
def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark:
if uri.path in self._benchmarks:
return self._benchmarks[uri.path]
else:
raise LookupError("Unknown program name")
# Register the unrolling example service on module import. After importing this module,
# the unrolling-py-v2 environment will be available to gym.make(...).
register(
id="unrolling-py-v2",
entry_point="compiler_gym.service.client_service_compiler_env:ClientServiceCompilerEnv",
kwargs={
"service": UNROLLING_PY_SERVICE_BINARY,
"rewards": [RuntimeReward(), SizeReward()],
"datasets": [UnrollingDataset()],
},
)
@pytest.fixture(scope="function")
def env() -> CompilerEnv:
"""Text fixture that yields an environment."""
with gym.make("unrolling-py-v2") as env_:
yield env_
@pytest.fixture(scope="module")
def bin() -> Path:
return UNROLLING_PY_SERVICE_BINARY
def test_invalid_arguments(bin: Path):
"""Test that running the binary with unrecognized arguments is an error."""
def run(cmd):
with Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
) as p:
stdout, stderr = p.communicate(timeout=10)
return p.returncode, stdout, stderr
returncode, _, stderr = run([str(bin), "foobar"])
assert "ERROR:" in stderr
assert "'foobar'" in stderr
assert returncode == 1
returncode, _, stderr = run([str(bin), "--foobar"])
# C++ and python flag parsing library emit slightly different error
# messages.
assert "ERROR:" in stderr or "FATAL" in stderr
assert "'foobar'" in stderr
assert returncode == 1
def test_versions(env: CompilerEnv):
"""Tests the GetVersion() RPC endpoint."""
assert env.version == compiler_gym.__version__
assert env.compiler_version == "1.0.0"
def test_action_space(env: CompilerEnv):
"""Test that the environment reports the service's action spaces."""
assert env.action_spaces == [
NamedDiscrete(
name="unrolling",
items=[
"-loop-unroll -unroll-count=2",
"-loop-unroll -unroll-count=4",
"-loop-unroll -unroll-count=8",
],
)
]
def test_observation_spaces(env: CompilerEnv):
"""Test that the environment reports the service's observation spaces."""
env.reset()
assert env.observation.spaces.keys() == {"ir", "features", "runtime", "size"}
assert env.observation.spaces["ir"].space == Sequence(
name="ir",
size_range=(0, np.iinfo(np.int64).max),
dtype=str,
opaque_data_format=None,
)
assert env.observation.spaces["features"].space == Box(
name="features", shape=(3,), low=0, high=100000, dtype=int
)
assert env.observation.spaces["runtime"].space == Scalar(
name="runtime", min=0, max=np.inf, dtype=float
)
assert env.observation.spaces["size"].space == Scalar(
name="size", min=0, max=np.inf, dtype=float
)
def test_reward_spaces(env: CompilerEnv):
"""Test that the environment reports the service's reward spaces."""
env.reset()
assert env.reward.spaces.keys() == {"runtime", "size"}
def test_step_before_reset(env: CompilerEnv):
"""Taking a step() before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
env.step(0)
def test_observation_before_reset(env: CompilerEnv):
"""Taking an observation before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
_ = env.observation["ir"]
def test_reward_before_reset(env: CompilerEnv):
"""Taking a reward before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
_ = env.reward["runtime"]
def test_reset_invalid_benchmark(env: CompilerEnv):
"""Test requesting a specific benchmark."""
with pytest.raises(LookupError) as ctx:
env.reset(benchmark="unrolling-v2/foobar")
assert str(ctx.value) == "Unknown program name"
def test_invalid_observation_space(env: CompilerEnv):
"""Test error handling with invalid observation space."""
with pytest.raises(LookupError):
env.observation_space = 100
def test_invalid_reward_space(env: CompilerEnv):
"""Test error handling with invalid reward space."""
with pytest.raises(LookupError):
env.reward_space = 100
def test_double_reset(env: CompilerEnv):
"""Test that reset() can be called twice."""
env.reset()
assert env.in_episode
env.reset()
assert env.in_episode
def test_Step_out_of_range(env: CompilerEnv):
"""Test error handling with an invalid action."""
env.reset()
with pytest.raises(ValueError) as ctx:
env.step(100)
assert str(ctx.value) == "Out-of-range"
def test_default_ir_observation(env: CompilerEnv):
"""Test default observation space."""
env.observation_space = "ir"
observation = env.reset()
assert len(observation) > 0
observation, reward, done, info = env.step(0)
assert not done, info
assert len(observation) > 0
assert reward is None
def test_default_features_observation(env: CompilerEnv):
"""Test default observation space."""
env.observation_space = "features"
observation = env.reset()
assert isinstance(observation, np.ndarray)
assert observation.shape == (3,)
assert observation.dtype == np.int64
assert all(obs >= 0 for obs in observation.tolist())
def test_default_reward(env: CompilerEnv):
"""Test default reward space."""
env.reward_space = "runtime"
env.reset()
observation, reward, done, info = env.step(0)
assert not done, info
assert observation is None
assert reward is not None
def test_observations(env: CompilerEnv):
"""Test observation spaces."""
env.reset()
assert len(env.observation["ir"]) > 0
np.testing.assert_array_less([-1, -1, -1], env.observation["features"])
def test_rewards(env: CompilerEnv):
"""Test reward spaces."""
env.reset()
assert env.reward["runtime"] is not None
def test_benchmarks(env: CompilerEnv):
assert list(env.datasets.benchmark_uris()) == [
"benchmark://unrolling-v2/offsets1",
"benchmark://unrolling-v2/conv2d",
]
def test_fork(env: CompilerEnv):
env.reset()
env.step(0)
env.step(1)
other_env = env.fork()
try:
assert env.benchmark == other_env.benchmark
assert other_env.actions == [0, 1]
finally:
other_env.close()
# Copied from CompilerGym/tests/test_main.py because importing it here raised errors.
def main(extra_pytest_args: Optional[List[str]] = None, debug_level: int = 1):
dbg.set_debug_level(debug_level)
# Keep test data isolated from user data.
os.environ[
"COMPILER_GYM_SITE_DATA"
] = f"/tmp/compiler_gym_{getuser()}/tests/site_data"
os.environ["COMPILER_GYM_CACHE"] = f"/tmp/compiler_gym_{getuser()}/tests/cache"
pytest_args = sys.argv + [
# Run pytest verbosely to print out test names to provide context in
# case of failures.
"-vv",
# Disable "Module already imported" warnings. See:
# https://docs.pytest.org/en/latest/how-to/usage.html#calling-pytest-from-python-code
"-W",
"ignore:Module already imported:pytest.PytestWarning",
# Disable noisy "Flaky tests passed" messages.
"--no-success-flaky-report",
]
# Support for sharding. If a py_test target has the shard_count attribute
# set (in the range [1,50]), then the pytest-shard module is used to divide
# the tests among the shards. See https://pypi.org/project/pytest-shard/
sharded_test = os.environ.get("TEST_TOTAL_SHARDS")
if sharded_test:
num_shards = int(os.environ["TEST_TOTAL_SHARDS"])
shard_index = int(os.environ["TEST_SHARD_INDEX"])
pytest_args += [f"--shard-id={shard_index}", f"--num-shards={num_shards}"]
else:
pytest_args += ["-p", "no:pytest-shard"]
pytest_args += extra_pytest_args or []
returncode = pytest.main(pytest_args)
# By default pytest will fail with an error if no tests are collected.
# Disable that behavior here (with a warning) since there are legitimate
# cases where we may want to run a test file with no tests in it. For
# example, when running on a continuous integration service where all the
# tests are marked with the @skip_on_ci decorator.
if returncode == pytest.ExitCode.NO_TESTS_COLLECTED.value:
print(
"WARNING: The test suite was empty. Is that intended?",
file=sys.stderr,
)
returncode = 0
sys.exit(returncode)
if __name__ == "__main__":
main(
extra_pytest_args=[
"-W",
"ignore::UserWarning",
]
)
|
CompilerGym-development
|
examples/example_unrolling_service/env_without_bazel_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Smoke test for examples/example_unrolling_service/example_without_bazel.py"""
from example_unrolling_service.example_without_bazel import main
from flaky import flaky
@flaky
def test_example_without_bazel():
main()
|
CompilerGym-development
|
examples/example_unrolling_service/example_without_bazel_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This module defines and registers the example gym environments."""
import os
import subprocess
from pathlib import Path
from typing import Iterable
from compiler_gym.datasets import Benchmark, Dataset
from compiler_gym.datasets.uri import BenchmarkUri
from compiler_gym.envs.llvm.llvm_benchmark import get_system_library_flags
from compiler_gym.spaces import Reward
from compiler_gym.third_party import llvm
from compiler_gym.util.registration import register
from compiler_gym.util.runfiles_path import runfiles_path
UNROLLING_PY_SERVICE_BINARY: Path = runfiles_path(
"examples/example_unrolling_service/service_py/example-unrolling-service-py"
)
BENCHMARKS_PATH: Path = runfiles_path("examples/example_unrolling_service/benchmarks")
if not os.path.exists(BENCHMARKS_PATH):
BENCHMARKS_PATH = Path("example_unrolling_service/benchmarks")
NEURO_VECTORIZER_HEADER: Path = runfiles_path(
"compiler_gym/third_party/neuro-vectorizer/header.h"
)
if not os.path.exists(NEURO_VECTORIZER_HEADER):
NEURO_VECTORIZER_HEADER: Path = Path(
"../compiler_gym/third_party/neuro-vectorizer/header.h"
)
class RuntimeReward(Reward):
"""An example reward that uses changes in the "runtime" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
name="runtime",
observation_spaces=["runtime"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.baseline_runtime = 0
def reset(self, benchmark: str, observation_view):
del benchmark # unused
self.baseline_runtime = observation_view["runtime"]
def update(self, action, observations, observation_view):
del action # unused
del observation_view # unused
return float(self.baseline_runtime - observations[0]) / self.baseline_runtime
class SizeReward(Reward):
"""An example reward that uses changes in the "size" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
name="size",
observation_spaces=["size"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.baseline_size = 0
def reset(self, benchmark: str, observation_view):
del benchmark # unused
self.baseline_runtime = observation_view["size"]
def update(self, action, observations, observation_view):
del action # unused
del observation_view # unused
return float(self.baseline_size - observations[0]) / self.baseline_size
class UnrollingDataset(Dataset):
def __init__(self, *args, **kwargs):
super().__init__(
name="benchmark://unrolling-v0",
license="MIT",
description="Unrolling example dataset",
)
self._benchmarks = {
"/offsets1": Benchmark.from_file_contents(
"benchmark://unrolling-v0/offsets1",
self.preprocess(BENCHMARKS_PATH / "offsets1.c"),
),
"/conv2d": Benchmark.from_file_contents(
"benchmark://unrolling-v0/conv2d",
self.preprocess(BENCHMARKS_PATH / "conv2d.c"),
),
}
@staticmethod
def preprocess(src: Path) -> bytes:
"""Front a C source through the compiler frontend."""
# TODO(github.com/facebookresearch/CompilerGym/issues/325): We can skip
# this pre-processing, or do it on the service side, once support for
# multi-file benchmarks lands.
cmd = [
str(llvm.clang_path()),
"-E",
"-o",
"-",
"-I",
str(NEURO_VECTORIZER_HEADER.parent),
src,
] + get_system_library_flags()
return subprocess.check_output(
cmd,
timeout=300,
)
def benchmark_uris(self) -> Iterable[str]:
yield from (f"benchmark://unrolling-v0{k}" for k in self._benchmarks.keys())
def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark:
if uri.path in self._benchmarks:
return self._benchmarks[uri.path]
else:
raise LookupError("Unknown program name")
# Register the unrolling example service on module import. After importing this module,
# the unrolling-py-v0 environment will be available to gym.make(...).
register(
id="unrolling-py-v0",
entry_point="compiler_gym.service.client_service_compiler_env:ClientServiceCompilerEnv",
kwargs={
"service": UNROLLING_PY_SERVICE_BINARY,
"rewards": [RuntimeReward(), SizeReward()],
"datasets": [UnrollingDataset()],
},
)
|
CompilerGym-development
|
examples/example_unrolling_service/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This script demonstrates how the Python example service without needing
to use the bazel build system.
Prerequisite:
# In the repo's INSTALL.md, follow the 'Building from source using CMake' instructions with `-DCOMPILER_GYM_BUILD_EXAMPLES=ON` added to the `cmake` command
$ cd <path to source directory>/examples
Usage:
    $ python example_unrolling_service/example_without_bazel.py
It is equivalent in behavior to the example.py script in this directory.
"""
import logging
import subprocess
from pathlib import Path
from typing import Iterable
import compiler_gym
from compiler_gym.datasets import Benchmark, Dataset
from compiler_gym.datasets.uri import BenchmarkUri
from compiler_gym.envs.llvm.llvm_benchmark import get_system_library_flags
from compiler_gym.spaces import Reward
from compiler_gym.third_party import llvm
from compiler_gym.util.logging import init_logging
from compiler_gym.util.registration import register
UNROLLING_PY_SERVICE_BINARY: Path = Path(
"example_unrolling_service/service_py/example_service.py"
)
assert UNROLLING_PY_SERVICE_BINARY.is_file(), "Service script not found"
BENCHMARKS_PATH: Path = Path("example_unrolling_service/benchmarks")
NEURO_VECTORIZER_HEADER: Path = Path(
"../compiler_gym/third_party/neuro-vectorizer/header.h"
)
class RuntimeReward(Reward):
"""An example reward that uses changes in the "runtime" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
name="runtime",
observation_spaces=["runtime"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.baseline_runtime = 0
def reset(self, benchmark: str, observation_view):
del benchmark # unused
self.baseline_runtime = observation_view["runtime"]
def update(self, action, observations, observation_view):
del action # unused
del observation_view # unused
return float(self.baseline_runtime - observations[0]) / self.baseline_runtime
class SizeReward(Reward):
"""An example reward that uses changes in the "size" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
name="size",
observation_spaces=["size"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.baseline_size = 0
def reset(self, benchmark: str, observation_view):
del benchmark # unused
        self.baseline_size = observation_view["size"]
def update(self, action, observations, observation_view):
del action # unused
del observation_view # unused
return float(self.baseline_size - observations[0]) / self.baseline_size
class UnrollingDataset(Dataset):
def __init__(self, *args, **kwargs):
super().__init__(
name="benchmark://unrolling-v1",
license="MIT",
description="Unrolling example dataset",
)
self._benchmarks = {
"/offsets1": Benchmark.from_file_contents(
"benchmark://unrolling-v1/offsets1",
self.preprocess(BENCHMARKS_PATH / "offsets1.c"),
),
"/conv2d": Benchmark.from_file_contents(
"benchmark://unrolling-v1/conv2d",
self.preprocess(BENCHMARKS_PATH / "conv2d.c"),
),
}
@staticmethod
def preprocess(src: Path) -> bytes:
"""Front a C source through the compiler frontend."""
# TODO(github.com/facebookresearch/CompilerGym/issues/325): We can skip
# this pre-processing, or do it on the service side, once support for
# multi-file benchmarks lands.
cmd = [
str(llvm.clang_path()),
"-E",
"-o",
"-",
"-I",
str(NEURO_VECTORIZER_HEADER.parent),
src,
]
cmd += get_system_library_flags()
return subprocess.check_output(
cmd,
timeout=300,
)
def benchmark_uris(self) -> Iterable[str]:
yield from (f"benchmark://unrolling-v1{k}" for k in self._benchmarks.keys())
def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark:
if uri.path in self._benchmarks:
return self._benchmarks[uri.path]
else:
raise LookupError("Unknown program name")
# Register the unrolling example service on module import. After importing this module,
# the unrolling-py-v1 environment will be available to gym.make(...).
register(
id="unrolling-py-v1",
entry_point="compiler_gym.service.client_service_compiler_env:ClientServiceCompilerEnv",
kwargs={
"service": UNROLLING_PY_SERVICE_BINARY,
"rewards": [RuntimeReward(), SizeReward()],
"datasets": [UnrollingDataset()],
},
)
def main():
# Use debug verbosity to print out extra logging information.
init_logging(level=logging.DEBUG)
with compiler_gym.make(
"unrolling-py-v1",
benchmark="unrolling-v1/offsets1",
observation_space="features",
reward_space="runtime",
) as env:
compiler_gym.set_debug_level(4) # TODO: check why this has no effect
observation = env.reset()
print("observation: ", observation)
print()
observation, reward, done, info = env.step(env.action_space.sample())
print("observation: ", observation)
print("reward: ", reward)
print("done: ", done)
print("info: ", info)
print()
observation, reward, done, info = env.step(env.action_space.sample())
print("observation: ", observation)
print("reward: ", reward)
print("done: ", done)
print("info: ", info)
# TODO: implement write_bitcode(..) or write_ir(..)
# env.write_bitcode("/tmp/output.bc")
if __name__ == "__main__":
main()
|
CompilerGym-development
|
examples/example_unrolling_service/example_without_bazel.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This script demonstrates how the example services defined in this directory
can be used as gym environments. Usage:
$ bazel run -c opt //examples/example_unrolling_service:example
"""
import compiler_gym
import examples.example_unrolling_service as unrolling_service # noqa Register environments.
with compiler_gym.make(
"unrolling-py-v0",
benchmark="unrolling-v0/offsets1",
observation_space="features",
reward_space="runtime",
) as env:
compiler_gym.set_debug_level(4) # TODO: check why this has no effect
observation = env.reset()
print("observation: ", observation)
print()
observation, reward, done, info = env.step(env.action_space.sample())
print("observation: ", observation)
print("reward: ", reward)
print("done: ", done)
print("info: ", info)
print()
observation, reward, done, info = env.step(env.action_space.sample())
print("observation: ", observation)
print("reward: ", reward)
print("done: ", done)
print("info: ", info)
env.reset()
# TODO: implement write_bitcode(..) or write_ir(..)
# env.write_bitcode("/tmp/output.bc")
|
CompilerGym-development
|
examples/example_unrolling_service/example.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
def extract_statistics_from_ir(ir: str):
stats = {"control_flow": 0, "arithmetic": 0, "memory": 0}
for line in ir.splitlines():
tokens = line.split()
if len(tokens) > 0:
opcode = tokens[0]
if opcode in [
"br",
"call",
"ret",
"switch",
"indirectbr",
"invoke",
"callbr",
"resume",
"catchswitch",
"catchret",
"cleanupret",
"unreachable",
]:
stats["control_flow"] += 1
elif opcode in [
"fneg",
"add",
"fadd",
"sub",
"fsub",
"mul",
"fmul",
"udiv",
"sdiv",
"fdiv",
"urem",
"srem",
"frem",
"shl",
"lshr",
"ashr",
"and",
"or",
"xor",
]:
stats["arithmetic"] += 1
elif opcode in [
"alloca",
"load",
"store",
"fence",
"cmpxchg",
"atomicrmw",
"getelementptr",
]:
stats["memory"] += 1
return stats
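# A minimal self-check sketch, runnable directly. The IR-like lines below are
# hand-written illustrations (not generated by LLVM) and are chosen so that the
# opcode is the first token on each line, which is the only form the simple
# splitter above recognizes.
if __name__ == "__main__":
    example_ir = "\n".join(
        [
            "add i32 %a, %b",
            "store i32 %c, i32* %ptr",
            "ret i32 %c",
        ]
    )
    # Expected: {'control_flow': 1, 'arithmetic': 1, 'memory': 1}
    print(extract_statistics_from_ir(example_ir))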
|
CompilerGym-development
|
examples/example_unrolling_service/service_py/utils.py
|
#! /usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""An example CompilerGym service in python."""
import logging
import os
import shutil
import subprocess
from pathlib import Path
from typing import Optional, Tuple
import numpy as np
import utils
import compiler_gym.third_party.llvm as llvm
from compiler_gym.envs.llvm.llvm_benchmark import get_system_library_flags
from compiler_gym.service import CompilationSession
from compiler_gym.service.proto import (
ActionSpace,
Benchmark,
DoubleRange,
Event,
Int64Box,
Int64Range,
Int64Tensor,
NamedDiscreteSpace,
ObservationSpace,
Space,
StringSpace,
)
from compiler_gym.service.runtime import create_and_run_compiler_gym_service
from compiler_gym.util.commands import run_command
class UnrollingCompilationSession(CompilationSession):
"""Represents an instance of an interactive compilation session."""
compiler_version: str = "1.0.0"
# The list of actions that are supported by this service.
action_spaces = [
ActionSpace(
name="unrolling",
space=Space(
named_discrete=NamedDiscreteSpace(
name=[
"-loop-unroll -unroll-count=2",
"-loop-unroll -unroll-count=4",
"-loop-unroll -unroll-count=8",
],
),
),
)
]
# A list of observation spaces supported by this service. Each of these
# ObservationSpace protos describes an observation space.
observation_spaces = [
ObservationSpace(
name="ir",
space=Space(
string_value=StringSpace(length_range=Int64Range(min=0)),
),
deterministic=True,
platform_dependent=False,
default_observation=Event(string_value=""),
),
ObservationSpace(
name="features",
space=Space(
int64_box=Int64Box(
low=Int64Tensor(shape=[3], value=[0, 0, 0]),
high=Int64Tensor(shape=[3], value=[100000, 100000, 100000]),
),
),
),
ObservationSpace(
name="runtime",
space=Space(
double_value=DoubleRange(min=0),
),
deterministic=False,
platform_dependent=True,
default_observation=Event(
double_value=0,
),
),
ObservationSpace(
name="size",
space=Space(
double_value=DoubleRange(min=0),
),
deterministic=True,
platform_dependent=True,
default_observation=Event(
double_value=0,
),
),
]
def __init__(
self,
working_directory: Path,
action_space: ActionSpace,
benchmark: Benchmark,
use_custom_opt: bool = True,
):
super().__init__(working_directory, action_space, benchmark)
logging.info("Started a compilation session for %s", benchmark.uri)
self._benchmark = benchmark
self._action_space = action_space
# Resolve the paths to LLVM binaries once now.
self._clang = str(llvm.clang_path())
self._llc = str(llvm.llc_path())
self._llvm_diff = str(llvm.llvm_diff_path())
self._opt = str(llvm.opt_path())
        # LLVM's opt does not always enforce the unrolling options passed as cli arguments.
        # Hence, we created our own executable with a custom unrolling pass in
        # examples/example_unrolling_service/loop_unroller that enforces the unrolling
        # factors passed on its cli.
        # If self._use_custom_opt is True, use our custom executable; otherwise use LLVM's opt.
self._use_custom_opt = use_custom_opt
# Dump the benchmark source to disk.
self._src_path = str(self.working_dir / "benchmark.c")
with open(self.working_dir / "benchmark.c", "wb") as f:
f.write(benchmark.program.contents)
self._llvm_path = str(self.working_dir / "benchmark.ll")
self._llvm_before_path = str(self.working_dir / "benchmark.previous.ll")
self._obj_path = str(self.working_dir / "benchmark.o")
self._exe_path = str(self.working_dir / "benchmark.exe")
run_command(
[
self._clang,
"-Xclang",
"-disable-O0-optnone",
"-emit-llvm",
"-S",
self._src_path,
"-o",
self._llvm_path,
]
+ get_system_library_flags(),
timeout=30,
)
def apply_action(self, action: Event) -> Tuple[bool, Optional[ActionSpace], bool]:
num_choices = len(self._action_space.space.named_discrete.name)
        # This is the index into the action space's flag strings that the user
        # selected, e.g. 0 -> "-loop-unroll -unroll-count=2".
choice_index = action.int64_value
if choice_index < 0 or choice_index >= num_choices:
raise ValueError("Out-of-range")
args = self._action_space.space.named_discrete.name[choice_index]
logging.info(
"Applying action %d, equivalent command-line arguments: '%s'",
choice_index,
args,
)
args = args.split()
# make a copy of the LLVM file to compare its contents after applying the action
shutil.copyfile(self._llvm_path, self._llvm_before_path)
# apply action
if self._use_custom_opt:
# our custom unroller has an additional `f` at the beginning of each argument
for i, arg in enumerate(args):
# convert -<argument> to -f<argument>
arg = arg[0] + "f" + arg[1:]
args[i] = arg
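            # For example, the action "-loop-unroll -unroll-count=2" is rewritten
            # to ["-floop-unroll", "-funroll-count=2"] before being passed to the
            # custom loop_unroller below.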
run_command(
[
"../loop_unroller/loop_unroller",
self._llvm_path,
*args,
"-S",
"-o",
self._llvm_path,
],
timeout=30,
)
else:
run_command(
[
self._opt,
*args,
self._llvm_path,
"-S",
"-o",
self._llvm_path,
],
timeout=30,
)
# compare the IR files to check if the action had an effect
try:
subprocess.check_call(
[self._llvm_diff, self._llvm_before_path, self._llvm_path],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
timeout=60,
)
action_had_no_effect = True
except subprocess.CalledProcessError:
action_had_no_effect = False
        end_of_session = False  # TODO: investigate how long loop unrolling can keep being applied, e.g. by detecting when there are no more loops in the IR.
new_action_space = None
return (end_of_session, new_action_space, action_had_no_effect)
@property
def ir(self) -> str:
with open(self._llvm_path) as f:
return f.read()
def get_observation(self, observation_space: ObservationSpace) -> Event:
logging.info("Computing observation from space %s", observation_space.name)
if observation_space.name == "ir":
return Event(string_value=self.ir)
elif observation_space.name == "features":
stats = utils.extract_statistics_from_ir(self.ir)
observation = Event(
int64_tensor=Int64Tensor(
shape=[len(list(stats.values()))], value=list(stats.values())
)
)
return observation
elif observation_space.name == "runtime":
# compile LLVM to object file
run_command(
[
self._llc,
"-filetype=obj",
self._llvm_path,
"-o",
self._obj_path,
],
timeout=30,
)
# build object file to binary
run_command(
[
self._clang,
self._obj_path,
"-O3",
"-o",
self._exe_path,
]
+ get_system_library_flags(),
timeout=30,
)
            # TODO: add documentation that benchmarks need to print out their execution time.
            # Run the binary 5 times and take the average of the middle 3 runtimes.
exec_times = []
for _ in range(5):
stdout = run_command(
[self._exe_path],
timeout=30,
)
try:
exec_times.append(int(stdout))
except ValueError:
raise ValueError(
f"Error in parsing execution time from output of command\n"
f"Please ensure that the source code of the benchmark measures execution time and prints to stdout\n"
f"Stdout of the program: {stdout}"
)
exec_times = np.sort(exec_times)
avg_exec_time = np.mean(exec_times[1:4])
return Event(double_value=avg_exec_time)
elif observation_space.name == "size":
# compile LLVM to object file
run_command(
[
self._llc,
"-filetype=obj",
self._llvm_path,
"-o",
self._obj_path,
],
timeout=30,
)
# build object file to binary
run_command(
[
self._clang,
self._obj_path,
"-Oz",
"-o",
self._exe_path,
]
+ get_system_library_flags(),
timeout=30,
)
binary_size = os.path.getsize(self._exe_path)
return Event(double_value=binary_size)
else:
raise KeyError(observation_space.name)
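# A standalone sketch (not called by the service above) of the runtime measurement
# strategy used in get_observation("runtime"): collect five runtime samples, sort
# them, and average the middle three to reduce the influence of outliers.
def _trimmed_mean_runtime_sketch(samples):
    """Return the mean of the middle three of five runtime samples."""
    assert len(samples) == 5, "get_observation('runtime') always collects 5 samples"
    return float(np.mean(np.sort(samples)[1:4]))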
if __name__ == "__main__":
create_and_run_compiler_gym_service(UnrollingCompilationSession)
|
CompilerGym-development
|
examples/example_unrolling_service/service_py/example_service.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the unrolling CompilerGym service example."""
import subprocess
from pathlib import Path
import gym
import numpy as np
import pytest
import compiler_gym
import examples.loop_optimizations_service as loop_optimizations_service
from compiler_gym.envs import CompilerEnv
from compiler_gym.errors import SessionNotFound
from compiler_gym.service.client_service_compiler_env import ClientServiceCompilerEnv
from compiler_gym.spaces import ActionSpace, Dict, NamedDiscrete, Scalar, Sequence
from compiler_gym.third_party.autophase import AUTOPHASE_FEATURE_NAMES
from tests.test_main import main
@pytest.fixture(scope="function")
def env() -> CompilerEnv:
"""Text fixture that yields an environment."""
with gym.make("loops-opt-py-v0") as env_:
yield env_
@pytest.fixture(scope="module")
def bin() -> Path:
return loop_optimizations_service.LOOPS_OPT_PY_SERVICE_BINARY
def test_invalid_arguments(bin: Path):
"""Test that running the binary with unrecognized arguments is an error."""
def run(cmd):
p = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
)
stdout, stderr = p.communicate(timeout=10)
return p.returncode, stdout, stderr
returncode, _, stderr = run([str(bin), "foobar"])
assert "ERROR:" in stderr
assert "'foobar'" in stderr
assert returncode == 1
returncode, _, stderr = run([str(bin), "--foobar"])
    # The C++ and Python flag-parsing libraries emit slightly different error
    # messages.
assert "ERROR:" in stderr or "FATAL" in stderr
assert "'foobar'" in stderr
assert returncode == 1
def test_versions(env: ClientServiceCompilerEnv):
"""Tests the GetVersion() RPC endpoint."""
assert env.version == compiler_gym.__version__
assert env.compiler_version == "1.0.0"
def test_action_space(env: CompilerEnv):
"""Test that the environment reports the service's action spaces."""
assert env.action_spaces == [
ActionSpace(
NamedDiscrete(
name="loop-opt",
items=[
"--loop-unroll --unroll-count=2",
"--loop-unroll --unroll-count=4",
"--loop-unroll --unroll-count=8",
"--loop-unroll --unroll-count=16",
"--loop-unroll --unroll-count=32",
"--loop-vectorize -force-vector-width=2",
"--loop-vectorize -force-vector-width=4",
"--loop-vectorize -force-vector-width=8",
"--loop-vectorize -force-vector-width=16",
"--loop-vectorize -force-vector-width=32",
],
)
)
]
def test_observation_spaces(env: CompilerEnv):
"""Test that the environment reports the service's observation spaces."""
env.reset()
assert env.observation.spaces.keys() == {
"ir",
"Inst2vec",
"Autophase",
"AutophaseDict",
"Programl",
"runtime",
"size",
}
assert env.observation.spaces["ir"].space == Sequence(
name="ir",
size_range=(0, np.iinfo(int).max),
dtype=str,
)
assert env.observation.spaces["Inst2vec"].space == Sequence(
name="Inst2vec",
size_range=(0, np.iinfo(int).max),
scalar_range=Scalar(
name=None,
min=np.iinfo(np.int64).min,
max=np.iinfo(np.int64).max,
dtype=np.int64,
),
dtype=int,
)
assert env.observation.spaces["Autophase"].space == Sequence(
name="Autophase",
size_range=(len(AUTOPHASE_FEATURE_NAMES), len(AUTOPHASE_FEATURE_NAMES)),
scalar_range=Scalar(
name=None,
min=np.iinfo(np.int64).min,
max=np.iinfo(np.int64).max,
dtype=np.int64,
),
dtype=int,
)
assert env.observation.spaces["AutophaseDict"].space == Dict(
name="AutophaseDict",
spaces={
name: Scalar(name=None, min=0, max=np.iinfo(np.int64).max, dtype=np.int64)
for name in AUTOPHASE_FEATURE_NAMES
},
)
assert env.observation.spaces["Programl"].space == Sequence(
name="Programl",
size_range=(0, np.iinfo(int).max),
dtype=str,
)
assert env.observation.spaces["runtime"].space == Scalar(
name="runtime", min=0, max=np.inf, dtype=float
)
assert env.observation.spaces["size"].space == Scalar(
name="size", min=0, max=np.inf, dtype=float
)
def test_reward_spaces(env: CompilerEnv):
"""Test that the environment reports the service's reward spaces."""
env.reset()
assert env.reward.spaces.keys() == {"runtime", "size"}
def test_step_before_reset(env: CompilerEnv):
"""Taking a step() before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
env.step(0)
def test_observation_before_reset(env: CompilerEnv):
"""Taking an observation before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
_ = env.observation["ir"]
def test_reward_before_reset(env: CompilerEnv):
"""Taking a reward before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
_ = env.reward["runtime"]
def test_reset_invalid_benchmark(env: CompilerEnv):
"""Test requesting a specific benchmark."""
with pytest.raises(LookupError) as ctx:
env.reset(benchmark="loops-opt-v0/foobar")
assert str(ctx.value) == "Unknown program name"
def test_invalid_observation_space(env: CompilerEnv):
"""Test error handling with invalid observation space."""
with pytest.raises(LookupError):
env.observation_space = 100
def test_invalid_reward_space(env: CompilerEnv):
"""Test error handling with invalid reward space."""
with pytest.raises(LookupError):
env.reward_space = 100
def test_double_reset(env: CompilerEnv):
"""Test that reset() can be called twice."""
env.reset()
assert env.in_episode
env.reset()
assert env.in_episode
def test_Step_out_of_range(env: CompilerEnv):
"""Test error handling with an invalid action."""
env.reset()
with pytest.raises(ValueError) as ctx:
env.step(100)
assert str(ctx.value) == "Out-of-range"
def test_default_ir_observation(env: CompilerEnv):
"""Test default IR observation space."""
env.observation_space = "ir"
observation = env.reset()
assert len(observation) > 0
observation, reward, done, info = env.step(0)
assert not done, info
assert len(observation) > 0
assert reward is None
def test_default_inst2vec_observation(env: CompilerEnv):
"""Test default inst2vec observation space."""
env.observation_space = "Inst2vec"
observation = env.reset()
assert isinstance(observation, np.ndarray)
assert len(observation) >= 0
assert observation.dtype == np.int64
assert all(obs >= 0 for obs in observation.tolist())
def test_default_autophase_observation(env: CompilerEnv):
"""Test default autophase observation space."""
env.observation_space = "Autophase"
observation = env.reset()
assert isinstance(observation, np.ndarray)
assert observation.shape == (len(AUTOPHASE_FEATURE_NAMES),)
assert observation.dtype == np.int64
assert all(obs >= 0 for obs in observation.tolist())
def test_default_autophase_dict_observation(env: CompilerEnv):
"""Test default autophase dict observation space."""
env.observation_space = "AutophaseDict"
observation = env.reset()
assert isinstance(observation, dict)
assert sorted(observation.keys()) == sorted(AUTOPHASE_FEATURE_NAMES)
assert len(observation.values()) == len(AUTOPHASE_FEATURE_NAMES)
assert all(obs >= 0 for obs in observation.values())
def test_default_programl_observation(env: CompilerEnv):
"""Test default observation space."""
env.observation_space = "Programl"
observation = env.reset()
assert len(observation) > 0
observation, reward, done, info = env.step(0)
assert not done, info
assert len(observation) > 0
assert reward is None
def test_default_reward(env: CompilerEnv):
"""Test default reward space."""
env.reward_space = "runtime"
env.reset()
observation, reward, done, info = env.step(0)
assert not done, info
assert observation is None
assert reward is not None
def test_observations(env: CompilerEnv):
"""Test observation spaces."""
env.reset()
assert len(env.observation["ir"]) > 0
assert all(env.observation["Inst2vec"] >= 0)
assert all(env.observation["Autophase"] >= 0)
assert len(env.observation["Programl"]) > 0
def test_rewards(env: CompilerEnv):
"""Test reward spaces."""
env.reset()
assert env.reward["runtime"] is not None
def test_benchmarks(env: CompilerEnv):
assert list(env.datasets.benchmark_uris()) == [
"benchmark://loops-opt-v0/add",
"benchmark://loops-opt-v0/offsets1",
"benchmark://loops-opt-v0/conv2d",
]
def test_fork(env: CompilerEnv):
env.reset()
env.step(0)
env.step(1)
other_env = env.fork()
try:
assert env.benchmark == other_env.benchmark
assert other_env.actions == [0, 1]
finally:
other_env.close()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
examples/loop_optimizations_service/env_tests.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the loop optimizations environment example."""
import os
import subprocess
import sys
from getpass import getuser
from pathlib import Path
from typing import Iterable, List, Optional
import gym
import numpy as np
import pytest
import compiler_gym
import examples.loop_optimizations_service as loop_optimizations_service
from compiler_gym.datasets import Benchmark, Dataset
from compiler_gym.datasets.uri import BenchmarkUri
from compiler_gym.envs import CompilerEnv
from compiler_gym.envs.llvm.llvm_benchmark import get_system_library_flags
from compiler_gym.errors import SessionNotFound
from compiler_gym.service.client_service_compiler_env import ClientServiceCompilerEnv
from compiler_gym.spaces import Dict, NamedDiscrete, Reward, Scalar, Sequence
from compiler_gym.third_party import llvm
from compiler_gym.third_party.autophase import AUTOPHASE_FEATURE_NAMES
from compiler_gym.util import debug_util as dbg
from compiler_gym.util.registration import register
LOOPS_OPT_PY_SERVICE_BINARY: Path = Path(
"loop_optimizations_service/service_py/loops_opt_service.py"
)
assert LOOPS_OPT_PY_SERVICE_BINARY.is_file(), "Service script not found"
BENCHMARKS_PATH: Path = Path("loop_optimizations_service/benchmarks")
NEURO_VECTORIZER_HEADER: Path = Path(
"../compiler_gym/third_party/neuro-vectorizer/header.h"
)
class RuntimeReward(Reward):
"""An example reward that uses changes in the "runtime" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
name="runtime",
observation_spaces=["runtime"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.baseline_runtime = 0
def reset(self, benchmark: str, observation_view):
del benchmark # unused
self.baseline_runtime = observation_view["runtime"]
def update(self, action, observations, observation_view):
del action # unused
del observation_view # unused
return float(self.baseline_runtime - observations[0]) / self.baseline_runtime
class SizeReward(Reward):
"""An example reward that uses changes in the "size" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
name="size",
observation_spaces=["size"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.baseline_size = 0
def reset(self, benchmark: str, observation_view):
del benchmark # unused
        self.baseline_size = observation_view["size"]
def update(self, action, observations, observation_view):
del action # unused
del observation_view # unused
return float(self.baseline_size - observations[0]) / self.baseline_size
class LoopsDataset(Dataset):
def __init__(self, *args, **kwargs):
super().__init__(
name="benchmark://loops-opt-v2",
license="MIT",
description="Loops optimization dataset",
)
self._benchmarks = {
"/add": Benchmark.from_file_contents(
"benchmark://loops-opt-v2/add",
self.preprocess(BENCHMARKS_PATH / "add.c"),
),
"/offsets1": Benchmark.from_file_contents(
"benchmark://loops-opt-v2/offsets1",
self.preprocess(BENCHMARKS_PATH / "offsets1.c"),
),
"/conv2d": Benchmark.from_file_contents(
"benchmark://loops-opt-v2/conv2d",
self.preprocess(BENCHMARKS_PATH / "conv2d.c"),
),
}
@staticmethod
def preprocess(src: Path) -> bytes:
"""Front a C source through the compiler frontend."""
# TODO(github.com/facebookresearch/CompilerGym/issues/325): We can skip
# this pre-processing, or do it on the service side, once support for
# multi-file benchmarks lands.
cmd = [
str(llvm.clang_path()),
"-E",
"-o",
"-",
"-I",
str(NEURO_VECTORIZER_HEADER.parent),
src,
]
cmd += get_system_library_flags()
return subprocess.check_output(
cmd,
timeout=300,
)
def benchmark_uris(self) -> Iterable[str]:
yield from (f"benchmark://loops-opt-v2{k}" for k in self._benchmarks.keys())
def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark:
if uri.path in self._benchmarks:
return self._benchmarks[uri.path]
else:
raise LookupError("Unknown program name")
# Register the loop optimizations example service on module import. After importing
# this module, the loops-opt-py-v2 environment will be available to gym.make(...).
register(
id="loops-opt-py-v2",
entry_point="compiler_gym.service.client_service_compiler_env:ClientServiceCompilerEnv",
kwargs={
"service": LOOPS_OPT_PY_SERVICE_BINARY,
"rewards": [RuntimeReward(), SizeReward()],
"datasets": [LoopsDataset()],
},
)
@pytest.fixture(scope="function")
def env() -> CompilerEnv:
"""Text fixture that yields an environment."""
with gym.make("loops-opt-py-v0") as env_:
yield env_
@pytest.fixture(scope="module")
def bin() -> Path:
return loop_optimizations_service.LOOPS_OPT_PY_SERVICE_BINARY
def test_invalid_arguments(bin: Path):
"""Test that running the binary with unrecognized arguments is an error."""
def run(cmd):
p = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
)
stdout, stderr = p.communicate(timeout=10)
return p.returncode, stdout, stderr
returncode, _, stderr = run([str(bin), "foobar"])
assert "ERROR:" in stderr
assert "'foobar'" in stderr
assert returncode == 1
returncode, _, stderr = run([str(bin), "--foobar"])
    # The C++ and Python flag-parsing libraries emit slightly different error
    # messages.
assert "ERROR:" in stderr or "FATAL" in stderr
assert "'foobar'" in stderr
assert returncode == 1
def test_versions(env: ClientServiceCompilerEnv):
"""Tests the GetVersion() RPC endpoint."""
assert env.version == compiler_gym.__version__
assert env.compiler_version == "1.0.0"
def test_action_space(env: CompilerEnv):
"""Test that the environment reports the service's action spaces."""
assert env.action_spaces == [
NamedDiscrete(
name="loop-opt",
items=[
"--loop-unroll --unroll-count=2",
"--loop-unroll --unroll-count=4",
"--loop-unroll --unroll-count=8",
"--loop-unroll --unroll-count=16",
"--loop-unroll --unroll-count=32",
"--loop-vectorize -force-vector-width=2",
"--loop-vectorize -force-vector-width=4",
"--loop-vectorize -force-vector-width=8",
"--loop-vectorize -force-vector-width=16",
"--loop-vectorize -force-vector-width=32",
],
)
]
def test_observation_spaces(env: CompilerEnv):
"""Test that the environment reports the service's observation spaces."""
env.reset()
assert env.observation.spaces.keys() == {
"ir",
"Inst2vec",
"Autophase",
"AutophaseDict",
"Programl",
"runtime",
"size",
}
assert env.observation.spaces["ir"].space == Sequence(
name="ir",
size_range=(0, np.iinfo(int).max),
dtype=str,
)
assert env.observation.spaces["Inst2vec"].space == Sequence(
name="Inst2vec",
size_range=(0, np.iinfo(int).max),
scalar_range=Scalar(
name=None,
min=np.iinfo(np.int64).min,
max=np.iinfo(np.int64).max,
dtype=np.int64,
),
dtype=int,
)
assert env.observation.spaces["Autophase"].space == Sequence(
name="Autophase",
size_range=(len(AUTOPHASE_FEATURE_NAMES), len(AUTOPHASE_FEATURE_NAMES)),
scalar_range=Scalar(
name=None,
min=np.iinfo(np.int64).min,
max=np.iinfo(np.int64).max,
dtype=np.int64,
),
dtype=int,
)
assert env.observation.spaces["AutophaseDict"].space == Dict(
name="AutophaseDict",
spaces={
name: Scalar(name=None, min=0, max=np.iinfo(np.int64).max, dtype=np.int64)
for name in AUTOPHASE_FEATURE_NAMES
},
)
assert env.observation.spaces["Programl"].space == Sequence(
name="Programl",
size_range=(0, np.iinfo(int).max),
dtype=str,
)
assert env.observation.spaces["runtime"].space == Scalar(
name="runtime", min=0, max=np.inf, dtype=float
)
assert env.observation.spaces["size"].space == Scalar(
name="size", min=0, max=np.inf, dtype=float
)
def test_reward_spaces(env: CompilerEnv):
"""Test that the environment reports the service's reward spaces."""
env.reset()
assert env.reward.spaces.keys() == {"runtime", "size"}
def test_step_before_reset(env: CompilerEnv):
"""Taking a step() before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
env.step(0)
def test_observation_before_reset(env: CompilerEnv):
"""Taking an observation before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
_ = env.observation["ir"]
def test_reward_before_reset(env: CompilerEnv):
"""Taking a reward before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
_ = env.reward["runtime"]
def test_reset_invalid_benchmark(env: CompilerEnv):
"""Test requesting a specific benchmark."""
with pytest.raises(LookupError) as ctx:
env.reset(benchmark="loops-opt-v2/foobar")
assert str(ctx.value) == "Unknown program name"
def test_invalid_observation_space(env: CompilerEnv):
"""Test error handling with invalid observation space."""
with pytest.raises(LookupError):
env.observation_space = 100
def test_invalid_reward_space(env: CompilerEnv):
"""Test error handling with invalid reward space."""
with pytest.raises(LookupError):
env.reward_space = 100
def test_double_reset(env: CompilerEnv):
"""Test that reset() can be called twice."""
env.reset()
assert env.in_episode
env.reset()
assert env.in_episode
def test_Step_out_of_range(env: CompilerEnv):
"""Test error handling with an invalid action."""
env.reset()
with pytest.raises(ValueError) as ctx:
env.step(100)
assert str(ctx.value) == "Out-of-range"
def test_default_ir_observation(env: CompilerEnv):
"""Test default IR observation space."""
env.observation_space = "ir"
observation = env.reset()
assert len(observation) > 0
observation, reward, done, info = env.step(0)
assert not done, info
assert len(observation) > 0
assert reward is None
def test_default_inst2vec_observation(env: CompilerEnv):
"""Test default inst2vec observation space."""
env.observation_space = "Inst2vec"
observation = env.reset()
assert isinstance(observation, np.ndarray)
assert len(observation) >= 0
assert observation.dtype == np.int64
assert all(obs >= 0 for obs in observation.tolist())
def test_default_autophase_observation(env: CompilerEnv):
"""Test default autophase observation space."""
env.observation_space = "Autophase"
observation = env.reset()
assert isinstance(observation, np.ndarray)
assert observation.shape == (len(AUTOPHASE_FEATURE_NAMES),)
assert observation.dtype == np.int64
assert all(obs >= 0 for obs in observation.tolist())
def test_default_autophase_dict_observation(env: CompilerEnv):
"""Test default autophase dict observation space."""
env.observation_space = "AutophaseDict"
observation = env.reset()
assert isinstance(observation, dict)
assert sorted(observation.keys()) == sorted(AUTOPHASE_FEATURE_NAMES)
assert len(observation.values()) == len(AUTOPHASE_FEATURE_NAMES)
assert all(obs >= 0 for obs in observation.values())
def test_default_programl_observation(env: CompilerEnv):
"""Test default observation space."""
env.observation_space = "Programl"
observation = env.reset()
assert len(observation) > 0
observation, reward, done, info = env.step(0)
assert not done, info
assert len(observation) > 0
assert reward is None
def test_default_reward(env: CompilerEnv):
"""Test default reward space."""
env.reward_space = "runtime"
env.reset()
observation, reward, done, info = env.step(0)
assert not done, info
assert observation is None
assert reward is not None
def test_observations(env: CompilerEnv):
"""Test observation spaces."""
env.reset()
assert len(env.observation["ir"]) > 0
assert all(env.observation["Inst2vec"] >= 0)
assert all(env.observation["Autophase"] >= 0)
assert len(env.observation["Programl"]) > 0
def test_rewards(env: CompilerEnv):
"""Test reward spaces."""
env.reset()
assert env.reward["runtime"] is not None
def test_benchmarks(env: CompilerEnv):
assert list(env.datasets.benchmark_uris()) == [
"benchmark://loops-opt-v2/add",
"benchmark://loops-opt-v2/offsets1",
"benchmark://loops-opt-v2/conv2d",
]
def test_fork(env: CompilerEnv):
env.reset()
env.step(0)
env.step(1)
other_env = env.fork()
try:
assert env.benchmark == other_env.benchmark
assert other_env.actions == [0, 1]
finally:
other_env.close()
# Copied from CompilerGym/tests/test_main.py because importing it here caused errors.
def main(extra_pytest_args: Optional[List[str]] = None, debug_level: int = 1):
dbg.set_debug_level(debug_level)
# Keep test data isolated from user data.
os.environ[
"COMPILER_GYM_SITE_DATA"
] = f"/tmp/compiler_gym_{getuser()}/tests/site_data"
os.environ["COMPILER_GYM_CACHE"] = f"/tmp/compiler_gym_{getuser()}/tests/cache"
pytest_args = sys.argv + [
# Run pytest verbosely to print out test names to provide context in
# case of failures.
"-vv",
# Disable "Module already imported" warnings. See:
# https://docs.pytest.org/en/latest/how-to/usage.html#calling-pytest-from-python-code
"-W",
"ignore:Module already imported:pytest.PytestWarning",
# Disable noisy "Flaky tests passed" messages.
"--no-success-flaky-report",
]
# Support for sharding. If a py_test target has the shard_count attribute
# set (in the range [1,50]), then the pytest-shard module is used to divide
# the tests among the shards. See https://pypi.org/project/pytest-shard/
sharded_test = os.environ.get("TEST_TOTAL_SHARDS")
if sharded_test:
num_shards = int(os.environ["TEST_TOTAL_SHARDS"])
shard_index = int(os.environ["TEST_SHARD_INDEX"])
pytest_args += [f"--shard-id={shard_index}", f"--num-shards={num_shards}"]
else:
pytest_args += ["-p", "no:pytest-shard"]
pytest_args += extra_pytest_args or []
returncode = pytest.main(pytest_args)
# By default pytest will fail with an error if no tests are collected.
# Disable that behavior here (with a warning) since there are legitimate
# cases where we may want to run a test file with no tests in it. For
# example, when running on a continuous integration service where all the
# tests are marked with the @skip_on_ci decorator.
if returncode == pytest.ExitCode.NO_TESTS_COLLECTED.value:
print(
"WARNING: The test suite was empty. Is that intended?",
file=sys.stderr,
)
returncode = 0
sys.exit(returncode)
if __name__ == "__main__":
main(
extra_pytest_args=[
"-W",
"ignore::UserWarning",
]
)
|
CompilerGym-development
|
examples/loop_optimizations_service/env_without_bazel_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Smoke test for examples/loop_optimizations_service/example_without_bazel.py"""
from flaky import flaky
from loop_optimizations_service.example_without_bazel import main
@flaky
def test_example_without_bazel():
main()
|
CompilerGym-development
|
examples/loop_optimizations_service/example_without_bazel_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This module registers the Loop Optimizations CompilerGym environment """
import os
import subprocess
from pathlib import Path
from typing import Iterable
from compiler_gym.datasets import Benchmark, Dataset
from compiler_gym.datasets.uri import BenchmarkUri
from compiler_gym.envs.llvm.llvm_benchmark import get_system_library_flags
from compiler_gym.spaces import Reward
from compiler_gym.third_party import llvm
from compiler_gym.util.registration import register
from compiler_gym.util.runfiles_path import runfiles_path
LOOPS_OPT_PY_SERVICE_BINARY: Path = runfiles_path(
"examples/loop_optimizations_service/service_py/loops-opt-service-py"
)
BENCHMARKS_PATH: Path = runfiles_path("examples/loop_optimizations_service/benchmarks")
if not os.path.exists(BENCHMARKS_PATH):
BENCHMARKS_PATH = Path("loop_optimizations_service/benchmarks")
NEURO_VECTORIZER_HEADER: Path = runfiles_path(
"compiler_gym/third_party/neuro-vectorizer/header.h"
)
if not os.path.exists(NEURO_VECTORIZER_HEADER):
NEURO_VECTORIZER_HEADER: Path = Path(
"../compiler_gym/third_party/neuro-vectorizer/header.h"
)
class RuntimeReward(Reward):
"""An example reward that uses changes in the "runtime" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
name="runtime",
observation_spaces=["runtime"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.baseline_runtime = 0
def reset(self, benchmark: str, observation_view):
del benchmark # unused
self.baseline_runtime = observation_view["runtime"]
def update(self, action, observations, observation_view):
del action # unused
del observation_view # unused
return float(self.baseline_runtime - observations[0]) / self.baseline_runtime
class SizeReward(Reward):
"""An example reward that uses changes in the "size" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
name="size",
observation_spaces=["size"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.baseline_size = 0
def reset(self, benchmark: str, observation_view):
del benchmark # unused
        self.baseline_size = observation_view["size"]
def update(self, action, observations, observation_view):
del action # unused
del observation_view # unused
return float(self.baseline_size - observations[0]) / self.baseline_size
class LoopsDataset(Dataset):
def __init__(self, *args, **kwargs):
super().__init__(
name="benchmark://loops-opt-v0",
license="MIT",
description="Loops optimization dataset",
)
self._benchmarks = {
"/add": Benchmark.from_file_contents(
"benchmark://loops-opt-v0/add",
self.preprocess(BENCHMARKS_PATH / "add.c"),
),
"/offsets1": Benchmark.from_file_contents(
"benchmark://loops-opt-v0/offsets1",
self.preprocess(BENCHMARKS_PATH / "offsets1.c"),
),
"/conv2d": Benchmark.from_file_contents(
"benchmark://loops-opt-v0/conv2d",
self.preprocess(BENCHMARKS_PATH / "conv2d.c"),
),
}
@staticmethod
def preprocess(src: Path) -> bytes:
"""Front a C source through the compiler frontend."""
# TODO(github.com/facebookresearch/CompilerGym/issues/325): We can skip
# this pre-processing, or do it on the service side, once support for
# multi-file benchmarks lands.
cmd = [
str(llvm.clang_path()),
"-E",
"-o",
"-",
"-I",
str(NEURO_VECTORIZER_HEADER.parent),
src,
]
cmd += get_system_library_flags()
return subprocess.check_output(
cmd,
timeout=300,
)
def benchmark_uris(self) -> Iterable[str]:
yield from (f"benchmark://loops-opt-v0{k}" for k in self._benchmarks.keys())
def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark:
if uri.path in self._benchmarks:
return self._benchmarks[uri.path]
else:
raise LookupError("Unknown program name")
# Register the loop optimizations example service on module import. After importing
# this module, the loops-opt-py-v0 environment will be available to gym.make(...).
register(
id="loops-opt-py-v0",
entry_point="compiler_gym.service.client_service_compiler_env:ClientServiceCompilerEnv",
kwargs={
"service": LOOPS_OPT_PY_SERVICE_BINARY,
"rewards": [RuntimeReward(), SizeReward()],
"datasets": [LoopsDataset()],
},
)
|
CompilerGym-development
|
examples/loop_optimizations_service/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This script uses the loop optimizations service without needing
to use the bazel build system.
Prerequisite:
# In the repo's INSTALL.md, follow the 'Building from source using CMake' instructions with `-DCOMPILER_GYM_BUILD_EXAMPLES=ON` added to the `cmake` command
$ cd <path to source directory>/examples
Usage:
    $ python loop_optimizations_service/example_without_bazel.py
It is equivalent in behavior to the example.py script in this directory.
"""
import logging
import subprocess
from pathlib import Path
from typing import Iterable
import compiler_gym
from compiler_gym.datasets import Benchmark, Dataset
from compiler_gym.datasets.uri import BenchmarkUri
from compiler_gym.envs.llvm.llvm_benchmark import get_system_library_flags
from compiler_gym.spaces import Reward
from compiler_gym.third_party import llvm
from compiler_gym.util.logging import init_logging
from compiler_gym.util.registration import register
LOOPS_OPT_PY_SERVICE_BINARY: Path = Path(
"loop_optimizations_service/service_py/loops_opt_service.py"
)
BENCHMARKS_PATH: Path = Path("loop_optimizations_service/benchmarks")
NEURO_VECTORIZER_HEADER: Path = Path(
"../compiler_gym/third_party/neuro-vectorizer/header.h"
)
class RuntimeReward(Reward):
"""An example reward that uses changes in the "runtime" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
name="runtime",
observation_spaces=["runtime"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.baseline_runtime = 0
def reset(self, benchmark: str, observation_view):
del benchmark # unused
self.baseline_runtime = observation_view["runtime"]
def update(self, action, observations, observation_view):
del action # unused
del observation_view # unused
return float(self.baseline_runtime - observations[0]) / self.baseline_runtime
class SizeReward(Reward):
"""An example reward that uses changes in the "size" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
name="size",
observation_spaces=["size"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.baseline_size = 0
def reset(self, benchmark: str, observation_view):
del benchmark # unused
        self.baseline_size = observation_view["size"]
def update(self, action, observations, observation_view):
del action # unused
del observation_view # unused
return float(self.baseline_size - observations[0]) / self.baseline_size
class LoopsDataset(Dataset):
def __init__(self, *args, **kwargs):
super().__init__(
name="benchmark://loops-opt-v1",
license="MIT",
description="Loops optimization dataset",
)
self._benchmarks = {
"/add": Benchmark.from_file_contents(
"benchmark://loops-opt-v1/add",
self.preprocess(BENCHMARKS_PATH / "add.c"),
),
"/offsets1": Benchmark.from_file_contents(
"benchmark://loops-opt-v1/offsets1",
self.preprocess(BENCHMARKS_PATH / "offsets1.c"),
),
"/conv2d": Benchmark.from_file_contents(
"benchmark://loops-opt-v1/conv2d",
self.preprocess(BENCHMARKS_PATH / "conv2d.c"),
),
}
@staticmethod
def preprocess(src: Path) -> bytes:
"""Front a C source through the compiler frontend."""
# TODO(github.com/facebookresearch/CompilerGym/issues/325): We can skip
# this pre-processing, or do it on the service side, once support for
# multi-file benchmarks lands.
cmd = [
str(llvm.clang_path()),
"-E",
"-o",
"-",
"-I",
str(NEURO_VECTORIZER_HEADER.parent),
src,
] + get_system_library_flags()
return subprocess.check_output(
cmd,
timeout=300,
)
def benchmark_uris(self) -> Iterable[str]:
yield from (f"benchmark://loops-opt-v1{k}" for k in self._benchmarks.keys())
def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark:
if uri.path in self._benchmarks:
return self._benchmarks[uri.path]
else:
raise LookupError("Unknown program name")
# Register the loop optimizations example service on module import. After importing
# this module, the loops-opt-py-v1 environment will be available to gym.make(...).
register(
id="loops-opt-py-v1",
entry_point="compiler_gym.service.client_service_compiler_env:ClientServiceCompilerEnv",
kwargs={
"service": LOOPS_OPT_PY_SERVICE_BINARY,
"rewards": [RuntimeReward(), SizeReward()],
"datasets": [LoopsDataset()],
},
)
def main():
# Use debug verbosity to print out extra logging information.
init_logging(level=logging.DEBUG)
with compiler_gym.make(
"loops-opt-py-v1",
benchmark="loops-opt-v1/add",
observation_space="Programl",
reward_space="runtime",
) as env:
compiler_gym.set_debug_level(4) # TODO: check why this has no effect
observation = env.reset()
print("observation: ", observation)
print()
observation, reward, done, info = env.step(env.action_space.sample())
print("observation: ", observation)
print("reward: ", reward)
print("done: ", done)
print("info: ", info)
env.close()
# TODO: implement write_bitcode(..) or write_ir(..)
# env.write_bitcode("/tmp/output.bc")
if __name__ == "__main__":
main()
|
CompilerGym-development
|
examples/loop_optimizations_service/example_without_bazel.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import compiler_gym
import examples.loop_optimizations_service as loop_optimizations_service # noqa Register environments.
with compiler_gym.make(
"loops-opt-py-v0",
benchmark="loops-opt-v0/add",
observation_space="AutophaseDict",
reward_space="runtime",
) as env:
compiler_gym.set_debug_level(4) # TODO: check why this has no effect
observation = env.reset()
print("observation: ", observation)
print()
observation, reward, done, info = env.step(env.action_space.sample())
print("observation: ", observation)
print("reward: ", reward)
print("done: ", done)
print("info: ", info)
env.close()
# TODO: implement write_bitcode(..) or write_ir(..)
# env.write_bitcode("/tmp/output.bc")
|
CompilerGym-development
|
examples/loop_optimizations_service/example.py
|
#! /usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""An example CompilerGym service in python."""
import logging
import os
import shutil
import subprocess
from pathlib import Path
from typing import Optional, Tuple
import numpy as np
import compiler_gym.third_party.llvm as llvm
from compiler_gym.service import CompilationSession
from compiler_gym.service.proto import (
ActionSpace,
Benchmark,
DictEvent,
DictSpace,
DoubleRange,
Event,
Int64Range,
Int64SequenceSpace,
Int64Tensor,
NamedDiscreteSpace,
ObservationSpace,
Space,
StringSpace,
)
from compiler_gym.service.runtime import create_and_run_compiler_gym_service
from compiler_gym.third_party.autophase import AUTOPHASE_FEATURE_NAMES
from compiler_gym.third_party.inst2vec import Inst2vecEncoder
from compiler_gym.util.commands import run_command
from compiler_gym.util.runfiles_path import runfiles_path # noqa
_INST2VEC_ENCODER = Inst2vecEncoder()
class LoopsOptCompilationSession(CompilationSession):
"""Represents an instance of an interactive compilation session."""
compiler_version: str = "1.0.0"
# The list of actions that are supported by this service.
action_spaces = [
ActionSpace(
name="loop-opt",
space=Space(
named_discrete=NamedDiscreteSpace(
name=[
"--loop-unroll --unroll-count=2",
"--loop-unroll --unroll-count=4",
"--loop-unroll --unroll-count=8",
"--loop-unroll --unroll-count=16",
"--loop-unroll --unroll-count=32",
"--loop-vectorize -force-vector-width=2",
"--loop-vectorize -force-vector-width=4",
"--loop-vectorize -force-vector-width=8",
"--loop-vectorize -force-vector-width=16",
"--loop-vectorize -force-vector-width=32",
]
),
),
)
]
# A list of observation spaces supported by this service. Each of these
# ObservationSpace protos describes an observation space.
observation_spaces = [
ObservationSpace(
name="ir",
space=Space(string_value=StringSpace(length_range=Int64Range(min=0))),
deterministic=True,
platform_dependent=False,
default_observation=Event(string_value=""),
),
ObservationSpace(
name="Inst2vec",
space=Space(
int64_sequence=Int64SequenceSpace(length_range=Int64Range(min=0)),
),
),
ObservationSpace(
name="Autophase",
space=Space(
int64_sequence=Int64SequenceSpace(
length_range=Int64Range(
min=len(AUTOPHASE_FEATURE_NAMES),
max=len(AUTOPHASE_FEATURE_NAMES),
)
),
),
deterministic=True,
platform_dependent=False,
),
ObservationSpace(
name="AutophaseDict",
space=Space(
space_dict=DictSpace(
space={
name: Space(int64_value=Int64Range(min=0))
for name in AUTOPHASE_FEATURE_NAMES
}
)
),
deterministic=True,
platform_dependent=False,
),
ObservationSpace(
name="Programl",
space=Space(string_value=StringSpace(length_range=Int64Range(min=0))),
deterministic=True,
platform_dependent=False,
default_observation=Event(string_value=""),
),
ObservationSpace(
name="runtime",
space=Space(double_value=DoubleRange(min=0)),
deterministic=False,
platform_dependent=True,
default_observation=Event(
double_value=0,
),
),
ObservationSpace(
name="size",
space=Space(double_value=DoubleRange(min=0)),
deterministic=True,
platform_dependent=True,
default_observation=Event(
double_value=0,
),
),
]
def __init__(
self,
working_directory: Path,
action_space: ActionSpace,
benchmark: Benchmark,
use_custom_opt: bool = True,
):
super().__init__(working_directory, action_space, benchmark)
logging.info("Started a compilation session for %s", benchmark.uri)
self._benchmark = benchmark
self._action_space = action_space
self.inst2vec = _INST2VEC_ENCODER
# Resolve the paths to LLVM binaries once now.
self._clang = str(llvm.clang_path())
self._llc = str(llvm.llc_path())
self._llvm_diff = str(llvm.llvm_diff_path())
self._opt = str(llvm.opt_path())
        # LLVM's opt does not always enforce the loop optimization options passed as cli arguments.
        # Hence, we created our own executable with custom unrolling and vectorization passes in
        # examples/loop_optimizations_service/opt_loops that enforces the unrolling and
        # vectorization factors passed on its cli.
        # If self._use_custom_opt is True, use our custom executable; otherwise use LLVM's opt.
self._use_custom_opt = use_custom_opt
# Dump the benchmark source to disk.
self._src_path = str(self.working_dir / "benchmark.c")
with open(self.working_dir / "benchmark.c", "wb") as f:
f.write(benchmark.program.contents)
self._llvm_path = str(self.working_dir / "benchmark.ll")
self._llvm_before_path = str(self.working_dir / "benchmark.previous.ll")
self._obj_path = str(self.working_dir / "benchmark.o")
self._exe_path = str(self.working_dir / "benchmark.exe")
run_command(
[
self._clang,
"-Xclang",
"-disable-O0-optnone",
"-emit-llvm",
"-S",
self._src_path,
"-o",
self._llvm_path,
],
timeout=30,
)
def apply_action(self, action: Event) -> Tuple[bool, Optional[ActionSpace], bool]:
num_choices = len(self._action_space.space.named_discrete.name)
        # This is the index into the action space's flag strings that the user
        # selected, e.g. 0 -> "--loop-unroll --unroll-count=2".
choice_index = action.int64_value
if choice_index < 0 or choice_index >= num_choices:
raise ValueError("Out-of-range")
args = self._action_space.space.named_discrete.name[choice_index]
logging.info(
"Applying action %d, equivalent command-line arguments: '%s'",
choice_index,
args,
)
args = args.split()
# make a copy of the LLVM file to compare its contents after applying the action
shutil.copyfile(self._llvm_path, self._llvm_before_path)
# apply action
if self._use_custom_opt:
# our custom opt-loops has an additional `f` at the beginning of each argument
for i, arg in enumerate(args):
# convert --<argument> to --f<argument>
arg = arg[0:2] + "f" + arg[2:]
args[i] = arg
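            # For example, the action "--loop-unroll --unroll-count=2" is rewritten
            # to ["--floop-unroll", "--funroll-count=2"] before being passed to the
            # custom opt_loops binary below.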
run_command(
[
os.path.join(os.path.dirname(__file__), "../opt_loops/opt_loops"),
self._llvm_path,
*args,
"-S",
"-o",
self._llvm_path,
],
timeout=30,
)
else:
run_command(
[
self._opt,
*args,
self._llvm_path,
"-S",
"-o",
self._llvm_path,
],
timeout=30,
)
# compare the IR files to check if the action had an effect
try:
subprocess.check_call(
[self._llvm_diff, self._llvm_before_path, self._llvm_path],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
timeout=60,
)
action_had_no_effect = True
except subprocess.CalledProcessError:
action_had_no_effect = False
        end_of_session = False  # TODO: investigate how long loop optimizations can keep being applied, e.g. by detecting when there are no more loops in the IR or by inspecting the loop metadata.
new_action_space = None
return (end_of_session, new_action_space, action_had_no_effect)
@property
def ir(self) -> str:
with open(self._llvm_path) as f:
return f.read()
def get_observation(self, observation_space: ObservationSpace) -> Event:
logging.info("Computing observation from space %s", observation_space.name)
if observation_space.name == "ir":
return Event(string_value=self.ir)
elif observation_space.name == "Inst2vec":
Inst2vec_str = self.inst2vec.preprocess(self.ir)
Inst2vec_ids = self.inst2vec.encode(Inst2vec_str)
return Event(
int64_tensor=Int64Tensor(shape=[len(Inst2vec_ids)], value=Inst2vec_ids)
)
elif observation_space.name == "Autophase":
Autophase_str = run_command(
[
os.path.join(
os.path.dirname(__file__),
"../../../compiler_gym/third_party/autophase/compute_autophase-prelinked",
),
self._llvm_path,
],
timeout=30,
)
Autophase_list = list(map(int, list(Autophase_str.split(" "))))
return Event(
int64_tensor=Int64Tensor(
shape=[len(Autophase_list)], value=Autophase_list
)
)
elif observation_space.name == "AutophaseDict":
Autophase_str = run_command(
[
os.path.join(
os.path.dirname(__file__),
"../../../compiler_gym/third_party/autophase/compute_autophase-prelinked",
),
self._llvm_path,
],
timeout=30,
)
Autophase_list = list(map(int, list(Autophase_str.split(" "))))
Autophase_dict = {
name: Event(int64_value=val)
for name, val in zip(AUTOPHASE_FEATURE_NAMES, Autophase_list)
}
return Event(event_dict=DictEvent(event=Autophase_dict))
elif observation_space.name == "Programl":
Programl_str = run_command(
[
os.path.join(
os.path.dirname(__file__),
"../../../compiler_gym/third_party/programl/compute_programl",
),
self._llvm_path,
],
timeout=30,
)
return Event(string_value=Programl_str)
elif observation_space.name == "runtime":
# compile LLVM to object file
run_command(
[
self._llc,
"-filetype=obj",
self._llvm_path,
"-o",
self._obj_path,
],
timeout=30,
)
# build object file to binary
run_command(
[
self._clang,
self._obj_path,
"-O3",
"-o",
self._exe_path,
],
timeout=30,
)
# TODO: document that benchmarks need to print their execution time to stdout.
# Run the benchmark 5 times and take the mean of the middle 3 runtimes.
exec_times = []
for _ in range(5):
stdout = run_command(
[self._exe_path],
timeout=30,
)
try:
exec_times.append(int(stdout))
except ValueError:
raise ValueError(
f"Error in parsing execution time from output of command\n"
f"Please ensure that the source code of the benchmark measures execution time and prints to stdout\n"
f"Stdout of the program: {stdout}"
)
exec_times = np.sort(exec_times)
avg_exec_time = np.mean(exec_times[1:4])
return Event(double_value=avg_exec_time)
elif observation_space.name == "size":
# compile LLVM to object file
run_command(
[
self._llc,
"-filetype=obj",
self._llvm_path,
"-o",
self._obj_path,
],
timeout=30,
)
# build object file to binary
run_command(
[
self._clang,
self._obj_path,
"-Oz",
"-o",
self._exe_path,
],
timeout=30,
)
binary_size = os.path.getsize(self._exe_path)
return Event(double_value=binary_size)
else:
raise KeyError(observation_space.name)
if __name__ == "__main__":
create_and_run_compiler_gym_service(LoopsOptCompilationSession)
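# --- Editor's illustrative sketch (not part of the original service) ---
# apply_action() above decides whether an action was a no-op by running
# llvm-diff on the IR before and after the pass, and the "runtime" observation
# averages the middle three of five runs. The two helpers below restate those
# ideas in isolation; `llvm_diff_bin` is a hypothetical path to an llvm-diff
# binary and neither helper is called by the service.
def _ir_files_are_identical(llvm_diff_bin: str, before_path: str, after_path: str) -> bool:
    """Return True if llvm-diff reports no difference between two IR files."""
    import subprocess  # local import to keep this sketch self-contained
    try:
        subprocess.check_call(
            [llvm_diff_bin, before_path, after_path],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
            timeout=60,
        )
        return True  # exit code 0: no difference, the action had no effect
    except subprocess.CalledProcessError:
        return False  # non-zero exit code: the IR changed
def _middle_three_mean(exec_times) -> float:
    """Mean of the middle three of five runtime samples, as used above."""
    import numpy as np  # local import to keep this sketch self-contained
    assert len(exec_times) == 5, "expects exactly five samples"
    return float(np.mean(np.sort(exec_times)[1:4]))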
|
CompilerGym-development
|
examples/loop_optimizations_service/service_py/loops_opt_service.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from enum import Enum
from pathlib import Path
from threading import Lock
from typing import Union
import numpy as np
from llvm_autotuning.just_keep_going_env import JustKeepGoingEnv
import compiler_gym
from compiler_gym.datasets import Benchmark
from compiler_gym.envs import LlvmEnv
from compiler_gym.wrappers import RuntimePointEstimateReward
logger = logging.getLogger(__name__)
_RUNTIME_LOCK = Lock()
class OptimizationTarget(str, Enum):
CODESIZE = "codesize"
BINSIZE = "binsize"
RUNTIME = "runtime"
@property
def optimization_space_enum_name(self) -> str:
return {
OptimizationTarget.CODESIZE: "IrInstructionCount",
OptimizationTarget.BINSIZE: "ObjectTextSizeBytes",
OptimizationTarget.RUNTIME: "Runtime",
}[self.value]
def make_env(self, benchmark: Union[str, Benchmark]) -> LlvmEnv:
env: LlvmEnv = compiler_gym.make("llvm-v0")
# TODO(cummins): This does not work with custom benchmarks, as the URI
# will not be known to the new environment.
if str(benchmark).startswith("file:///"):
benchmark = env.make_benchmark(Path(benchmark[len("file:///") :]))
env.benchmark = benchmark
if self.value == OptimizationTarget.CODESIZE:
env.reward_space = "IrInstructionCountOz"
elif self.value == OptimizationTarget.BINSIZE:
env.reward_space = "ObjectTextSizeOz"
elif self.value == OptimizationTarget.RUNTIME:
env = RuntimePointEstimateReward(env, warmup_count=0, runtime_count=3)
else:
assert False, f"Unknown OptimizationTarget: {self.value}"
# Wrap the env to ignore errors during search.
env = JustKeepGoingEnv(env)
return env
def final_reward(self, env: LlvmEnv, runtime_count: int = 30) -> float:
"""Compute the final reward of the environment.
Note that this may modify the environment state. You should call
:code:`reset()` before continuing to use the environment after this.
"""
# Reapply the environment state in a retry loop.
actions = list(env.actions)
env.reset()
for i in range(1, 5 + 1):
_, _, done, info = env.multistep(actions)
if not done:
break
logger.warning(
"Attempt %d to apply actions during final reward failed: %s",
i,
info.get("error_details"),
)
else:
raise ValueError("Failed to replay environment's actions")
if self.value == OptimizationTarget.CODESIZE:
return env.observation.IrInstructionCountOz() / max(
env.observation.IrInstructionCount(), 1
)
if self.value == OptimizationTarget.BINSIZE:
return env.observation.ObjectTextSizeOz() / max(
env.observation.ObjectTextSizeBytes(), 1
)
if self.value == OptimizationTarget.RUNTIME:
with _RUNTIME_LOCK:
with compiler_gym.make("llvm-v0", benchmark=env.benchmark) as new_env:
new_env.reset()
new_env.runtime_observation_count = runtime_count
new_env.runtime_warmup_count = 0
new_env.apply(env.state)
final_runtimes = new_env.observation.Runtime()
assert len(final_runtimes) == runtime_count
new_env.reset()
new_env.send_param("llvm.apply_baseline_optimizations", "-O3")
o3_runtimes = new_env.observation.Runtime()
assert len(o3_runtimes) == runtime_count
logger.debug("O3 runtimes: %s", o3_runtimes)
logger.debug("Final runtimes: %s", final_runtimes)
speedup = np.median(o3_runtimes) / max(np.median(final_runtimes), 1e-12)
logger.debug("Speedup: %.4f", speedup)
return speedup
assert False, f"Unknown OptimizationTarget: {self.value}"
|
CompilerGym-development
|
examples/llvm_autotuning/optimization_target.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
CompilerGym-development
|
examples/llvm_autotuning/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
from pathlib import Path
from typing import Dict, Iterable, List
import gym
import pandas as pd
import yaml
from llvm_autotuning.autotuners import Autotuner
from llvm_autotuning.benchmarks import Benchmarks
from pydantic import BaseModel, Field
from compiler_gym import CompilerEnvStateWriter
from compiler_gym.util.executor import Executor
logger = logging.getLogger(__name__)
class Experiment(BaseModel):
"""The composition of a full autotuning experiment, comprising autotuner,
executor, and programs to tune.
"""
# === Start of fields list. ===
executor: Executor
"""The execution environment to use for training / testing jobs."""
autotuner: Autotuner
benchmarks: Benchmarks
"""The set of benchmarks to test on."""
working_directory: Path
"""The working directory where logs and other artifacts are written to."""
experiment: str = "unnamed_experiment"
"""A logical name for this experiment. This is used for naming RLlib
trials.
"""
num_replicas: int = Field(default=1, ge=1)
"""The number of duplicate jobs to run. E.g. for training, this will train
:code:`n` independent models in trials that share the same working
directory.
"""
seed: int = 0xCC
"""A numeric random seed."""
# === Start of public API. ===
def run(self) -> None:
"""Run the experiment."""
# The working directory may already have been created by hydra, so we
# will check for the config.json file below to see if this experiment
# has already run.
self.working_directory.mkdir(parents=True, exist_ok=True)
# Dump the parsed config to file.
assert not self.config_path.is_file(), (
f"Refusing to overwrite file: {self.config_path}. "
"Is the working directory clean?"
)
with open(self.config_path, "w") as f:
print(json.dumps(json.loads(self.json()), indent=2), file=f)
logger.info("Wrote %s", self.config_path)
results_num = 0
with self.executor.get_executor(
logs_dir=self.working_directory / "logs"
) as executor:
with gym.make("llvm-v0") as env:
for replica_num in range(self.num_replicas):
for benchmark in self.benchmarks.benchmark_uris_iterator(env):
results_num += 1
results_path = (
self.working_directory / f"results-{results_num:03d}.csv"
)
errors_path = (
self.working_directory / f"errors-{results_num:03d}.json"
)
executor.submit(
_experiment_worker,
autotuner=self.autotuner,
benchmark=benchmark,
results_path=results_path,
errors_path=errors_path,
seed=self.seed + replica_num,
)
def yaml(self) -> str:
"""Serialize the model configuration to a YAML string."""
# We can't directly dump the dict() representation because we need to
# simplify the types first, so we go via JSON.
simplified_data = json.loads(self.json())
return yaml.dump(simplified_data)
@property
def config_path(self) -> Path:
return self.working_directory / "config.json"
@property
def results_paths(self) -> Iterable[Path]:
"""Return an iterator over results files."""
for path in self.working_directory.iterdir():
if path.is_file() and path.name.startswith("results-"):
yield path
@property
def errors(self) -> Iterable[Dict[str, str]]:
"""Return an iterator over errors.
An error is a dictionary with keys: "benchmark", "error_type", and
"error_message".
"""
for path in self.working_directory.iterdir():
if path.is_file() and path.name.startswith("errors-"):
with open(path, "r") as f:
yield json.load(f)
@property
def configuration_number(self) -> str:
return self.working_directory.name.split("-")[-1]
@property
def timestamp(self) -> str:
return f"{self.working_directory.parent.parent.name}/{self.working_directory.parent.name}"
@property
def dataframe(self) -> pd.DataFrame:
"""Return the results as a dataframe."""
dfs = []
for path in self.results_paths:
dfs.append(pd.read_csv(path))
if not dfs:
return pd.DataFrame()
return pd.concat(dfs)
@classmethod
def from_logsdir(cls, working_directory: Path) -> List["Experiment"]:
"""Reconstruct experiments by recursively reading from logs directories."""
def find_config_dumps(dir: Path) -> Iterable[Path]:
"""Attempt to locate config files recursively in directories."""
if (dir / "config.json").is_file():
yield dir / "config.json"
return
for entry in dir.iterdir():
if entry.is_dir():
yield from find_config_dumps(entry)
experiments: List[Experiment] = []
for config_path in find_config_dumps(working_directory):
with open(config_path) as f:
try:
config = json.load(f)
config["working_directory"] = config_path.parent
experiments.append(cls(**config))
except json.decoder.JSONDecodeError as e:
logger.warning(
"Failed to parse JSON for model file %s: %s", config, e
)
continue
return experiments
# === Start of implementation details. ===
class Config:
validate_assignment = True
def _experiment_worker(
autotuner: Autotuner,
benchmark: str,
results_path: Path,
errors_path: Path,
seed: int,
) -> None:
try:
with autotuner.optimization_target.make_env(benchmark) as env:
env.seed(seed)
env.action_space.seed(seed)
state = autotuner(env, seed=seed)
except Exception as e: # pylint: disable=broad-except
logger.warning("Autotuner failed on benchmark %s: %s", benchmark, e)
with open(errors_path, "w") as f:
json.dump(
{
"benchmark": benchmark,
"error_type": type(e).__name__,
"error_message": str(e),
},
f,
)
return
logger.info("State %s", state)
with CompilerEnvStateWriter(open(results_path, "w")) as writer:
writer.write_state(state, flush=True)
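# --- Editor's illustrative sketch (not part of the original module) ---
# Experiment.from_logsdir() reconstructs past experiments from the config.json
# files dumped by run(), and each Experiment exposes its results as a
# dataframe. A hypothetical helper that aggregates them:
def _summarize_logs(logs_root: Path) -> pd.DataFrame:
    frames = [exp.dataframe for exp in Experiment.from_logsdir(logs_root)]
    return pd.concat(frames) if frames else pd.DataFrame()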
|
CompilerGym-development
|
examples/llvm_autotuning/experiment.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import hydra
from llvm_autotuning.experiment import Experiment
from omegaconf import DictConfig, OmegaConf
from pydantic import ValidationError
from compiler_gym.util.shell_format import indent
@hydra.main(config_path="config", config_name="default")
def main(config: DictConfig) -> None:
# Parse the config to pydantic models.
OmegaConf.set_readonly(config, True)
try:
model: Experiment = Experiment(working_directory=os.getcwd(), **config)
except ValidationError as e:
print(e, file=sys.stderr)
sys.exit(1)
print("Experiment configuration:")
print()
print(indent(model.yaml()))
print()
model.run()
print()
print("Results written to", model.working_directory)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
examples/llvm_autotuning/tune.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from compiler_gym.wrappers import CompilerEnvWrapper
logger = logging.getLogger(__name__)
# TODO(github.com/facebookresearch/CompilerGym/issues/469): Once step() and
# reset() no longer raise exceptions then this wrapper class can be removed.
class JustKeepGoingEnv(CompilerEnvWrapper):
"""This wrapper class prevents the step() and close() methods from raising
an exception.
Just keep swimming ...
|\\ o
| \\ o
|\\ / .\\ o
| | (
|/\\ /
| /
|/
Usage:
>>> env = compiler_gym.make("llvm-v0")
>>> env = JustKeepGoingEnv(env)
# enjoy ...
"""
def step(self, *args, **kwargs):
try:
return self.env.step(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
logger.warning("step() error: %s", e)
# Return "null" observation / reward.
default_observation = (
self.env.observation_space_spec.default_value
if self.env.observation_space
else None
)
default_reward = (
float(
self.env.reward_space_spec.reward_on_error(self.env.episode_reward)
)
if self.env.reward_space
else None
)
self.close()
return default_observation, default_reward, True, {"error_details": str(e)}
def reset(self, *args, **kwargs):
for _ in range(5):
try:
return super().reset(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
logger.warning("reset() error, retrying: %s", e)
self.close()
# No more retries.
return super().reset(*args, **kwargs)
def close(self):
try:
self.env.close()
except Exception as e: # pylint: disable=broad-except
logger.warning("Ignoring close() error: %s", e)
|
CompilerGym-development
|
examples/llvm_autotuning/just_keep_going_env.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
from pathlib import Path
from typing import List
import pandas as pd
from llvm_autotuning.experiment import Experiment
from pydantic import ValidationError
from tabulate import tabulate
from typer import Typer
from compiler_gym.util.statistics import geometric_mean
app = Typer()
def experiments_from_paths(log_dirs: List[Path]) -> List[Experiment]:
experiments: List[Experiment] = []
for path in log_dirs:
try:
experiments += Experiment.from_logsdir(Path(path).expanduser())
except ValidationError as e:
print(e, file=sys.stderr)
sys.exit(1)
return experiments
@app.command()
def info(
log_dirs: List[Path] = ["~/logs/compiler_gym/llvm_autotuning"],
all_runs: bool = False,
group_by_working_directory: bool = False,
only_nonzero_reward: bool = False,
):
experiments = experiments_from_paths(log_dirs)
results = []
for experiment in experiments:
df = experiment.dataframe
# Exclude runs where the reward was zero. This is useful for pruning
# spurious results when the environment is flaky or can fail.
if only_nonzero_reward:
df = df[df.reward != 0]
if not len(df):
continue
df.to_csv(experiment.working_directory / "results.csv", index=False)
walltimes = df[["benchmark", "walltime"]].groupby("benchmark").mean()
rewards = df[["benchmark", "reward"]].groupby("benchmark").agg(geometric_mean)
num_results = len(df)
num_benchmarks = len(set(df["benchmark"]))
df = pd.concat((walltimes, rewards), axis=1)
avg_walltime = df["walltime"].mean()
avg_reward = geometric_mean(df["reward"])
df = pd.concat(
(
df,
pd.DataFrame(
[{"walltime": avg_walltime, "reward": avg_reward}],
index=["Average"],
),
)
)
df = df.reset_index()
df.insert(0, "config", experiment.configuration_number)
df.insert(0, "timestamp", experiment.timestamp)
df.insert(0, "experiment", experiment.experiment)
if all_runs:
print(experiment.working_directory)
print(tabulate(df, showindex=False, headers="keys", tablefmt="grid"))
print()
results.append(
{
"working_directory": experiment.working_directory,
"experiment": experiment.experiment,
"timestamp": experiment.timestamp,
"config": experiment.configuration_number,
"num_benchmarks": num_benchmarks,
"num_results": num_results,
"walltime": avg_walltime,
"reward": avg_reward,
}
)
df = pd.DataFrame(results)
if not len(df):
print("No results")
return
print("---------------------------------------")
print("Aggregate over experiments:")
if group_by_working_directory:
df = df.groupby(["working_directory"]).mean()
else:
df = df.groupby(["experiment", "timestamp", "config"]).mean()
# Cast float back to int.
df["num_benchmarks"] = [int(x) for x in df["num_benchmarks"]]
df["num_results"] = [int(x) for x in df["num_results"]]
# Better column names.
df = df.rename(columns={"reward": "geomean_reward", "walltime": "walltime (s)"})
pd.set_option("display.max_rows", None)
print(df)
if __name__ == "__main__":
app()
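# --- Editor's illustrative sketch (not part of the original script) ---
# The per-benchmark aggregation in info() boils down to a mean walltime and a
# geometric-mean reward per benchmark. A toy example with made-up numbers:
def _example_aggregate() -> pd.DataFrame:
    df = pd.DataFrame(
        {
            "benchmark": ["a", "a", "b"],
            "walltime": [1.0, 3.0, 2.0],
            "reward": [1.1, 0.9, 1.2],
        }
    )
    walltimes = df[["benchmark", "walltime"]].groupby("benchmark").mean()
    rewards = df[["benchmark", "reward"]].groupby("benchmark").agg(geometric_mean)
    return pd.concat((walltimes, rewards), axis=1)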
|
CompilerGym-development
|
examples/llvm_autotuning/info.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from itertools import islice
from typing import Iterable, List, Union
from pydantic import BaseModel, Field, root_validator, validator
from compiler_gym.datasets import Benchmark, BenchmarkUri
from compiler_gym.envs.llvm import LlvmEnv
class BenchmarksEntry(BaseModel):
"""This class represents a single entry in a Benchmarks class."""
# === Start of fields list. ===
dataset: str = Field(default=None, allow_mutation=False)
"""The name of a dataset to iterate over. If set, benchmarks are produced
by iterating over this dataset in order. If not set, the :code:`uris` list
must be provided.
"""
uris: List[str] = Field(default=[], allow_mutation=False)
"""A list of URIs to iterate over."""
max_benchmarks: int = Field(default=0, ge=0, allow_mutation=False)
"""The maximum number of benchmarks to yield from the given dataset or URIs
list.
"""
benchmarks_start_at: int = Field(default=0, ge=0, allow_mutation=False)
"""An offset into the dataset or URIs list to start iterating from.
Note that using very large offsets will slow things down as the
implementation still has to iterate over the excluded benchmarks.
"""
# === Start of public API. ===
def benchmarks_iterator(self, env: LlvmEnv) -> Iterable[Benchmark]:
"""Return an iterator over the benchmarks."""
return self._benchmark_iterator(env)
def benchmark_uris_iterator(self, env: LlvmEnv) -> Iterable[str]:
"""Return an iterator over the URIs of the benchmarks."""
return self._benchmark_iterator(env, uris=True)
# === Start of implementation details. ===
@root_validator
def check_that_either_dataset_or_uris_is_set(cls, values):
assert values.get("dataset") or values.get(
"uris"
), "Neither dataset or uris given"
return values
@validator("uris", pre=True)
def validate_uris(cls, value, *, values, **kwargs):
del kwargs
del values
for uri in value:
uri = BenchmarkUri.from_string(uri)
assert uri.scheme and uri.dataset, f"Invalid benchmark URI: {uri}"
return list(value)
def _benchmark_iterator(
self, env: LlvmEnv, uris: bool = False
) -> Union[Iterable[Benchmark], Iterable[str]]:
return (
self._uris_iterator(env, uris)
if self.uris
else self._dataset_iterator(env, uris)
)
def _uris_iterator(
self, env: LlvmEnv, uris: bool = False
) -> Union[Iterable[Benchmark], Iterable[str]]:
"""Iterate from a URIs list."""
start = self.benchmarks_start_at
n = len(self.uris)
if self.max_benchmarks:
n = min(self.max_benchmarks, n)
if uris:
# Shortcut in case we already have a list of URIs that we can slice
# rather than iterating over.
return iter(self.uris[start : start + n])
return islice((env.datasets.benchmark(u) for u in self.uris), start, start + n)
def _dataset_iterator(
self, env: LlvmEnv, uris: bool = False
) -> Union[Iterable[Benchmark], Iterable[str]]:
"""Iterate from a dataset name."""
dataset = env.datasets[self.dataset]
dataset.install()
n = dataset.size or self.max_benchmarks # dataset.size == 0 for inf
if self.max_benchmarks:
n = min(self.max_benchmarks, n)
start = self.benchmarks_start_at
iterator = dataset.benchmark_uris if uris else dataset.benchmarks
return islice(iterator(), start, start + n)
class Config:
validate_assignment = True
class Benchmarks(BaseModel):
"""Represents a set of benchmarks to use for training/validation/testing.
There are two ways of describing benchmarks, either as a list of benchmark
URIs:
benchmarks:
uris:
- benchmark://cbench-v1/adpcm
- benchmark://cbench-v1/ghostscript
Or as a dataset to iterate over:
benchmarks:
dataset: benchmark://cbench-v1
max_benchmarks: 20
"""
benchmarks: List[BenchmarksEntry]
# === Start of public API. ===
def benchmarks_iterator(self, env: LlvmEnv) -> Iterable[Benchmark]:
"""Return an iterator over the benchmarks."""
for bm in self.benchmarks:
yield from bm.benchmarks_iterator(env)
def benchmark_uris_iterator(self, env: LlvmEnv) -> Iterable[str]:
"""Return an iterator over the URIs of the benchmarks."""
for bm in self.benchmarks:
yield from bm.benchmark_uris_iterator(env)
# === Start of implementation details. ===
@validator("benchmarks", pre=True)
def validate_benchmarks(cls, value) -> List[BenchmarksEntry]:
return [BenchmarksEntry(**v) for v in value]
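# --- Editor's illustrative sketch (not part of the original module) ---
# Constructing the model from plain dictionaries, mirroring the YAML examples
# in the Benchmarks docstring; the dataset name and the max_benchmarks cap are
# only examples.
def _example_uris(env: LlvmEnv) -> List[str]:
    benchmarks = Benchmarks(
        benchmarks=[{"dataset": "benchmark://cbench-v1", "max_benchmarks": 3}]
    )
    return list(benchmarks.benchmark_uris_iterator(env))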
|
CompilerGym-development
|
examples/llvm_autotuning/benchmarks.py
|