python_code | repo_name | file_path
---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integration tests for the LLVM autotuners."""
from llvm_autotuning.autotuners import Autotuner
import compiler_gym
def test_autotune():
with compiler_gym.make("llvm-v0", reward_space="IrInstructionCount") as env:
env.reset(benchmark="benchmark://cbench-v1/crc32")
autotuner = Autotuner(
algorithm="random",
optimization_target="codesize",
search_time_seconds=3,
)
result = autotuner(env)
print(result)
assert result.benchmark == "benchmark://cbench-v1/crc32"
assert result.walltime >= 3
assert result.commandline == env.action_space.to_string(env.actions)
assert env.episode_reward >= 0
assert env.benchmark == "benchmark://cbench-v1/crc32"
assert env.reward_space == "IrInstructionCount"
|
CompilerGym-development
|
examples/llvm_autotuning/autotuners/random_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integration tests for the LLVM autotuners."""
import pytest
from llvm_autotuning.autotuners import Autotuner
import compiler_gym
@pytest.mark.skip(reason="Workaround from pytest: I/O operation on closed file")
def test_autotune():
with compiler_gym.make("llvm-v0", reward_space="IrInstructionCount") as env:
env.reset(benchmark="benchmark://cbench-v1/crc32")
autotuner = Autotuner(
algorithm="opentuner_ga",
optimization_target="codesize",
search_time_seconds=3,
)
result = autotuner(env)
print(result)
assert result.benchmark == "benchmark://cbench-v1/crc32"
assert result.walltime >= 3
assert result.commandline == env.action_space.to_string(env.actions)
assert env.episode_reward >= 0
assert env.benchmark == "benchmark://cbench-v1/crc32"
assert env.reward_space == "IrInstructionCount"
|
CompilerGym-development
|
examples/llvm_autotuning/autotuners/opentuner_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from time import time
def greedy(env, search_time_seconds: int, **kwargs) -> None:
    """A greedy search policy.
    At each step, the policy evaluates all possible actions and selects the
    action with the highest reward. The search stops when no action produces a
    positive reward, when applying the best action ends the episode, or when
    the search time budget is exhausted.
    :param env: The environment to optimize.
    :param search_time_seconds: The maximum time to search for, in seconds.
    """
def eval_action(env, action: int):
with env.fork() as fkd:
return (fkd.step(action)[1], action)
end_time = time() + search_time_seconds
while time() < end_time:
best = max(eval_action(env, action) for action in range(env.action_space.n))
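        # Stop when even the best action does not improve the reward, or when
        # applying it ends the episode (step() returns `done` as the third
        # element of its result tuple).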
if best[0] <= 0 or env.step(best[1])[2]:
return
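# A minimal usage sketch (not part of the original file), assuming a local
# CompilerGym installation and the cbench-v1 dataset. The benchmark and time
# budget below are illustrative placeholders.
if __name__ == "__main__":
    import compiler_gym
    with compiler_gym.make("llvm-v0", reward_space="IrInstructionCount") as env:
        env.reset(benchmark="benchmark://cbench-v1/crc32")
        greedy(env, search_time_seconds=30)
        print("Actions:", env.action_space.to_string(env.actions))
        print("Cumulative reward:", env.episode_reward)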
|
CompilerGym-development
|
examples/llvm_autotuning/autotuners/greedy.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This module defines a class for describing LLVM autotuners."""
import tempfile
from pathlib import Path
from typing import Any, Dict
from llvm_autotuning.autotuners.greedy import greedy # noqa autotuner
from llvm_autotuning.autotuners.nevergrad_ import nevergrad # noqa autotuner
from llvm_autotuning.autotuners.opentuner_ import opentuner_ga # noqa autotuner
from llvm_autotuning.autotuners.random_ import random # noqa autotuner
from llvm_autotuning.optimization_target import OptimizationTarget
from pydantic import BaseModel, validator
from compiler_gym.compiler_env_state import CompilerEnvState
from compiler_gym.envs import CompilerEnv
from compiler_gym.util.capture_output import capture_output
from compiler_gym.util.runfiles_path import transient_cache_path
from compiler_gym.util.temporary_working_directory import temporary_working_directory
from compiler_gym.util.timer import Timer
class Autotuner(BaseModel):
"""This class represents an instance of an autotuning algorithm.
After instantiating from a config dict, instances of this class can be used
to tune CompilerEnv instances:
>>> autotuner = Autotuner(
algorithm="greedy",
optimization_target="codesize",
search_time_seconds=1800,
)
>>> env = compiler_gym.make("llvm-v0")
>>> autotuner(env)
"""
algorithm: str
"""The name of the autotuner algorithm."""
optimization_target: OptimizationTarget
"""The target that the autotuner is optimizing for."""
search_time_seconds: int
"""The search budget of the autotuner."""
algorithm_config: Dict[str, Any] = {}
"""An optional dictionary of keyword arguments for the autotuner function."""
@property
def autotune(self):
"""Return the autotuner function for this algorithm.
An autotuner function takes a single CompilerEnv argument and optional
keyword configuration arguments (determined by algorithm_config) and
tunes the environment, returning nothing.
"""
try:
return globals()[self.algorithm]
except KeyError as e:
            raise ValueError(
                f"Unknown autotuner: {self.algorithm}.\n"
                f"Make sure the {self.algorithm}() function definition is available "
                f"in the global namespace of {__file__}."
) from e
@property
def autotune_kwargs(self) -> Dict[str, Any]:
"""Get the keyword arguments dictionary for the autotuner."""
kwargs = {
"optimization_target": self.optimization_target,
"search_time_seconds": self.search_time_seconds,
}
kwargs.update(self.algorithm_config)
return kwargs
def __call__(self, env: CompilerEnv, seed: int = 0xCC) -> CompilerEnvState:
"""Autotune the given environment.
:param env: The environment to autotune.
:param seed: The random seed for the autotuner.
:returns: A CompilerEnvState tuple describing the autotuning result.
"""
# Run the autotuner in a temporary working directory and capture the
# stdout/stderr.
with tempfile.TemporaryDirectory(
dir=transient_cache_path("."), prefix="autotune-"
) as tmpdir:
with temporary_working_directory(Path(tmpdir)):
with capture_output():
with Timer() as timer:
self.autotune(env, seed=seed, **self.autotune_kwargs)
return CompilerEnvState(
benchmark=env.benchmark.uri,
commandline=env.action_space.to_string(env.actions),
walltime=timer.time,
reward=self.optimization_target.final_reward(env),
)
# === Start of implementation details. ===
@validator("algorithm_config", pre=True)
def validate_algorithm_config(cls, value) -> Dict[str, Any]:
return value or {}
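# A minimal usage sketch (not part of the original file) showing how
# algorithm-specific keyword arguments are passed through algorithm_config.
# It assumes a local CompilerGym installation; the values are placeholders.
if __name__ == "__main__":
    import compiler_gym
    tuner = Autotuner(
        algorithm="nevergrad",
        optimization_target="codesize",
        search_time_seconds=10,
        algorithm_config={"episode_length": 5},
    )
    with compiler_gym.make("llvm-v0", reward_space="IrInstructionCount") as env:
        env.reset(benchmark="benchmark://cbench-v1/crc32")
        print(tuner(env))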
|
CompilerGym-development
|
examples/llvm_autotuning/autotuners/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from tempfile import TemporaryDirectory
from llvm_autotuning.optimization_target import OptimizationTarget
from compiler_gym.envs import CompilerEnv
from compiler_gym.random_search import random_search as lib_random_search
from compiler_gym.util.runfiles_path import transient_cache_path
def random(
env: CompilerEnv,
optimization_target: OptimizationTarget,
search_time_seconds: int,
patience: int = 350,
**kwargs
) -> None:
"""Run a random search on the environment.
:param env: The environment to optimize.
:param optimization_target: The target to optimize for.
:param search_time_seconds: The total search time.
:param patience: The number of steps to search without an improvement before
resetting to a new trajectory.
"""
with TemporaryDirectory(
dir=transient_cache_path("."), prefix="autotune-"
) as tmpdir:
final_env = lib_random_search(
make_env=lambda: optimization_target.make_env(env.benchmark),
outdir=tmpdir,
total_runtime=search_time_seconds,
patience=patience,
nproc=1,
)
env.apply(final_env.state)
final_env.close()
|
CompilerGym-development
|
examples/llvm_autotuning/autotuners/random_.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from functools import lru_cache
from time import time
from typing import Tuple
import nevergrad as ng
from llvm_autotuning.optimization_target import OptimizationTarget
from compiler_gym.envs import CompilerEnv
from compiler_gym.util.gym_type_hints import ActionType
def nevergrad(
env: CompilerEnv,
optimization_target: OptimizationTarget,
search_time_seconds: int,
seed: int,
episode_length: int = 100,
optimizer: str = "DiscreteLenglerOnePlusOne",
**kwargs
) -> None:
"""Optimize an environment using nevergrad.
Nevergrad is a gradient-free optimization platform that provides
    implementations of various black-box optimization techniques:
https://facebookresearch.github.io/nevergrad/
"""
if optimization_target == OptimizationTarget.RUNTIME:
def calculate_negative_reward(actions: Tuple[ActionType]) -> float:
env.reset()
env.multistep(actions)
return -env.episode_reward
else:
# Only cache the deterministic non-runtime rewards.
@lru_cache(maxsize=int(1e4))
def calculate_negative_reward(actions: Tuple[ActionType]) -> float:
env.reset()
env.multistep(actions)
return -env.episode_reward
params = ng.p.Choice(
choices=range(env.action_space.n),
repetitions=episode_length,
deterministic=True,
)
params.random_state.seed(seed)
optimizer_class = getattr(ng.optimizers, optimizer)
optimizer = optimizer_class(parametrization=params, budget=1, num_workers=1)
end_time = time() + search_time_seconds
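    # Standard nevergrad ask/tell loop: the optimizer proposes a candidate
    # action sequence and we report its negated episode reward, so that
    # minimizing the objective maximizes reward.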
while time() < end_time:
x = optimizer.ask()
optimizer.tell(x, calculate_negative_reward(x.value))
# Get best solution and replay it.
recommendation = optimizer.provide_recommendation()
env.reset()
env.multistep(recommendation.value)
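# A minimal usage sketch (not part of the original file), assuming a local
# CompilerGym installation. OptimizationTarget("codesize") is an assumption
# that mirrors the "codesize" value used by the tests in this directory.
if __name__ == "__main__":
    import compiler_gym
    with compiler_gym.make("llvm-v0", reward_space="IrInstructionCount") as env:
        env.reset(benchmark="benchmark://cbench-v1/crc32")
        nevergrad(
            env,
            optimization_target=OptimizationTarget("codesize"),
            search_time_seconds=10,
            seed=0,
            episode_length=5,
        )
        print(env.action_space.to_string(env.actions))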
|
CompilerGym-development
|
examples/llvm_autotuning/autotuners/nevergrad_.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integration tests for the LLVM autotuners."""
from llvm_autotuning.autotuners import Autotuner
import compiler_gym
def test_autotune():
with compiler_gym.make("llvm-v0", reward_space="IrInstructionCount") as env:
env.reset(benchmark="benchmark://cbench-v1/crc32")
env.reward_space = "IrInstructionCount"
autotuner = Autotuner(
algorithm="nevergrad",
optimization_target="codesize",
search_time_seconds=3,
)
result = autotuner(env)
print(result)
assert result.benchmark == "benchmark://cbench-v1/crc32"
assert result.walltime >= 3
assert result.commandline == env.action_space.to_string(env.actions)
assert env.episode_reward >= 0
assert env.benchmark == "benchmark://cbench-v1/crc32"
assert env.reward_space == "IrInstructionCount"
|
CompilerGym-development
|
examples/llvm_autotuning/autotuners/nevergrad_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
import subprocess
import tempfile
import warnings
from pathlib import Path
import numpy as np
from llvm_autotuning.optimization_target import OptimizationTarget
from compiler_gym.envs.llvm import compute_observation
from compiler_gym.errors import ServiceError
from compiler_gym.service.client_service_compiler_env import ClientServiceCompilerEnv
from compiler_gym.third_party.llvm import opt_path
from compiler_gym.util.runfiles_path import transient_cache_path
# Ignore import deprecation warnings from opentuner.
warnings.filterwarnings("ignore", category=DeprecationWarning)
import opentuner as ot # noqa: E402
from opentuner import ( # noqa: E402
ConfigurationManipulator,
MeasurementInterface,
PermutationParameter,
Result,
)
from opentuner.search.binaryga import BinaryGA # noqa: E402
from opentuner.search.manipulator import BooleanParameter # noqa: E402
from opentuner.tuningrunmain import TuningRunMain # noqa: E402
def opentuner_ga(
env: ClientServiceCompilerEnv,
optimization_target: OptimizationTarget,
search_time_seconds: int,
seed: int,
max_copies_of_pass: int = 4,
population: int = 200,
tournament: int = 5,
mutate: int = 2,
sharing: int = 1,
**kwargs,
) -> None:
"""Optimize an environment using opentuner.
OpenTuner is an extensible framework for program autotuning:
https://opentuner.org/
"""
cache_dir = transient_cache_path("llvm_autotuning")
cache_dir.mkdir(exist_ok=True, parents=True)
with tempfile.TemporaryDirectory(dir=cache_dir, prefix="opentuner-") as tmpdir:
argparser = ot.default_argparser()
args = argparser.parse_args(
args=[
f"--stop-after={search_time_seconds}",
f"--database={tmpdir}/opentuner.db",
"--no-dups",
"--technique=custom",
f"--seed={seed}",
"--parallelism=1",
]
)
ot.search.technique.register(
BinaryGA(
population=population,
tournament=tournament,
mutate=mutate,
sharing=sharing,
name="custom",
)
)
manipulator = LlvmOptFlagsTuner(
args,
target=optimization_target,
benchmark=env.benchmark,
max_copies_of_pass=max_copies_of_pass,
)
tuner = TuningRunMain(manipulator, args)
tuner.main()
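        # Re-evaluate the best configuration found by the tuner and then replay
        # it on the environment. The two small classes below are minimal
        # stand-ins for OpenTuner's DesiredResult and Configuration types, just
        # enough to call LlvmOptFlagsTuner.run() one final time.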
class DesiredResult:
def __init__(self, configuration) -> None:
self.configuration = configuration
class Configuration:
def __init__(self, data) -> None:
self.data = data
wrapped = DesiredResult(Configuration(manipulator.best_config))
manipulator.run(wrapped, None, None)
env.reset()
env.multistep(manipulator.serialize_actions(manipulator.best_config))
class LlvmOptFlagsTuner(MeasurementInterface):
def __init__(
self,
*args,
target: OptimizationTarget,
benchmark=None,
max_copies_of_pass=4,
**kwargs,
):
super().__init__(*args, **kwargs)
self.opt = str(opt_path())
self.env = target.make_env(benchmark)
self.env.reset()
self.target = target
self.observation_space = self.env.observation.spaces[
target.optimization_space_enum_name
]
self.unoptimized_path = str(
self.env.service.connection.cache.path / "opentuner-unoptimized.bc"
)
self.tmp_optimized_path = str(
self.env.service.connection.cache.path / "opentuner-optimized.bc"
)
self.env.write_bitcode(self.unoptimized_path)
self.env.write_bitcode(self.tmp_optimized_path)
self.cost_o0 = self.env.observation["IrInstructionCountO0"]
self.cost_oz = self.env.observation["IrInstructionCountOz"]
self.flags_limit = self.env.action_space.n * max_copies_of_pass
self.run_count = 0
self.best_config = None
def manipulator(self) -> ConfigurationManipulator:
"""Define the search space."""
manipulator = ConfigurationManipulator()
# A permutation parameter to order the passes that are present.
manipulator.add_parameter(
PermutationParameter("flag_order", list(range(self.flags_limit)))
)
# Boolean parameters for whether each pass is present.
for i in range(self.flags_limit):
manipulator.add_parameter(BooleanParameter(f"flag{i}"))
def biased_random():
cfg = ConfigurationManipulator.random(manipulator)
            # The search space contains max_copies_of_pass duplicate slots for
            # each flag; disable all but action_space.n of them so that, in
            # expectation, about n / 2 flags end up enabled.
disabled = random.sample(
range(self.flags_limit), k=self.flags_limit - self.env.action_space.n
)
cfg.update({f"flag{x}": False for x in disabled})
return cfg
manipulator.random = biased_random
return manipulator
def serialize_flags(self, config):
"""Convert a point in the search space to an ordered list of opt flags."""
return [self.env.action_space.flags[a] for a in self.serialize_actions(config)]
    def serialize_actions(self, config):
        """Convert a point in the search space to an ordered list of action indices."""
n = len(self.env.action_space.flags)
serialized = []
for i in config["flag_order"]:
if config[f"flag{i}"]:
serialized.append(i % n)
return serialized
def __del__(self):
self.env.close()
def run(self, desired_result, input, limit):
"""Run a single config."""
del input # Unused
del limit # Unused
self.run_count += 1
try:
# Run opt to produce an optimized bitcode file.
cmd = [
self.opt,
self.unoptimized_path,
"-o",
self.tmp_optimized_path,
]
cmd += self.serialize_flags(desired_result.configuration.data)
subprocess.check_call(
cmd, timeout=300, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
)
if not Path(self.tmp_optimized_path).is_file():
return Result(time=float("inf"))
except (subprocess.CalledProcessError, subprocess.TimeoutExpired):
return Result(time=float("inf"))
# We need to jump through a couple of hoops to optimize for runtime
# using OpenTuner. Replace the environment benchmark with the current
# optimized file. Use the same benchmark protocol buffer so that any
# dynamic configuration is preserved.
if self.target == OptimizationTarget.RUNTIME:
try:
new_benchmark = self.env.benchmark
new_benchmark.proto.program.uri = f"file:///{self.tmp_optimized_path}"
self.env.reset(benchmark=new_benchmark)
return Result(time=float(np.median(self.env.observation.Runtime())))
except (ServiceError, TimeoutError):
return Result(time=float("inf"))
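        # For non-runtime targets the cost is the value of the target's
        # observation space (e.g. an instruction count) computed on the
        # optimized bitcode; OpenTuner minimizes whatever is reported in
        # Result.time.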
try:
return Result(
time=float(
compute_observation(self.observation_space, self.tmp_optimized_path)
)
)
except (ValueError, TimeoutError):
return Result(time=float("inf"))
def save_final_config(self, configuration):
        # Save the best configuration so that it can be replayed after tuning.
self.best_config = configuration.data
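# An illustrative sketch (not part of the original file) of how a point in the
# OpenTuner search space maps to an action sequence, using a hypothetical
# configuration for an action space with n=3 flags and max_copies_of_pass=2
# (six flag slots). Slot i maps to action i % n; only slots whose boolean
# parameter is True are kept, in "flag_order" order, mirroring
# LlvmOptFlagsTuner.serialize_actions().
if __name__ == "__main__":
    n = 3
    config = {
        "flag_order": [4, 0, 5, 2, 1, 3],
        "flag0": True,
        "flag1": False,
        "flag2": True,
        "flag3": False,
        "flag4": True,
        "flag5": False,
    }
    actions = [i % n for i in config["flag_order"] if config[f"flag{i}"]]
    print(actions)  # [1, 0, 2]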
|
CompilerGym-development
|
examples/llvm_autotuning/autotuners/opentuner_.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integration tests for the LLVM autotuners."""
import pytest
from llvm_autotuning.autotuners import Autotuner
import compiler_gym
@pytest.mark.skip(reason="greedy takes a long time")
def test_autotune():
with compiler_gym.make("llvm-v0", reward_space="IrInstructionCount") as env:
env.reset(benchmark="benchmark://cbench-v1/crc32")
autotuner = Autotuner(
algorithm="greedy",
optimization_target="codesize",
search_time_seconds=3,
)
result = autotuner(env)
print(result)
assert result.benchmark == "benchmark://cbench-v1/crc32"
assert result.walltime >= 3
assert result.commandline == env.action_space.to_string(env.actions)
assert env.episode_reward
assert env.benchmark == "benchmark://cbench-v1/crc32"
assert env.reward_space == "IrInstructionCount"
|
CompilerGym-development
|
examples/llvm_autotuning/autotuners/greedy_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import subprocess
import sys
from pathlib import Path
def test_llvm_autotuner_integration_test(tmp_path: Path):
subprocess.check_call(
[
sys.executable,
"-m",
"llvm_autotuning.tune",
"-m",
"experiment=my-exp",
f"outputs={tmp_path}/llvm_autotuning",
"executor.cpus=1",
"num_replicas=1",
"autotuner=nevergrad",
"autotuner.optimization_target=codesize",
"autotuner.search_time_seconds=3",
"autotuner.algorithm_config.episode_length=5",
"benchmarks=single_benchmark_for_testing",
]
)
assert (Path(tmp_path) / "llvm_autotuning/my-exp").is_dir()
|
CompilerGym-development
|
examples/llvm_autotuning/tests/integration_test.py
|
CompilerGym-development
|
examples/gcc_autotuning/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Autotuning script for GCC command line options."""
import random
from itertools import islice, product
from multiprocessing import Lock
from pathlib import Path
from typing import NamedTuple
import numpy as np
from absl import app, flags
from geneticalgorithm import geneticalgorithm as ga
import compiler_gym
import compiler_gym.util.flags.nproc # noqa Flag definition.
import compiler_gym.util.flags.output_dir # noqa Flag definition.
import compiler_gym.util.flags.seed # noqa Flag definition.
from compiler_gym.envs import CompilerEnv
from compiler_gym.envs.gcc import DEFAULT_GCC
from compiler_gym.errors import ServiceError
from compiler_gym.util.executor import Executor
from compiler_gym.util.runfiles_path import create_user_logs_dir
from .info import info
FLAGS = flags.FLAGS
flags.DEFINE_string(
"gcc_bin", DEFAULT_GCC, "Binary to use for gcc. Use docker:<image> for docker"
)
flags.DEFINE_list(
"gcc_benchmark",
None,
"List of benchmarks to search. Use 'all' for all. "
"Defaults to the 12 CHStone benchmarks.",
)
flags.DEFINE_list(
"search",
["random", "hillclimb", "genetic"],
"Type of search to perform. One of: {random,hillclimb,genetic}",
)
flags.DEFINE_integer(
"timeout", 60, "Timeout for each compilation in seconds", lower_bound=1
)
flags.DEFINE_integer(
"gcc_search_budget",
100,
"Maximum number of compilations per benchmark",
lower_bound=1,
)
flags.DEFINE_integer(
"gcc_search_repetitions", 1, "Number of times to repeat each search", lower_bound=1
)
flags.DEFINE_integer(
"actions_per_step",
10,
"Number of actions per compilation for action based searches",
lower_bound=1,
)
flags.DEFINE_integer("max_range", 256, "Limit space per option", lower_bound=0)
flags.DEFINE_integer("pop_size", 100, "Population size for GA", lower_bound=1)
flags.DEFINE_enum(
"objective", "obj_size", ["asm_size", "obj_size"], "Which objective to use"
)
# Lock to prevent multiple processes all calling compiler_gym.make("gcc-v0")
# simultaneously as this can cause issues with the docker API.
GCC_ENV_CONSTRUCTOR_LOCK = Lock()
def random_search(env: CompilerEnv):
best = float("inf")
for _ in range(FLAGS.gcc_search_budget):
env.reset()
env.choices = [
random.randint(-1, min(FLAGS.max_range, len(opt) - 1))
for opt in env.gcc_spec.options
]
best = min(objective(env), best)
return best
def hill_climb(env: CompilerEnv):
best = float("inf")
for _ in range(FLAGS.gcc_search_budget):
with env.fork() as fkd:
fkd.choices = [
random.randint(
max(-1, x - 5), min(len(env.gcc_spec.options[i]) - 1, x + 5)
)
for i, x in enumerate(env.choices)
]
cost = objective(fkd)
if cost < objective(env):
best = cost
env.choices = fkd.choices
return best
def genetic_algorithm(env: CompilerEnv):
def f(choices):
env.reset()
env.choices = choices = list(map(int, choices))
s = objective(env)
return s if s > 0 else float("inf")
model = ga(
function=f,
dimension=len(env.gcc_spec.options),
variable_type="int",
variable_boundaries=np.array(
[[-1, min(FLAGS.max_range, len(opt) - 1)] for opt in env.gcc_spec.options]
),
function_timeout=FLAGS.timeout,
algorithm_parameters={
"population_size": FLAGS.pop_size,
"max_num_iteration": max(1, int(FLAGS.gcc_search_budget / FLAGS.pop_size)),
"mutation_probability": 0.1,
"elit_ratio": 0.01,
"crossover_probability": 0.5,
"parents_portion": 0.3,
"crossover_type": "uniform",
"max_iteration_without_improv": None,
},
)
model.run()
return model.best_function
def objective(env) -> int:
"""Get the objective from an environment"""
# Retry loop to defend against flaky environment.
for _ in range(5):
try:
return env.observation[FLAGS.objective]
except ServiceError as e:
print(f"Objective function failed: {e}")
env.reset()
return env.observation[FLAGS.objective]
_SEARCH_FUNCTIONS = {
"random": random_search,
"hillclimb": hill_climb,
"genetic": genetic_algorithm,
}
class SearchResult(NamedTuple):
search: str
benchmark: str
best_size: int
baseline_size: int
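    # scaled_best > 1 means the search found a smaller result than the -Os
    # baseline; e.g. baseline_size=100, best_size=90 gives 100 / 90 ~ 1.11.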
@property
def scaled_best(self) -> float:
return self.baseline_size / self.best_size
def run_search(search: str, benchmark: str, seed: int) -> SearchResult:
    """Run a single search on a benchmark and return the result."""
with GCC_ENV_CONSTRUCTOR_LOCK:
env = compiler_gym.make("gcc-v0", gcc_bin=FLAGS.gcc_bin)
try:
random.seed(seed)
np.random.seed(seed)
env.reset(benchmark=benchmark)
env.step(env.action_space["-Os"])
baseline_size = objective(env)
env.reset(benchmark=benchmark)
best_size = _SEARCH_FUNCTIONS[search](env)
finally:
env.close()
return SearchResult(
search=search,
benchmark=benchmark,
best_size=best_size,
baseline_size=baseline_size,
)
def main(argv):
del argv # Unused.
# Validate the --search values now.
for search in FLAGS.search:
if search not in _SEARCH_FUNCTIONS:
raise app.UsageError(f"Invalid --search value: {search}")
def get_benchmarks():
benchmarks = []
with compiler_gym.make("gcc-v0", gcc_bin=FLAGS.gcc_bin) as env:
env.reset()
if FLAGS.gcc_benchmark == ["all"]:
for dataset in env.datasets:
benchmarks += islice(dataset.benchmark_uris(), 50)
elif FLAGS.gcc_benchmark:
for uri in FLAGS.gcc_benchmark:
benchmarks.append(env.datasets.benchmark(uri).uri)
else:
benchmarks = list(
env.datasets["benchmark://chstone-v0"].benchmark_uris()
)
benchmarks.sort()
return benchmarks
logdir = (
Path(FLAGS.output_dir)
if FLAGS.output_dir
else create_user_logs_dir("gcc_autotuning")
)
logdir.mkdir(exist_ok=True, parents=True)
with open(logdir / "results.csv", "w") as f:
print(
"search",
"benchmark",
"scaled_size",
"size",
"baseline_size",
sep=",",
file=f,
)
print("Logging results to", logdir)
# Parallel execution environment. Use flag --nproc to control the number of
# worker processes.
executor = Executor(type="local", timeout_hours=12, cpus=FLAGS.nproc, block=True)
with executor.get_executor(logs_dir=logdir) as session:
jobs = []
# Submit each search instance as a separate job.
grid = product(
range(FLAGS.gcc_search_repetitions), FLAGS.search, get_benchmarks()
)
for _, search, benchmark in grid:
if not benchmark:
raise app.UsageError("Empty benchmark name not allowed")
jobs.append(
session.submit(
run_search,
search=search,
benchmark=benchmark,
seed=FLAGS.seed + len(jobs),
)
)
for job in jobs:
result = job.result()
print(result.benchmark, f"{result.scaled_best:.3f}x", sep="\t")
with open(logdir / "results.csv", "a") as f:
print(
result.search,
result.benchmark,
result.scaled_best,
result.best_size,
result.baseline_size,
sep=",",
file=f,
)
# Print results aggregates.
info([logdir])
if __name__ == "__main__":
app.run(main)
|
CompilerGym-development
|
examples/gcc_autotuning/tune.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
from pathlib import Path
from typing import List
import pandas as pd
from llvm_autotuning.experiment import Experiment
from pydantic import ValidationError
from typer import Typer
from compiler_gym.util.statistics import geometric_mean
app = Typer()
def experiments_from_paths(log_dirs: List[Path]) -> List[Experiment]:
experiments: List[Experiment] = []
for path in log_dirs:
try:
experiments += Experiment.from_logsdir(Path(path).expanduser())
except ValidationError as e:
print(e, file=sys.stderr)
sys.exit(1)
return experiments
@app.command()
def info(
log_dirs: List[Path] = ["~/logs/compiler_gym/gcc_autotuning"],
):
dfs: List[pd.DataFrame] = []
for path in log_dirs:
path = Path(path).expanduser()
for root, _, files in os.walk(path):
if "results.csv" not in files:
continue
root = Path(root)
df = pd.read_csv(root / "results.csv")
if not df.size:
continue
df["timestamp"] = "-".join([root.parent.name, root.name])
dfs.append(df)
    if not dfs:
        print("No results")
        return
df = pd.concat(dfs)
df = df.groupby(["timestamp", "search"])[["scaled_size"]].agg(geometric_mean)
df = df.rename(columns={"scaled_size": "geomean_reward"})
pd.set_option("display.max_rows", None)
print(df)
if __name__ == "__main__":
app()
|
CompilerGym-development
|
examples/gcc_autotuning/info.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import subprocess
import sys
from functools import lru_cache
from pathlib import Path
from typing import Iterable
import docker
import pytest
from absl.flags import FLAGS
from . import tune
def docker_is_available() -> bool:
"""Return whether docker is available."""
try:
docker.from_env()
return True
except docker.errors.DockerException:
return False
@lru_cache(maxsize=2)
def system_gcc_is_available() -> bool:
"""Return whether there is a system GCC available."""
try:
stdout = subprocess.check_output(
["gcc", "--version"], universal_newlines=True, stderr=subprocess.DEVNULL
)
# On some systems "gcc" may alias to a different compiler, so check for
# the presence of the name "gcc" in the first line of output.
return "gcc" in stdout.split("\n")[0].lower()
except (subprocess.CalledProcessError, FileNotFoundError):
return False
def system_gcc_path() -> str:
"""Return the path of the system GCC as a string."""
return subprocess.check_output(
["which", "gcc"], universal_newlines=True, stderr=subprocess.DEVNULL
).strip()
def gcc_bins() -> Iterable[str]:
"""Return a list of available GCCs."""
if docker_is_available():
yield "docker:gcc:11.2.0"
if system_gcc_is_available():
yield system_gcc_path()
@pytest.fixture(scope="module", params=gcc_bins())
def gcc_bin(request) -> str:
return request.param
@pytest.mark.parametrize("search", ["random", "hillclimb", "genetic"])
def test_tune_smoke_test(search: str, gcc_bin: str, capsys, tmpdir: Path):
tmpdir = Path(tmpdir)
flags = [
"argv0",
"--seed=0",
f"--output_dir={tmpdir}",
f"--gcc_bin={gcc_bin}",
"--gcc_benchmark=benchmark://chstone-v0/aes",
f"--search={search}",
"--pop_size=3",
"--gcc_search_budget=6",
]
sys.argv = flags
FLAGS.unparse_flags()
FLAGS(flags)
tune.main([])
out, _ = capsys.readouterr()
assert "benchmark://chstone-v0/aes" in out
assert (tmpdir / "results.csv").is_file()
|
CompilerGym-development
|
examples/gcc_autotuning/tune_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the example CompilerGym service."""
import socket
import subprocess
from pathlib import Path
from time import sleep
import gym
import numpy as np
import pytest
from flaky import flaky
import examples.example_compiler_gym_service as example
from compiler_gym.envs import CompilerEnv
from compiler_gym.errors import SessionNotFound
from compiler_gym.spaces import ActionSpace, Box, NamedDiscrete, Scalar, Sequence
from compiler_gym.util.commands import Popen
from tests.test_main import main
# Given that the C++ and Python service implementations have identical
# featuresets, we can parameterize the tests and run them against both backends.
EXAMPLE_ENVIRONMENTS = ["example-cc-v0", "example-py-v0"]
@pytest.fixture(scope="function", params=EXAMPLE_ENVIRONMENTS)
def env(request) -> CompilerEnv:
    """Test fixture that yields an environment."""
with gym.make(request.param) as env:
yield env
@pytest.fixture(
scope="module",
params=[example.EXAMPLE_CC_SERVICE_BINARY, example.EXAMPLE_PY_SERVICE_BINARY],
ids=["example-cc-v0", "example-py-v0"],
)
def bin(request) -> Path:
yield request.param
def test_invalid_arguments(bin: Path):
"""Test that running the binary with unrecognized arguments is an error."""
def run(cmd):
with Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
) as p:
stdout, stderr = p.communicate(timeout=10)
return p.returncode, stdout, stderr
returncode, _, stderr = run([str(bin), "foobar"])
assert "ERROR:" in stderr
assert "'foobar'" in stderr
assert returncode == 1
returncode, _, stderr = run([str(bin), "--foobar"])
# C++ and python flag parsing library emit slightly different error
# messages.
assert "ERROR:" in stderr or "FATAL" in stderr
assert "'foobar'" in stderr
assert returncode == 1
def test_versions(env: CompilerEnv):
"""Tests the GetVersion() RPC endpoint."""
assert env.compiler_version == "1.0.0"
def test_action_space(env: CompilerEnv):
"""Test that the environment reports the service's action spaces."""
assert env.action_spaces == [
ActionSpace(
NamedDiscrete(
name="default",
items=["a", "b", "c"],
)
)
]
def test_observation_spaces(env: CompilerEnv):
"""Test that the environment reports the service's observation spaces."""
env.reset()
assert env.observation.spaces.keys() == {"ir", "features", "runtime"}
assert env.observation.spaces["ir"].space == Sequence(
name="ir",
size_range=(0, np.iinfo(np.int64).max),
dtype=str,
)
assert env.observation.spaces["features"].space == Box(
name="features", shape=(3,), low=-100, high=100, dtype=int
)
assert env.observation.spaces["runtime"].space == Scalar(
name="runtime", min=0, max=np.inf, dtype=float
)
def test_reward_spaces(env: CompilerEnv):
"""Test that the environment reports the service's reward spaces."""
env.reset()
assert env.reward.spaces.keys() == {"runtime"}
def test_step_before_reset(env: CompilerEnv):
"""Taking a step() before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
env.step(0)
def test_observation_before_reset(env: CompilerEnv):
"""Taking an observation before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
_ = env.observation["ir"]
def test_reward_before_reset(env: CompilerEnv):
"""Taking a reward before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
_ = env.reward["runtime"]
def test_reset_invalid_benchmark(env: CompilerEnv):
"""Test requesting a specific benchmark."""
with pytest.raises(LookupError) as ctx:
env.reset(benchmark="example-compiler-v0/foobar")
assert str(ctx.value) == "Unknown program name"
def test_invalid_observation_space(env: CompilerEnv):
"""Test error handling with invalid observation space."""
with pytest.raises(LookupError):
env.observation_space = 100
def test_invalid_reward_space(env: CompilerEnv):
"""Test error handling with invalid reward space."""
with pytest.raises(LookupError):
env.reward_space = 100
def test_double_reset(env: CompilerEnv):
"""Test that reset() can be called twice."""
env.reset()
assert env.in_episode
env.reset()
assert env.in_episode
def test_double_reset_with_step(env: CompilerEnv):
"""Test that reset() can be called twice with a step."""
env.reset()
assert env.in_episode
_, _, done, info = env.step(env.action_space.sample())
assert not done, info
env.reset()
_, _, done, info = env.step(env.action_space.sample())
assert not done, info
assert env.in_episode
def test_Step_out_of_range(env: CompilerEnv):
"""Test error handling with an invalid action."""
env.reset()
with pytest.raises(ValueError) as ctx:
env.step(100)
assert str(ctx.value) == "Out-of-range"
def test_default_ir_observation(env: CompilerEnv):
"""Test default observation space."""
env.observation_space = "ir"
observation = env.reset()
assert observation == "Hello, world!"
observation, reward, done, info = env.step(0)
assert observation == "Hello, world!"
assert reward is None
assert not done
def test_default_features_observation(env: CompilerEnv):
"""Test default observation space."""
env.observation_space = "features"
observation = env.reset()
assert isinstance(observation, np.ndarray)
assert observation.shape == (3,)
assert observation.dtype == np.int64
assert observation.tolist() == [0, 0, 0]
def test_default_reward(env: CompilerEnv):
"""Test default reward space."""
env.reward_space = "runtime"
env.reset()
observation, reward, done, info = env.step(0)
assert observation is None
assert reward == 0
assert not done
def test_observations(env: CompilerEnv):
"""Test observation spaces."""
env.reset()
assert env.observation["ir"] == "Hello, world!"
np.testing.assert_array_equal(env.observation["features"], [0, 0, 0])
def test_rewards(env: CompilerEnv):
"""Test reward spaces."""
env.reset()
assert env.reward["runtime"] == 0
def test_benchmarks(env: CompilerEnv):
assert list(env.datasets.benchmark_uris()) == [
"benchmark://example-compiler-v0/foo",
"benchmark://example-compiler-v0/bar",
]
def test_fork(env: CompilerEnv):
env.reset()
env.step(0)
env.step(1)
other_env = env.fork()
try:
assert env.benchmark == other_env.benchmark
assert other_env.actions == [0, 1]
finally:
other_env.close()
@flaky # Timeout-based test.
def test_force_working_dir(bin: Path, tmpdir):
"""Test that expected files are generated in the working directory."""
tmpdir = Path(tmpdir) / "subdir"
with Popen([str(bin), "--working_dir", str(tmpdir)]):
for _ in range(10):
sleep(0.5)
if (tmpdir / "pid.txt").is_file() and (tmpdir / "port.txt").is_file():
break
else:
pytest.fail(f"PID file not found in {tmpdir}: {list(tmpdir.iterdir())}")
def unsafe_select_unused_port() -> int:
    """Try to select an unused port on the local system.
    There is nothing to prevent the port number returned by this function from
    being claimed by another process or thread, so it is liable to race
    conditions.
    """
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("127.0.0.1", 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
def port_is_free(port: int) -> bool:
    """Return whether the given port is free on the local system."""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("127.0.0.1", port))
return True
except OSError:
return False
finally:
s.close()
@flaky # Unsafe free port allocation
def test_force_port(bin: Path, tmpdir):
"""Test that a forced --port value is respected."""
port = unsafe_select_unused_port()
assert port_is_free(port) # Sanity check
tmpdir = Path(tmpdir)
with Popen([str(bin), "--port", str(port), "--working_dir", str(tmpdir)]):
for _ in range(10):
sleep(0.5)
if (tmpdir / "pid.txt").is_file() and (tmpdir / "port.txt").is_file():
break
else:
pytest.fail(f"PID file not found in {tmpdir}: {list(tmpdir.iterdir())}")
with open(tmpdir / "port.txt") as f:
actual_port = int(f.read())
assert actual_port == port
assert not port_is_free(actual_port)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
examples/example_compiler_gym_service/env_tests.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the example CompilerGym service."""
import os
import socket
import subprocess
import sys
from getpass import getuser
from pathlib import Path
from time import sleep
from typing import Iterable, List, Optional
import gym
import numpy as np
import pytest
from flaky import flaky
from compiler_gym.datasets import Benchmark, Dataset
from compiler_gym.datasets.uri import BenchmarkUri
from compiler_gym.envs import CompilerEnv
from compiler_gym.service import SessionNotFound
from compiler_gym.spaces import Box, NamedDiscrete, Reward, Scalar, Sequence
from compiler_gym.util import debug_util as dbg
from compiler_gym.util.commands import Popen
from compiler_gym.util.registration import register
EXAMPLE_PY_SERVICE_BINARY: Path = Path(
"example_compiler_gym_service/service_py/example_service.py"
)
assert EXAMPLE_PY_SERVICE_BINARY.is_file(), "Service script not found"
class RuntimeReward(Reward):
"""An example reward that uses changes in the "runtime" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
name="runtime",
observation_spaces=["runtime"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.previous_runtime = None
def reset(self, benchmark: str, observation_view):
del benchmark # unused
self.previous_runtime = None
def update(self, action, observations, observation_view):
del action
del observation_view
if self.previous_runtime is None:
self.previous_runtime = observations[0]
reward = float(self.previous_runtime - observations[0])
self.previous_runtime = observations[0]
return reward
class ExampleDataset(Dataset):
def __init__(self, *args, **kwargs):
super().__init__(
name="benchmark://example-compiler-v0",
license="MIT",
description="An example dataset",
)
self._benchmarks = {
"/foo": Benchmark.from_file_contents(
"benchmark://example-compiler-v0/foo", "Ir data".encode("utf-8")
),
"/bar": Benchmark.from_file_contents(
"benchmark://example-compiler-v0/bar", "Ir data".encode("utf-8")
),
}
def benchmark_uris(self) -> Iterable[str]:
yield from (
f"benchmark://example-compiler-v0{k}" for k in self._benchmarks.keys()
)
def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark:
if uri.path in self._benchmarks:
return self._benchmarks[uri.path]
else:
raise LookupError("Unknown program name")
# Register example-compiler-v0 for use with gym.make(...).
register(
id="example-without-bazel-v0",
entry_point="compiler_gym.service.client_service_compiler_env:ClientServiceCompilerEnv",
kwargs={
"service": EXAMPLE_PY_SERVICE_BINARY,
"rewards": [RuntimeReward()],
"datasets": [ExampleDataset()],
},
)
@pytest.fixture(scope="function")
def env() -> CompilerEnv:
    """Test fixture that yields an environment."""
with gym.make("example-without-bazel-v0") as env:
yield env
@pytest.fixture(scope="module")
def bin() -> Path:
return EXAMPLE_PY_SERVICE_BINARY
def test_invalid_arguments(bin: Path):
"""Test that running the binary with unrecognized arguments is an error."""
def run(cmd):
with Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
) as p:
stdout, stderr = p.communicate(timeout=60)
return p.returncode, stdout, stderr
returncode, _, stderr = run([str(bin), "foobar"])
assert "ERROR:" in stderr
assert "'foobar'" in stderr
assert returncode == 1
returncode, _, stderr = run([str(bin), "--foobar"])
# C++ and python flag parsing library emit slightly different error
# messages.
assert "ERROR:" in stderr or "FATAL" in stderr
assert "'foobar'" in stderr
assert returncode == 1
def test_versions(env: CompilerEnv):
"""Tests the GetVersion() RPC endpoint."""
assert env.compiler_version == "1.0.0"
def test_action_space(env: CompilerEnv):
"""Test that the environment reports the service's action spaces."""
assert env.action_spaces == [
NamedDiscrete(
name="default",
items=["a", "b", "c"],
)
]
def test_observation_spaces(env: CompilerEnv):
"""Test that the environment reports the service's observation spaces."""
env.reset()
assert env.observation.spaces.keys() == {"ir", "features", "runtime"}
ir_space = env.observation.spaces["ir"]
assert isinstance(ir_space.space, Sequence)
assert ir_space.space.dtype == str
assert ir_space.space.size_range == (0, np.iinfo(np.int64).max)
feature_space = env.observation.spaces["features"].space
assert isinstance(feature_space, Box)
assert feature_space.shape == (3,)
assert np.all(feature_space.low == [-100, -100, -100])
assert np.all(feature_space.high == [100, 100, 100])
assert feature_space.dtype == int
runtime_space = env.observation.spaces["runtime"].space
assert isinstance(runtime_space, Scalar)
assert runtime_space.min == 0
assert runtime_space.max == np.inf
assert runtime_space.dtype == float
def test_reward_spaces(env: CompilerEnv):
"""Test that the environment reports the service's reward spaces."""
env.reset()
assert env.reward.spaces.keys() == {"runtime"}
def test_step_before_reset(env: CompilerEnv):
"""Taking a step() before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
env.step(0)
def test_observation_before_reset(env: CompilerEnv):
"""Taking an observation before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
_ = env.observation["ir"]
def test_reward_before_reset(env: CompilerEnv):
"""Taking a reward before reset() is illegal."""
with pytest.raises(SessionNotFound, match=r"Must call reset\(\) before step\(\)"):
_ = env.reward["runtime"]
def test_reset_invalid_benchmark(env: CompilerEnv):
"""Test requesting a specific benchmark."""
with pytest.raises(LookupError) as ctx:
env.reset(benchmark="example-compiler-v0/foobar")
assert str(ctx.value) == "Unknown program name"
def test_invalid_observation_space(env: CompilerEnv):
"""Test error handling with invalid observation space."""
with pytest.raises(LookupError):
env.observation_space = 100
def test_invalid_reward_space(env: CompilerEnv):
"""Test error handling with invalid reward space."""
with pytest.raises(LookupError):
env.reward_space = 100
def test_double_reset(env: CompilerEnv):
"""Test that reset() can be called twice."""
env.reset()
assert env.in_episode
env.reset()
assert env.in_episode
def test_double_reset_with_step(env: CompilerEnv):
"""Test that reset() can be called twice with a step."""
env.reset()
assert env.in_episode
_, _, done, info = env.step(env.action_space.sample())
assert not done, info
env.reset()
_, _, done, info = env.step(env.action_space.sample())
assert not done, info
assert env.in_episode
def test_Step_out_of_range(env: CompilerEnv):
"""Test error handling with an invalid action."""
env.reset()
with pytest.raises(ValueError) as ctx:
env.step(100)
assert str(ctx.value) == "Out-of-range"
def test_default_ir_observation(env: CompilerEnv):
"""Test default observation space."""
env.observation_space = "ir"
observation = env.reset()
assert observation == "Hello, world!"
observation, reward, done, info = env.step(0)
assert observation == "Hello, world!"
assert reward is None
assert not done
def test_default_features_observation(env: CompilerEnv):
"""Test default observation space."""
env.observation_space = "features"
observation = env.reset()
assert isinstance(observation, np.ndarray)
assert observation.shape == (3,)
assert observation.dtype == np.int64
assert observation.tolist() == [0, 0, 0]
def test_default_reward(env: CompilerEnv):
"""Test default reward space."""
env.reward_space = "runtime"
env.reset()
observation, reward, done, info = env.step(0)
assert observation is None
assert reward == 0
assert not done
def test_observations(env: CompilerEnv):
"""Test observation spaces."""
env.reset()
assert env.observation["ir"] == "Hello, world!"
np.testing.assert_array_equal(env.observation["features"], [0, 0, 0])
def test_rewards(env: CompilerEnv):
"""Test reward spaces."""
env.reset()
assert env.reward["runtime"] == 0
def test_benchmarks(env: CompilerEnv):
assert list(env.datasets.benchmark_uris()) == [
"benchmark://example-compiler-v0/foo",
"benchmark://example-compiler-v0/bar",
]
def test_fork(env: CompilerEnv):
env.reset()
env.step(0)
env.step(1)
other_env = env.fork()
try:
assert env.benchmark == other_env.benchmark
assert other_env.actions == [0, 1]
finally:
other_env.close()
@flaky # Timeout-based test.
def test_force_working_dir(bin: Path, tmpdir):
"""Test that expected files are generated in the working directory."""
tmpdir = Path(tmpdir) / "subdir"
with Popen([str(bin), "--working_dir", str(tmpdir)]):
for _ in range(10):
sleep(0.5)
if (tmpdir / "pid.txt").is_file() and (tmpdir / "port.txt").is_file():
break
else:
pytest.fail(f"PID file not found in {tmpdir}: {list(tmpdir.iterdir())}")
def unsafe_select_unused_port() -> int:
    """Try to select an unused port on the local system.
    There is nothing to prevent the port number returned by this function from
    being claimed by another process or thread, so it is liable to race
    conditions.
    """
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("127.0.0.1", 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
def port_is_free(port: int) -> bool:
    """Return whether the given port is free on the local system."""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("127.0.0.1", port))
return True
except OSError:
return False
finally:
s.close()
@flaky # Unsafe free port allocation
def test_force_port(bin: Path, tmpdir):
"""Test that a forced --port value is respected."""
port = unsafe_select_unused_port()
assert port_is_free(port) # Sanity check
tmpdir = Path(tmpdir)
with Popen([str(bin), "--port", str(port), "--working_dir", str(tmpdir)]):
for _ in range(10):
sleep(0.5)
if (tmpdir / "pid.txt").is_file() and (tmpdir / "port.txt").is_file():
break
else:
pytest.fail(f"PID file not found in {tmpdir}: {list(tmpdir.iterdir())}")
with open(tmpdir / "port.txt") as f:
actual_port = int(f.read())
assert actual_port == port
assert not port_is_free(actual_port)
# Copied from CompilerGym/tests/test_main.py because importing it in this file raised errors.
def main(extra_pytest_args: Optional[List[str]] = None, debug_level: int = 1):
dbg.set_debug_level(debug_level)
# Keep test data isolated from user data.
os.environ[
"COMPILER_GYM_SITE_DATA"
] = f"/tmp/compiler_gym_{getuser()}/tests/site_data"
os.environ["COMPILER_GYM_CACHE"] = f"/tmp/compiler_gym_{getuser()}/tests/cache"
pytest_args = sys.argv + [
# Run pytest verbosely to print out test names to provide context in
# case of failures.
"-vv",
# Disable "Module already imported" warnings. See:
# https://docs.pytest.org/en/latest/how-to/usage.html#calling-pytest-from-python-code
"-W",
"ignore:Module already imported:pytest.PytestWarning",
# Disable noisy "Flaky tests passed" messages.
"--no-success-flaky-report",
]
# Support for sharding. If a py_test target has the shard_count attribute
# set (in the range [1,50]), then the pytest-shard module is used to divide
# the tests among the shards. See https://pypi.org/project/pytest-shard/
sharded_test = os.environ.get("TEST_TOTAL_SHARDS")
if sharded_test:
num_shards = int(os.environ["TEST_TOTAL_SHARDS"])
shard_index = int(os.environ["TEST_SHARD_INDEX"])
pytest_args += [f"--shard-id={shard_index}", f"--num-shards={num_shards}"]
else:
pytest_args += ["-p", "no:pytest-shard"]
pytest_args += extra_pytest_args or []
returncode = pytest.main(pytest_args)
# By default pytest will fail with an error if no tests are collected.
# Disable that behavior here (with a warning) since there are legitimate
# cases where we may want to run a test file with no tests in it. For
# example, when running on a continuous integration service where all the
# tests are marked with the @skip_on_ci decorator.
if returncode == pytest.ExitCode.NO_TESTS_COLLECTED.value:
print(
"WARNING: The test suite was empty. Is that intended?",
file=sys.stderr,
)
returncode = 0
sys.exit(returncode)
if __name__ == "__main__":
main(
extra_pytest_args=[
"-W",
"ignore::UserWarning",
]
)
|
CompilerGym-development
|
examples/example_compiler_gym_service/env_without_bazel_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This module defines and registers the example gym environments."""
from pathlib import Path
from typing import Iterable
from compiler_gym.datasets import Benchmark, Dataset
from compiler_gym.datasets.uri import BenchmarkUri
from compiler_gym.spaces import Reward
from compiler_gym.util.registration import register
from compiler_gym.util.runfiles_path import runfiles_path
EXAMPLE_CC_SERVICE_BINARY: Path = runfiles_path(
"examples/example_compiler_gym_service/service_cc/compiler_gym-example-service-cc"
)
EXAMPLE_PY_SERVICE_BINARY: Path = runfiles_path(
"examples/example_compiler_gym_service/service_py/compiler_gym-example-service-py"
)
class RuntimeReward(Reward):
"""An example reward that uses changes in the "runtime" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
name="runtime",
observation_spaces=["runtime"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.previous_runtime = None
def reset(self, benchmark: str, observation_view):
del benchmark # unused
self.previous_runtime = None
def update(self, action, observations, observation_view):
del action
del observation_view
if self.previous_runtime is None:
self.previous_runtime = observations[0]
reward = float(self.previous_runtime - observations[0])
self.previous_runtime = observations[0]
return reward
class ExampleDataset(Dataset):
def __init__(self, *args, **kwargs):
super().__init__(
name="benchmark://example-compiler-v0",
license="MIT",
description="An example dataset",
)
self._benchmarks = {
"/foo": Benchmark.from_file_contents(
"benchmark://example-compiler-v0/foo", "Ir data".encode("utf-8")
),
"/bar": Benchmark.from_file_contents(
"benchmark://example-compiler-v0/bar", "Ir data".encode("utf-8")
),
}
def benchmark_uris(self) -> Iterable[str]:
yield from (
f"benchmark://example-compiler-v0{k}" for k in self._benchmarks.keys()
)
def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark:
if uri.path in self._benchmarks:
return self._benchmarks[uri.path]
else:
raise LookupError("Unknown program name")
# Register the example service on module import. After importing this module,
# the example-compiler-v0 environment will be available to gym.make(...).
register(
id="example-cc-v0",
entry_point="compiler_gym.service.client_service_compiler_env:ClientServiceCompilerEnv",
kwargs={
"service": EXAMPLE_CC_SERVICE_BINARY,
"rewards": [RuntimeReward()],
"datasets": [ExampleDataset()],
},
)
register(
id="example-py-v0",
entry_point="compiler_gym.service.client_service_compiler_env:ClientServiceCompilerEnv",
kwargs={
"service": EXAMPLE_PY_SERVICE_BINARY,
"rewards": [RuntimeReward()],
"datasets": [ExampleDataset()],
},
)
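# An illustrative sketch (not part of the original file) of how RuntimeReward
# turns a sequence of "runtime" observations into incremental rewards. The
# runtime values below are hypothetical.
if __name__ == "__main__":
    reward = RuntimeReward()
    reward.reset(benchmark="benchmark://example-compiler-v0/foo", observation_view=None)
    for runtime in [10.0, 8.0, 9.0]:
        # A positive value means the runtime decreased at this step.
        print(reward.update(action=None, observations=[runtime], observation_view=None))
    # Prints 0.0, then 2.0, then -1.0.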
|
CompilerGym-development
|
examples/example_compiler_gym_service/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Smoke test for examples/example_compiler_gym_service/demo_without_bazel.py"""
from example_compiler_gym_service.demo_without_bazel import main
from flaky import flaky
@flaky
def test_demo_without_bazel():
main()
|
CompilerGym-development
|
examples/example_compiler_gym_service/demo_without_bazel_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This script demonstrates how the Python example service can be used without
needing the bazel build system. Usage:
$ python example_compiler_gym_service/demo_without_bazel.py
It is equivalent in behavior to the demo.py script in this directory.
"""
import logging
from pathlib import Path
from typing import Iterable
import gym
from compiler_gym.datasets import Benchmark, Dataset
from compiler_gym.datasets.uri import BenchmarkUri
from compiler_gym.spaces import Reward
from compiler_gym.util.logging import init_logging
from compiler_gym.util.registration import register
EXAMPLE_PY_SERVICE_BINARY: Path = Path(
"example_compiler_gym_service/service_py/example_service.py"
)
assert EXAMPLE_PY_SERVICE_BINARY.is_file(), "Service script not found"
class RuntimeReward(Reward):
"""An example reward that uses changes in the "runtime" observation value
to compute incremental reward.
"""
def __init__(self):
super().__init__(
name="runtime",
observation_spaces=["runtime"],
default_value=0,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.previous_runtime = None
def reset(self, benchmark: str, observation_view):
del benchmark # unused
self.previous_runtime = None
def update(self, action, observations, observation_view):
del action
del observation_view
if self.previous_runtime is None:
self.previous_runtime = observations[0]
reward = float(self.previous_runtime - observations[0])
self.previous_runtime = observations[0]
return reward
class ExampleDataset(Dataset):
def __init__(self, *args, **kwargs):
super().__init__(
name="benchmark://example-compiler-v0",
license="MIT",
description="An example dataset",
)
self._benchmarks = {
"/foo": Benchmark.from_file_contents(
"benchmark://example-compiler-v0/foo", "Ir data".encode("utf-8")
),
"/bar": Benchmark.from_file_contents(
"benchmark://example-compiler-v0/bar", "Ir data".encode("utf-8")
),
}
def benchmark_uris(self) -> Iterable[str]:
yield from (
f"benchmark://example-compiler-v0{k}" for k in self._benchmarks.keys()
)
def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark:
if uri.path in self._benchmarks:
return self._benchmarks[uri.path]
else:
raise LookupError("Unknown program name")
# Register the environment for use with gym.make(...).
register(
id="example-compiler-v0",
entry_point="compiler_gym.service.client_service_compiler_env:ClientServiceCompilerEnv",
kwargs={
"service": EXAMPLE_PY_SERVICE_BINARY,
"rewards": [RuntimeReward()],
"datasets": [ExampleDataset()],
},
)
def main():
# Use debug verbosity to print out extra logging information.
init_logging(level=logging.DEBUG)
# Create the environment using the regular gym.make(...) interface.
with gym.make("example-compiler-v0") as env:
env.reset()
for _ in range(20):
observation, reward, done, info = env.step(env.action_space.sample())
if done:
env.reset()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
examples/example_compiler_gym_service/demo_without_bazel.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This script demonstrates how the example services defined in this directory
can be used as gym environments. Usage:
$ bazel run -c opt //examples/example_compiler_gym_service:demo
"""
import logging
import gym
# To use the example services we simply need to import the module which
# registers the environments.
import examples.example_compiler_gym_service # noqa Register environments
def main():
# Use debug verbosity to print out extra logging information.
logging.basicConfig(level=logging.DEBUG)
# Create the environment using the regular gym.make(...) interface. We could
# use either the C++ service "example-cc-v0" or the Python service
# "example-py-v0".
with gym.make("example-cc-v0") as env:
env.reset()
for _ in range(20):
observation, reward, done, info = env.step(env.action_space.sample())
if done:
env.reset()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
examples/example_compiler_gym_service/demo.py
|
#! /usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""An example CompilerGym service in python."""
import logging
from pathlib import Path
from typing import Optional, Tuple
from compiler_gym.service import CompilationSession
from compiler_gym.service.proto import (
ActionSpace,
Benchmark,
DoubleRange,
Event,
Int64Box,
Int64Range,
Int64Tensor,
NamedDiscreteSpace,
ObservationSpace,
Space,
StringSpace,
)
from compiler_gym.service.runtime import create_and_run_compiler_gym_service
class ExampleCompilationSession(CompilationSession):
"""Represents an instance of an interactive compilation session."""
compiler_version: str = "1.0.0"
# The action spaces supported by this service. Here we will implement a
# single action space, called "default", that represents a command line with
# three options: "a", "b", and "c".
action_spaces = [
ActionSpace(
name="default",
space=Space(
named_discrete=NamedDiscreteSpace(
name=[
"a",
"b",
"c",
],
),
),
)
]
# A list of observation spaces supported by this service. Each of these
# ObservationSpace protos describes an observation space.
observation_spaces = [
ObservationSpace(
name="ir",
space=Space(
string_value=StringSpace(length_range=Int64Range(min=0)),
),
deterministic=True,
platform_dependent=False,
default_observation=Event(string_value=""),
),
ObservationSpace(
name="features",
space=Space(
int64_box=Int64Box(
low=Int64Tensor(shape=[3], value=[-100, -100, -100]),
high=Int64Tensor(shape=[3], value=[100, 100, 100]),
),
),
),
ObservationSpace(
name="runtime",
space=Space(
double_value=DoubleRange(min=0),
),
deterministic=False,
platform_dependent=True,
default_observation=Event(
double_value=0,
),
),
]
def __init__(
self, working_directory: Path, action_space: ActionSpace, benchmark: Benchmark
):
super().__init__(working_directory, action_space, benchmark)
logging.info("Started a compilation session for %s", benchmark.uri)
def apply_action(self, action: Event) -> Tuple[bool, Optional[ActionSpace], bool]:
num_choices = len(self.action_spaces[0].space.named_discrete.name)
# This is the index into the action space's values ("a", "b", "c") that
# the user selected, e.g. 0 -> "a", 1 -> "b", 2 -> "c".
choice_index = action.int64_value
logging.info("Applying action %d", choice_index)
if choice_index < 0 or choice_index >= num_choices:
raise ValueError("Out-of-range")
# Here is where we would run the actual action to update the environment's
# state.
return False, None, False
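# For reference, and assuming the standard CompilationSession API: the tuple
# returned above is interpreted as (end_of_session, new_action_space,
# action_had_no_effect), so this example never ends the episode, never changes
# the action space, and always reports that the action had an effect.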
def get_observation(self, observation_space: ObservationSpace) -> Event:
logging.info("Computing observation from space %s", observation_space)
if observation_space.name == "ir":
return Event(string_value="Hello, world!")
elif observation_space.name == "features":
observation = Event(int64_tensor=Int64Tensor(shape=[3], value=[0, 0, 0]))
return observation
elif observation_space.name == "runtime":
return Event(double_value=0)
else:
raise KeyError(observation_space.name)
if __name__ == "__main__":
create_and_run_compiler_gym_service(ExampleCompilationSession)
|
CompilerGym-development
|
examples/example_compiler_gym_service/service_py/example_service.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pickle
import sqlite3
import zlib
import dgl
import numpy as np
import torch
from dgl.data import DGLDataset
class CompilerGymDataset(DGLDataset):
def __init__(
self,
filepath,
num_workers=64,
max_len_nodes=50000,
input_key="dgl_graph",
output_key="reward",
table_name="Observations",
train_prop=0.8,
vocab=None,
dataset_size=-1,
):
"""
The class loads a CompilerGym database which contains 'States' and 'Observations'
as tables. The tables contain the necessary information for doing supervised learning.
This class handles all of the underlying structure, including differentiating between
training and dev splits, creating the 'dgl graph', and collating individual graphs into a
larger batched graph, which is used for training.
Inputs:
- filepath: the path to the dataset
- num_workers: number of workers used to fetch the instances
- max_len_nodes: maximum number of nodes in the graph
- input_key: the key under which the input observation (the dgl graph) is saved
- output_key: the key that we want to compute the supervised loss from
- table_name: the table name in the database that has the primary keys
- train_prop: proportion of training instances
- vocab: the vocab mapping text to integer indices of an embedding table
- dataset_size: size of the dataset we want to use; the default of -1 means use the whole database
"""
self.filepath = filepath
self.num_workers = num_workers
self.max_len_nodes = max_len_nodes
self.graph_key = input_key
self.output_key = output_key
self.table_name = table_name
self.train_prop = train_prop
self.vocab = vocab
self.dataset_size = dataset_size
self.distribution_type = "train"
print("using filepath: ", self.filepath)
super().__init__(name="CopmilerGym")
def process(self):
"""
Called during initialization of the class; sets up the underlying database
structures needed for supervised learning
"""
self.initialize_database()
def initialize_database(self):
print("using: ", self.filepath, " as dataset")
self.cursor = self.get_cursor()
self.train_size = int(self.train_prop * self.get_full_db_length())
self.dev_size = self.get_full_db_length() - self.train_size
self.select_distribution_indices()
self.get_observation_indices()
print("intialized database: ", self.filepath)
def select_distribution_indices(self):
total_size = self.get_full_db_length()
self.all_indices = set(range(total_size))
self.train_indices = np.random.choice(
total_size, size=self.train_size, replace=False
)
self.dev_indices = list(self.all_indices - set(self.train_indices))
assert len(self.train_indices) == self.train_size
assert len(self.dev_indices) == self.dev_size
def get_observation_indices(self):
self.all_state_indices = get_all_states(self.cursor, self.dataset_size)
def get_full_db_length(self):
if self.dataset_size == -1:
return get_database_size(self.cursor, self.table_name)
else:
return self.dataset_size
def __getitem__(self, i):
return self.get_instance(i)
def get_instance(self, i):
"""
Given an index (i) into the current split ('train' or 'dev'),
return the desired instance
"""
index = None
if self.distribution_type == "train":
index = self.train_indices[i]
elif self.distribution_type == "dev":
index = self.dev_indices[i]
cursor = self.get_cursor()
cur_state = self.all_state_indices[index]
s = get_observation_from_table(cursor, cur_state[3])
# This reward is hardcoded right now to be the instruction count of the
# given LLVM-IR graph.
reward = s[0][1]
programl = pickle.loads(zlib.decompress(s[0][3]))
dgl_graph = process_networkx_graph(programl, self.vocab)
return {self.output_key: reward, self.graph_key: dgl_graph}
def __len__(self):
if self.distribution_type == "train":
return self.train_size
elif self.distribution_type == "dev":
return self.dev_size
def collate_fn(self, samples):
samples = [sample for sample in samples if sample is not None]
# Takes a list of graphs and makes it into one big graph that dgl operates on
ret = None
if samples:
dgl_graph = dgl.batch([sample[self.graph_key] for sample in samples])
reward = [sample[self.output_key] for sample in samples]
ret = (dgl_graph, reward)
return ret
def set_distribution_type(self, dist_type):
assert dist_type in ["train", "dev"]
self.distribution_type = dist_type
def get_cursor(self):
connection = sqlite3.connect(self.filepath)
return connection.cursor()
def get_database_size(cursor, table):
return cursor.execute(f"SELECT COUNT(*) FROM {table}").fetchall()[0][0]
def get_all_states(cursor, db_size):
if db_size == -1:
cursor.execute("SELECT * from States")
else:
cursor.execute(f"SELECT * from States LIMIT {db_size}")
return cursor.fetchall()
def get_observation_from_table(cursor, hash):
"""
Gets the observation for a state_id from a given database
Inputs:
- cursor: the db cursor
- hash: the state_id hash we want (primary key in the table)
"""
cursor.execute(f"SELECT * from Observations where state_id = '{hash}'")
return cursor.fetchall()
def process_networkx_graph(
graph,
vocab,
node_feature_list=["text", "type"],
edge_feature_list=["flow", "position"],
):
"""
Handles all of the steps needed to take a networkx graph and convert it into a
dgl graph
Inputs:
- graph: the networkx graph
- vocab: the vocabulary, a mapping from word to index.
- node_feature_list: a list of textual features from the networkx node that we want to make sure
are featurizable into a vector.
- edge_feature_list: a list of textual features from the networkx edges that we want to make sure
are featurizable into a vector.
"""
update_graph_with_vocab(graph.nodes, node_feature_list, vocab)
update_graph_with_vocab(graph.edges, edge_feature_list, vocab)
dgl_graph = fast_networkx_to_dgl(graph)
return dgl_graph
def fast_networkx_to_dgl(
graph, node_attrs=["text_idx", "type"], edge_attrs=["flow", "position"]
):
"""
Takes a networkx graph and its given node attributes and edge attributes
and converts it into the corresponding dgl graph
Inputs:
- graph: the networkx graph
- node_attrs: node attributes to convert
- edge_attrs: edge attributes to convert
"""
edges = [edge for edge in graph.edges()]
dgl_graph = dgl.graph(edges, num_nodes=graph.number_of_nodes())
for feat in edge_attrs:
edge_assigns = torch.tensor(
[val[-1] for val in graph.edges(data=feat)], dtype=torch.int64
)
dgl_graph.edata[feat] = edge_assigns
for feat in node_attrs:
node_assigns = torch.tensor(
[val[-1] for val in graph.nodes(data=feat)], dtype=torch.int64
)
dgl_graph.ndata[feat] = node_assigns
return dgl_graph
def update_graph_with_vocab(graph_fn, features, vocab):
"""
Given a networkx accessor function (nodes or edges) and a list of feature names,
update each feature with the vocab where possible.
If a feature has no vocab entry, it must already be a numerical feature.
Inputs:
- graph_fn: a networkx graph function (describing nodes or edges)
- features: the feature from the function that should be updated
- vocab: A dict mapping text to int
"""
for feature_name in features:
curr_vocab = None
if feature_name in vocab:
curr_vocab = vocab[feature_name]
for graph_item in graph_fn(data=feature_name):
feature = graph_item[-1]
idx = graph_item[0]
if feature_name in vocab:
# Lookup vocab item, or map to out-of-vocab index if item is not
# found.
vocab_index = curr_vocab.get(feature, len(curr_vocab))
update_networkx_feature(
graph_fn, idx, f"{feature_name}_idx", vocab_index
)
else:
assert isinstance(
feature, int
), f"{(feature_name, feature)} is not an int"
def update_networkx_feature(graph_fn, idx, feature_name, feature):
graph_fn[idx][feature_name] = feature
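# Usage sketch: a minimal, hypothetical example of wiring CompilerGymDataset
# into a torch DataLoader using its custom collate_fn. The database path and
# vocabulary below are placeholders; in practice train.py downloads both and
# passes them in.
if __name__ == "__main__":
    from torch.utils.data import DataLoader

    dataset = CompilerGymDataset(
        "/path/to/state_transition_dataset.db",  # placeholder path
        vocab={"text": {}},  # hypothetical (empty) vocabulary
        dataset_size=16,
    )
    dataset.set_distribution_type("train")
    loader = DataLoader(dataset, batch_size=4, collate_fn=dataset.collate_fn)
    for batched_graph, rewards in loader:
        print(batched_graph.num_nodes(), rewards)
        break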
|
CompilerGym-development
|
examples/gnn_cost_model/compiler_gym_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for examples/gnn_cost_model/train_cost_model_test.py"""
import sys
import pytest
from absl import flags
from compiler_gym.util.capture_output import capture_output
from .train import main
FLAGS = flags.FLAGS
@pytest.mark.skip(reason="Need to create a small test set")
def test_run_train_smoke_test():
flags = [
"argv0",
"--dataset_size=64",
"--batch_size=4",
"--num_epoch=2",
"--device=cpu",
]
sys.argv = flags
FLAGS(flags)
with capture_output() as out:
main(["argv0"])
assert "Epoch num 0 training" in out.stdout
|
CompilerGym-development
|
examples/gnn_cost_model/train_test.py
|
CompilerGym-development
|
examples/gnn_cost_model/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import dgl
import numpy as np
import torch
import torch.nn as nn
class GNNEncoder(nn.Module):
def __init__(
self,
node_vocab_size,
node_hidden_size,
use_node_embedding=True,
n_steps=1,
n_etypes=3,
n_message_passes=0,
reward_dim=1,
gnn_type="GatedGraphConv",
heads=None,
feat_drop=0.0,
concat_intermediate=True,
):
super(GNNEncoder, self).__init__()
self.use_node_embedding = use_node_embedding
self.node_hidden_size = node_hidden_size
self.n_steps = n_steps
self.n_etypes = n_etypes
self.n_message_passes = n_message_passes
self.reward_dim = reward_dim
self.gnn_type = gnn_type
self.heads = heads
self.feat_drop = feat_drop
self.concat_intermediate = concat_intermediate
if self.use_node_embedding:
self.node_embedding = nn.Embedding(node_vocab_size, node_hidden_size)
embed_dim = self.node_hidden_size
if self.gnn_type == "GatedGraphConv":
self.ggcnn = nn.ModuleList(
[
dgl.nn.pytorch.conv.GatedGraphConv(
in_feats=self.node_hidden_size,
out_feats=self.node_hidden_size,
n_steps=self.n_steps,
n_etypes=self.n_etypes,
)
for _ in range(self.n_message_passes)
]
)
if self.concat_intermediate:
embed_dim = (self.n_message_passes + 1) * embed_dim
else:
raise NotImplementedError("")
self.reward_predictor = nn.Sequential(
nn.Linear(embed_dim, self.node_hidden_size),
nn.ReLU(),
nn.Linear(self.node_hidden_size, self.reward_dim),
)
self.mse_loss = nn.MSELoss()
def forward(self, g):
with g.local_scope():
self.featurize_nodes(g)
res = g.ndata["feat"]
if self.concat_intermediate:
intermediate = [dgl.mean_nodes(g, "feat")]
if self.gnn_type == "GatedGraphConv":
for i, layer in enumerate(self.ggcnn):
res = layer(g, res, g.edata["flow"])
if self.concat_intermediate:
g.ndata["feat"] = res
intermediate.append(dgl.mean_nodes(g, "feat"))
g.ndata["feat"] = res
if self.concat_intermediate and self.gnn_type == "GatedGraphConv":
graph_agg = torch.cat(intermediate, axis=1)
else:
graph_agg = dgl.mean_nodes(g, "feat")
res = self.reward_predictor(graph_agg)
return res, graph_agg
def get_loss(self, g, labels, eps=0.0):
"""
Loss function. Rescales the reward using the same value-function rescaling as
R2D2 (https://openreview.net/pdf?id=r1lyTjAqYX). It also lets us see the
difference between the unscaled reward and its associated prediction.
Returns a tuple of (MSE on the rescaled targets, MSE between the unscaled
predictions and the labels, mean relative absolute error).
"""
preds, _ = self.forward(g)
preds = preds.squeeze(1)
scaled_labels = rescale(labels, eps=eps)
inv_scale_pred = inv_rescale(preds, eps=eps)
return (
self.mse_loss(preds, scaled_labels),
self.mse_loss(inv_scale_pred, labels),
((labels - inv_scale_pred).abs() / labels).mean(),
)
def featurize_nodes(self, g):
# This is very CompilerGym-specific and can be rewritten for other tasks
features = []
if self.use_node_embedding:
features.append(self.node_embedding(g.ndata["text_idx"]))
g.ndata["feat"] = torch.cat(features)
def get_edge_embedding(self, g):
# TODO: this could be used for positional embeddings
pass
def rescale(x, eps=1e-3):
sign = get_sign(x)
x_abs = get_abs(x)
if isinstance(x, np.ndarray):
return sign * (np.sqrt(x_abs + 1) - 1) + eps * x
else:
return sign * ((x_abs + 1).sqrt() - 1) + eps * x
def inv_rescale(x, eps=1e-3):
sign = get_sign(x)
x_abs = get_abs(x)
if eps == 0:
return sign * (x * x + 2.0 * x_abs)
else:
return sign * (
(((1.0 + 4.0 * eps * (x_abs + 1.0 + eps)).sqrt() - 1.0) / (2.0 * eps)).pow(
2
)
- 1.0
)
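# A worked example: with eps=0, rescale computes h(x) = sign(x) * (sqrt(|x| + 1) - 1)
# and inv_rescale computes h^-1(x) = sign(x) * (x^2 + 2|x|), the value-function
# rescaling from R2D2. For instance rescale(torch.tensor(3.0), eps=0) == 1.0 and
# inv_rescale(torch.tensor(1.0), eps=0) == 3.0, so the two functions are inverses;
# the nonzero-eps branch follows the corresponding closed-form inverse.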
def get_sign(x):
if isinstance(x, np.ndarray):
return np.sign(x)
elif isinstance(x, torch.Tensor):
return x.sign()
else:
raise NotImplementedError(f"Data type: {type(x)} is not implemented")
def get_abs(x):
if isinstance(x, np.ndarray):
return np.abs(x)
elif isinstance(x, torch.Tensor):
return x.abs()
else:
raise NotImplementedError(f"Data type: {type(x)} is not implemented")
|
CompilerGym-development
|
examples/gnn_cost_model/model.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This module trains a GNN cost model on an LLVM-IR transition database
to predict an output reward (by default, instruction count).
Example usage:
$ python train_cost_model.py --num_epoch 10 --batch_size 16 --dataset_size 64
"""
import collections
import io
import logging
import pickle
import sys
import tarfile
import time
from pathlib import Path
from threading import Lock
import numpy as np
import torch
from absl import app, flags
from fasteners import InterProcessLock
from torch.utils.data import DataLoader
import compiler_gym.util.flags.nproc # noqa flag definition
from compiler_gym.util.download import download
from compiler_gym.util.filesystem import extract_tar
from compiler_gym.util.runfiles_path import cache_path, transient_cache_path
from compiler_gym.util.timer import Timer, humanize_duration
from .compiler_gym_dataset import CompilerGymDataset
from .model import GNNEncoder
flags.DEFINE_integer(
"dataset_size", -1, "How large should the dataset be, -1 if no constraint"
)
flags.DEFINE_integer("num_epoch", 100, "Number of epochs for training")
flags.DEFINE_integer("batch_size", 4, "Number of epochs for training")
flags.DEFINE_string(
"db",
"https://dl.fbaipublicfiles.com/compiler_gym/state_transition_dataset/2021-11-15-csmith.tar.bz2",
"URL of the dataset to use.",
)
flags.DEFINE_string(
"db_sha256",
"0b101a17fdbb1851f38ca46cc089b0026eb740e4055a4fe06b4c899ca87256a2",
"SHA256 checksum of the dataset database.",
)
flags.DEFINE_string(
"vocab_db",
"https://dl.fbaipublicfiles.com/compiler_gym/state_transition_dataset/2021-11-15-vocab.tar.bz2",
"URL of the vocabulary database to use.",
)
flags.DEFINE_string(
"vocab_db_sha256",
"af7781f57e6ef430c561afb045fc03693783e668b21826b32234e9c45bd1882c",
"SHA256 checksum of the vocabulary database.",
)
flags.DEFINE_string(
"device", "cuda:0" if torch.cuda.is_available() else "cpu", "The device to run on."
)
FLAGS = flags.FLAGS
logger = logging.getLogger(__name__)
_DB_DOWNLOAD_LOCK = Lock()
def dataset_looper(epoch_num, data_loader, model, device, optimizer=None, train=True):
times = collections.defaultdict(float)
losses = []
unscaled_mse = []
epoch_grad_clip = []
t1 = time.time()
for data in data_loader:
if data is None:
continue
graph, labels = data
times["get_data"] += time.time() - t1
t1 = time.time()
labels = torch.Tensor(labels).to(device)
graph = graph.to(device)
loss, unscaled, _ = model.get_loss(graph, labels)
losses.append(loss.cpu().data.numpy())
unscaled_mse.append(unscaled.cpu().data.numpy())
times["model_forward"] += time.time() - t1
t1 = time.time()
if train:
optimizer.zero_grad()
loss.backward()
grad_clip = torch.nn.utils.clip_grad_norm_(
model.parameters(), max_norm=400.0
)
epoch_grad_clip.append(grad_clip.cpu().data.numpy())
optimizer.step()
times["model_backward"] += time.time() - t1
t1 = time.time()
avg_loss, avg_unscaled = (
np.mean(losses),
np.mean(unscaled_mse),
)
avg_grad_clip = None
if train:
avg_grad_clip = np.mean(epoch_grad_clip)
times = ", ".join(f"{k}: {humanize_duration(v)}" for k, v in times.items())
print(
f" Epoch {epoch_num + 1} {'training' if train else 'validation'} took: "
f"{{ {times} }}, loss: {avg_loss}, unscaled: {avg_unscaled}, "
f"grad_clip {avg_grad_clip}"
)
return avg_loss, avg_unscaled, avg_grad_clip
def train(dataset, data_loader, model, num_epoch, device):
optimizer = torch.optim.Adam(model.parameters())
for epoch in range(num_epoch):
with Timer(f"Epoch {epoch + 1} of {num_epoch} ({(epoch + 1) / num_epoch:.1%})"):
dataset.set_distribution_type("train")
dataset_looper(epoch, data_loader, model, device, optimizer)
dataset.set_distribution_type("dev")
dataset_looper(epoch, data_loader, model, device, train=False)
def download_and_unpack_database(db: str, sha256: str) -> Path:
"""Download the given database, unpack it to the local filesystem, and
return the path.
"""
local_dir = cache_path(f"state_transition_dataset/{sha256}")
with _DB_DOWNLOAD_LOCK, InterProcessLock(
transient_cache_path(".state_transition_database_download.LOCK")
):
if not (local_dir / ".installed").is_file():
tar_data = io.BytesIO(download(db, sha256))
local_dir.mkdir(parents=True, exist_ok=True)
logger.info("Unpacking database to %s ...", local_dir)
with tarfile.open(fileobj=tar_data, mode="r:bz2") as arc:
extract_tar(arc, str(local_dir))
(local_dir / ".installed").touch()
unpacked = [f for f in local_dir.iterdir() if f.name != ".installed"]
if len(unpacked) != 1:
print(
f"fatal: Archive {db} expected to contain one file, contains: {len(unpacked)}",
file=sys.stderr,
)
sys.exit(1)
return unpacked[0]
def main(argv):
"""Main entry point."""
del argv # unused
node_vocab_pth = download_and_unpack_database(
db=FLAGS.vocab_db, sha256=FLAGS.vocab_db_sha256
)
root_pth = download_and_unpack_database(db=FLAGS.db, sha256=FLAGS.db_sha256)
with open(node_vocab_pth, "rb") as f:
vocab = pickle.load(f)
model = GNNEncoder(
# Add one to the vocab size to accommodate the out-of-vocab element.
node_vocab_size=len(vocab) + 1,
node_hidden_size=64,
)
# The dataset expects the vocab keyed by feature name; this vocab maps the
# 'text' attribute that is present on all nodes of the graph.
vocab = {"text": vocab}
model.to(FLAGS.device)
print(model)
dataset = CompilerGymDataset(root_pth, vocab=vocab, dataset_size=FLAGS.dataset_size)
dataset_loader = DataLoader(
dataset,
batch_size=FLAGS.batch_size,
shuffle=True,
num_workers=FLAGS.nproc,
collate_fn=dataset.collate_fn,
)
train(dataset, dataset_loader, model, FLAGS.num_epoch, FLAGS.device)
if __name__ == "__main__":
app.run(main)
|
CompilerGym-development
|
examples/gnn_cost_model/train.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
CompilerGym-development
|
examples/llvm_rl/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
from pathlib import Path
from typing import List
from llvm_rl.model import Model
def main(argv):
paths = argv[1:] or ["~/logs/compiler_gym/llvm_rl"]
models: List[Model] = []
for path in paths:
models += Model.from_logsdir(Path(path).expanduser())
for model in models:
model.test()
if __name__ == "__main__":
main(sys.argv)
|
CompilerGym-development
|
examples/llvm_rl/test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Environment wrappers to closer replicate the MLSys'20 Autophase paper."""
from typing import List
import gym
import numpy as np
from compiler_gym.envs import CompilerEnv, LlvmEnv
from compiler_gym.util.gym_type_hints import ActionType
from compiler_gym.wrappers import (
ConstrainedCommandline,
ObservationWrapper,
RewardWrapper,
)
class ClampedReward(RewardWrapper):
"""A wrapper class that clamps reward signal within a bounded range,
optionally with some leaking for out-of-range values.
"""
def __init__(
self,
env: CompilerEnv,
min: float = -1,
max: float = 1,
leakiness_factor: float = 0.001,
):
super().__init__(env)
self.min = min
self.max = max
self.leakiness_factor = leakiness_factor
def convert_reward(self, reward: float) -> float:
if reward > self.max:
return self.max + (reward - self.max) * self.leakiness_factor
elif reward < self.min:
return self.min + (reward - self.min) * self.leakiness_factor
return reward
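# A worked example: with the defaults min=-1, max=1 and leakiness_factor=0.001,
# an in-range reward of 0.3 is returned unchanged, while an out-of-range reward
# of 2.5 becomes 1 + (2.5 - 1) * 0.001 = 1.0015, so out-of-range values are
# flattened at the bound but still leak a small gradient.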
class AutophaseNormalizedFeatures(ObservationWrapper):
"""A wrapper for LLVM environments that use the Autophase observation space
to normalize and clip features to the range [0, 1].
"""
# The index of the "TotalInsts" feature of autophase.
TotalInsts_index = 51
def __init__(self, env: CompilerEnv):
super().__init__(env=env)
# Force Autophase observation space.
self.env.observation_space = self.env.unwrapped.observation.spaces["Autophase"]
# Adjust the bounds to reflect the normalized values.
self.env.observation_space_spec.space = gym.spaces.Box(
low=np.full(
self.env.observation_space_spec.space.shape[0], 0, dtype=np.float32
),
high=np.full(
self.env.observation_space_spec.space.shape[0], 1, dtype=np.float32
),
dtype=np.float32,
)
def convert_observation(self, observation):
if observation[self.TotalInsts_index] <= 0:
return np.zeros(observation.shape, dtype=np.float32)
return np.clip(
observation.astype(np.float32) / observation[self.TotalInsts_index], 0, 1
)
class ConcatActionsHistogram(ObservationWrapper):
"""A wrapper that concatenates a histogram of previous actions to each
observation.
The actions histogram is concatenated to the end of the existing 1-D box
observation, expanding the space.
The actions histogram has bounds [0, inf]. If you specify a fixed episode
length `norm_to_episode_len`, each histogram update is scaled by
1/norm_to_episode_len, so that the histogram portion of the observation sums
to 1 after norm_to_episode_len steps.
"""
def __init__(self, env: CompilerEnv, norm_to_episode_len: int = 0):
super().__init__(env=env)
assert isinstance(
self.observation_space, gym.spaces.Box
), f"Can only contatenate actions histogram to box shape, not {self.observation_space}"
assert isinstance(
self.action_space, gym.spaces.Discrete
), "Can only construct histograms from discrete spaces"
assert len(self.observation_space.shape) == 1, "Requires 1-D observation space"
self.increment = 1 / norm_to_episode_len if norm_to_episode_len else 1
# Reshape the observation space.
self.env.observation_space_spec.space = gym.spaces.Box(
low=np.concatenate(
(
self.env.observation_space.low,
np.zeros(
self.action_space.n, dtype=self.env.observation_space.dtype
),
)
),
high=np.concatenate(
(
self.env.observation_space.high,
# The upper bound is 1.0 if we are normalizing to the
# episode length, else infinite for unbounded episode
# lengths.
np.ones(self.action_space.n, dtype=self.env.observation_space.dtype)
* (1.0 if norm_to_episode_len else np.inf),
)
),
dtype=self.env.observation_space.dtype,
)
def reset(self, *args, **kwargs):
self.histogram = np.zeros(
(self.action_space.n,), dtype=self.env.observation_space.dtype
)
return super().reset(*args, **kwargs)
def multistep(
self,
actions: List[ActionType],
observation_spaces=None,
observations=None,
**kwargs,
):
for a in actions:
self.histogram[a] += self.increment
return super().multistep(actions, **kwargs)
def convert_observation(self, observation):
return np.concatenate((observation, self.histogram)).astype(
self.env.observation_space.dtype
)
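# A worked example: with norm_to_episode_len=10, each applied action adds 0.1 to
# its bin of the histogram, so after 10 steps the trailing action_space.n entries
# of the observation sum to 1.0. With the default norm_to_episode_len=0 the
# increment is 1 and the histogram is an unnormalized count of actions taken.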
class AutophaseActionSpace(ConstrainedCommandline):
"""An action space wrapper that limits the action space to that of the
Autophase paper.
The actions used in the Autophase work are taken from:
https://github.com/ucb-bar/autophase/blob/2f2e61ad63b50b5d0e2526c915d54063efdc2b92/gym-hls/gym_hls/envs/getcycle.py#L9
Note that 4 of the 46 flags are not included. Those are:
-codegenprepare Excluded from CompilerGym
-scalarrepl Removed from LLVM in https://reviews.llvm.org/D21316
-scalarrepl-ssa Removed from LLVM in https://reviews.llvm.org/D21316
-terminate Not found in LLVM 10.0.0
"""
def __init__(self, env: LlvmEnv):
super().__init__(
env=env,
flags=[
"-adce",
"-break-crit-edges",
"-constmerge",
"-correlated-propagation",
"-deadargelim",
"-dse",
"-early-cse",
"-functionattrs",
"-functionattrs",
"-globaldce",
"-globalopt",
"-gvn",
"-indvars",
"-inline",
"-instcombine",
"-ipsccp",
"-jump-threading",
"-lcssa",
"-licm",
"-loop-deletion",
"-loop-idiom",
"-loop-reduce",
"-loop-rotate",
"-loop-simplify",
"-loop-unroll",
"-loop-unswitch",
"-lower-expect",
"-loweratomic",
"-lowerinvoke",
"-lowerswitch",
"-mem2reg",
"-memcpyopt",
"-partial-inliner",
"-prune-eh",
"-reassociate",
"-sccp",
"-simplifycfg",
"-sink",
"-sroa",
"-strip",
"-strip-nondebug",
"-tailcallelim",
],
)
|
CompilerGym-development
|
examples/llvm_rl/wrappers.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import hydra
from hydra.core.hydra_config import HydraConfig
from llvm_rl.model import Model
from omegaconf import DictConfig, OmegaConf
from omegaconf.errors import MissingMandatoryValue
from pydantic import ValidationError
import compiler_gym
def _get_job_id() -> int:
try:
return HydraConfig.get().job.id
except MissingMandatoryValue:
# The numeric job ID is missing if not in a multirun context. In that
# case, there can only be a single run.
return 0
@hydra.main(config_path="config", config_name="default")
def main(config: DictConfig) -> None:
OmegaConf.set_readonly(config, True)
# Parse the config to pydantic models.
try:
model: Model = Model(
# Hydra changes the working directory.
working_directory=os.getcwd(),
job_id=_get_job_id(),
compiler_gym_version=compiler_gym.__version__,
**config
)
except ValidationError as e:
print(e, file=sys.stderr)
sys.exit(1)
model.train()
model.test()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
examples/llvm_rl/train.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
import sys
from pathlib import Path
from typing import List
import humanize
import pandas as pd
from llvm_rl.model import Model
from pydantic import ValidationError
from tabulate import tabulate
from typer import Typer
from compiler_gym.util.logging import init_logging
from compiler_gym.util.statistics import geometric_mean
app = Typer()
def models_from_paths(log_dirs: List[Path]):
# Read all the inputs first.
models: List[Model] = []
for path in log_dirs:
try:
models += Model.from_logsdir(Path(path).expanduser())
except ValidationError as e:
print(e, file=sys.stderr)
sys.exit(1)
return models
@app.command()
def train(log_dirs: List[Path] = ["~/logs/compiler_gym/llvm_rl"]):
init_logging()
models = models_from_paths(log_dirs)
dfs = []
for model in models:
df = model.dataframe
if not len(df):
continue
# Select only the rows with a checkpoint.
df = df[df["checkpoint"].values]
df = df[
[
"trial_name",
"experiment_timestamp",
"episodes_total",
"episode_reward_geomean",
"episode_reward_mean",
"evaluation/episode_reward_mean",
"evaluation/episode_reward_geomean",
"time_total_s",
"complete",
"cpus",
"gpus",
]
]
sdf = df.groupby(
["experiment", "config", "replica", "experiment_timestamp"]
).max()
test_results = model.test_dataframes
sdf["test_results"] = [
test_results.get(d, pd.DataFrame()) for d in sdf["trial_name"]
]
sdf["test_ic_mean"] = [
sum(d["instruction_count_reduction"]) / len(d)
if not d.empty
else float("nan")
for d in sdf["test_results"]
]
sdf["test_ic_geomean"] = [
geometric_mean(d["instruction_count_reduction"])
if not d.empty
else float("nan")
for d in sdf["test_results"]
]
sdf["test_os_mean"] = [
sum(d["object_size_reduction"]) / len(d) if not d.empty else float("nan")
for d in sdf["test_results"]
]
sdf["test_os_geomean"] = [
geometric_mean(d["object_size_reduction"]) if not d.empty else float("nan")
for d in sdf["test_results"]
]
sdf["test_checkpoint"] = [
int(d["test_checkpoint"].values[0].split("-")[-1]) if not d.empty else ""
for d in sdf["test_results"]
]
dfs.append(sdf.reset_index())
df = pd.concat(dfs)
# Print everything.
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
pd.set_option("display.width", None)
df = df.rename(
columns={
"experiment_timestamp": "timestamp",
"episodes_total": "episodes",
"evaluation/episode_reward_geomean": "val_geomean",
"evaluation/episode_reward_mean": "val_mean",
"episode_reward_mean": "train_mean",
"episode_reward_geomean": "train_geomean",
"time_total_s": "training_time",
"test_reward_mean": "test_mean",
"test_reward_geomean": "test_geomean",
}
)
# Format for printing.
df["complete"] = [f"{x:.1%}" for x in df["complete"]]
df["episodes"] = [f"{int(x):,d}" for x in df["episodes"]]
df["training_time"] = [humanize.naturaldelta(x) for x in df["training_time"]]
for reward in [
"train_mean",
"train_geomean",
"val_mean",
"val_geomean",
"test_ic_geomean",
"test_os_geomean",
"test_ic_mean",
"test_os_mean",
]:
df[reward] = [f"{x:.4f}" for x in df[reward].values]
df = df[
[
"trial_name",
"timestamp",
"complete",
"episodes",
"training_time",
"test_checkpoint",
"train_geomean",
"val_geomean",
]
]
print(tabulate(df, headers="keys", showindex=False, tablefmt="psql"))
@app.command()
def test(
log_dirs: List[Path] = ["~/logs/compiler_gym/llvm_rl"],
format_for_latex: bool = False,
):
models = models_from_paths(log_dirs)
# Print everything.
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
pd.set_option("display.width", None)
dfs = {}
for model in models:
for trial, df in model.test_dataframes.items():
df["test_set"] = [
re.search(r"^((benchmark|generator)://)(.+)-v[012]/", d).group(3)
for d in df["benchmark"]
]
# Prune empty test set.
df = df[df["instruction_count_init"] > 0]
gmean_df = (
df[
[
"test_set",
"instruction_count_reduction",
"object_size_reduction",
]
]
.groupby(["test_set"])
.agg(geometric_mean)
)
mean_df = (
df[
[
"test_set",
"inference_walltime_seconds",
]
]
.groupby(["test_set"])
.mean()
)
df = pd.concat((mean_df, gmean_df), axis=1)
df = df.reset_index()
df.insert(0, "trial", trial)
if format_for_latex:
df["instruction_count_reduction"] = [
f"${float(d):.3f}\\times$"
for d in df["instruction_count_reduction"]
]
df["object_size_reduction"] = [
f"${float(d):.3f}\\times$" for d in df["object_size_reduction"]
]
print()
print(tabulate(df, headers="keys", showindex=False, tablefmt="psql"))
dfs[trial] = df
if __name__ == "__main__":
app()
|
CompilerGym-development
|
examples/llvm_rl/info.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import pytest
from llvm_rl import wrappers
import compiler_gym
@pytest.fixture(scope="function")
def env():
with compiler_gym.make("llvm-v0") as env:
yield env
def test_AutophaseNormalizedFeatures(env):
env = wrappers.AutophaseNormalizedFeatures(env)
assert env.observation_space_spec.id == "Autophase"
assert env.observation_space.shape == (56,)
assert env.observation_space.dtype == np.float32
def test_ConcatActionsHistogram(env):
env.observation_space = "Autophase"
num_features = env.observation_space.shape[0]
num_actions = env.action_space.n
env = wrappers.ConcatActionsHistogram(env)
env.reset()
action = env.action_space.sample()
obs, _, _, _ = env.step(action)
assert env.observation_space.shape == (num_features + num_actions,)
assert obs.shape == (num_features + num_actions,)
def test_AutophaseActionSpace(env):
env = wrappers.AutophaseActionSpace(env)
env.reset()
env.step(env.action_space.sample())
assert env.action_space.n == 42
|
CompilerGym-development
|
examples/llvm_rl/tests/wrappers_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from llvm_rl.model.testing import Testing
from omegaconf import OmegaConf
import compiler_gym
def test_testing_config():
cfg = Testing(
**OmegaConf.create(
"""\
timeout_hours: 12
runs_per_benchmark: 6
benchmarks:
- dataset: benchmark://cbench-v1
max_benchmarks: 5
"""
)
)
assert cfg.timeout_hours == 12
with compiler_gym.make("llvm-v0") as env:
assert len(list(cfg.benchmark_uris_iterator(env))) == 5 * 6
|
CompilerGym-development
|
examples/llvm_rl/tests/testing_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from llvm_rl.model.environment import Environment
def test_basic_environment_config():
model = Environment(id="llvm-ic-v0", max_episode_steps=3)
with model.make_env() as env:
assert env.spec.id == "llvm-ic-v0"
assert env.reward_space == "IrInstructionCountOz"
# Test max episode steps:
env.reset()
_, _, done, _ = env.step(env.action_space.sample()) # step 1
assert not done
_, _, done, _ = env.step(env.action_space.sample()) # step 2
assert not done
_, _, done, _ = env.step(env.action_space.sample()) # step 3
assert done
def test_reward_and_observation_space():
model = Environment(
id="llvm-ic-v0",
max_episode_steps=3,
observation_space="Ir",
reward_space="ObjectTextSizeBytes",
)
with model.make_env() as env:
assert env.reward_space == "ObjectTextSizeBytes"
assert env.observation_space_spec.id == "Ir"
def test_wrappers():
model = Environment(
id="llvm-ic-v0",
max_episode_steps=3,
wrappers=[
{
"wrapper": "ConstrainedCommandline",
"args": {"flags": ["-mem2reg", "-reg2mem"]},
}
],
)
with model.make_env() as env:
assert env.action_space.flags == ["-mem2reg", "-reg2mem"]
assert env.action_space.n == 2
|
CompilerGym-development
|
examples/llvm_rl/tests/environment_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from llvm_rl.model.training import Training
from omegaconf import OmegaConf
def test_parse_yaml():
cfg = Training(
**OmegaConf.create(
"""\
timeout_hours: 10
episodes: 1000
benchmarks:
- uris:
- benchmark://cbench-v1/qsort
- dataset: benchmark://cbench-v1
max_benchmarks: 2
validation:
benchmarks:
- uris:
- benchmark://cbench-v1/qsort
"""
)
)
assert cfg.timeout_hours == 10
|
CompilerGym-development
|
examples/llvm_rl/tests/training_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from llvm_rl.model.validation import Validation
from omegaconf import OmegaConf
import compiler_gym
from compiler_gym.datasets import Benchmark
def test_validation_benchmarks_uris_list():
cfg = Validation(
**OmegaConf.create(
"""\
benchmarks:
- uris:
- benchmark://cbench-v1/qsort
- dataset: benchmark://cbench-v1
max_benchmarks: 2
"""
)
)
with compiler_gym.make("llvm-v0") as env:
assert list(cfg.benchmarks_iterator(env)) == [
"benchmark://cbench-v1/qsort",
"benchmark://cbench-v1/adpcm",
"benchmark://cbench-v1/bitcount",
]
bm = list(cfg.benchmarks_iterator(env))[0]
print(type(bm).__name__)
assert isinstance(bm, Benchmark)
assert list(cfg.benchmark_uris_iterator(env)) == [
"benchmark://cbench-v1/qsort",
"benchmark://cbench-v1/adpcm",
"benchmark://cbench-v1/bitcount",
]
|
CompilerGym-development
|
examples/llvm_rl/tests/validation_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from llvm_rl.model.training import Benchmarks
from omegaconf import OmegaConf
from pydantic import ValidationError
import compiler_gym
from compiler_gym.datasets import Benchmark
def test_benchmarks_missing_dataset_and_uris():
with pytest.raises(ValidationError):
Benchmarks()
def test_benchmarks_uris_list():
cfg = Benchmarks(uris=["benchmark://cbench-v1/qsort"])
assert cfg.uris == ["benchmark://cbench-v1/qsort"]
with compiler_gym.make("llvm-v0") as env:
assert list(cfg.benchmarks_iterator(env)) == ["benchmark://cbench-v1/qsort"]
assert isinstance(list(cfg.benchmarks_iterator(env))[0], Benchmark)
assert list(cfg.benchmark_uris_iterator(env)) == ["benchmark://cbench-v1/qsort"]
def test_validation_benchmarks_uris_list_yaml():
cfg = Benchmarks(
**OmegaConf.create(
"""\
uris:
- benchmark://cbench-v1/qsort
"""
)
)
assert len(cfg.uris) == 1
|
CompilerGym-development
|
examples/llvm_rl/tests/benchmarks_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import sys
import warnings
from pathlib import Path
from llvm_rl.model.model import Model
from omegaconf import OmegaConf
def test_local_train(tmp_path: Path):
model = Model(
**OmegaConf.create(
f"""\
experiment: tiger
working_directory: {tmp_path}/outputs
executor:
type: local
cpus: 2
environment:
id: llvm-autophase-ic-v0
max_episode_steps: 3
agent:
type: PPOTrainer
args:
lr: 1.e-3
model:
fcnet_hiddens: [16]
fcnet_activation: relu
framework: torch
rollout_fragment_length: 8
train_batch_size: 8
sgd_minibatch_size: 8
training:
timeout_hours: 0.25
episodes: 32
benchmarks:
- dataset: benchmark://cbench-v1
max_benchmarks: 3
validation:
benchmarks:
- dataset: benchmark://cbench-v1
max_benchmarks: 3
testing:
timeout_hours: 0.25
benchmarks:
- dataset: benchmark://cbench-v1
max_benchmarks: 3
"""
)
)
warnings.filterwarnings("ignore", category=DeprecationWarning)
model.train()
print("Outputs", list((tmp_path / "outputs").iterdir()), file=sys.stderr)
assert (tmp_path / "outputs").is_dir()
with open(tmp_path / "outputs" / "training-model.json") as f:
assert json.load(f)
assert (tmp_path / "outputs" / "train").is_dir()
print("Outputs", list((tmp_path / "outputs" / "train").iterdir()), file=sys.stderr)
# Check that a checkpoint was created.
assert (
tmp_path
/ "outputs"
/ "train"
/ "tiger-C0-R0"
/ "checkpoint_000001"
/ "checkpoint-1"
).is_file()
# TODO(github.com/facebookresearch/CompilerGym/issues/487): Fix test on CI.
if os.environ.get("CI", "") != "":
return
model.test()
print(
"Trail files",
list((tmp_path / "outputs" / "train" / "tiger-C0-R0").iterdir()),
file=sys.stderr,
flush=True,
)
assert (tmp_path / "outputs" / "train" / "tiger-C0-R0" / "test-meta.json").is_file()
assert (
tmp_path / "outputs" / "train" / "tiger-C0-R0" / "test-results.json"
).is_file()
|
CompilerGym-development
|
examples/llvm_rl/tests/training_integration_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import List
import numpy as np
from pydantic import BaseModel, validator
from ray.rllib.agents.dqn import ApexTrainer, R2D2Trainer # noqa
from ray.rllib.agents.impala import ImpalaTrainer # noqa
from ray.rllib.agents.ppo import PPOTrainer # noqa
from compiler_gym.datasets import BenchmarkUri
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.util.timer import Timer
logger = logging.getLogger(__name__)
class InferenceResult(BaseModel):
"""Represents the result of running an RL agent on a problem."""
# The benchmark URI.
benchmark: str
inference_walltime_seconds: float
commandline: str
episode_len: int
instruction_count_init: int
instruction_count_final: int
instruction_count_oz: int
instruction_count_reduction: float
"""The final instruction count, normalized to -Oz."""
object_size_init: int
object_size_final: int
object_size_oz: int
object_size_reduction: float
"""The final object size, normalized to -Oz."""
runtimes_init: List[float]
runtimes_final: List[float]
runtimes_o3: List[float]
runtime_reduction: float
"""The final runtime, normalized to -Oz."""
@classmethod
def from_agent(
cls,
env: LlvmEnv,
agent,
runtime: bool = True,
runtimes_count: int = 30,
):
# We calculate our own reward at the end, no need for incremental
# rewards during inference.
env.reward_space = None
# Run inference on the environment.
observation, done = env.reset(), False
with Timer() as inference_timer:
while not done:
action = agent.compute_action(observation)
observation, _, done, _ = env.step(action)
instruction_count_init = env.unwrapped.observation["IrInstructionCountO0"]
instruction_count_final = env.unwrapped.observation["IrInstructionCount"]
instruction_count_oz = env.unwrapped.observation["IrInstructionCountOz"]
object_size_init = env.unwrapped.observation["ObjectTextSizeO0"]
object_size_final = env.unwrapped.observation["ObjectTextSizeBytes"]
object_size_oz = env.unwrapped.observation["ObjectTextSizeOz"]
runtimes_init = []
runtimes_o3 = []
runtimes_final = []
try:
if runtime and env.unwrapped.observation["IsRunnable"]:
env.send_param(
"llvm.set_runtimes_per_observation_count", str(runtimes_count)
)
env.unwrapped.observation["Runtime"] # warmup
runtimes_final = env.unwrapped.observation["Runtime"].tolist()
assert (
len(runtimes_final) == runtimes_count
), f"{len(runtimes_final)} != {runtimes_count}"
env.reset()
env.send_param(
"llvm.set_runtimes_per_observation_count", str(runtimes_count)
)
env.unwrapped.observation["Runtime"] # warmup
runtimes_init = env.unwrapped.observation["Runtime"].tolist()
assert (
len(runtimes_init) == runtimes_count
), f"{len(runtimes_init)} != {runtimes_count}"
env.send_param("llvm.apply_baseline_optimizations", "-O3")
env.unwrapped.observation["Runtime"] # warmup
runtimes_o3 = env.unwrapped.observation["Runtime"].tolist()
assert (
len(runtimes_o3) == runtimes_count
), f"{len(runtimes_o3)} != {runtimes_count}"
except Exception as e: # pylint: disable=broad-except
logger.warning("Failed to compute runtime: %s", e)
return cls(
benchmark=env.benchmark.uri,
inference_walltime_seconds=inference_timer.time,
commandline=env.action_space.to_string(env.actions),
episode_len=len(env.actions),
instruction_count_init=instruction_count_init,
instruction_count_final=instruction_count_final,
instruction_count_oz=instruction_count_oz,
instruction_count_reduction=instruction_count_oz
/ max(instruction_count_final, 1),
object_size_init=object_size_init,
object_size_final=object_size_final,
object_size_oz=object_size_oz,
object_size_reduction=object_size_oz / max(object_size_final, 1),
runtimes_init=runtimes_init,
runtimes_final=runtimes_final,
runtimes_o3=runtimes_o3,
runtime_reduction=np.median(runtimes_o3 or [0])
/ max(np.median(runtimes_final or [0]), 1),
)
@validator("benchmark", pre=True)
def validate_benchmark(cls, value):
if isinstance(value, BenchmarkUri):
return str(value)
return value
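# Note: the *_reduction fields computed above are ratios of the baseline to the
# final value (e.g. instruction_count_oz / instruction_count_final), so values
# greater than 1.0 mean the agent did better than the -Oz (or, for runtime,
# -O3) baseline.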
|
CompilerGym-development
|
examples/llvm_rl/model/inference_result.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .model import Model
__all__ = [
"Model",
]
|
CompilerGym-development
|
examples/llvm_rl/model/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import warnings
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional
import pandas as pd
import yaml
from pydantic import BaseModel, Field
from ray import tune
from compiler_gym.util.executor import Executor
from compiler_gym.util.shell_format import indent, plural
from compiler_gym.util.statistics import geometric_mean
from .agent import Agent
from .environment import Environment
from .inference_result import InferenceResult
from .testing import Testing
from .training import Training
# Ignore import deprecation warnings from ray.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import ray
logger = logging.getLogger(__name__)
class Model(BaseModel):
"""The composition of the full environment, agent, training / testing
regime, and execution environment. Provides the API for training / testing.
"""
# === Start of fields list. ===
executor: Executor
"""The execution environment to use for training / testing jobs."""
environment: Environment = Field(allow_mutation=False)
"""Description of the environment, which defines the particular optimization
problem, the reward signal for training, and the representation of state
that the agent receives.
"""
agent: Agent = Field(allow_mutation=False)
"""The agent describes the RLlib training algorithm that is used."""
training: Training = Field(allow_mutation=False)
"""Description of the training regime: the benchmarks to learn over, and how
long to learn for.
"""
testing: Testing = Field(allow_mutation=False)
"""The testing setup."""
working_directory: Path = Field(allow_mutation=False)
"""The working directory where logs and other artifacts are written to."""
experiment: str = Field(default="unnamed_experiment", allow_mutation=False)
"""A logical name for this experiment. This is used for naming RLlib
trials.
"""
num_replicas: int = Field(default=1, ge=1, allow_mutation=False)
"""The number of duplicate jobs to run. E.g. for training, this will train
:code:`n` independent models in trials that share the same working
directory.
"""
job_id: int = Field(default=0, allow_mutation=False)
"""An optional numeric job ID."""
seed: int = Field(default=0xCC, allow_mutation=False)
"""The numeric seed to use"""
compiler_gym_version: str = Field(default="", allow_mutation=False)
"""The compiler_gym.__version__ string."""
# === Start of public API. ===
def train(self) -> None:
"""Run the training job for this model."""
logger.info("Model:\n%s", indent(self.yaml(), 4))
logger.info("Starting training job in %s", self.working_directory)
# The working directory may already have been created by hydra, so we
# will check for the training-model.json file below to see if this
# directory has already been used for training.
self.working_directory.mkdir(parents=True, exist_ok=True)
# Dump the parsed config to file.
model_dump_path = self.working_directory / "training-model.json"
assert not model_dump_path.is_file(), (
f"Refusing to overwrite file: {model_dump_path}. "
"Is the working directory clean?"
)
with open(model_dump_path, "w") as f:
print(json.dumps(json.loads(self.json()), indent=2), file=f)
with self.executor.get_executor(
logs_dir=self.working_directory / "slurm",
# Provision an extra hour for RLlib overhead.
timeout_hours=self.training.timeout_hours + 1,
) as executor:
for i in range(self.num_replicas):
executor.submit(train_job, model=self, seed=self.seed + i, replica_id=i)
def test_checkpoints(
self, metric: str = "evaluation/episode_reward_mean"
) -> Iterable[Path]:
df = self.dataframe
if not len(df):
return
for logsdir in set(df["logsdir"].values):
sdf = df[(df["logsdir"] == logsdir) & df["checkpoint"]]
if not len(sdf):
continue
sdf = sdf.reset_index()
idx = sdf[metric].idxmax()
best = sdf.iloc[idx]
logger.info(
"Selected checkpoint %s with %s %f",
best["checkpoint_path"],
metric,
best[metric],
)
yield Path(best["checkpoint_path"])
def test(self) -> None:
"""Run the testing job for this model."""
# Gather all the jobs to run now. We will submit them all in a batch.
jobs = []
for checkpoint in self.test_checkpoints():
assert checkpoint.is_file(), f"Checkpoint not found: {checkpoint}"
# Go up two levels to the main directory
test_dir = checkpoint.parent.parent
assert (test_dir / "progress.csv").is_file()
# Try not to have to launch a job.
if (test_dir / "test-meta.json").is_file():
with open(test_dir / "test-meta.json") as f:
meta = json.load(f)
if meta.get("checkpoint") == checkpoint.name:
logger.info(
"Already have test results for %s, nothing to do",
checkpoint.name,
)
continue
jobs.append((checkpoint, test_dir))
# Submit all the jobs now.
with self.executor.get_executor(
logs_dir=self.working_directory / "slurm",
timeout_hours=self.testing.timeout_hours,
# Single threaded evaluation loop.
cpus=2,
) as executor:
for checkpoint, test_dir in jobs:
executor.submit(
test_job, model=self, checkpoint=checkpoint, outputs_dir=test_dir
)
def yaml(self) -> str:
"""Serialize the model configuration to a YAML string."""
# We can't directly dump the dict() representation because we need to
# simplify the types first, so we go via JSON.
simplified_data = json.loads(self.json())
return yaml.dump(simplified_data)
@property
def dataframe(self) -> pd.DataFrame:
if not (self.working_directory / "train").is_dir():
return pd.DataFrame([])
dfs = []
for subdir in (self.working_directory / "train").iterdir():
if not subdir.is_dir():
continue
df = self._trial_to_dataframe(subdir)
if df is not None:
dfs.append(df)
df.to_csv(subdir / "progress-redux.csv")
return pd.concat(dfs) if dfs else pd.DataFrame([])
def _trial_to_dataframe(self, directory: Path) -> Optional[pd.DataFrame]:
components = directory.name.split("-")
if len(components) < 3:
logger.warning(
"Directory name does not match expected "
"{experiment}-{config}-{replica} format: %s",
directory,
)
return
replica = components[-1]
config = components[-2]
experiment = "-".join(components[:-2])
if not (directory / "progress.csv").is_file():
logger.warning("File not found: %s", directory / "progress.csv")
return
try:
df = pd.read_csv(directory / "progress.csv")
except pd.errors.EmptyDataError:
return None
df.insert(0, "logsdir", str(directory))
df.insert(
0,
"experiment_timestamp",
" ".join(
[
self.working_directory.parent.parent.name,
self.working_directory.parent.name,
]
),
)
df.insert(0, "trial_name", directory.name)
df.insert(0, "replica", replica)
df.insert(0, "config", config)
df.insert(0, "experiment", experiment)
df["checkpoint"] = [
(directory / f"checkpoint_{i:06d}").is_dir()
for i in df["training_iteration"]
]
df["checkpoint_path"] = [
str(directory / f"checkpoint_{i:06d}" / f"checkpoint-{i}")
if (directory / f"checkpoint_{i:06d}").is_dir()
else None
for i in df["training_iteration"]
]
df["evaluation/episode_reward_geomean"] = [
geometric_mean(eval(x)) for x in df["evaluation/hist_stats/episode_reward"]
]
df["episode_reward_geomean"] = [
geometric_mean(eval(x)) for x in df["hist_stats/episode_reward"]
]
df["complete"] = [
min(d / self.training.episodes, 1) for d in df["episodes_total"]
]
df["cpus"] = self.executor.cpus
df["gpus"] = self.executor.gpus
df = df.set_index(["experiment", "config", "replica", "training_iteration"])
return df
@property
def test_dataframes(self) -> Dict[str, pd.DataFrame]:
"""Get a dictionary of test dataframes, keyed by trial name."""
results = {}
if not (self.working_directory / "train").is_dir():
return results
for subdir in (self.working_directory / "train").iterdir():
if not subdir.is_dir():
continue
if not (subdir / "test-results.json").is_file():
continue
if not (subdir / "test-meta.json").is_file():
continue
with open(subdir / "test-meta.json") as f:
meta = json.load(f)
df = pd.read_json(subdir / "test-results.json")
df["test_checkpoint"] = meta["checkpoint"]
df["test_timestamp"] = meta["timestamp"]
results[subdir.name] = df
return results
@classmethod
def from_logsdir(cls, working_directory: Path) -> List["Model"]:
"""Reconstruct models by recursively reading from logs directories."""
def find_models(dir: Path) -> Iterable[Path]:
"""Attempt to locate models recursively from logs directories."""
if (dir / "training-model.json").is_file():
yield dir / "training-model.json"
return
for entry in dir.iterdir():
if entry.is_dir():
yield from find_models(entry)
models: List[Model] = []
for model_file in find_models(working_directory):
with open(model_file) as f:
try:
model = json.load(f)
model["working_directory"] = model_file.parent
models.append(cls(**model))
except json.decoder.JSONDecodeError as e:
logger.warning(
"Failed to parse JSON for model file %s: %s", model_file, e
)
continue
return models
# === Start of implementation details. ===
def make_rllib_trainer_config(self, seed: int) -> Dict[str, Any]:
"""Coerce user preferences into a dictionary of arguments for RLlib
trainer class.
"""
with self.environment.make_env() as env:
evaluation_num_episodes = len(
list(self.training.validation.benchmark_uris_iterator(env))
)
logger.info(
"Calculated the number of episodes per evaluation to be %d",
evaluation_num_episodes,
)
if not evaluation_num_episodes:
raise ValueError("#. of validation episodes is 0!")
derived_args = {
"env": self.environment.rllib_id,
"seed": seed,
"horizon": self.environment.max_episode_steps,
# Reserve one CPU for the trainer, the rest for rollout workers.
"num_workers": self.executor.cpus - 1,
"num_cpus_per_worker": 1,
"num_gpus": self.executor.gpus,
# Set the number of evaluation episodes to the size of the
# validation set.
"evaluation_num_episodes": evaluation_num_episodes,
# 1 checkpoint = 1 evaluation.
"evaluation_interval": 1,
# Argument dictionary passed to make_env().
"env_config": {"type": "training"},
"evaluation_config": {
"env_config": {"type": "validation"},
},
}
# Merge with the user args. In case of conflict, the user's arg value
# overrides the derived arg value.
return merge(derived_args, self.agent.args)
class Config:
validate_assignment = True
def test_job(model: Model, checkpoint: Path, outputs_dir: Path) -> None:
logger.info(
"Initializing ray with 2 cpus and %d GPUs",
model.executor.gpus,
)
ray.init(
num_cpus=2,
num_gpus=model.executor.gpus,
include_dashboard=False,
)
tune.register_env(
model.environment.rllib_id, lambda _: model.environment.make_env()
)
agent = model.agent.make_agent(model.environment)
logger.info(
"Restoring %s agent with %s trainable params from %s",
model.agent.type,
f"{model.agent.trainable_parameters_count(agent):,}",
checkpoint,
)
agent.restore(str(checkpoint))
# Run inference on all of the test benchmarks.
results: List[InferenceResult] = []
with model.environment.make_env() as env:
test_benchmarks = list(model.testing.benchmark_uris_iterator(env))
for i, benchmark in enumerate(test_benchmarks, start=1):
env.reset(benchmark=benchmark)
result = InferenceResult.from_agent(
env, agent, runtime=model.environment.reward_space == "Runtime"
)
logger.info(
"Test %s of %s: %s",
f"{i:,d}",
f"{len(test_benchmarks):,d}",
result,
)
results.append(result)
# Do this once the actual work has been done so that failed jobs
# don't leave meta files lying around.
with open(outputs_dir / "test-results.json", "w") as f:
json.dump([r.dict() for r in results], f)
with open(outputs_dir / "test-meta.json", "w") as f:
json.dump(
{
"timestamp": datetime.now().isoformat(),
"checkpoint": checkpoint.name,
},
f,
)
# Explicit call to ray shutdown here so that multiple consecutive
# jobs can initialize ray with different resource requirements.
ray.shutdown()
def train_job(model: Model, seed: int, replica_id: int) -> None:
logger.info(
"Initializing ray with %d %s and %d %s",
model.executor.cpus,
plural(model.executor.cpus, "CPU", "CPUs"),
model.executor.gpus,
plural(model.executor.gpus, "GPU", "GPUs"),
)
ray.init(
num_cpus=model.executor.cpus,
num_gpus=model.executor.gpus,
include_dashboard=False,
)
logger.info("Registered RLlib environment %s", model.environment.rllib_id)
def make_env(env_config: Dict[str, Any]):
"""Construct a training or validation environment."""
env = model.environment.make_env()
if "type" not in env_config:
raise KeyError(f"No type in dict: {env_config}")
if env_config["type"] == "training":
return model.training.wrap_env(env)
elif env_config["type"] == "validation":
return model.training.validation.wrap_env(env)
raise ValueError(f"Unknown environment type: {env_config['type']}")
tune.register_env(
model.environment.rllib_id,
make_env,
)
def trial_name_creator(trial):
del trial # Unused
        # NOTE(cummins): Only a single trial per instance.
return f"{model.experiment}-C{model.job_id}-R{replica_id}"
def trial_dirname_creator(trial):
del trial # Unused
return f"{model.experiment}-C{model.job_id}-R{replica_id}"
rllib_opts = {
"config": model.make_rllib_trainer_config(seed),
"time_budget_s": model.training.timeout_hours * 3600,
"stop": {
"episodes_total": model.training.episodes,
},
"reuse_actors": model.agent.reuse_actors,
"checkpoint_freq": model.agent.checkpoint_freq,
"checkpoint_at_end": model.agent.checkpoint_at_end,
# Write RLlib files to: "<working_directory>/train/<experiment_name>-<job_id>".
"local_dir": str(model.working_directory),
"name": "train",
}
logger.info("RLlib options:\n%s", json.dumps(rllib_opts, indent=2))
tune.run(
model.agent.actual_type,
trial_name_creator=trial_name_creator,
trial_dirname_creator=trial_dirname_creator,
**rllib_opts,
)
# Explicit call to ray shutdown here so that multiple consecutive
# jobs can initialize ray with different resource requirements.
ray.shutdown()
def merge(a, b, path=None):
"Update values in `a` with values from `b`. Supported nested dicts."
if path is None:
path = []
for key in b:
if key in a and isinstance(a[key], dict) and isinstance(b[key], dict):
merge(a[key], b[key], path + [str(key)])
else:
a[key] = b[key]
return a
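# A minimal sketch of how `merge` behaves (illustrative only, not part of the
# original module): nested dicts are merged recursively, while scalar values
# from `b` overwrite those in `a`.
#
#     >>> merge({"a": {"x": 1}, "b": 2}, {"a": {"y": 3}, "b": 4})
#     {'a': {'x': 1, 'y': 3}, 'b': 4}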
|
CompilerGym-development
|
examples/llvm_rl/model/model.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from typing import Any, Dict
import numpy as np
from omegaconf import DictConfig, ListConfig
from pydantic import BaseModel, Field, validator
# Ignore import deprecation warnings from ray.
warnings.filterwarnings("ignore", category=DeprecationWarning)
from ray.rllib.agents.a3c import A2CTrainer # noqa
from ray.rllib.agents.a3c import A3CTrainer # noqa
from ray.rllib.agents.dqn import ApexTrainer, R2D2Trainer # noqa
from ray.rllib.agents.impala import ImpalaTrainer # noqa
from ray.rllib.agents.ppo import PPOTrainer # noqa
from .environment import Environment # noqa: E402
class Agent(BaseModel):
"""Represents the RL algorithm used."""
# === Start of fields list. ===
type: str = Field(allow_mutation=False)
"""The name of the class used to instantiate the RL algorithm as a string,
    e.g. :code:`"PPOTrainer"`. The class must be imported into this module to
    be used.
"""
args: Dict[str, Any] = Field(default={}, allow_mutation=False)
"""A dictionary of arguments that are passed into the
:code:`type` constructor.
"""
checkpoint_freq: int = Field(default=1, ge=1, allow_mutation=False)
"""How frequently to checkpoint the agents progress, in rllib training
iterations.
"""
checkpoint_at_end: bool = Field(default=True, allow_mutation=False)
"""Whether to produce a final checkpoint at the end of training.
"""
reuse_actors: bool = Field(default=True, allow_mutation=False)
"""Whether to reuse workers between training iterations."""
# === Start of public API. ===
@property
def actual_type(self):
"""Get the trainer class type."""
return self._to_class(self.type)
@property
def rllib_trainer_config_dict(self):
"""Merge generated arguments with user trainer args dict."""
config = {
"log_level": "INFO",
}
config.update(self.args)
return config
def make_agent(self, environment: Environment):
"""Construct an agent object."""
try:
return self.actual_type(config=self.args, env=environment.rllib_id)
except TypeError as e:
raise TypeError(
"Error constructing RLlib trainer class "
f"{self.actual_type.__name__}: {e}"
) from e
def trainable_parameters_count(self, agent):
"""Given an agent instance (created by :code:`make_agent()`), compute
and return the number of trainable parameters.
"""
framework = self.args.get("framework")
model = agent.get_policy().model
if framework == "torch":
return np.sum([np.prod(var.shape) for var in model.trainable_variables()])
elif framework == "tf":
return np.sum(
[np.prod(v.get_shape().as_list()) for v in model.trainable_variables()]
)
raise ValueError(f"Unknown framework: {framework}")
# === Start of implementation details. ===
@staticmethod
def _to_class(value):
try:
return globals()[value]
except KeyError as e:
raise ValueError(
f"Unknown RLlib trainer class: {value}.\n"
"Make sure it is imported in rl/model/agent.py"
) from e
@validator("type")
def validate_type(cls, value):
cls._to_class(value)
return value
@validator("args", pre=True)
def validate_args(cls, value):
def omegaconf_to_py(x):
if isinstance(x, DictConfig):
return {k: omegaconf_to_py(v) for k, v in x.items()}
elif isinstance(x, ListConfig):
return [omegaconf_to_py(v) for v in x]
else:
return x
return omegaconf_to_py(value)
class Config:
validate_assignment = True
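# Illustrative sketch (not part of the original module): constructing an Agent
# config from plain Python values. The trainer type and args are assumptions
# chosen for the example.
#
#     >>> agent = Agent(type="PPOTrainer", args={"framework": "torch"})
#     >>> agent.actual_type.__name__
#     'PPOTrainer'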
|
CompilerGym-development
|
examples/llvm_rl/model/agent.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Optional
from llvm_autotuning.just_keep_going_env import JustKeepGoingEnv
from llvm_rl.wrappers import * # noqa wrapper definition
from pydantic import BaseModel, Field, validator
from pydantic.class_validators import root_validator
import compiler_gym
from compiler_gym import CompilerEnv
from compiler_gym.wrappers import * # noqa wrapper definitions
from compiler_gym.wrappers import TimeLimit
class EnvironmentWrapperConfig(BaseModel):
"""Description of a CompilerEnvWrapper class."""
# === Start of fields list. ===
wrapper: str = Field(allow_mutation=False)
"""The name of the wrapper class. This class name must be imported into this
module.
"""
args: Dict[str, Any] = Field(default={}, allow_mutation=False)
""""A dictionary of arguments to pass to the wrapper constructor."""
# === Start of public API. ===
@property
def wrapper_class(self):
"""Return the wrapper class type."""
return self._to_class(self.wrapper)
def wrap(self, env: CompilerEnv) -> CompilerEnv:
"""Wrap the given environment."""
try:
return self.wrapper_class(env=env, **self.args)
except TypeError as e:
raise TypeError(
f"Error constructing CompilerEnv wrapper {self.wrapper_class.__name__}: {e}"
) from e
# === Start of implementation details. ===
@validator("wrapper")
def validate_wrapper(cls, value):
# Check that the class can be constructed.
cls._to_class(value)
return value
@staticmethod
def _to_class(value: str):
try:
return globals()[value]
except KeyError as e:
raise ValueError(
f"Unknown wrapper class: {value}\n"
"Make sure it is imported in rl/model/environment.py"
) from e
class Config:
validate_assignment = True
class Environment(BaseModel):
"""Represents a CompilerEnv environment."""
id: str = Field(allow_mutation=False)
"""The environment ID, as passed to :code:`gym.make(...)`."""
reward_space: Optional[str] = Field(default=None, allow_mutation=False)
"""The reward space to use, as a string."""
observation_space: Optional[str] = Field(default=None, allow_mutation=False)
"""The observation space to use, as a string."""
max_episode_steps: int = Field(allow_mutation=False, gt=0)
"""The maximum number of steps in an episode of this environment. For the
sake of consistency this *must* be defined.
"""
wrappers: List[EnvironmentWrapperConfig] = Field(default=[], allow_mutation=False)
"""A list of wrapper classes to apply to the environment."""
rllib_id: Optional[str] = Field(allow_mutation=False)
"""The ID of the custom environment to register with RLlib. This shows up in
the logs but has no effect on behavior. Defaults to the `id` value.
"""
# === Start of public API. ===
def make_env(self) -> CompilerEnv:
"""Construct a compiler environment from the given config."""
env = compiler_gym.make(self.id)
if self.observation_space:
env.observation_space = self.observation_space
if self.reward_space:
env.reward_space = self.reward_space
for wrapper in self.wrappers:
env = wrapper.wrap(env)
# Wrap the env to ignore errors during search.
env = JustKeepGoingEnv(env)
env = TimeLimit(env, max_episode_steps=self.max_episode_steps)
return env
# === Start of implementation details. ===
@validator("id")
def validate_id(cls, value):
assert (
value in compiler_gym.COMPILER_GYM_ENVS
), f"Not a CompilerGym environment: {value}"
return value
@validator("wrappers", pre=True)
def validate_wrappers(cls, value) -> List[EnvironmentWrapperConfig]:
# Convert the omegaconf ListConfig into a list of
# EnvironmentWrapperConfig objects.
return [EnvironmentWrapperConfig(**v) for v in value]
@root_validator
def rllib_id_default_value(cls, values):
values["rllib_id"] = values["rllib_id"] or values["id"]
return values
class Config:
validate_assignment = True
arbitrary_types_allowed = True
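# Illustrative sketch (not part of the original module): a minimal Environment
# config. The field values are assumptions for the example; make_env() applies
# the configured wrappers plus JustKeepGoingEnv and TimeLimit.
#
#     >>> env_config = Environment(
#     ...     id="llvm-v0",
#     ...     reward_space="IrInstructionCount",
#     ...     observation_space="Autophase",
#     ...     max_episode_steps=45,
#     ... )
#     >>> env = env_config.make_env()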
|
CompilerGym-development
|
examples/llvm_rl/model/environment.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from itertools import islice
from typing import Iterable, List
import numpy as np
from pydantic import BaseModel, Field, validator
from compiler_gym.datasets import Benchmark
from compiler_gym.envs import CompilerEnv
from .benchmarks import Benchmarks
logger = logging.getLogger(__name__)
class Testing(BaseModel):
"""The testing regime."""
__test__ = False # Prevent pytest from thinking that this class is a test.
# === Start of fields list. ===
timeout_hours: float = Field(allow_mutation=False, gt=0)
"""The timeout for test jobs, in hours."""
benchmarks: List[Benchmarks] = Field(allow_mutation=False)
"""The set of benchmarks to test on."""
runs_per_benchmark: int = Field(default=1, ge=1, allow_mutation=False)
"""The number of inference episodes to run on each benchmark. If the
environment and policy are deterministic then running multiple episodes per
benchmark is only useful for producing accurate aggregate measurements of
inference walltime.
"""
# === Start of public API. ===
def benchmarks_iterator(self, env: CompilerEnv) -> Iterable[Benchmark]:
"""Return an iterator over the test benchmarks."""
for _ in range(self.runs_per_benchmark):
for bm in self.benchmarks:
yield from bm.benchmarks_iterator(env)
def benchmark_uris_iterator(self, env: CompilerEnv) -> Iterable[str]:
"""Return an iterator over the test benchmark URIs."""
for _ in range(self.runs_per_benchmark):
for bm in self.benchmarks:
yield from bm.benchmark_uris_iterator(env)
# === Start of implementation details. ===
@validator("benchmarks", pre=True)
def validate_benchmarks(cls, value):
return [Benchmarks(**v) for v in value]
class Config:
validate_assignment = True
def get_testing_benchmarks(
env: CompilerEnv, max_benchmarks: int = 50, seed: int = 0
) -> List[str]:
rng = np.random.default_rng(seed=seed)
for dataset in env.datasets:
if dataset.name == "generator://csmith-v0":
yield from islice(dataset.benchmarks(), 50)
elif not dataset.size or dataset.size > max_benchmarks:
logger.info(
"Selecting random %d benchmarks from dataset %s of size %d",
max_benchmarks,
dataset,
dataset.size,
)
for _ in range(max_benchmarks):
yield dataset.random_benchmark(rng)
else:
logger.info(
"Selecting all %d benchmarks from dataset %s", dataset.size, dataset
)
yield from dataset.benchmarks()
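# Illustrative sketch (not part of the original module): a Testing config that
# evaluates a small, capped benchmark set once per benchmark. The values are
# assumptions chosen for the example.
#
#     >>> testing = Testing(
#     ...     timeout_hours=1,
#     ...     benchmarks=[{"dataset": "benchmark://cbench-v1", "max_benchmarks": 5}],
#     ...     runs_per_benchmark=1,
#     ... )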
|
CompilerGym-development
|
examples/llvm_rl/model/testing.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from itertools import islice
from typing import Iterable, List, Union
from pydantic import BaseModel, Field, root_validator, validator
from compiler_gym.datasets import Benchmark, BenchmarkUri
from compiler_gym.envs import CompilerEnv
class Benchmarks(BaseModel):
"""Represents a set of benchmarks to use for training/validation/testing.
There are two ways of describing benchmarks, either as a list of benchmark
URIs:
benchmarks:
uris:
- benchmark://cbench-v1/adpcm
- benchmark://cbench-v1/ghostscript
Or as a dataset to iterate over:
benchmarks:
dataset: benchmark://cbench-v1
max_benchmarks: 20
"""
# === Start of fields list. ===
dataset: str = Field(default=None, allow_mutation=False)
"""The name of a dataset to iterate over. If set, benchmarks are produced
by iterating over this dataset in order. If not set, the :code:`uris` list
must be provided.
"""
uris: List[str] = Field(default=[], allow_mutation=False)
"""A list of URIs to iterate over."""
max_benchmarks: int = Field(default=0, ge=0, allow_mutation=False)
"""The maximum number of benchmarks to yield from the given dataset or URIs
list.
"""
benchmarks_start_at: int = Field(default=0, ge=0, allow_mutation=False)
"""An offset into the dataset or URIs list to start iterating from.
Note that using very large offsets will slow things down as the
implementation still has to iterate over the excluded benchmarks.
"""
# === Start of public API. ===
def benchmarks_iterator(self, env: CompilerEnv) -> Iterable[Benchmark]:
"""Return an iterator over the benchmarks."""
return self._benchmark_iterator(env)
def benchmark_uris_iterator(self, env: CompilerEnv) -> Iterable[str]:
"""Return an iterator over the URIs of the benchmarks."""
return self._benchmark_iterator(env, uris=True)
# === Start of implementation details. ===
@root_validator
def check_that_either_dataset_or_uris_is_set(cls, values):
assert values.get("dataset") or values.get(
"uris"
), "Neither dataset or uris given"
return values
@validator("uris", pre=True)
def validate_uris(cls, value, *, values, **kwargs):
del kwargs
for uri in value:
uri = BenchmarkUri.from_string(uri)
assert uri.scheme and uri.dataset, f"Invalid benchmark URI: {uri}"
return list(value)
def _benchmark_iterator(
self, env: CompilerEnv, uris: bool = False
) -> Union[Iterable[Benchmark], Iterable[str]]:
return (
self._uris_iterator(env, uris)
if self.uris
else self._dataset_iterator(env, uris)
)
def _uris_iterator(
self, env: CompilerEnv, uris: bool = False
) -> Union[Iterable[Benchmark], Iterable[str]]:
"""Iterate from a URIs list."""
start = self.benchmarks_start_at
n = len(self.uris)
if self.max_benchmarks:
n = min(self.max_benchmarks, n)
if uris:
# Shortcut in case we already have a list of URIs that we can slice
# rather than iterating over.
return iter(self.uris[start:n])
return islice((env.datasets.benchmark(u) for u in self.uris), start, start + n)
def _dataset_iterator(
self, env: CompilerEnv, uris: bool = False
) -> Union[Iterable[Benchmark], Iterable[str]]:
"""Iterate from a dataset name."""
dataset = env.datasets[self.dataset]
dataset.install()
n = dataset.size or self.max_benchmarks # dataset.size == 0 for inf
if self.max_benchmarks:
n = min(self.max_benchmarks, n)
start = self.benchmarks_start_at
iterator = dataset.benchmark_uris if uris else dataset.benchmarks
return islice(iterator(), start, start + n)
class Config:
validate_assignment = True
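# Illustrative sketch (not part of the original module), mirroring the two
# configuration forms from the class docstring as constructor calls:
#
#     >>> by_uri = Benchmarks(uris=["benchmark://cbench-v1/adpcm"])
#     >>> by_dataset = Benchmarks(dataset="benchmark://cbench-v1", max_benchmarks=20)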
|
CompilerGym-development
|
examples/llvm_rl/model/benchmarks.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Iterable, List
from pydantic import BaseModel, Field, validator
from compiler_gym.datasets import Benchmark
from compiler_gym.envs import CompilerEnv
from compiler_gym.wrappers import (
CycleOverBenchmarks,
CycleOverBenchmarksIterator,
IterateOverBenchmarks,
)
from .benchmarks import Benchmarks
from .validation import Validation
class Training(BaseModel):
"""The training regime."""
timeout_hours: float = Field(allow_mutation=False, gt=0)
"""The maximum runtime of the training job."""
episodes: int = Field(ge=1, allow_mutation=False)
"""The number of episodes to train for."""
benchmarks: List[Benchmarks] = Field(allow_mutation=False)
"""The programs to train over."""
validation: Validation = Field(allow_mutation=False)
"""The validation set."""
cycle_over_benchmarks: bool = Field(default=True, allow_mutation=False)
"""If :code:`True`, the benchmark iterator repeats itself once an entire
epoch has completed. Set this to :code:`False` to disable benchmarks from
being cached.
"""
cache_benchmarks: bool = Field(default=False, allow_mutation=False)
"""If :code:`True`, construct the actual benchmark objects during iteration.
This will make it faster to cycle over the same set of benchmarks multiple
times, but requires enough resources to hold all of the benchmark objects in
memory. If :code:`False`, just the benchmark URIs are cached in memory.
"""
# === Start of public API. ===
def benchmarks_iterator(self, env: CompilerEnv) -> Iterable[Benchmark]:
"""Return an iterator over the training benchmarks."""
for bm in self.benchmarks:
yield from bm.benchmarks_iterator(env)
def benchmark_uris_iterator(self, env: CompilerEnv) -> Iterable[str]:
"""Return an iterator over the training benchmark URIs."""
for bm in self.benchmarks:
yield from bm.benchmark_uris_iterator(env)
def wrap_env(self, env: CompilerEnv) -> CompilerEnv:
"""Wrap an environment for use in the training loop that is configured
to iterate over the training benchmarks on each call to :code:`reset()`.
"""
if self.cycle_over_benchmarks and self.cache_benchmarks:
wrapper = CycleOverBenchmarks
elif self.cycle_over_benchmarks:
return CycleOverBenchmarksIterator(
env=env,
make_benchmark_iterator=lambda: self.benchmark_uris_iterator(env),
)
else:
wrapper = IterateOverBenchmarks
iterator = (
self.benchmarks_iterator
if self.cache_benchmarks
else self.benchmark_uris_iterator
)
return wrapper(env=env, benchmarks=iterator(env))
# === Start of implementation details. ===
@validator("benchmarks", pre=True)
def validate_benchmarks(cls, value):
return [Benchmarks(**v) for v in value]
class Config:
validate_assignment = True
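# Illustrative summary (not part of the original module) of how the two flags
# above select the wrapper used by `wrap_env`:
#
#     cycle_over_benchmarks=True,  cache_benchmarks=True   -> CycleOverBenchmarks
#     cycle_over_benchmarks=True,  cache_benchmarks=False  -> CycleOverBenchmarksIterator
#     cycle_over_benchmarks=False                          -> IterateOverBenchmarks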
|
CompilerGym-development
|
examples/llvm_rl/model/training.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Iterable, List
from pydantic import BaseModel, Field, validator
from compiler_gym.datasets import Benchmark
from compiler_gym.envs import CompilerEnv
from compiler_gym.wrappers import CycleOverBenchmarks
from .benchmarks import Benchmarks
class Validation(BaseModel):
"""The validation set which is used for periodically evaluating agent
performance during training.
"""
# === Start of fields list. ===
benchmarks: List[Benchmarks] = Field(allow_mutation=False)
"""The benchmarks to evaluate agent performance on. These must be distinct
from the training and testing sets (this requirement is not enforced by the
API, you have to do it yourself).
"""
# === Start of public API. ===
def benchmarks_iterator(self, env: CompilerEnv) -> Iterable[Benchmark]:
"""Return an iterator over the validation benchmarks."""
for bm in self.benchmarks:
yield from bm.benchmarks_iterator(env)
def benchmark_uris_iterator(self, env: CompilerEnv) -> Iterable[str]:
"""Return an iterator over the training benchmark URIs."""
for bm in self.benchmarks:
yield from bm.benchmark_uris_iterator(env)
def wrap_env(self, env: CompilerEnv) -> CompilerEnv:
"""Wrap an environment for use in the training loop that is configured
to iterate over the validation benchmarks on each call to
:code:`reset()`.
"""
return CycleOverBenchmarks(env=env, benchmarks=self.benchmarks_iterator(env))
# === Start of implementation details. ===
@validator("benchmarks", pre=True)
def validate_benchmarks(cls, value):
return [Benchmarks(**v) for v in value]
class Config:
validate_assignment = True
|
CompilerGym-development
|
examples/llvm_rl/model/validation.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Estimate the immediate reward of different actions using random trials.
This script estimates the immediate reward that running a specific action has by
running trials. A trial is a random episode that ends with the determined
action.
Example Usage
-------------
Evaluate the impact of three passes on the codesize of the cBench-crc32
benchmark:
$ python -m sensitivity_analysis.action_sensitivity_analysis \
--env=llvm-v0 --reward=IrInstructionCountO3 \
--benchmark=cbench-v1/crc32 --num_action_sensitivity_trials=25 \
--action=-add-discriminators,-adce,-mem2reg
Evaluate the single-step immediate reward of all actions on LLVM codesize:
$ python -m sensitivity_analysis.action_sensitivity_analysis -- \
--env=llvm-v0 --reward=IrInstructionCountO3
"""
import random
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import List, Optional
import numpy as np
from absl import app, flags
from sensitivity_analysis.sensitivity_analysis_eval import (
SensitivityAnalysisResult,
run_sensitivity_analysis,
)
import compiler_gym.util.flags.nproc # noqa
from compiler_gym.envs import CompilerEnv
from compiler_gym.util.flags.benchmark_from_flags import benchmark_from_flags
from compiler_gym.util.flags.env_from_flags import env_from_flags
from compiler_gym.util.gym_type_hints import ActionType
from compiler_gym.util.runfiles_path import create_user_logs_dir
from compiler_gym.util.timer import Timer
flags.DEFINE_integer(
"num_action_sensitivity_trials",
100,
"The number of trials to perform when estimating the reward of each action. "
"A trial is a random episode that ends with the determined action. Increasing "
"this number increases the number of trials performed, leading to a higher "
"fidelity estimate of the reward of an action.",
)
flags.DEFINE_integer(
"max_warmup_steps",
25,
"The maximum number of random steps to make before determining the reward of an action.",
)
flags.DEFINE_list(
"action",
[],
"An optional list of actions to evaluate. If not specified, all actions will be evaluated.",
)
flags.DEFINE_integer(
"max_action_attempts_multiplier",
5,
"A trial may fail because the environment crashes, or an action produces an invalid state. "
"Limit the total number of trials performed for each action to "
"max_action_attempts_multiplier * num_trials.",
)
FLAGS = flags.FLAGS
def get_rewards(
action: int,
action_name: str,
reward_space: str,
num_trials: int,
max_warmup_steps: int,
max_attempts_multiplier: int = 5,
) -> SensitivityAnalysisResult:
"""Run random trials to get a list of num_trials immediate rewards."""
rewards, runtimes = [], []
benchmark = benchmark_from_flags()
num_attempts = 0
while (
num_attempts < max_attempts_multiplier * num_trials
and len(rewards) < num_trials
):
num_attempts += 1
with env_from_flags(benchmark=benchmark) as env:
env.observation_space = None
env.reward_space = None
env.reset(benchmark=benchmark)
with Timer() as t:
reward = run_one_trial(env, reward_space, action, max_warmup_steps)
if reward is not None:
rewards.append(reward)
runtimes.append(t.time)
return SensitivityAnalysisResult(
name=action_name, runtimes=np.array(runtimes), rewards=np.array(rewards)
)
def run_one_trial(
env: CompilerEnv, reward_space: str, action: int, max_warmup_steps: int
) -> Optional[float]:
"""Run a random number of "warmup" steps in an environment, then compute
the immediate reward of the given action.
:return: An immediate reward.
"""
num_warmup_steps = random.randint(0, max_warmup_steps)
warmup_actions = [env.action_space.sample() for _ in range(num_warmup_steps)]
env.reward_space = reward_space
_, _, done, _ = env.multistep(warmup_actions)
if done:
return None
_, (reward,), done, _ = env.step(action, reward_spaces=[reward_space])
return None if done else reward
def run_action_sensitivity_analysis(
actions: List[ActionType],
rewards_path: Path,
runtimes_path: Path,
reward_space: str,
num_trials: int,
max_warmup_steps: int,
nproc: int,
max_attempts_multiplier: int = 5,
):
"""Estimate the immediate reward of a given list of actions."""
with env_from_flags() as env:
action_names = env.action_space.names
with ThreadPoolExecutor(max_workers=nproc) as executor:
analysis_futures = {
executor.submit(
get_rewards,
action,
action_names[action],
reward_space,
num_trials,
max_warmup_steps,
max_attempts_multiplier,
)
for action in actions
}
return run_sensitivity_analysis(
analysis_futures=analysis_futures,
runtimes_path=runtimes_path,
rewards_path=rewards_path,
)
def main(argv):
"""Main entry point."""
argv = FLAGS(argv)
if len(argv) != 1:
raise app.UsageError(f"Unknown command line arguments: {argv[1:]}")
with env_from_flags() as env:
action_names = env.action_space.names
print(action_names)
if FLAGS.action:
actions = [env.action_space[a] for a in FLAGS.action]
else:
actions = list(range(len(action_names)))
logs_dir = Path(
        FLAGS.output_dir or create_user_logs_dir("action_sensitivity_analysis")
)
rewards_path = logs_dir / f"actions_{FLAGS.reward}.rewards.csv"
runtimes_path = logs_dir / f"actions_{FLAGS.reward}.runtimes.csv"
run_action_sensitivity_analysis(
rewards_path=rewards_path,
runtimes_path=runtimes_path,
actions=actions,
reward_space=FLAGS.reward,
num_trials=FLAGS.num_action_sensitivity_trials,
max_warmup_steps=FLAGS.max_warmup_steps,
nproc=FLAGS.nproc,
max_attempts_multiplier=FLAGS.max_action_attempts_multiplier,
)
if __name__ == "__main__":
app.run(main)
|
CompilerGym-development
|
examples/sensitivity_analysis/action_sensitivity_analysis.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""End-to-end test of //compiler_gym/bin:action_sensitivity_analysis."""
import tempfile
from pathlib import Path
from absl.flags import FLAGS
from flaky import flaky
from sensitivity_analysis.action_sensitivity_analysis import (
run_action_sensitivity_analysis,
)
from sensitivity_analysis.sensitivity_analysis_eval import run_sensitivity_analysis_eval
@flaky
def test_run_action_sensitivity_analysis():
actions = [0, 1]
env = "llvm-v0"
reward = "IrInstructionCountO3"
benchmark = "cbench-v1/crc32"
FLAGS.unparse_flags()
FLAGS(["argv0", f"--env={env}", f"--benchmark={benchmark}"])
with tempfile.TemporaryDirectory() as tmp:
tmp = Path(tmp)
run_action_sensitivity_analysis(
actions=actions,
rewards_path=tmp / "rewards.txt",
runtimes_path=tmp / "runtimes.txt",
reward_space=reward,
num_trials=2,
max_warmup_steps=5,
nproc=1,
)
assert (tmp / "rewards.txt").is_file()
assert (tmp / "runtimes.txt").is_file()
run_sensitivity_analysis_eval(
rewards_path=tmp / "rewards.txt",
runtimes_path=tmp / "runtimes.txt",
)
|
CompilerGym-development
|
examples/sensitivity_analysis/action_sensitivity_analysis_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Evaluate logs generated by sensitivity analysis.
Usage:
$ bazel run -c opt //compiler_gym/bin:sensitivity_analysis_eval -- \
--output_dir=/path/to/generated/logs \
--analysis=actions_IrInstructionCountO3
"""
import sys
from concurrent.futures import Future, as_completed
from pathlib import Path
from typing import Iterable, NamedTuple
import numpy as np
from absl import app, flags
import compiler_gym.util.flags.output_dir # noqa Flag definition.
from compiler_gym.util.tabulate import tabulate
from compiler_gym.util.timer import humanize_duration
flags.DEFINE_string(
"analysis", None, "The name of the sensitivity analysis logs to read"
)
FLAGS = flags.FLAGS
class SensitivityAnalysisResult(NamedTuple):
"""The result of running a sensitivity analysis."""
# The name of the thing being analyzed (e.g. an action or a benchmark).
name: str
# A list of runtimes, one per observation.
runtimes: np.ndarray
# A list of reward deltas, one per observation.
rewards: np.ndarray
def run_sensitivity_analysis(
analysis_futures: Iterable[Future],
rewards_path: Path,
runtimes_path: Path,
) -> None:
"""Run the provided sensitivity analyses to completion and log the results.
:param analysis_futures: A sequence of future analysis results. The future
        should return a SensitivityAnalysisResult.
:param rewards_path: The path of the CSV file to write rewards to.
:param runtimes_path: The path of the CSV file to write runtimes to.
"""
rewards_path.parent.mkdir(parents=True, exist_ok=True)
runtimes_path.parent.mkdir(parents=True, exist_ok=True)
print(f"Writing rewards to {rewards_path}", file=sys.stderr)
print(f"Writing runtimes to {runtimes_path}", file=sys.stderr)
print("Waiting for first result ... ", end="", flush=True, file=sys.stderr)
with open(str(rewards_path), "w") as rewards_f, open(
str(runtimes_path), "w"
) as runtimes_f:
for i, future in enumerate(as_completed(analysis_futures), start=1):
result: SensitivityAnalysisResult = future.result()
print(
f"\r\033[KCompleted {i} of {len(analysis_futures)} analyses. "
f"Latest: {result.name}, "
f"avg_delta={result.rewards.mean():.5%}, "
f"avg_runtime={humanize_duration(result.runtimes.mean())} ... ",
end="",
flush=True,
file=sys.stderr,
)
print(
result.name,
",".join(str(a) for a in result.rewards),
sep=",",
flush=True,
file=rewards_f,
)
print(
result.name,
",".join(str(a) for a in result.runtimes),
sep=",",
flush=True,
file=runtimes_f,
)
print(flush=True, file=sys.stderr)
return run_sensitivity_analysis_eval(rewards_path, runtimes_path)
def run_sensitivity_analysis_eval(rewards_path: Path, runtimes_path: Path) -> None:
"""Print a summary of sensitivity analysis logs."""
with open(str(rewards_path)) as f:
rewards_in = f.read().rstrip().split("\n")
with open(str(runtimes_path)) as f:
runtimes_in = f.read().rstrip().split("\n")
rows = []
for rewards_row, runtimes_row in zip(rewards_in, runtimes_in):
name, *rewards = rewards_row.split(",")
_, *runtimes = runtimes_row.split(",")
if rewards == [""]:
rows.append((name, "-", "-", "-", "-", "-"))
continue
rewards = np.array([float(v) for v in rewards])
runtimes = np.array([float(v) for v in runtimes])
rows.append(
(
name,
humanize_duration(runtimes.mean()),
f"{rewards.mean():.5%}",
f"{np.median(rewards):.5%}",
f"{rewards.max():.5%}",
f"{rewards.std():.5%}",
)
)
print(
tabulate(
sorted(rows),
headers=(
"Name",
"Time (avg)",
"Ξ (avg)",
"Ξ (median)",
"Ξ (max)",
"Ξ (std.)",
),
)
)
def main(argv):
"""Main entry point."""
argv = FLAGS(argv)
if len(argv) != 1:
raise app.UsageError(f"Unknown command line arguments: {argv[1:]}")
    assert FLAGS.output_dir, "Required argument --output_dir not set"
assert FLAGS.analysis, "Required argument --analysis not set"
output_dir = Path(FLAGS.output_dir)
rewards_path = output_dir / f"{FLAGS.analysis}.rewards.csv"
runtimes_path = output_dir / f"{FLAGS.analysis}.runtimes.csv"
run_sensitivity_analysis_eval(
rewards_path=rewards_path, runtimes_path=runtimes_path
)
if __name__ == "__main__":
app.run(main)
|
CompilerGym-development
|
examples/sensitivity_analysis/sensitivity_analysis_eval.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Estimate the cumulative reward of random episodes on benchmarks.
This script estimates the cumulative reward for a random episode on a benchmark
by running trials. A trial is an episode in which a random number of random
actions are performed and the total cumulative reward is recorded.
Example Usage
-------------
Evaluate the impact on LLVM codesize of random actions on the cBench-crc32
benchmark:
$ python -m sensitivity_analysis.benchmark_sensitivity_analysis \
--env=llvm-v0 --reward=IrInstructionCountO3 \
--benchmark=cbench-v1/crc32 --num_benchmark_sensitivity_trials=25
Evaluate the LLVM codesize episode reward on all benchmarks:
$ python -m sensitivity_analysis.benchmark_sensitivity_analysis \
--env=llvm-v0 --reward=IrInstructionCountO3
"""
import random
from concurrent.futures import ThreadPoolExecutor
from itertools import islice
from pathlib import Path
from typing import List, Optional, Union
import numpy as np
from absl import app, flags
from sensitivity_analysis.sensitivity_analysis_eval import (
SensitivityAnalysisResult,
run_sensitivity_analysis,
)
import compiler_gym.util.flags.nproc # noqa
from compiler_gym.envs import CompilerEnv
from compiler_gym.service.proto import Benchmark
from compiler_gym.util.flags.benchmark_from_flags import benchmark_from_flags
from compiler_gym.util.flags.env_from_flags import env_from_flags
from compiler_gym.util.runfiles_path import create_user_logs_dir
from compiler_gym.util.timer import Timer
flags.DEFINE_integer(
"num_benchmark_sensitivity_trials",
100,
"The number of trials to perform when estimating the episode reward of each benchmark. "
"A trial is a random episode of a benchmark. Increasing this number increases the "
"number of trials performed, leading to a higher fidelity estimate of the reward "
"potential for a benchmark.",
)
flags.DEFINE_integer(
"min_steps",
10,
"The minimum number of random steps to make in a single trial.",
)
flags.DEFINE_integer(
"max_steps",
100,
"The maximum number of random steps to make in a single trial.",
)
flags.DEFINE_integer(
"max_benchmark_attempts_multiplier",
5,
"A trial may fail because the environment crashes, or an action produces an invalid state. "
"Limit the total number of trials performed for each action to "
"max_benchmark_attempts_multiplier * num_trials.",
)
FLAGS = flags.FLAGS
def get_rewards(
benchmark: Union[Benchmark, str],
reward_space: str,
num_trials: int,
min_steps: int,
max_steps: int,
max_attempts_multiplier: int = 5,
) -> SensitivityAnalysisResult:
"""Run random trials to get a list of num_trials episode rewards."""
rewards, runtimes = [], []
num_attempts = 0
while (
num_attempts < max_attempts_multiplier * num_trials
and len(rewards) < num_trials
):
num_attempts += 1
with env_from_flags(benchmark=benchmark) as env:
env.observation_space = None
env.reward_space = None
env.reset(benchmark=benchmark)
benchmark = env.benchmark
with Timer() as t:
reward = run_one_trial(env, reward_space, min_steps, max_steps)
if reward is not None:
rewards.append(reward)
runtimes.append(t.time)
return SensitivityAnalysisResult(
name=env.benchmark, runtimes=np.array(runtimes), rewards=np.array(rewards)
)
def run_one_trial(
env: CompilerEnv, reward_space: str, min_steps: int, max_steps: int
) -> Optional[float]:
"""Run a random number of random steps in an environment and return the
cumulative reward.
:return: A cumulative reward.
"""
num_steps = random.randint(min_steps, max_steps)
warmup_actions = [env.action_space.sample() for _ in range(num_steps)]
env.reward_space = reward_space
_, _, done, _ = env.multistep(warmup_actions)
if done:
return None
return env.episode_reward
def run_benchmark_sensitivity_analysis(
benchmarks: List[Union[Benchmark, str]],
rewards_path: Path,
runtimes_path: Path,
reward: str,
num_trials: int,
min_steps: int,
max_steps: int,
nproc: int,
max_attempts_multiplier: int = 5,
):
"""Estimate the cumulative reward of random walks on a list of benchmarks."""
with ThreadPoolExecutor(max_workers=nproc) as executor:
analysis_futures = [
executor.submit(
get_rewards,
benchmark,
reward,
num_trials,
min_steps,
max_steps,
max_attempts_multiplier,
)
for benchmark in benchmarks
]
return run_sensitivity_analysis(
analysis_futures=analysis_futures,
runtimes_path=runtimes_path,
rewards_path=rewards_path,
)
def main(argv):
"""Main entry point."""
argv = FLAGS(argv)
if len(argv) != 1:
raise app.UsageError(f"Unknown command line arguments: {argv[1:]}")
# Determine the benchmark that is being analyzed, or use all of them.
benchmark = benchmark_from_flags()
if benchmark:
benchmarks = [benchmark]
else:
with env_from_flags() as env:
benchmarks = islice(env.benchmarks, 100)
logs_dir = Path(
FLAGS.output_dir or create_user_logs_dir("benchmark_sensitivity_analysis")
)
rewards_path = logs_dir / f"benchmarks_{FLAGS.reward}.csv"
runtimes_path = logs_dir / f"benchmarks_{FLAGS.reward}_runtimes.csv"
run_benchmark_sensitivity_analysis(
rewards_path=rewards_path,
runtimes_path=runtimes_path,
benchmarks=benchmarks,
reward=FLAGS.reward,
num_trials=FLAGS.num_benchmark_sensitivity_trials,
min_steps=FLAGS.min_steps,
max_steps=FLAGS.max_steps,
nproc=FLAGS.nproc,
max_attempts_multiplier=FLAGS.max_benchmark_attempts_multiplier,
)
if __name__ == "__main__":
app.run(main)
|
CompilerGym-development
|
examples/sensitivity_analysis/benchmark_sensitivity_analysis.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""End-to-end test of //compiler_gym/bin:benchmark_sensitivity_analysis."""
import tempfile
from pathlib import Path
from absl.flags import FLAGS
from sensitivity_analysis.benchmark_sensitivity_analysis import (
run_benchmark_sensitivity_analysis,
)
from sensitivity_analysis.sensitivity_analysis_eval import run_sensitivity_analysis_eval
def test_run_benchmark_sensitivity_analysis():
env = "llvm-v0"
reward = "IrInstructionCountO3"
benchmarks = ["cbench-v1/crc32"]
FLAGS.unparse_flags()
FLAGS(["argv0", f"--env={env}"])
with tempfile.TemporaryDirectory() as tmp:
tmp = Path(tmp)
run_benchmark_sensitivity_analysis(
benchmarks=benchmarks,
rewards_path=tmp / "rewards.txt",
runtimes_path=tmp / "runtimes.txt",
reward=reward,
num_trials=2,
min_steps=3,
max_steps=5,
nproc=1,
)
assert (tmp / "rewards.txt").is_file()
assert (tmp / "runtimes.txt").is_file()
run_sensitivity_analysis_eval(
rewards_path=tmp / "rewards.txt",
runtimes_path=tmp / "runtimes.txt",
)
|
CompilerGym-development
|
examples/sensitivity_analysis/benchmark_sensitivity_analysis_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Smoke test for //benchmarks:parallelization_load_test."""
from pathlib import Path
from absl import flags
from benchmarks.parallelization_load_test import main as load_test
from compiler_gym.util.capture_output import capture_output
from tests.pytest_plugins.common import set_command_line_flags, skip_on_ci
from tests.test_main import main
FLAGS = flags.FLAGS
pytest_plugins = ["tests.pytest_plugins.llvm", "tests.pytest_plugins.common"]
@skip_on_ci
def test_load_test(env, tmpwd):
del env # Unused.
del tmpwd # Unused.
set_command_line_flags(
[
"arv0",
"--env=llvm-v0",
"--benchmark=cbench-v1/crc32",
"--max_nproc=3",
"--nproc_increment=1",
"--num_steps=2",
"--num_episodes=2",
]
)
with capture_output() as out:
load_test(["argv0"])
assert "Run 1 threaded workers in " in out.stdout
assert "Run 1 process workers in " in out.stdout
assert "Run 2 threaded workers in " in out.stdout
assert "Run 2 process workers in " in out.stdout
assert "Run 3 threaded workers in " in out.stdout
assert "Run 3 process workers in " in out.stdout
assert Path("parallelization_load_test.csv").is_file()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
benchmarks/parallelization_load_test_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""A load test for measuring parallelization scalability.
This benchmark runs random episodes with varying numbers of parallel threads and
processes and records the time taken for each. The objective is to compare
performance of a simple random search when parallelized using thread-level
parallelism vs process-based parallelism.
This load test aims to provide a worst-case scenario for multithreading
performance testing: there is no communication or synchronization between
threads and the benchmark is entirely compute bound.
"""
from multiprocessing import Process, cpu_count
from threading import Thread
from absl import app, flags
from compiler_gym.util.flags.benchmark_from_flags import benchmark_from_flags
from compiler_gym.util.flags.env_from_flags import env_from_flags
from compiler_gym.util.timer import Timer
flags.DEFINE_integer("max_nproc", 2 * cpu_count(), "The maximum number of threads.")
flags.DEFINE_integer(
"nproc_increment",
cpu_count() // 4,
"The number of workers to change at each step of the load test.",
)
flags.DEFINE_integer(
"num_episodes", 50, "The number of episodes to run in each worker."
)
flags.DEFINE_integer("num_steps", 50, "The number of steps in each episode.")
flags.DEFINE_string(
"logfile",
"parallelization_load_test.csv",
"The path of the file to write results to.",
)
FLAGS = flags.FLAGS
def run_random_search(num_episodes, num_steps) -> None:
"""The inner loop of a load test benchmark."""
with env_from_flags(benchmark=benchmark_from_flags()) as env:
for _ in range(num_episodes):
env.reset()
for _ in range(num_steps):
_, _, done, _ = env.step(env.action_space.sample())
if done:
break
def main(argv):
assert len(argv) == 1, f"Unknown arguments: {argv[1:]}"
with open(FLAGS.logfile, "w") as f:
print(
"nproc",
"episodes_per_worker",
"steps_per_episode",
"total_episodes",
"thread_steps_per_second",
"process_steps_per_second",
"thread_walltime",
"process_walltime",
sep=",",
file=f,
)
for nproc in [1] + list(
range(FLAGS.nproc_increment, FLAGS.max_nproc + 1, FLAGS.nproc_increment)
):
# Perform the same `nproc * num_episodes` random trajectories first
# using threads, then using processes.
threads = [
Thread(
target=run_random_search,
args=(FLAGS.num_episodes, FLAGS.num_steps),
)
for _ in range(nproc)
]
with Timer(f"Run {nproc} threaded workers") as thread_time:
for thread in threads:
thread.start()
for thread in threads:
thread.join()
processes = [
Process(
target=run_random_search,
args=(FLAGS.num_episodes, FLAGS.num_steps),
)
for _ in range(nproc)
]
with Timer(f"Run {nproc} process workers") as process_time:
for process in processes:
process.start()
for process in processes:
process.join()
print(
nproc,
FLAGS.num_episodes,
FLAGS.num_steps,
FLAGS.num_episodes * nproc,
(FLAGS.num_episodes * FLAGS.num_steps * nproc) / thread_time.time,
(FLAGS.num_episodes * FLAGS.num_steps * nproc) / process_time.time,
thread_time.time,
process_time.time,
sep=",",
file=f,
flush=True,
)
if __name__ == "__main__":
app.run(main)
|
CompilerGym-development
|
benchmarks/parallelization_load_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Microbenchmarks for CompilerGym environments.
To run these benchmarks an optimized build using bazel:
$ bazel test -c opt --test_output=streamed //benchmarks:bench_test
A record of the benchmark results is stored in
/tmp/compiler_gym_<user>/pytest_benchmark/<device>/<run_id>_bench_test.json. Compare
multiple runs using:
$ pytest-benchmark compare --group-by=name --sort=fullname \
/tmp/compiler_gym_<user>/pytest_benchmark/*/*_bench_test.json
"""
from getpass import getuser
import gym
import pytest
import examples.example_compiler_gym_service as dummy
from compiler_gym.envs import CompilerEnv, LlvmEnv, llvm
from compiler_gym.service import CompilerGymServiceConnection
from compiler_gym.service.client_service_compiler_env import ClientServiceCompilerEnv
from tests.pytest_plugins.llvm import OBSERVATION_SPACE_NAMES, REWARD_SPACE_NAMES
from tests.test_main import main
@pytest.fixture(
params=["llvm-v0", "example-cc-v0", "example-py-v0"],
ids=["llvm", "dummy-cc", "dummy-py"],
)
def env_id(request) -> str:
yield request.param
@pytest.fixture(
params=["llvm-v0", "example-cc-v0", "example-py-v0"],
ids=["llvm", "dummy-cc", "dummy-py"],
)
def env(request) -> CompilerEnv:
yield request.param
@pytest.mark.parametrize(
"env_id",
["llvm-v0", "example-cc-v0", "example-py-v0"],
ids=["llvm", "dummy-cc", "dummy-py"],
)
def test_make_local(benchmark, env_id):
benchmark(lambda: gym.make(env_id).close())
@pytest.mark.parametrize(
"args",
[
(llvm.LLVM_SERVICE_BINARY, LlvmEnv),
(dummy.EXAMPLE_CC_SERVICE_BINARY, ClientServiceCompilerEnv),
(dummy.EXAMPLE_PY_SERVICE_BINARY, ClientServiceCompilerEnv),
],
ids=["llvm", "dummy-cc", "dummy-py"],
)
def test_make_service(benchmark, args):
service_binary, env_class = args
service = CompilerGymServiceConnection(service_binary)
try:
benchmark(lambda: env_class(service=service.connection.url).close())
finally:
service.close()
@pytest.mark.parametrize(
"make_env",
[
lambda: gym.make("llvm-autophase-ic-v0", benchmark="cbench-v1/crc32"),
lambda: gym.make("llvm-autophase-ic-v0", benchmark="cbench-v1/jpeg-d"),
lambda: gym.make("example-cc-v0"),
lambda: gym.make("example-py-v0"),
],
ids=["llvm;fast-benchmark", "llvm;slow-benchmark", "dummy-cc", "dummy-py"],
)
def test_reset(benchmark, make_env: CompilerEnv):
with make_env() as env:
benchmark(env.reset)
@pytest.mark.parametrize(
"args",
[
(
lambda: gym.make("llvm-autophase-ic-v0", benchmark="cbench-v1/crc32"),
"-globaldce",
),
(lambda: gym.make("llvm-autophase-ic-v0", benchmark="cbench-v1/crc32"), "-gvn"),
(
lambda: gym.make("llvm-autophase-ic-v0", benchmark="cbench-v1/jpeg-d"),
"-globaldce",
),
(
lambda: gym.make("llvm-autophase-ic-v0", benchmark="cbench-v1/jpeg-d"),
"-gvn",
),
(lambda: gym.make("example-cc-v0"), "a"),
(lambda: gym.make("example-py-v0"), "a"),
],
ids=[
"llvm;fast-benchmark;fast-action",
"llvm;fast-benchmark;slow-action",
"llvm;slow-benchmark;fast-action",
"llvm;slow-benchmark;slow-action",
"dummy-cc",
"dummy-py",
],
)
def test_step(benchmark, args):
make_env, action_name = args
with make_env() as env:
env.reset()
action = env.action_space[action_name]
benchmark(env.step, action)
_args = dict(
{
f"llvm;{obs}": (lambda: gym.make("llvm-v0", benchmark="cbench-v1/qsort"), obs)
for obs in OBSERVATION_SPACE_NAMES
},
**{
"dummy-cc": (lambda: gym.make("example-cc-v0"), "ir"),
"dummy-py": (lambda: gym.make("example-py-v0"), "features"),
},
)
@pytest.mark.parametrize("args", _args.values(), ids=_args.keys())
def test_observation(benchmark, args):
make_env, observation_space = args
with make_env() as env:
env.reset()
benchmark(lambda: env.observation[observation_space])
_args = dict(
{
f"llvm;{reward}": (
lambda: gym.make("llvm-v0", benchmark="cbench-v1/qsort"),
reward,
)
for reward in REWARD_SPACE_NAMES
},
**{
"dummy-cc": (lambda: gym.make("example-cc-v0"), "runtime"),
"dummy-py": (lambda: gym.make("example-py-v0"), "runtime"),
},
)
@pytest.mark.parametrize("args", _args.values(), ids=_args.keys())
def test_reward(benchmark, args):
make_env, reward_space = args
with make_env() as env:
env.reset()
benchmark(lambda: env.reward[reward_space])
@pytest.mark.parametrize(
"make_env",
[
lambda: gym.make("llvm-autophase-ic-v0", benchmark="cbench-v1/crc32"),
lambda: gym.make("llvm-autophase-ic-v0", benchmark="cbench-v1/jpeg-d"),
# TODO: Example service does not yet support fork() operator.
# lambda: gym.make("example-cc-v0"),
# lambda: gym.make("example-py-v0"),
],
ids=["llvm;fast-benchmark", "llvm;slow-benchmark"],
)
def test_fork(benchmark, make_env):
with make_env() as env:
env.reset()
benchmark(lambda: env.fork().close())
if __name__ == "__main__":
main(
extra_pytest_args=[
f"--benchmark-storage=/tmp/compiler_gym_{getuser()}/pytest_benchmark",
"--benchmark-save=bench_test",
"--benchmark-sort=name",
"-x",
],
debug_level=0,
)
|
CompilerGym-development
|
benchmarks/bench_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Based on the 'util/collect_env.py' script from PyTorch.
# <https://github.com/pytorch/pytorch>
#
# From PyTorch:
#
# Copyright (c) 2016- Facebook, Inc (Adam Paszke)
# Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
# Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
# Copyright (c) 2011-2013 NYU (Clement Farabet)
# Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
# Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
# Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
#
# From Caffe2:
#
# Copyright (c) 2016-present, Facebook Inc. All rights reserved.
#
# All contributions by Facebook:
# Copyright (c) 2016 Facebook Inc.
#
# All contributions by Google:
# Copyright (c) 2015 Google Inc.
# All rights reserved.
#
# All contributions by Yangqing Jia:
# Copyright (c) 2015 Yangqing Jia
# All rights reserved.
#
# All contributions by Kakao Brain:
# Copyright 2019-2020 Kakao Brain
#
# All contributions by Cruise LLC:
# Copyright (c) 2022 Cruise LLC.
# All rights reserved.
#
# All contributions from Caffe:
# Copyright(c) 2013, 2014, 2015, the respective contributors
# All rights reserved.
#
# All other contributions:
# Copyright(c) 2015, 2016 the respective contributors
# All rights reserved.
#
# Caffe2 uses a copyright model similar to Caffe: each contributor holds
# copyright over their contributions to Caffe2. The project versioning records
# all such contribution and copyright details. If a contributor wants to further
# mark their specific copyright on a particular contribution, they should
# indicate their copyright solely in the commit message of the change when it is
# committed.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
# and IDIAP Research Institute nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import print_function
# Unlike the rest of PyTorch, this file must be Python 2 compliant.
# This script outputs relevant system environment info
# Run it with `python collect_env.py`.
import locale
import os
import re
import subprocess
import sys
from collections import namedtuple
try:
import compiler_gym
COMPILER_GYM_AVAILABLE = True
except (ImportError, NameError, AttributeError, OSError):
COMPILER_GYM_AVAILABLE = False
# System Environment Information
SystemEnv = namedtuple(
"SystemEnv",
[
"compiler_gym_version",
"is_debug_build",
"gcc_version",
"clang_version",
"cmake_version",
"os",
"libc_version",
"python_version",
"python_platform",
"pip_version", # 'pip' or 'pip3'
"pip_packages",
"conda_packages",
],
)
def run(command):
"""Returns (return-code, stdout, stderr)"""
p = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True
)
raw_output, raw_err = p.communicate()
rc = p.returncode
if get_platform() == "win32":
enc = "oem"
else:
enc = locale.getpreferredencoding()
output = raw_output.decode(enc)
err = raw_err.decode(enc)
return rc, output.strip(), err.strip()
def run_and_read_all(run_lambda, command):
"""Runs command using run_lambda; reads and returns entire output if rc is 0"""
rc, out, _ = run_lambda(command)
if rc != 0:
return None
return out
def run_and_parse_first_match(run_lambda, command, regex):
"""Runs command using run_lambda, returns the first regex match if it exists"""
rc, out, _ = run_lambda(command)
if rc != 0:
return None
match = re.search(regex, out)
if match is None:
return None
return match.group(1)
def run_and_return_first_line(run_lambda, command):
"""Runs command using run_lambda and returns first line if output is not empty"""
rc, out, _ = run_lambda(command)
if rc != 0:
return None
return out.split("\n")[0]
def get_conda_packages(run_lambda):
conda = os.environ.get("CONDA_EXE", "conda")
out = run_and_read_all(run_lambda, conda + " list")
if out is None:
return out
# Comment starting at beginning of line
comment_regex = re.compile(r"^#.*\n")
return re.sub(comment_regex, "", out)
def get_gcc_version(run_lambda):
return run_and_parse_first_match(run_lambda, "gcc --version", r"gcc (.*)")
def get_clang_version(run_lambda):
return run_and_parse_first_match(
run_lambda, "clang --version", r"clang version (.*)"
)
def get_cmake_version(run_lambda):
return run_and_parse_first_match(run_lambda, "cmake --version", r"cmake (.*)")
def get_platform():
if sys.platform.startswith("linux"):
return "linux"
elif sys.platform.startswith("win32"):
return "win32"
elif sys.platform.startswith("cygwin"):
return "cygwin"
elif sys.platform.startswith("darwin"):
return "darwin"
else:
return sys.platform
def get_mac_version(run_lambda):
return run_and_parse_first_match(run_lambda, "sw_vers -productVersion", r"(.*)")
def get_windows_version(run_lambda):
system_root = os.environ.get("SYSTEMROOT", "C:\\Windows")
wmic_cmd = os.path.join(system_root, "System32", "Wbem", "wmic")
findstr_cmd = os.path.join(system_root, "System32", "findstr")
return run_and_read_all(
run_lambda, "{} os get Caption | {} /v Caption".format(wmic_cmd, findstr_cmd)
)
def get_lsb_version(run_lambda):
return run_and_parse_first_match(
run_lambda, "lsb_release -a", r"Description:\t(.*)"
)
def check_release_file(run_lambda):
return run_and_parse_first_match(
run_lambda, "cat /etc/*-release", r'PRETTY_NAME="(.*)"'
)
def get_os(run_lambda):
from platform import machine
platform = get_platform()
if platform == "win32" or platform == "cygwin":
return get_windows_version(run_lambda)
if platform == "darwin":
version = get_mac_version(run_lambda)
if version is None:
return None
return "macOS {} ({})".format(version, machine())
if platform == "linux":
# Ubuntu/Debian based
desc = get_lsb_version(run_lambda)
if desc is not None:
return "{} ({})".format(desc, machine())
# Try reading /etc/*-release
desc = check_release_file(run_lambda)
if desc is not None:
return "{} ({})".format(desc, machine())
return "{} ({})".format(platform, machine())
# Unknown platform
return platform
def get_python_platform():
import platform
return platform.platform()
def get_libc_version():
import platform
if get_platform() != "linux":
return "N/A"
return "-".join(platform.libc_ver())
def indent(s):
return " " + "\n ".join(s.split("\n"))
def get_pip_packages(run_lambda):
"""Returns `pip list` output. Note: will also find conda-installed pytorch
and numpy packages."""
# People generally have `pip` as `pip` or `pip3`
    # But here it is invoked as `python -mpip`
def run_with_pip(pip):
return run_and_read_all(run_lambda, pip + " list --format=freeze")
pip_version = "pip3" if sys.version[0] == "3" else "pip"
out = run_with_pip(sys.executable + " -mpip")
return pip_version, out
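# Illustrative note (the package names and versions below are hypothetical):
# `pip list --format=freeze` emits one "name==version" line per installed
# package, for example:
#
#   compiler-gym==0.2.5
#   numpy==1.23.0
#
# so the string returned by get_pip_packages() can be split on newlines when
# individual entries are needed.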
def get_cachingallocator_config():
ca_config = os.environ.get("PYTORCH_CUDA_ALLOC_CONF", "")
return ca_config
def get_env_info():
run_lambda = run
pip_version, pip_list_output = get_pip_packages(run_lambda)
if COMPILER_GYM_AVAILABLE:
version_str = compiler_gym.__version__
# NOTE(cummins): CompilerGym does not yet have a debug string.
debug_mode_str = "N/A"
else:
version_str = debug_mode_str = "N/A"
sys_version = sys.version.replace("\n", " ")
return SystemEnv(
compiler_gym_version=version_str,
is_debug_build=debug_mode_str,
python_version="{} ({}-bit runtime)".format(
sys_version, sys.maxsize.bit_length() + 1
),
python_platform=get_python_platform(),
pip_version=pip_version,
pip_packages=pip_list_output,
conda_packages=get_conda_packages(run_lambda),
os=get_os(run_lambda),
libc_version=get_libc_version(),
gcc_version=get_gcc_version(run_lambda),
clang_version=get_clang_version(run_lambda),
cmake_version=get_cmake_version(run_lambda),
)
env_info_fmt = """
CompilerGym: {compiler_gym_version}
Is debug build: {is_debug_build}
Python version: {python_version}
Python platform: {python_platform}
OS: {os}
GCC version: {gcc_version}
Clang version: {clang_version}
CMake version: {cmake_version}
Libc version: {libc_version}
Versions of all installed libraries:
{pip_packages}
{conda_packages}
""".strip()
def pretty_str(envinfo):
def replace_nones(dct, replacement="Could not collect"):
for key in dct.keys():
if dct[key] is not None:
continue
dct[key] = replacement
return dct
def replace_bools(dct, true="Yes", false="No"):
for key in dct.keys():
if dct[key] is True:
dct[key] = true
elif dct[key] is False:
dct[key] = false
return dct
def prepend(text, tag="[prepend]"):
lines = text.split("\n")
updated_lines = [tag + line for line in lines]
return "\n".join(updated_lines)
def replace_if_empty(text, replacement="No relevant packages"):
if text is not None and len(text) == 0:
return replacement
return text
def maybe_start_on_next_line(string):
# If `string` is multiline, prepend a \n to it.
if string is not None and len(string.split("\n")) > 1:
return "\n{}\n".format(string)
return string
mutable_dict = envinfo._asdict()
# Replace True with Yes, False with No
mutable_dict = replace_bools(mutable_dict)
# Replace all None objects with 'Could not collect'
mutable_dict = replace_nones(mutable_dict)
# If either of these are '', replace with 'No relevant packages'
mutable_dict["pip_packages"] = replace_if_empty(mutable_dict["pip_packages"])
mutable_dict["conda_packages"] = replace_if_empty(mutable_dict["conda_packages"])
# Tag conda and pip packages with a prefix
# If they were previously None, they'll show up as ie '[conda] Could not collect'
if mutable_dict["pip_packages"]:
mutable_dict["pip_packages"] = prepend(
mutable_dict["pip_packages"], " [{}] ".format(envinfo.pip_version)
)
if mutable_dict["conda_packages"]:
mutable_dict["conda_packages"] = prepend(
mutable_dict["conda_packages"], " [conda] "
)
return env_info_fmt.format(**mutable_dict)
def get_pretty_env_info():
return pretty_str(get_env_info())
def main():
print("Collecting environment information...")
print()
print(pretty_str(get_env_info()))
if __name__ == "__main__":
main()
|
CompilerGym-development
|
build_tools/collect_env.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Configuration for building an action space from a list of LLVM passes."""
from common import Pass
# A set of headers that must be included to use the generated pass list.
LLVM_ACTION_INCLUDES = {
"llvm/LinkAllPasses.h",
# A handle of coroutine utility passes are not pulled in by the
# LinkAllPasses.h header.
"llvm/Transforms/Coroutines.h",
}
# A mapping from the name of a pass as defined in a INITIALIZE_PASS(name, ...)
# macro invocation to the name of the pass as defined in the createPASS();
# factory function. Not all passes are named consistently.
CREATE_PASS_NAME_MAP = {
"ADCELegacyPass": "AggressiveDCEPass",
"AddDiscriminatorsLegacyPass": "AddDiscriminatorsPass",
"AggressiveInstCombinerLegacyPass": "AggressiveInstCombinerPass",
"AlignmentFromAssumptions": "AlignmentFromAssumptionsPass",
"ArgPromotion": "ArgumentPromotionPass",
"BarrierNoop": "BarrierNoopPass",
"BDCELegacyPass": "BitTrackingDCEPass",
"BlockExtractor": "BlockExtractorPass",
"BreakCriticalEdges": "BreakCriticalEdgesPass",
"CalledValuePropagationLegacyPass": "CalledValuePropagationPass",
"CallSiteSplittingLegacyPass": "CallSiteSplittingPass",
"CanonicalizeAliasesLegacyPass": "CanonicalizeAliasesPass",
"CFGSimplifyPass": "CFGSimplificationPass",
"CFGuard": ["CFGuardCheckPass", "CFGuardDispatchPass"],
"ConstantHoistingLegacyPass": "ConstantHoistingPass",
"ConstantMergeLegacyPass": "ConstantMergePass",
"ConstantPropagation": "ConstantPropagationPass",
"CoroCleanupLegacy": "CoroCleanupLegacyPass",
"CoroEarlyLegacy": "CoroEarlyLegacyPass",
"CoroElideLegacy": "CoroElideLegacyPass",
"CoroSplitLegacy": "CoroSplitLegacyPass",
"CorrelatedValuePropagation": "CorrelatedValuePropagationPass",
"CrossDSOCFI": "CrossDSOCFIPass",
"DAE": "DeadArgEliminationPass",
"DataFlowSanitizer": "DataFlowSanitizerPass",
"DCELegacyPass": "DeadCodeEliminationPass",
"DeadInstElimination": "DeadInstEliminationPass",
"DivRemPairsLegacyPass": "DivRemPairsPass",
"DSELegacyPass": "DeadStoreEliminationPass",
"EarlyCSELegacyPass": "EarlyCSEPass",
"EarlyCSEMemSSALegacyPass": "EarlyCSEMemSSAPass",
"EliminateAvailableExternallyLegacyPass": "EliminateAvailableExternallyPass",
"EntryExitInstrumenter": "EntryExitInstrumenterPass",
"Float2IntLegacyPass": "Float2IntPass",
"FunctionImportLegacyPass": "FunctionImportPass",
"GCOVProfilerLegacyPass": "GCOVProfilerPass",
"GlobalDCELegacyPass": "GlobalDCEPass",
"GlobalOptLegacyPass": "GlobalOptimizerPass",
"GlobalSplit": "GlobalSplitPass",
"GuardWideningLegacyPass": "GuardWideningPass",
"GVNHoistLegacyPass": "GVNHoistPass",
"GVNLegacyPass": "GVNPass",
"GVNSinkLegacyPass": "GVNSinkPass",
"HotColdSplittingLegacyPass": "HotColdSplittingPass",
"ICPPass": "IPConstantPropagationPass",
"IndVarSimplifyLegacyPass": "IndVarSimplifyPass",
"InferAddressSpaces": "InferAddressSpacesPass",
"InjectTLIMappingsLegacy": "InjectTLIMappingsLegacyPass",
"InstNamer": "InstructionNamerPass",
"InstrOrderFileLegacyPass": "InstrOrderFilePass",
"InternalizeLegacyPass": "InternalizePass",
"IPCP": "IPConstantPropagationPass",
"IPSCCPLegacyPass": "IPSCCPPass",
"IRCELegacyPass": "InductiveRangeCheckEliminationPass",
"JumpThreading": "JumpThreadingPass",
"LCSSAWrapperPass": "LCSSAPass",
"LegacyLICMPass": "LICMPass",
"LegacyLoopSinkPass": "LoopSinkPass",
"LibCallsShrinkWrapLegacyPass": "LibCallsShrinkWrapPass",
"LoadStoreVectorizerLegacyPass": "LoadStoreVectorizerPass",
"LoopDataPrefetchLegacyPass": "LoopDataPrefetchPass",
"LoopDeletionLegacyPass": "LoopDeletionPass",
"LoopDistributeLegacy": "LoopDistributePass",
"LoopExtractor": "LoopExtractorPass",
"LoopFuseLegacy": "LoopFusePass",
"LoopGuardWideningLegacyPass": "LoopGuardWideningPass",
"LoopIdiomRecognizeLegacyPass": "LoopIdiomPass",
"LoopInstSimplifyLegacyPass": "LoopInstSimplifyPass",
"LoopInterchange": "LoopInterchangePass",
"LoopLoadElimination": "LoopLoadEliminationPass",
"LoopPredicationLegacyPass": "LoopPredicationPass",
"LoopReroll": "LoopRerollPass",
"LoopRotateLegacyPass": "LoopRotatePass",
"LoopSimplify": "LoopSimplifyPass",
"LoopSimplifyCFGLegacyPass": "LoopSimplifyCFGPass",
"LoopStrengthReduce": "LoopStrengthReducePass",
"LoopUnroll": "LoopUnrollPass",
"LoopUnrollAndJam": "LoopUnrollAndJamPass",
"LoopUnswitch": "LoopUnswitchPass",
"LoopVectorize": "LoopVectorizePass",
"LoopVersioningLICM": "LoopVersioningLICMPass",
"LowerAtomicLegacyPass": "LowerAtomicPass",
"LowerConstantIntrinsics": "LowerConstantIntrinsicsPass",
"LowerExpectIntrinsic": "LowerExpectIntrinsicPass",
"LowerGuardIntrinsicLegacyPass": "LowerGuardIntrinsicPass",
"LowerInvokeLegacyPass": "LowerInvokePass",
"LowerMatrixIntrinsicsLegacyPass": "LowerMatrixIntrinsicsPass",
"LowerSwitch": "LowerSwitchPass",
"LowerWidenableConditionLegacyPass": "LowerWidenableConditionPass",
"MemCpyOptLegacyPass": "MemCpyOptPass",
"MemorySanitizerLegacyPass": "MemorySanitizerLegacyPassPass",
"MergedLoadStoreMotionLegacyPass": "MergedLoadStoreMotionPass",
"MergeFunctionsLegacyPass": "MergeFunctionsPass",
"MetaRenamer": "MetaRenamerPass",
"ModuleAddressSanitizerLegacyPass": "ModuleAddressSanitizerLegacyPassPass",
"ModuleSanitizerCoverageLegacyPass": "ModuleSanitizerCoverageLegacyPassPass",
"NameAnonGlobalLegacyPass": "NameAnonGlobalPass",
"NaryReassociateLegacyPass": "NaryReassociatePass",
"NewGVNLegacyPass": "NewGVNPass",
"ObjCARCAPElim": "ObjCARCAPElimPass",
"ObjCARCContract": "ObjCARCContractPass",
"ObjCARCExpand": "ObjCARCExpandPass",
"ObjCARCOpt": "ObjCARCOptPass",
"PAEval": "PAEvalPass",
"PartialInlinerLegacyPass": "PartialInliningPass",
"PartiallyInlineLibCallsLegacyPass": "PartiallyInlineLibCallsPass",
"PlaceSafepoints": "PlaceSafepointsPass",
"PostInlineEntryExitInstrumenter": "PostInlineEntryExitInstrumenterPass",
"PromoteLegacyPass": "PromoteMemoryToRegisterPass",
"PruneEH": "PruneEHPass",
"ReassociateLegacyPass": "ReassociatePass",
"RedundantDbgInstElimination": "RedundantDbgInstEliminationPass",
"RegToMem": "DemoteRegisterToMemoryPass",
"ReversePostOrderFunctionAttrsLegacyPass": "ReversePostOrderFunctionAttrsPass",
"RewriteSymbolsLegacyPass": "RewriteSymbolsPass",
"SampleProfileLoaderLegacyPass": "SampleProfileLoaderPass",
"ScalarizerLegacyPass": "ScalarizerPass",
"SCCPLegacyPass": "SCCPPass",
"SeparateConstOffsetFromGEP": "SeparateConstOffsetFromGEPPass",
"SimpleInliner": "FunctionInliningPass",
"SingleLoopExtractor": "SingleLoopExtractorPass",
"SinkingLegacyPass": "SinkingPass",
"SLPVectorizer": "SLPVectorizerPass",
"SpeculativeExecutionLegacyPass": "SpeculativeExecutionPass",
"SROALegacyPass": "SROAPass",
"StraightLineStrengthReduce": "StraightLineStrengthReducePass",
"StripDeadDebugInfo": "StripDeadDebugInfoPass",
"StripDeadPrototypesLegacyPass": "StripDeadPrototypesPass",
"StripDebugDeclare": "StripDebugDeclarePass",
"StripNonDebugSymbols": "StripNonDebugSymbolsPass",
"StripNonLineTableDebugInfo": "StripNonLineTableDebugInfoPass",
"StripSymbols": "StripSymbolsPass",
"StructurizeCFG": "StructurizeCFGPass",
"TailCallElim": "TailCallEliminationPass",
"ThreadSanitizerLegacyPass": "ThreadSanitizerLegacyPassPass",
"UnifyFunctionExitNodes": "UnifyFunctionExitNodesPass",
}
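# A minimal sketch (not used by the build scripts; the helper name is
# hypothetical): most CREATE_PASS_NAME_MAP entries map a legacy pass name to a
# single create*Pass() suffix, but some (e.g. "CFGuard") map to a list, so
# lookups must handle both shapes.
def _example_factory_names(initialize_pass_name: str):
    """Return the list of create*Pass() suffixes for an INITIALIZE_PASS name."""
    mapped = CREATE_PASS_NAME_MAP.get(initialize_pass_name, initialize_pass_name)
    return [mapped] if isinstance(mapped, str) else list(mapped)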
# A list of pass names that should be excluded from the action space.
_EXCLUDED_PASSES = {
# Irrelevant garbage collection passes.
"StripGCRelocates",
"PlaceBackedgeSafepointsImpl",
"PlaceSafepointsPass",
"RewriteStatepointsForGclegacyPass",
# Irrelevant Objective-C Automatic Reference Counting passes.
"ObjCARCAAWrapperPass",
"ObjCARCAPElim",
"ObjCARCAPElimPass",
"ObjCARCContractPass",
"ObjCARCExpandPass",
"ObjCARCOptPass",
# Doesn't use legacy pass constructor API, or requires additional
# constructor arguments that are not available.
"WholeProgramDevirt",
"MakeGuardsExplicitLegacyPass",
"LowerTypeTests",
# Unneeded debugging passes.
"WriteThinLTOBitcode",
"PredicateInfoPrinterLegacyPass",
"WarnMissedTransformationsLegacy",
"DAH", # Bugpoint only.
"MetaRenamerPass",
"PAEvalPass",
"BarrierNoop", # Used for debugging pass manager.
"StripNonLineTableDebugInfoPass", # Debug stripping.
"StripDeadDebugInfoPass", # Debug stripping.
"LoopExtractorPass", # Pulls out loops into functions. Changes semantics.
"SingleLoopExtractorPass", # Pulls out loops into functions. Changes semantics.
"BlockExtractorPass", # Pulls out blocks into functions. Changes semantics.
# Unwanted instrumentation passes.
"BoundsCheckingLegacyPass", # Inserts traps on illegal access. Changes semantics.
"ASanGlobalsMetadataWrapperPass",
"AddressSanitizerLegacyPass",
"HWAddressSanitizerLegacyPass",
"SampleProfileLoaderPass",
"MemorySanitizerLegacyPassPass",
"ThreadSanitizerLegacyPassPass",
"ModuleAddressSanitizerLegacyPassPass",
"FunctionImportPass",
"DataFlowSanitizerPass",
"InstrOrderFilePass",
"PostInlineEntryExitInstrumenter",
# Profile-guided optimization or profiling.
"PGOIndirectCallPromotionLegacyPass",
"PGOInstrumentationUseLegacyPass",
"PGOInstrumentationGenCreateVarLegacyPass",
"PGOInstrumentationGenLegacyPass",
"PGOInstrumentationUseLegacyPass",
"PGOMemOpsizeOptLegacyPass",
"PgomemOpsizeOptLegacyPass",
"InstrProfilingLegacyPass",
"ControlHeightReductionLegacyPass",
# Unneeded symbol rewriting pass.
"RewriteSymbolsPass",
# Microsoft's Control Flow Guard checks on Windows targets.
# https://llvm.org/doxygen/CFGuard_8cpp.html
"CFGuardCheckPass",
"CFGuardDispatchPass",
# We don't want to change the visibility of symbols.
"InternalizePass",
# NOTE(github.com/facebookresearch/CompilerGym/issues/103): The
# -structurizecg has been found to break the semantics of cBench benchmarks
# ghostscript and tiff2bw.
"StructurizeCFGPass",
# NOTE(github.com/facebookresearch/CompilerGym/issues/46): The -gvn-sink
# pass has been found to produce different states when run multiple times
# on the same input.
"GVNSinkPass",
}
# The name of the LLVM target to extract architecture-specific transforms for.
_TARGET = "X86"
def include_pass(pass_: Pass) -> bool:
"""Determine whether the pass should be included in the generated C++ sources."""
if pass_.name in _EXCLUDED_PASSES:
return False
return "lib/Transforms" in pass_.source or f"Targets/{_TARGET}" in pass_.source
|
CompilerGym-development
|
build_tools/llvm/legacy_pass_manager/config.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import NamedTuple, Optional
class Pass(NamedTuple):
"""The declaration of an LLVM pass."""
# The name of the pass, e.g. "AddDiscriminatorsPass".
name: str
# The opt commandline flag which turns this pass on, e.g. "-add-discriminators".
flag: str
# The docstring for this pass, as reported by `opt -help`. E.g. "Add DWARF path discriminators".
description: str
# The path of the C++ file which defines this pass, relative to the LLVM source tree root.
source: str
# The path of the C++ header which declares this pass, relative to the LLVM source tree root.
# If the header path could not be inferred, this is None.
header: Optional[str]
# Boolean flags set in INITIALIZE_PASS().
cfg: bool
is_analysis: bool
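# Note (illustrative, not part of the build): the sibling extraction and
# filtering scripts serialize Pass records to CSV in the field order declared
# above (see Pass._fields) and rebuild them with Pass(*row), so boolean fields
# round-trip as the strings "True"/"False" rather than Python bools.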
|
CompilerGym-development
|
build_tools/llvm/legacy_pass_manager/common.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Filter the list of LLVM passes to use as an action space.
This script reads a list of passes from stdin and, for each, calls
config.include_pass() to determine whether it should be printed to stdout.
"""
import csv
import logging
import sys
from typing import Iterable
import config
from common import Pass
logger = logging.getLogger(__name__)
def filter_passes(pass_iterator: Iterable[Pass]) -> Iterable[Pass]:
"""Apply config.include_pass() to an input sequence of passes.
:param pass_iterator: An iterator over Pass objects.
:returns: A subset of the input Pass iterator.
"""
total_count = 0
selected_count = 0
for pass_ in pass_iterator:
total_count += 1
if config.include_pass(pass_):
selected_count += 1
logger.debug(
f"Selected {pass_.name} pass ({pass_.flag}) from {pass_.source}",
)
yield pass_
print(
f"Selected {selected_count} of {total_count} LLVM passes to use as actions",
file=sys.stderr,
)
def main(argv):
"""Main entry point."""
del argv
reader = csv.reader(sys.stdin, delimiter=",", quotechar='"')
next(reader)
pass_iterator = (Pass(*row) for row in reader)
filtered_passes = filter_passes(pass_iterator)
writer = csv.writer(sys.stdout, delimiter=",", quotechar='"')
writer.writerow(Pass._fields)
writer.writerows(sorted(list(filtered_passes), key=lambda r: r.name))
if __name__ == "__main__":
main(sys.argv)
|
CompilerGym-development
|
build_tools/llvm/legacy_pass_manager/filter_action_space.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Generate specifications for the LLVM service capabilities.
Usage: make_specs.py <service_binary> <flag_descriptions> <output_path>.
"""
import signal
# TODO: As we add support for more compilers we could generalize this script
# to work with other compiler services rather than hardcoding to LLVM.
import sys
from pathlib import Path
from compiler_gym.envs.llvm.llvm_env import LlvmEnv
# The maximum number of seconds to wait before timing out.
TIMEOUT_SECONDS = 300
def timeout_handler(signum, frame):
del signum # unused
del frame # unused
print(f"error: Timeout reached after {TIMEOUT_SECONDS:,d} seconds", file=sys.stderr)
sys.exit(1)
def main(argv):
assert (
len(argv) == 3
), "Usage: make_specs.py <service_binary> <flag_descriptions> <output_path>"
service_path, flag_descriptions, output_path = argv[1:]
signal.signal(signal.SIGALRM, timeout_handler)
signal.alarm(TIMEOUT_SECONDS)
with open(flag_descriptions) as f:
flag_descriptions = [ln.rstrip() for ln in f.readlines()]
with LlvmEnv(Path(service_path)) as env:
with open(output_path, "w") as f:
print("from enum import Enum", file=f)
print(file=f)
print("class observation_spaces(Enum):", file=f)
for name in env.observation.spaces:
print(f' {name} = "{name}"', file=f)
print(file=f)
print("class reward_spaces(Enum):", file=f)
for name in env.reward.spaces:
print(f' {name} = "{name}"', file=f)
print(file=f)
print("class actions(Enum):", file=f)
for name in env.action_space.names:
enum_name = "".join([x.capitalize() for x in name[1:].split("-")])
print(f' {enum_name} = "{name}"', file=f)
print(file=f)
print("class action_descriptions(Enum):", file=f)
for name, description in zip(env.action_space.names, flag_descriptions):
enum_name = "".join([x.capitalize() for x in name[1:].split("-")])
sanitized_description = description.replace('" "', "")
sanitized_description = sanitized_description.replace('"', "")
print(f' {enum_name} = "{sanitized_description}"', file=f)
if __name__ == "__main__":
main(sys.argv)
|
CompilerGym-development
|
build_tools/llvm/legacy_pass_manager/make_specs.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Build generated files from a list of passes.
This script reads from stdin a list of passes and generates files so that these
passes can be used as an action space.
Usage:
$ make_action_space_genfiles.py <output-directory> < <pass-list>
The following files are generated:
<outdir>/ActionHeaders.h
------------------------
Example:
#pragma once
#include "llvm/LinkAllPasses.h"
#include "llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h"
...
This file includes the set of LLVM headers that must be included to use the
passes.
<outdir>/ActionEnum.h
---------------------
Example:
enum class LlvmAction {
ADD_DISCRIMINATORS_PASS,
AGGRESSIVE_DCEPASS,
...
}
This defines an enum that names all of the passes.
<outdir>/ActionSwitch.h
-----------------------
Example:
#define HANDLE_ACTION(action, handlePass) \
switch (action) { \
case LlvmAction::ADD_DISCRIMINATORS_PASS: \
handlePass(llvm::createAddDiscriminatorsPass()); \
break; \
case LlvmAction::AGGRESSIVE_DCEPASS: \
handlePass(llvm::createAggressiveDCEPass()); \
break; \
...
}
To use the generated switch, call the HANDLE_ACTION() macro using an
LlvmAction enum value and a handlePass function which accepts a pass
instance as input.
<outdir>/flags.txt
-------------------------
Example:
-add-discriminators
-adce
...
A list of names for each pass.
<outdir>/flag_descriptions.txt
---------------------------------
Example:
Add DWARF path discriminators
Aggressive Dead Code Elimination
...
A list of descriptions of each pass.
"""
import csv
import logging
import sys
from pathlib import Path
from common import Pass
from config import LLVM_ACTION_INCLUDES
logger = logging.getLogger(__name__)
def process_pass(pass_, headers, enum_f, switch_f):
"""Extract and process transform passes in header."""
if pass_.header:
# Strip a leading "include/" from the header path.
header = pass_.header
if header.startswith("include/"):
header = header[len("include/") :]
headers.add(header)
# The name of the pass in UPPER_PASCAL_CASE.
enum_name = pass_.flag[1:].replace("-", "_").upper()
print(f" {enum_name},", file=enum_f)
print(f" case LlvmAction::{enum_name}: \\", file=switch_f)
print(f" handlePass(llvm::create{pass_.name}()); \\", file=switch_f)
print(" break; \\", file=switch_f)
def make_action_sources(pass_iterator, outpath: Path):
"""Generate the enum and switch content."""
total_passes = 0
headers = set(LLVM_ACTION_INCLUDES)
passes = sorted(list(pass_iterator), key=lambda p: p.name)
switch_path = Path(outpath / "ActionSwitch.h")
enum_path = Path(outpath / "ActionEnum.h")
include_path = Path(outpath / "ActionHeaders.h")
flags_path = Path(outpath / "flags.txt")
descriptions_path = Path(outpath / "flag_descriptions.txt")
with open(switch_path, "w", encoding="utf-8") as switch_f, open(
enum_path, "w", encoding="utf-8"
) as enum_f:
print("enum class LlvmAction {", file=enum_f)
print("#define HANDLE_ACTION(action, handlePass) \\", file=switch_f)
print(" switch (action) { \\", file=switch_f)
for pass_ in passes:
total_passes += 1
process_pass(pass_, headers, enum_f, switch_f)
print("};", file=enum_f)
print(" }", file=switch_f)
logger.debug("Generated %s", switch_path.name)
logger.debug("Generated %s", enum_path.name)
with open(include_path, "w", encoding="utf-8") as f:
print("#pragma once", file=f)
for header in sorted(headers):
print(f'#include "{header}"', file=f)
logger.debug("Generated %s", include_path.name)
with open(flags_path, "w", encoding="utf-8") as f:
print("\n".join(p.flag for p in passes), file=f)
logger.debug("Generated %s", flags_path.name)
with open(descriptions_path, "w", encoding="utf-8") as f:
print("\n".join(p.description for p in passes), file=f)
logger.debug("Generated %s", descriptions_path.name)
logger.debug("Created genfiles for %s pass actions", total_passes)
def main(argv):
"""Main entry point."""
outpath = Path(argv[1])
assert outpath.is_dir(), f"Output directory not found: {outpath}"
reader = csv.reader(sys.stdin, delimiter=",", quotechar='"')
next(reader)
outpath = Path(outpath).absolute().resolve()
pass_iterator = (Pass(*row) for row in reader)
make_action_sources(pass_iterator, outpath)
if __name__ == "__main__":
main(sys.argv)
|
CompilerGym-development
|
build_tools/llvm/legacy_pass_manager/make_action_space_genfiles.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Extract a list of passes form the LLVM source tree.
Usage:
$ python extract_passes_from_llvm_source_tree.py /path/to/llvm-project/llvm
Optionally accepts a list of specific files to examine:
$ python extract_passes_from_llvm_source_tree.py \
/path/to/llvm-project/llvm /path/to/llvm/source/file
Implementation notes
--------------------
This implements a not-very-good parser for the INITIALIZE_PASS() family of
macros, which are used in the LLVM sources to declare a pass using its name,
flag, and docstring. Parsing known macros like this is fragile and likely to
break as the LLVM sources evolve. Currently only tested on LLVM 10.0 and 13.0.1.
A more robust solution would be to parse the C++ sources and extract all classes
which inherit from ModulePass etc.
"""
import codecs
import csv
import logging
import os
import re
import shlex
import subprocess
import sys
from pathlib import Path
from typing import Dict, Iterable, List, Optional, Tuple
from common import Pass
from config import CREATE_PASS_NAME_MAP
logger = logging.getLogger(__name__)
# A regular expression to match the start of an invocation of one of the
# InitializePass helper macros.
INITIALIZE_PASS_RE = r"(INITIALIZE_PASS|INITIALIZE_PASS_BEGIN|INITIALIZE_PASS_WITH_OPTIONS|INITIALIZE_PASS_WITH_OPTIONS_BEGIN)\("
# A regular expression to match static const string definitions.
CONST_CHAR_RE = r'^\s*static\s+const\s+char(\s+(?P<name>[a-zA-Z_]+)\s*\[\s*\]|\s*\*\s*(?P<ptr_name>[a-zA-Z_]+))\s*=\s*(?P<value>".+")\s*;'
class ParseError(ValueError):
def __init__(self, message: str, source: str, components: List[str]):
self.message = message
self.source = source
self.components = components
def parse_initialize_pass(
source_path: Path, header: Optional[str], input_source: str, defines: Dict[str, str]
) -> Iterable[Pass]:
"""A shitty parser for INITIALIZE_PASS() macro invocations.."""
# ****************************************************
# __ _
# _/ \ _(\(o
# / \ / _ ^^^o
# / ! \/ ! '!!!v'
# ! ! \ _' ( \____
# ! . \ _!\ \===^\)
# \ \_! / __!
# \! / \
# (\_ _/ _\ )
# \ ^^--^^ __-^ /(__
# ^^----^^ "^--v'
#
# HERE BE DRAGONS!
#
# TODO(cummins): Take this code out back and shoot it.
# ****************************************************
# Squish down to a single line.
    source = re.sub(r"\n\s*", " ", input_source, flags=re.MULTILINE)
# Contract multi-spaces to single space.
source = re.sub(r",", ", ", source)
source = re.sub(r"\s+", " ", source)
source = re.sub(r"\(\s+", "(", source)
source = re.sub(r"\)\s+", ")", source)
# Strip the INITIALIZE_PASS(...) macro.
match = re.match(rf"^\s*{INITIALIZE_PASS_RE}(?P<args>.+)\)", source)
if not match:
raise ParseError("Failed to match INITIALIZE_PASS regex", source, [])
source = match.group("args")
components = []
start = 0
in_quotes = False
in_comment = False
substr = ""
for i in range(len(source)):
if (
not in_comment
and source[i] == "/"
and i < len(source) - 1
and source[i + 1] == "*"
):
in_comment = True
substr += source[start:i].strip()
if (
in_comment
and source[i] == "*"
and i < len(source) - 1
and source[i + 1] == "/"
):
in_comment = False
start = i + 2
if source[i] == '"':
in_quotes = not in_quotes
if not in_quotes and source[i] == ",":
substr += source[start:i].strip()
components.append(substr)
substr = ""
start = i + 2
components.append(substr + source[start:].strip())
if len(components) != 5:
raise ParseError(
f"Expected 5 components, found {len(components)}", source, components
)
pass_name, arg, name, cfg, analysis = components
# Strip quotation marks in arg and name.
if not arg:
raise ParseError(f"Empty arg: `{arg}`", source, components)
if not name:
raise ParseError(f"Empty name: `{name}`", source, components)
# Dodgy code to combine adjacent strings with macro expansion. For example,
# 'DEBUG_TYPE "-foo"'.
arg_components = shlex.split(arg)
for i, _ in enumerate(arg_components):
while arg_components[i] in defines:
arg_components[i] = defines[arg_components[i]]
arg = " ".join(arg_components)
if arg[0] == '"' and arg[-1] == '"':
arg = arg[1:-1]
while name in defines:
name = defines[name]
if not (name[0] == '"' and name[-1] == '"'):
raise ParseError(f"Could not interpret name `{name}`", source, components)
name = name[1:-1]
# Convert cfg and analysis to bool.
if cfg not in {"true", "false"}:
raise ParseError(
f"Could not interpret bool cfg argument `{cfg}`", source, components
)
if analysis not in {"true", "false"}:
raise ParseError(
f"Could not interpret bool analysis argument `{analysis}`",
source,
components,
)
cfg = cfg == "true"
analysis = analysis == "true"
opts = {
"source": source_path,
"header": header,
"name": pass_name,
"flag": f"-{arg}",
"description": name,
"cfg": cfg,
"is_analysis": analysis,
}
pass_name_or_list = CREATE_PASS_NAME_MAP.get(pass_name, pass_name)
if isinstance(pass_name_or_list, str):
opts["name"] = pass_name_or_list
yield Pass(**opts)
else:
for name in pass_name_or_list:
opts["name"] = name
yield Pass(**opts)
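# Illustrative example (a simplified macro invocation, not taken verbatim from
# the LLVM sources): given
#
#   INITIALIZE_PASS(ADCELegacyPass, "adce",
#                   "Aggressive Dead Code Elimination", false, false)
#
# this parser yields a single Pass with flag "-adce", description
# "Aggressive Dead Code Elimination", cfg=False, is_analysis=False, and the
# name "AggressiveDCEPass" after remapping through CREATE_PASS_NAME_MAP.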
def build_defines(source: str) -> Dict[str, str]:
"""A quick-and-dirty technique to build a translation table from #defines
and string literals to their values."""
defines = {}
lines = source.split("\n")
for i in range(len(lines)):
line = lines[i].strip()
if line.startswith("#define"):
# Match #define strings.
components = line[len("#define ") :].split()
name = components[0]
value = " ".join(components[1:]).strip()
if value == "\\":
value = lines[i + 1].strip()
defines[name] = value
else:
# Match string literals.
match = re.match(CONST_CHAR_RE, line)
if match:
defines[match.group("name") or match.group("ptr_name")] = match.group(
"value"
)
return defines
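# Illustrative example (the source snippet is hypothetical): given the lines
#
#   #define DEBUG_TYPE "adce"
#   static const char name[] = "Aggressive Dead Code Elimination";
#
# build_defines() returns
#   {'DEBUG_TYPE': '"adce"', 'name': '"Aggressive Dead Code Elimination"'}
# which parse_initialize_pass() uses to expand macro arguments such as
# 'DEBUG_TYPE "-foo"'.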
def handle_file(source_path: Path) -> List[Pass]:
"""Parse the passes declared in a file."""
assert str(source_path).endswith(".cpp"), f"Unexpected file type: {source_path}"
header = Path("llvm/" + str(source_path)[len("lib") : -len("cpp")] + "h")
if not header.is_file():
header = ""
with codecs.open(source_path, "r", "utf-8") as f:
source = f.read()
defines = build_defines(source)
passes: List[Pass] = []
for match in re.finditer(INITIALIZE_PASS_RE, source):
start = match.start()
first_bracket = source.find("(", start)
bracket_depth = 1
end = first_bracket
for end in range(first_bracket + 1, len(source)):
if source[end] == "(":
bracket_depth += 1
elif source[end] == ")":
bracket_depth -= 1
if not bracket_depth:
break
try:
passes += list(
parse_initialize_pass(
source_path, header, source[start : end + 1], defines
)
)
except ParseError as e:
print(f"Parsing error: {e.message}", file=sys.stderr)
print(f"Parsed components: {e.components}", file=sys.stderr)
print(f"In line: {e.source}", file=sys.stderr)
print(f"In file: {source_path}", file=sys.stderr)
print("Fatal error. Aborting now.", file=sys.stderr)
sys.exit(1)
if passes:
logger.debug(
f"Extracted {len(passes)} {'passes' if len(passes) - 1 else 'pass'} from {source_path}",
)
else:
logger.debug(f"Found no passes in {source_path}")
return passes
def main(argv):
root = Path(argv[1])
assert root.is_dir(), f"Not a directory: {root}"
os.chdir(root)
if len(argv) > 2:
paths = [Path(path) for path in argv[2:]]
else:
# Get the names of all files which contain a pass definition.
matching_paths = []
try:
grep = subprocess.check_output(
["grep", "-l", "-E", rf"^\s*{INITIALIZE_PASS_RE}", "-R", "lib/"],
universal_newlines=True,
)
except subprocess.CalledProcessError:
print(
f"fatal: Failed to find any LLVM pass declarations in {root}",
file=sys.stderr,
)
sys.exit(1)
matching_paths += grep.strip().split("\n")
logger.debug("Processing %s files ...", len(matching_paths))
paths = [Path(path) for path in matching_paths]
# Build a list of pass entries.
rows = []
for path in sorted(paths):
passes = handle_file(path)
if passes:
rows += passes
writer = csv.writer(sys.stdout, delimiter=",", quotechar='"')
writer.writerow(Pass._fields)
writer.writerows(sorted(rows, key=lambda r: r.name))
if __name__ == "__main__":
main(sys.argv)
|
CompilerGym-development
|
build_tools/llvm/legacy_pass_manager/extract_passes_from_llvm_source_tree.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This module defines the validation result tuple."""
import itertools
import re
from collections import Counter
from typing import Iterable, List
from pydantic import BaseModel, validator
from compiler_gym.compiler_env_state import CompilerEnvState
from compiler_gym.errors import ValidationError
from compiler_gym.util.shell_format import plural
from compiler_gym.util.truncate import truncate
class ValidationResult(BaseModel):
"""A tuple that represents the result of validating a compiler environment state."""
state: CompilerEnvState
"""The compiler environment state that was validated."""
walltime: float
"""The wall time in seconds that the validation took."""
reward_validated: bool = False
"""Whether the reward that was recorded in the original state was validated."""
actions_replay_failed: bool = False
"""Whether the commandline was unable to be reproduced."""
reward_validation_failed: bool = False
"""Whether the validated reward differed from the original state."""
benchmark_semantics_validated: bool = False
"""Whether the semantics of the benchmark were validated."""
benchmark_semantics_validation_failed: bool = False
"""Whether the semantics of the benchmark were found to have changed."""
errors: List[ValidationError] = []
"""A list of :class:`ValidationError <compiler_gym.ValidationError>` """
@validator("walltime")
def walltime_nonnegative(cls, v):
assert v >= 0, "Walltime cannot be negative"
return v
def __eq__(self, rhs):
"""Equality comparison.
Validation results are *not* compared on walltime, and are insensitive
to the order of errors.
"""
if not isinstance(rhs, ValidationResult):
return False
return (
self.state == rhs.state
and self.reward_validated == rhs.reward_validated
and self.actions_replay_failed == rhs.actions_replay_failed
and self.reward_validation_failed == rhs.reward_validation_failed
and self.benchmark_semantics_validated == rhs.benchmark_semantics_validated
and self.benchmark_semantics_validation_failed
== rhs.benchmark_semantics_validation_failed
and sorted(self.errors) == sorted(rhs.errors)
)
def __ne__(self, rhs):
return not self == rhs
@property
def error_details(self) -> str:
"""A summary description of the validation errors."""
if not self.errors:
return ""
msg = []
error_types = [e.type for e in self.errors]
freq = sorted(Counter(error_types).items(), key=lambda x: -x[1])
# Shortcut for when there is just a single message to aggregate. Use
# format: "${error_msg}" if there is a single error or "${n}Γ
# ${error_msg}" if there are multiple copies of the same error.
if len(freq) == 1:
message = str(error_types[0])
if len(error_types) == 1:
return message
return f"{len(error_types)}Γ {message}"
# If there are multiple error messages, number them using the format:
# "[${i}/${j}] ${n}Γ ${error_msg}". E.g. "[1/3] 18Γ Memory leak".
for j, (message, count) in enumerate(freq, start=1):
if count > 1:
msg.append(f"[{j}/{len(freq)}] {count}Γ {message}")
else:
msg.append(f"[{j}/{len(freq)}] {message}")
remaining = len(freq) - j
if j >= 3 and remaining > 3:
msg.append(
f"... ({remaining} more {plural(remaining, 'error', 'errors')})"
)
break
return ", ".join(msg)
def okay(self) -> bool:
"""Whether validation succeeded."""
return not (
self.actions_replay_failed
or self.reward_validation_failed
or self.benchmark_semantics_validation_failed
)
def __repr__(self):
# Remove default-scheme prefix to improve output readability.
benchmark = re.sub(r"^benchmark://", "", str(self.state.benchmark))
if not self.okay():
msg = ", ".join(self.error_details.strip().split("\n"))
return f"β {benchmark} {truncate(msg, max_lines=1, max_line_len=50)}"
elif self.state.reward is None:
return f"β
{benchmark}"
else:
return f"β
{benchmark} {self.state.reward:.4f}"
def __str__(self):
return repr(self)
@classmethod
def join(cls, results: Iterable["ValidationResult"]):
"""Create a validation result that is the union join of multiple results."""
results = list(results)
if not results:
raise ValueError("No states to join")
if any(r.state != results[0].state for r in results[1:]):
raise ValueError("All states must be the same")
return cls(
# NOTE: No checking that states are the same.
state=results[0].state,
walltime=sum(r.walltime for r in results),
reward_validated=any(r.reward_validated for r in results),
actions_replay_failed=any(r.actions_replay_failed for r in results),
reward_validation_failed=any(r.reward_validation_failed for r in results),
benchmark_semantics_validated=any(
r.benchmark_semantics_validated for r in results
),
benchmark_semantics_validation_failed=any(
r.benchmark_semantics_validation_failed for r in results
),
errors=list(itertools.chain.from_iterable(r.errors for r in results)),
)
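# A minimal usage sketch (the variable names are illustrative): merging
# repeated validations of the same state into one result.
#
#   results = [env.validate(state) for _ in range(3)]
#   merged = ValidationResult.join(results)
#   # merged.walltime is the sum of the individual walltimes, and merged.errors
#   # concatenates the errors from every attempt.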
|
CompilerGym-development
|
compiler_gym/validation_result.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import compiler_gym.errors
# Deprecated since v0.2.4.
# This type is for backwards compatibility that will be removed in a future release.
# Please, use errors from `compiler_gym.errors`.
ValidationError = compiler_gym.errors.ValidationError
|
CompilerGym-development
|
compiler_gym/validation_error.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
from argparse import ArgumentParser
def make_config(argv):
parser = ArgumentParser()
parser.add_argument(
"--out-file-path", type=str, required=True, help="Path to the generated config."
)
parser.add_argument("--enable-llvm-env", action="store_true")
parser.add_argument("--enable-mlir-env", action="store_true")
args = parser.parse_args(args=argv[1:])
with open(args.out_file_path, "w") as f:
f.write(f"enable_llvm_env = {args.enable_llvm_env}\n")
f.write(f"enable_mlir_env = {args.enable_mlir_env}\n")
if __name__ == "__main__":
make_config(sys.argv)
|
CompilerGym-development
|
compiler_gym/make_config.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""CompilerGym is a set of compiler optimization environments for reinforcement learning.
After importing this module, the :class:`CompilerGym environments <compiler_gym.envs.CompilerEnv>`
will be available through the :code:`gym.make(...)` interface:
>>> import gym
>>> import compiler_gym
>>> gym.make("llvm-v0")
The list of CompilerGym environments that can be passed to :code:`gym.make(...)`
is available through :code:`compiler_gym.COMPILER_GYM_ENVS`:
>>> import compiler_gym
>>> compiler_gym.COMPILER_GYM_ENVS
['llvm-v0', 'llvm-ic-v0', 'llvm-autophase-ic-v0', 'llvm-ir-ic-v0']
"""
try:
from compiler_gym.util.version import __version__ # isort:skip
except ModuleNotFoundError as e:
# NOTE(https://github.com/facebookresearch/CompilerGym/issues/76): Handler
# for a particularly unhelpful error message.
raise ModuleNotFoundError(
f"{e}.\nAre you running in the root of the CompilerGym repository?\n"
"If so, please change to a different directory so that `import "
"compiler_gym` will work."
) from e
from compiler_gym.compiler_env_state import (
CompilerEnvState,
CompilerEnvStateReader,
CompilerEnvStateWriter,
)
from compiler_gym.envs import COMPILER_GYM_ENVS, CompilerEnv
from compiler_gym.errors import ValidationError
from compiler_gym.random_search import random_search
from compiler_gym.util.debug_util import (
get_debug_level,
get_logging_level,
set_debug_level,
)
from compiler_gym.util.download import download
from compiler_gym.util.registration import make
from compiler_gym.util.runfiles_path import (
cache_path,
site_data_path,
transient_cache_path,
)
from compiler_gym.validate import validate_states
from compiler_gym.validation_result import ValidationResult
# The top-level compiler_gym API.
__all__ = [
"__version__",
"cache_path",
"COMPILER_GYM_ENVS",
"make",
"CompilerEnv",
"CompilerEnvState",
"CompilerEnvStateWriter",
"CompilerEnvStateReader",
"download",
"get_debug_level",
"get_logging_level",
"random_search",
"set_debug_level",
"site_data_path",
"transient_cache_path",
"validate_states",
"ValidationError",
"ValidationResult",
]
|
CompilerGym-development
|
compiler_gym/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Simple parallelized random search."""
import json
import os
from multiprocessing import cpu_count
from pathlib import Path
from threading import Thread
from time import sleep, time
from typing import Callable, List, NamedTuple, Optional, Union
import humanize
from compiler_gym import config
from compiler_gym.envs import CompilerEnv
from compiler_gym.errors import ServiceError
from compiler_gym.util.gym_type_hints import ActionType
from compiler_gym.util.runfiles_path import create_user_logs_dir
from compiler_gym.util.tabulate import tabulate
if config.enable_llvm_env:
from compiler_gym.envs.llvm import LlvmEnv
class RandomSearchProgressLogEntry(NamedTuple):
"""A snapshot of incremental search progress."""
runtime_seconds: float
total_episode_count: int
total_step_count: int
num_passes: int
reward: float
def to_csv(self) -> str:
return ",".join(
[
f"{self.runtime_seconds:.3f}",
str(self.total_episode_count),
str(self.total_step_count),
str(self.num_passes),
str(self.reward),
]
)
@classmethod
def from_csv(cls, line: str) -> "RandomSearchProgressLogEntry":
(
runtime_seconds,
total_episode_count,
total_step_count,
num_passes,
reward,
) = line.split(",")
return RandomSearchProgressLogEntry(
float(runtime_seconds),
int(total_episode_count),
int(total_step_count),
int(num_passes),
float(reward),
)
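# Round-trip sketch (the values are illustrative):
#
#   entry = RandomSearchProgressLogEntry(
#       runtime_seconds=1.5, total_episode_count=10,
#       total_step_count=250, num_passes=12, reward=0.75,
#   )
#   line = entry.to_csv()  # -> "1.500,10,250,12,0.75"
#   assert RandomSearchProgressLogEntry.from_csv(line) == entry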
class RandomAgentWorker(Thread):
"""Worker thread to run a repeating agent.
To stop the agent, set the :code:`alive` attribute of this thread to False.
"""
def __init__(
self,
make_env: Callable[[], CompilerEnv],
patience: int,
):
super().__init__()
self._make_env = make_env
self._patience = patience
# Incremental progress.
self.total_environment_count = 0
self.total_episode_count = 0
self.total_step_count = 0
self.best_returns = -float("inf")
self.best_actions: List[ActionType] = []
        self.best_commandline: str = ""
self.best_found_at_time = time()
self.alive = True # Set this to False to signal the thread to stop.
@property
def should_run_one_episode(self) -> bool:
"""Whether to run an episode."""
return self.alive or not self.total_episode_count
def run(self) -> None:
"""Run episodes in an infinite loop."""
while self.should_run_one_episode:
self.total_environment_count += 1
with self._make_env() as env:
self._patience = self._patience or env.action_space.n
self.run_one_environment(env)
def run_one_environment(self, env: CompilerEnv) -> None:
"""Run random walks in an infinite loop. Returns if the environment ends."""
while self.should_run_one_episode:
self.total_episode_count += 1
if not self.run_one_episode(env):
return
def run_one_episode(self, env: CompilerEnv) -> bool:
"""Run a single random episode.
:param env: An environment.
:return: True if the episode ended gracefully, else False.
"""
observation = env.reset()
actions: List[ActionType] = []
patience = self._patience
total_returns = 0
while patience >= 0:
patience -= 1
self.total_step_count += 1
# === Your agent here! ===
action = env.action_space.sample()
# === End of agent. ===
actions.append(action)
observation, reward, done, _ = env.step(action)
if done:
return False
total_returns += reward
if total_returns > self.best_returns:
patience = self._patience
self.best_returns = total_returns
self.best_actions = actions.copy()
try:
self.best_commandline = env.action_space.to_string(env.actions)
except NotImplementedError:
self.best_commandline = ""
self.best_found_at_time = time()
return True
def random_search(
make_env: Callable[[], CompilerEnv],
outdir: Optional[Union[str, Path]] = None,
total_runtime: Optional[float] = 600,
patience: int = 0,
nproc: int = cpu_count(),
skip_done: bool = False,
) -> CompilerEnv:
with make_env() as env:
env.reset()
if not isinstance(env.unwrapped, CompilerEnv):
raise TypeError(
f"random_search() requires CompilerEnv. Called with: {type(env).__name__}"
)
benchmark_uri = env.benchmark.uri
if not outdir:
outdir = create_user_logs_dir(
os.path.normpath(f"random/{benchmark_uri.scheme}/{benchmark_uri.path}")
)
outdir = Path(outdir)
if not env.reward_space:
raise ValueError("A reward space must be specified for random search")
reward_space_name = env.reward_space.name
action_space_names = list(env.action_space.names)
metadata_path = outdir / "random_search.json"
progress_path = outdir / "random_search_progress.csv"
best_actions_path = outdir / "random_search_best_actions.txt"
best_commandline_path = outdir / "random_search_best_actions_commandline.txt"
if skip_done and metadata_path.is_file():
# TODO(cummins): Return best reward.
return 0
# Write a metadata file.
metadata = {
"env": env.spec.id if env.spec else "",
"benchmark": str(benchmark_uri),
"reward": reward_space_name,
"patience": patience,
}
with open(str(metadata_path), "w") as f:
json.dump(metadata, f, sort_keys=True, indent=2)
workers = [RandomAgentWorker(make_env, patience) for _ in range(nproc)]
for worker in workers:
worker.start()
best_actions = []
best_commandline = ""
started = time()
last_best_returns = -float("inf")
print(
f"Started {len(workers)} worker threads for {benchmark_uri} "
f"using reward {reward_space_name}."
)
print(f"Writing logs to {outdir}")
end_time = time() + total_runtime if total_runtime else None
if end_time:
print(f"=== Running for {humanize.naturaldelta(total_runtime)} ===")
else:
print("=== WARNING: This will loop forever! Use C-c to terminate. ===")
print() # Blank line gets filled below when the cursor moves up one line.
try:
with open(str(progress_path), "w") as f:
print(
"runtime_seconds",
"total_episode_count",
"total_step_count",
"num_passes",
"reward",
sep=",",
file=f,
flush=True,
)
while not end_time or time() < end_time:
sleep(0.5)
total_episode_count = sum(
worker.total_episode_count for worker in workers
)
total_step_count = sum(worker.total_step_count for worker in workers)
total_environment_count = sum(
worker.total_environment_count for worker in workers
)
best_worker = max(workers, key=lambda worker: worker.best_returns)
best_returns = best_worker.best_returns
best_actions = best_worker.best_actions
best_commandline = best_worker.best_commandline
runtime = time() - started
print(
"\r\033[1A"
"\033[K"
f"Runtime: {humanize.naturaldelta(runtime)}. "
f"Num steps: {humanize.intcomma(total_step_count)} "
f"({humanize.intcomma(int(total_step_count / runtime))} / sec). "
f"Num episodes: {humanize.intcomma(total_episode_count)} "
f"({humanize.intcomma(int(total_episode_count / runtime))} / sec). "
f"Num restarts: {humanize.intcomma(total_environment_count - nproc)}.\n"
"\033[K"
f"Best reward: {best_returns:.4f} "
f"({len(best_actions)} passes, "
f"found after {humanize.naturaldelta(best_worker.best_found_at_time - started)})",
end="",
flush=True,
)
# Log the incremental progress improvements.
if best_returns > last_best_returns:
entry = RandomSearchProgressLogEntry(
runtime_seconds=runtime,
total_episode_count=total_episode_count,
total_step_count=total_step_count,
num_passes=len(best_actions),
reward=best_returns,
)
print(entry.to_csv(), file=f, flush=True)
last_best_returns = best_returns
except KeyboardInterrupt:
print("\nkeyboard interrupt", end="", flush=True)
best_action_names = [action_space_names[a] for a in best_actions]
with open(str(best_actions_path), "w") as f:
f.write("\n".join(best_action_names))
f.write("\n")
with open(str(best_commandline_path), "w") as f:
print(best_commandline, file=f)
print("\n", flush=True)
print("Ending worker threads ... ", end="", flush=True)
for worker in workers:
worker.alive = False
for worker in workers:
try:
worker.join()
except ServiceError:
# Service error can be raised on abrupt service termination causing
# RPC errors.
pass
print("done")
print("Replaying actions from best solution found:")
with make_env() as env:
env.reset()
replay_actions(env, best_action_names, outdir)
return env
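# A minimal usage sketch (assumes the LLVM environments are installed; the
# benchmark and parameters are illustrative):
#
#   import compiler_gym
#   random_search(
#       make_env=lambda: compiler_gym.make("llvm-ic-v0",
#                                          benchmark="cbench-v1/qsort"),
#       total_runtime=60,
#       nproc=4,
#   )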
def replay_actions(env: CompilerEnv, action_names: List[str], outdir: Path):
logs_path = outdir / "random_search_best_actions_progress.csv"
start_time = time()
if config.enable_llvm_env:
if isinstance(env, LlvmEnv):
env.write_bitcode(outdir / "unoptimized.bc")
with open(str(logs_path), "w") as f:
ep_reward = 0
for i, action in enumerate(action_names, start=1):
_, reward, done, _ = env.step(env.action_space.names.index(action))
assert not done
ep_reward += reward
print(
f"Step [{i:03d} / {len(action_names):03d}]: reward={reward:.4f} \t"
f"episode={ep_reward:.4f} \taction={action}"
)
progress = RandomSearchProgressLogEntry(
runtime_seconds=time() - start_time,
total_episode_count=1,
total_step_count=i,
num_passes=i,
reward=reward,
)
print(progress.to_csv(), action, file=f, sep=",")
if config.enable_llvm_env:
if isinstance(env, LlvmEnv):
env.write_bitcode(outdir / "optimized.bc")
print(
tabulate(
[
(
"IR instruction count",
env.observation["IrInstructionCountO0"],
env.observation["IrInstructionCountOz"],
env.observation["IrInstructionCount"],
),
(
"Object .text size (bytes)",
env.observation["ObjectTextSizeO0"],
env.observation["ObjectTextSizeOz"],
env.observation["ObjectTextSizeBytes"],
),
],
headers=("", "-O0", "-Oz", "final"),
)
)
def replay_actions_from_logs(env: CompilerEnv, logdir: Path, benchmark=None) -> None:
best_actions_path = logdir / "random_search_best_actions.txt"
meta_path = logdir / "random_search.json"
assert best_actions_path.is_file(), f"File not found: {best_actions_path}"
assert meta_path.is_file(), f"File not found: {meta_path}"
with open(meta_path, "rb") as f:
meta = json.load(f)
with open(best_actions_path) as f:
actions = [ln.strip() for ln in f.readlines() if ln.strip()]
benchmark = benchmark or meta["benchmark"]
env.reward_space = meta["reward"]
env.reset(benchmark=benchmark)
replay_actions(env, actions, logdir)
|
CompilerGym-development
|
compiler_gym/random_search.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Validate environment states."""
import random
from concurrent.futures import as_completed
from typing import Callable, Iterable, Optional
from compiler_gym.compiler_env_state import CompilerEnvState
from compiler_gym.envs.compiler_env import CompilerEnv
from compiler_gym.util import thread_pool
from compiler_gym.validation_result import ValidationResult
def _validate_states_worker(
make_env: Callable[[], CompilerEnv], state: CompilerEnvState
) -> ValidationResult:
with make_env() as env:
result = env.validate(state)
return result
def validate_states(
make_env: Callable[[], CompilerEnv],
states: Iterable[CompilerEnvState],
nproc: Optional[int] = None,
inorder: bool = False,
) -> Iterable[ValidationResult]:
"""A parallelized implementation of
:meth:`env.validate() <compiler_gym.envs.CompilerEnv.validate>` for batched
validation.
:param make_env: A callback which instantiates a compiler environment.
:param states: A sequence of compiler environment states to validate.
:param nproc: The number of parallel worker processes to run.
:param inorder: Whether to return results in the order they were provided,
or in the order that they are available.
:return: An iterator over validation results. The order of results may
differ from the input states.
"""
executor = thread_pool.get_thread_pool_executor()
if nproc == 1:
map_func = map
elif inorder:
map_func = executor.map
else:
# The validation function of benchmarks can vary wildly in computational
# demands. Shuffle the order of states (unless explicitly asked for them
# to be kept inorder) as crude load balancing for the case where
# multiple states are provided for each benchmark.
states = list(states)
random.shuffle(states)
def map_func(func, envs, states):
futures = (
executor.submit(func, env, state) for env, state in zip(envs, states)
)
return (r.result() for r in as_completed(futures))
yield from map_func(_validate_states_worker, [make_env] * len(states), states)
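# A minimal usage sketch (the file path and parameters are illustrative):
#
#   import compiler_gym
#   states = list(compiler_gym.CompilerEnvStateReader.read_paths(["results.csv"]))
#   for result in validate_states(
#       make_env=lambda: compiler_gym.make("llvm-v0"), states=states, nproc=4
#   ):
#       print(result)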
|
CompilerGym-development
|
compiler_gym/validate.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This module defines a class to represent a compiler environment state."""
import csv
import re
import sys
from io import StringIO
from typing import Iterable, List, Optional, TextIO
import requests
from pydantic import BaseModel, Field, validator
from compiler_gym.datasets.uri import BenchmarkUri
from compiler_gym.util.truncate import truncate
class CompilerEnvState(BaseModel):
"""The representation of a compiler environment state.
The state of an environment is defined as a benchmark and a sequence of
actions that has been applied to it. For a given environment, the state
contains the information required to reproduce the result.
"""
benchmark: str = Field(
allow_mutation=False,
examples=[
"benchmark://cbench-v1/crc32",
"generator://csmith-v0/0",
],
)
"""The URI of the benchmark used for this episode."""
commandline: str
"""The list of actions that produced this state, as a commandline."""
walltime: float
"""The walltime of the episode in seconds. Must be non-negative."""
reward: Optional[float] = Field(
required=False,
default=None,
allow_mutation=True,
)
"""The cumulative reward for this episode. Optional."""
@validator("walltime")
def walltime_nonnegative(cls, v):
if v is not None:
assert v >= 0, "Walltime cannot be negative"
return v
@validator("benchmark", pre=True)
def validate_benchmark(cls, value):
if isinstance(value, BenchmarkUri):
return str(value)
return value
@property
def has_reward(self) -> bool:
"""Return whether the state has a reward value."""
return self.reward is not None
def __eq__(self, rhs) -> bool:
if not isinstance(rhs, CompilerEnvState):
return False
epsilon = 1e-5
# Only compare reward if both states have it.
if not (self.has_reward and rhs.has_reward):
reward_equal = True
else:
reward_equal = abs(self.reward - rhs.reward) < epsilon
# Note that walltime is excluded from equivalence checks as two states
# are equivalent if they define the same point in the optimization space
# irrespective of how long it took to get there.
return (
self.benchmark == rhs.benchmark
and reward_equal
and self.commandline == rhs.commandline
)
def __ne__(self, rhs) -> bool:
return not self == rhs
class Config:
validate_assignment = True
class CompilerEnvStateWriter:
"""Serialize compiler environment states to CSV.
Example use:
>>> with CompilerEnvStateWriter(open("results.csv", "wb")) as writer:
... writer.write_state(env.state)
"""
def __init__(self, f: TextIO, header: bool = True):
"""Constructor.
:param f: The file to write to.
:param header: Whether to include a header row.
"""
self.f = f
self.writer = csv.writer(self.f, lineterminator="\n")
self.header = header
def write_state(self, state: CompilerEnvState, flush: bool = False) -> None:
"""Write the state to file.
:param state: A compiler environment state.
:param flush: Write to file immediately.
"""
if self.header:
self.writer.writerow(("benchmark", "reward", "walltime", "commandline"))
self.header = False
self.writer.writerow(
(state.benchmark, state.reward, state.walltime, state.commandline)
)
if flush:
self.f.flush()
def __enter__(self):
"""Support with-statement for the writer."""
return self
def __exit__(self, *args):
"""Support with-statement for the writer."""
self.f.close()
class CompilerEnvStateReader:
"""Read states from a CSV file.
Example usage:
>>> with CompilerEnvStateReader(open("results.csv", "rb")) as reader:
... for state in reader:
... print(state)
"""
def __init__(self, f: TextIO):
"""Constructor.
:param f: The file to read.
"""
self.f = f
self.reader = csv.reader(self.f)
def __iter__(self) -> Iterable[CompilerEnvState]:
"""Read the states from the file."""
columns_in_order = ["benchmark", "reward", "walltime", "commandline"]
# Read the CSV and coerce the columns into the expected order.
for (
benchmark,
reward,
walltime,
commandline,
) in self._iterate_columns_in_order(self.reader, columns_in_order):
yield CompilerEnvState(
benchmark=benchmark,
reward=None if reward == "" else float(reward),
walltime=0 if walltime == "" else float(walltime),
commandline=commandline,
)
@staticmethod
def _iterate_columns_in_order(
reader: csv.reader, columns: List[str]
) -> Iterable[List[str]]:
"""Read the input CSV and return each row in the given column order.
Supports CSVs both with and without a header. If no header, columns are
expected to be in the correct order. Else the header row is used to
determine column order.
Header row detection is case insensitive.
:param reader: The CSV file to read.
:param columns: A list of column names in the order that they are
expected.
:return: An iterator over rows.
"""
try:
row = next(reader)
except StopIteration:
# Empty file.
return
if len(row) != len(columns):
raise ValueError(
f"Expected {len(columns)} columns in the first row of CSV: {truncate(row)}"
)
# Convert the maybe-header columns to lowercase for case-insensitive
# comparison.
maybe_header = [v.lower() for v in row]
if set(maybe_header) == set(columns):
# The first row matches the expected columns names, so use it to
# determine the column order.
column_order = [maybe_header.index(v) for v in columns]
yield from ([row[v] for v in column_order] for row in reader)
else:
# The first row isn't a header, so assume that all rows are in
# expected column order.
yield row
yield from reader
def __enter__(self):
"""Support with-statement for the reader."""
return self
def __exit__(self, *args):
"""Support with-statement for the reader."""
self.f.close()
@staticmethod
def read_paths(paths: Iterable[str]) -> Iterable[CompilerEnvState]:
"""Read a states from a list of file paths.
Read states from stdin using a special path :code:`"-"`.
:param: A list of paths.
:return: A generator of compiler env states.
"""
for path in paths:
if path == "-":
yield from iter(CompilerEnvStateReader(sys.stdin))
elif (
re.match(r"^(http|https)://[a-zA-Z0-9.-_/]+(\.csv)$", path) is not None
):
response: requests.Response = requests.get(path)
if response.status_code == 200:
yield from iter(CompilerEnvStateReader(StringIO(response.text)))
else:
raise requests.exceptions.InvalidURL(
f"Url {path} content could not be obtained"
)
else:
with open(path) as f:
yield from iter(CompilerEnvStateReader(f))
|
CompilerGym-development
|
compiler_gym/compiler_env_state.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This module implements a wrapper that logs state transitions to an sqlite
database.
"""
import logging
import pickle
import sqlite3
import zlib
from pathlib import Path
from time import time
from typing import Iterable, Optional, Union
import numpy as np
from compiler_gym.envs import LlvmEnv
from compiler_gym.spaces import Reward
from compiler_gym.util.gym_type_hints import ActionType
from compiler_gym.util.timer import Timer, humanize_duration
from compiler_gym.views import ObservationSpaceSpec
from compiler_gym.wrappers import CompilerEnvWrapper
DB_CREATION_SCRIPT = """
CREATE TABLE IF NOT EXISTS States (
benchmark_uri TEXT NOT NULL, -- The URI of the benchmark.
done INTEGER NOT NULL, -- 0 = False, 1 = True.
ir_instruction_count_oz_reward REAL NULLABLE,
state_id TEXT NOT NULL, -- 40-char sha1.
actions TEXT NOT NULL, -- Decode: [int(x) for x in field.split()]
PRIMARY KEY (benchmark_uri, actions),
FOREIGN KEY (state_id) REFERENCES Observations(state_id) ON UPDATE CASCADE
);
CREATE TABLE IF NOT EXISTS Observations (
state_id TEXT NOT NULL, -- 40-char sha1.
ir_instruction_count INTEGER NOT NULL,
compressed_llvm_ir BLOB NOT NULL, -- Decode: zlib.decompress(...)
pickled_compressed_programl BLOB NOT NULL, -- Decode: pickle.loads(zlib.decompress(...))
autophase TEXT NOT NULL, -- Decode: np.array([int(x) for x in field.split()], dtype=np.int64)
instcount TEXT NOT NULL, -- Decode: np.array([int(x) for x in field.split()], dtype=np.int64)
PRIMARY KEY (state_id)
);
"""
class SynchronousSqliteLogger(CompilerEnvWrapper):
"""A wrapper for an LLVM environment that logs all transitions to an sqlite
database.
Wrap an existing LLVM environment and then use it as per normal:
>>> env = SynchronousSqliteLogger(
... env=gym.make("llvm-autophase-ic-v0"),
... db_path="example.db",
... )
Connect to the database file you specified:
.. code-block::
$ sqlite3 example.db
There are two tables:
1. States: records every unique combination of benchmark + actions. For each
entry, records an identifying state ID, the episode reward, and whether
the episode is terminated:
.. code-block::
sqlite> .mode markdown
sqlite> .headers on
sqlite> select * from States limit 5;
| benchmark_uri | done | ir_instruction_count_oz_reward | state_id | actions |
|--------------------------|------|--------------------------------|------------------------------------------|----------------|
| generator://csmith-v0/99 | 0 | 0.0 | d625b874e58f6d357b816e21871297ac5c001cf0 | |
| generator://csmith-v0/99 | 0 | 0.0 | d625b874e58f6d357b816e21871297ac5c001cf0 | 31 |
| generator://csmith-v0/99 | 0 | 0.0 | 52f7142ef606d8b1dec2ff3371c7452c8d7b81ea | 31 116 |
| generator://csmith-v0/99 | 0 | 0.268005818128586 | d8c05bd41b7a6c6157b6a8f0f5093907c7cc7ecf | 31 116 103 |
| generator://csmith-v0/99 | 0 | 0.288621664047241 | c4d7ecd3807793a0d8bc281104c7f5a8aa4670f9 | 31 116 103 109 |
2. Observations: records pickled, compressed, and text observation values
for each unique state.
Caveats of this implementation:
1. Only :class:`LlvmEnv <compiler_gym.envs.LlvmEnv>` environments may be
wrapped.
2. The wrapped environment must have an observation space and reward space
set.
3. The observation spaces and reward spaces that are logged to database
are hardcoded. To change what is recorded, you must copy and modify this
implementation.
4. Writing to the database is synchronous and adds significant overhead to
the compute cost of the environment.
"""
def __init__(
self,
env: LlvmEnv,
db_path: Path,
commit_frequency_in_seconds: int = 300,
max_step_buffer_length: int = 5000,
):
"""Constructor.
:param env: The environment to wrap.
:param db_path: The path of the database to log to. This file may
already exist. If it does, new entries are appended. If the files
does not exist, it is created.
:param commit_frequency_in_seconds: The maximum amount of time to elapse
before writing pending logs to the database.
:param max_step_buffer_length: The maximum number of calls to
:code:`step()` before writing pending logs to the database.
"""
super().__init__(env)
if not hasattr(env, "unwrapped"):
raise TypeError("Requires LlvmEnv base environment")
if not isinstance(self.unwrapped, LlvmEnv):
raise TypeError("Requires LlvmEnv base environment")
db_path.parent.mkdir(exist_ok=True, parents=True)
self.connection = sqlite3.connect(str(db_path))
self.cursor = self.connection.cursor()
self.commit_frequency = commit_frequency_in_seconds
self.max_step_buffer_length = max_step_buffer_length
self.cursor.executescript(DB_CREATION_SCRIPT)
self.connection.commit()
self.last_commit = time()
self.observations_buffer = {}
self.step_buffer = []
# House keeping notice: Keep these lists in sync with record().
self._observations = [
self.env.observation.spaces["IrSha1"],
self.env.observation.spaces["Ir"],
self.env.observation.spaces["Programl"],
self.env.observation.spaces["Autophase"],
self.env.observation.spaces["InstCount"],
self.env.observation.spaces["IrInstructionCount"],
]
self._rewards = [
self.env.reward.spaces["IrInstructionCountOz"],
self.env.reward.spaces["IrInstructionCount"],
]
self._reward_totals = np.zeros(len(self._rewards))
def flush(self) -> None:
"""Flush the buffered steps and observations to database."""
n_steps, n_observations = len(self.step_buffer), len(self.observations_buffer)
# Nothing to flush.
if not n_steps:
return
with Timer() as flush_time:
# House keeping notice: Keep these statements in sync with record().
self.cursor.executemany(
"INSERT OR IGNORE INTO States VALUES (?, ?, ?, ?, ?)",
self.step_buffer,
)
self.cursor.executemany(
"INSERT OR IGNORE INTO Observations VALUES (?, ?, ?, ?, ?, ?)",
((k, *v) for k, v in self.observations_buffer.items()),
)
self.step_buffer = []
self.observations_buffer = {}
self.connection.commit()
logging.info(
"Wrote %d state records and %d observations in %s. Last flush %s ago",
n_steps,
n_observations,
flush_time,
humanize_duration(time() - self.last_commit),
)
self.last_commit = time()
def reset(self, *args, **kwargs):
observation = self.env.reset(*args, **kwargs)
observations, rewards, done, info = self.env.multistep(
actions=[],
observation_spaces=self._observations,
reward_spaces=self._rewards,
)
assert not done, f"reset() failed! {info}"
self._reward_totals = np.array(rewards, dtype=np.float32)
rewards = self._reward_totals
self._record(
actions=self.actions,
observations=observations,
rewards=self._reward_totals,
done=False,
)
return observation
def step(
self,
action: ActionType,
observation_spaces: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
reward_spaces: Optional[Iterable[Union[str, Reward]]] = None,
observations: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
rewards: Optional[Iterable[Union[str, Reward]]] = None,
):
assert self.observation_space, "No observation space set"
assert self.reward_space, "No reward space set"
assert (
observation_spaces is None
), "SynchronousSqliteLogger does not support observation_spaces"
assert (
reward_spaces is None
), "SynchronousSqliteLogger does not support reward_spaces"
assert (
observations is None
), "SynchronousSqliteLogger does not support observations"
assert rewards is None, "SynchronousSqliteLogger does not support rewards"
observations, rewards, done, info = self.env.step(
action=action,
observation_spaces=self._observations + [self.observation_space_spec],
reward_spaces=self._rewards + [self.reward_space],
)
self._reward_totals += rewards[:-1]
self._record(
actions=self.actions,
observations=observations[:-1],
rewards=self._reward_totals,
done=done,
)
return observations[-1], rewards[-1], done, info
def _record(self, actions, observations, rewards, done) -> None:
state_id, ir, programl, autophase, instcount, instruction_count = observations
instruction_count_reward = float(rewards[0])
self.step_buffer.append(
(
str(self.benchmark.uri),
1 if done else 0,
instruction_count_reward,
state_id,
" ".join(str(x) for x in actions),
)
)
self.observations_buffer[state_id] = (
instruction_count,
zlib.compress(ir.encode("utf-8")),
zlib.compress(pickle.dumps(programl)),
" ".join(str(x) for x in autophase),
" ".join(str(x) for x in instcount),
)
if (
len(self.step_buffer) >= self.max_step_buffer_length
or time() - self.last_commit >= self.commit_frequency
):
self.flush()
def close(self):
self.flush()
self.env.close()
def fork(self):
raise NotImplementedError
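# --- Illustrative sketch (not part of the original module) ---
# Wrapping an LLVM environment with the logger and taking a single step. The
# environment name follows the docstring above; the benchmark and database
# path are arbitrary examples.
if __name__ == "__main__":
    import compiler_gym
    env = SynchronousSqliteLogger(
        env=compiler_gym.make("llvm-autophase-ic-v0"),
        db_path=Path("/tmp/compiler_gym_example.db"),
    )
    env.reset(benchmark="benchmark://cbench-v1/crc32")
    env.step(env.action_space.sample())
    env.close()  # close() flushes any buffered records to the database.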
|
CompilerGym-development
|
compiler_gym/wrappers/sqlite_logger.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This module implements fork wrappers."""
from typing import List
from compiler_gym.envs import CompilerEnv
from compiler_gym.wrappers import CompilerEnvWrapper
class ForkOnStep(CompilerEnvWrapper):
"""A wrapper that creates a fork of the environment before every step.
This wrapper creates a new fork of the environment before every call to
    :meth:`env.step() <compiler_gym.envs.CompilerEnv.step>`. Because of this,
this environment supports an additional :meth:`env.undo()
<compiler_gym.wrappers.ForkOnStep.undo>` method that can be used to
backtrack.
Example usage:
>>> env = ForkOnStep(compiler_gym.make("llvm-v0"))
>>> env.step(0)
>>> env.actions
[0]
>>> env.undo()
>>> env.actions
[]
:ivar stack: A fork of the environment before every previous call to
        :meth:`env.step() <compiler_gym.envs.CompilerEnv.step>`, ordered
oldest to newest.
:vartype stack: List[CompilerEnv]
"""
def __init__(self, env: CompilerEnv):
"""Constructor.
:param env: The environment to wrap.
"""
super().__init__(env)
self.stack: List[CompilerEnv] = []
def undo(self) -> CompilerEnv:
"""Undo the previous action.
:returns: Self.
"""
if not self.stack:
return
self.env.close()
self.env = self.stack.pop()
return self.env
def close(self) -> None:
for env in self.stack:
env.close()
self.stack: List[CompilerEnv] = []
self.env.close()
self.custom_close = True
    def reset(self, *args, **kwargs):
        observation = self.env.reset(*args, **kwargs)
        for env in self.stack:
            env.close()
        self.stack: List[CompilerEnv] = []
        return observation
def step(self, *args, **kwargs):
self.stack.append(self.env.fork())
return self.env.step(*args, **kwargs)
def fork(self):
raise NotImplementedError
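# --- Illustrative sketch (not part of the original module) ---
# Using ForkOnStep to backtrack when an action does not improve the reward.
# The environment and reward space names are assumptions.
if __name__ == "__main__":
    import compiler_gym
    env = ForkOnStep(compiler_gym.make("llvm-v0", reward_space="IrInstructionCount"))
    env.reset()
    _, reward, done, _ = env.step(0)
    if not done and reward <= 0:
        env.undo()  # Restore the snapshot taken before the unrewarding step.
    env.close()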
|
CompilerGym-development
|
compiler_gym/wrappers/fork.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from itertools import cycle
from typing import Callable, Iterable, Optional, Union
import numpy as np
from compiler_gym.datasets import Benchmark
from compiler_gym.envs import CompilerEnv
from compiler_gym.util.parallelization import thread_safe_tee
from compiler_gym.wrappers.core import CompilerEnvWrapper
BenchmarkLike = Union[str, Benchmark]
class IterateOverBenchmarks(CompilerEnvWrapper):
"""Iterate over a (possibly infinite) sequence of benchmarks on each call to
reset(). Will raise :code:`StopIteration` on :meth:`reset()
<compiler_gym.envs.CompilerEnv.reset>` once the iterator is exhausted. Use
:class:`CycleOverBenchmarks` or :class:`RandomOrderBenchmarks` for wrappers
which will loop over the benchmarks.
"""
def __init__(
self,
env: CompilerEnv,
benchmarks: Iterable[BenchmarkLike],
fork_shares_iterator: bool = False,
):
"""Constructor.
:param env: The environment to wrap.
:param benchmarks: An iterable sequence of benchmarks.
:param fork_shares_iterator: If :code:`True`, the :code:`benchmarks`
            iterator will be shared by a forked environment created by
:meth:`env.fork() <compiler_gym.envs.CompilerEnv.fork>`. This means
that calling :meth:`env.reset()
<compiler_gym.envs.CompilerEnv.reset>` with one environment will
advance the iterator in the other. If :code:`False`, forked
environments will use :code:`itertools.tee()` to create a copy of
the iterator so that each iterator may advance independently.
However, this requires shared buffers between the environments which
can lead to memory overheads if :meth:`env.reset()
<compiler_gym.envs.CompilerEnv.reset>` is called many times more in
one environment than the other.
"""
super().__init__(env)
self.benchmarks = iter(benchmarks)
self.fork_shares_iterator = fork_shares_iterator
def reset(self, benchmark: Optional[BenchmarkLike] = None, **kwargs):
if benchmark is not None:
raise TypeError("Benchmark passed to IterateOverBenchmarks.reset()")
benchmark: BenchmarkLike = next(self.benchmarks)
return self.env.reset(benchmark=benchmark)
def fork(self) -> "IterateOverBenchmarks":
if self.fork_shares_iterator:
other_benchmarks_iterator = self.benchmarks
else:
self.benchmarks, other_benchmarks_iterator = thread_safe_tee(
self.benchmarks
)
return IterateOverBenchmarks(
env=self.env.fork(),
benchmarks=other_benchmarks_iterator,
fork_shares_iterator=self.fork_shares_iterator,
)
class CycleOverBenchmarks(IterateOverBenchmarks):
"""Cycle through a list of benchmarks on each call to :meth:`reset()
<compiler_gym.envs.CompilerEnv.reset>`. Same as
:class:`IterateOverBenchmarks` except the list of benchmarks repeats once
exhausted.
"""
def __init__(
self,
env: CompilerEnv,
benchmarks: Iterable[BenchmarkLike],
fork_shares_iterator: bool = False,
):
"""Constructor.
:param env: The environment to wrap.
:param benchmarks: An iterable sequence of benchmarks.
:param fork_shares_iterator: If :code:`True`, the :code:`benchmarks`
iterator will be shared by a forked environment created by
:meth:`env.fork() <compiler_gym.envs.CompilerEnv.fork>`. This means
that calling :meth:`env.reset()
<compiler_gym.envs.CompilerEnv.reset>` with one environment will
advance the iterator in the other. If :code:`False`, forked
environments will use :code:`itertools.tee()` to create a copy of
the iterator so that each iterator may advance independently.
However, this requires shared buffers between the environments which
can lead to memory overheads if :meth:`env.reset()
<compiler_gym.envs.CompilerEnv.reset>` is called many times more in
one environment than the other.
"""
super().__init__(
env, benchmarks=cycle(benchmarks), fork_shares_iterator=fork_shares_iterator
)
class CycleOverBenchmarksIterator(CompilerEnvWrapper):
"""Same as :class:`CycleOverBenchmarks
<compiler_gym.wrappers.CycleOverBenchmarks>` except that the user generates
the iterator.
"""
def __init__(
self,
env: CompilerEnv,
make_benchmark_iterator: Callable[[], Iterable[BenchmarkLike]],
):
"""Constructor.
:param env: The environment to wrap.
:param make_benchmark_iterator: A callback that returns an iterator over
a sequence of benchmarks. Once the iterator is exhausted, this
callback is called to produce a new iterator.
"""
super().__init__(env)
self.make_benchmark_iterator = make_benchmark_iterator
self.benchmarks = iter(self.make_benchmark_iterator())
def reset(self, benchmark: Optional[BenchmarkLike] = None, **kwargs):
if benchmark is not None:
raise TypeError("Benchmark passed toIterateOverBenchmarks.reset()")
try:
benchmark: BenchmarkLike = next(self.benchmarks)
except StopIteration:
self.benchmarks = iter(self.make_benchmark_iterator())
benchmark: BenchmarkLike = next(self.benchmarks)
return self.env.reset(benchmark=benchmark)
def fork(self) -> "CycleOverBenchmarksIterator":
return CycleOverBenchmarksIterator(
env=self.env.fork(),
make_benchmark_iterator=self.make_benchmark_iterator,
)
class RandomOrderBenchmarks(IterateOverBenchmarks):
"""Select randomly from a list of benchmarks on each call to :meth:`reset()
<compiler_gym.envs.CompilerEnv.reset>`.
.. note::
Uniform random selection is provided by evaluating the input benchmarks
iterator into a list and sampling randomly from the list. For very large
and infinite iterables of benchmarks you must use the
:class:`IterateOverBenchmarks
<compiler_gym.wrappers.IterateOverBenchmarks>` wrapper with your own
random sampling iterator.
"""
def __init__(
self,
env: CompilerEnv,
benchmarks: Iterable[BenchmarkLike],
rng: Optional[np.random.Generator] = None,
):
"""Constructor.
:param env: The environment to wrap.
:param benchmarks: An iterable sequence of benchmarks. The entirety of
this input iterator is evaluated during construction.
:param rng: A random number generator to use for random benchmark
selection.
"""
self._all_benchmarks = list(benchmarks)
rng = rng or np.random.default_rng()
super().__init__(
env,
benchmarks=(rng.choice(self._all_benchmarks) for _ in iter(int, 1)),
fork_shares_iterator=True,
)
def fork(self) -> "IterateOverBenchmarks":
"""Fork the random order benchmark wrapper.
Note that RNG state is not copied to forked environments.
"""
return IterateOverBenchmarks(
env=self.env.fork(), benchmarks=self._all_benchmarks
)
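# --- Illustrative sketch (not part of the original module) ---
# Cycling over a fixed list of benchmarks: every call to reset() advances to
# the next benchmark and wraps around once the list is exhausted. The
# environment and benchmark names are assumptions.
if __name__ == "__main__":
    import compiler_gym
    env = CycleOverBenchmarks(
        compiler_gym.make("llvm-v0"),
        benchmarks=[
            "benchmark://cbench-v1/crc32",
            "benchmark://cbench-v1/qsort",
        ],
    )
    for _ in range(4):  # Visits crc32, qsort, crc32, qsort.
        env.reset()
        print(env.benchmark)
    env.close()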
|
CompilerGym-development
|
compiler_gym/wrappers/datasets.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""The :code:`compiler_gym.wrappers` module provides a set of classes that can
be used to transform an environment in a modular way.
For example:
>>> env = compiler_gym.make("llvm-v0")
>>> env = TimeLimit(env, n=10)
>>> env = CycleOverBenchmarks(
... env,
... benchmarks=[
... "benchmark://cbench-v1/crc32",
... "benchmark://cbench-v1/qsort",
... ],
... )
.. warning::
CompilerGym environments are incompatible with the `OpenAI Gym wrappers
<https://github.com/openai/gym/tree/master/gym/wrappers>`_. This is because
CompilerGym extends the environment API with additional arguments and
methods. You must use the wrappers from this module when wrapping
CompilerGym environments. We provide a set of base wrappers that are
equivalent to those in OpenAI Gym that you can use to write your own
wrappers.
"""
from compiler_gym import config
from compiler_gym.wrappers.commandline import (
CommandlineWithTerminalAction,
ConstrainedCommandline,
)
from compiler_gym.wrappers.core import (
ActionWrapper,
CompilerEnvWrapper,
ObservationWrapper,
RewardWrapper,
)
from compiler_gym.wrappers.counter import Counter
from compiler_gym.wrappers.datasets import (
CycleOverBenchmarks,
CycleOverBenchmarksIterator,
IterateOverBenchmarks,
RandomOrderBenchmarks,
)
from compiler_gym.wrappers.fork import ForkOnStep
if config.enable_llvm_env:
from compiler_gym.wrappers.llvm import RuntimePointEstimateReward # noqa: F401
from compiler_gym.wrappers.sqlite_logger import ( # noqa: F401
SynchronousSqliteLogger,
)
from compiler_gym.wrappers.time_limit import TimeLimit
from .validation import ValidateBenchmarkAfterEveryStep
__all__ = [
"ActionWrapper",
"CommandlineWithTerminalAction",
"CompilerEnvWrapper",
"ConstrainedCommandline",
"Counter",
"CycleOverBenchmarks",
"CycleOverBenchmarksIterator",
"ForkOnStep",
"IterateOverBenchmarks",
"ObservationWrapper",
"RandomOrderBenchmarks",
"RewardWrapper",
"TimeLimit",
"ValidateBenchmarkAfterEveryStep",
]
if config.enable_llvm_env:
__all__.append("RuntimePointEstimateReward")
__all__.append("SynchronousSqliteLogger")
|
CompilerGym-development
|
compiler_gym/wrappers/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from abc import ABC, abstractmethod
from collections.abc import Iterable as IterableType
from typing import Any, Iterable, List, Optional, Tuple, Union
from deprecated.sphinx import deprecated
from gym import Wrapper
from gym.spaces import Space
from compiler_gym.compiler_env_state import CompilerEnvState
from compiler_gym.datasets import Benchmark, BenchmarkUri, Dataset
from compiler_gym.envs import CompilerEnv
from compiler_gym.spaces.reward import Reward
from compiler_gym.util.gym_type_hints import ActionType, ObservationType
from compiler_gym.validation_result import ValidationResult
from compiler_gym.views import ObservationSpaceSpec, ObservationView, RewardView
class CompilerEnvWrapper(CompilerEnv, Wrapper):
"""Wraps a :class:`CompilerEnv <compiler_gym.envs.CompilerEnv>` environment
to allow a modular transformation.
This class is the base class for all wrappers. This class must be used
rather than :code:`gym.Wrapper` to support the CompilerGym API extensions
such as the :code:`fork()` method.
"""
def __init__(self, env: CompilerEnv): # pylint: disable=super-init-not-called
"""Constructor.
:param env: The environment to wrap.
:raises TypeError: If :code:`env` is not a :class:`CompilerEnv
<compiler_gym.envs.CompilerEnv>`.
"""
# No call to gym.Wrapper superclass constructor here because we need to
# avoid setting the observation_space member variable, which in the
# CompilerEnv class is a property with a custom setter. Instead we set
# the observation_space_spec directly.
self.env = env
def close(self):
self.env.close()
def reset(self, *args, **kwargs) -> Optional[ObservationType]:
return self.env.reset(*args, **kwargs)
def fork(self) -> CompilerEnv:
return type(self)(env=self.env.fork())
def step( # pylint: disable=arguments-differ
self,
action: ActionType,
observation_spaces: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
reward_spaces: Optional[Iterable[Union[str, Reward]]] = None,
observations: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
rewards: Optional[Iterable[Union[str, Reward]]] = None,
timeout: Optional[float] = 300,
):
if isinstance(action, IterableType):
warnings.warn(
"Argument `action` of CompilerEnv.step no longer accepts a list "
" of actions. Please use CompilerEnv.multistep instead",
category=DeprecationWarning,
)
return self.multistep(
action,
observation_spaces=observation_spaces,
reward_spaces=reward_spaces,
observations=observations,
rewards=rewards,
)
if observations is not None:
warnings.warn(
"Argument `observations` of CompilerEnv.multistep has been "
"renamed `observation_spaces`. Please update your code",
category=DeprecationWarning,
)
observation_spaces = observations
if rewards is not None:
warnings.warn(
"Argument `rewards` of CompilerEnv.multistep has been renamed "
"`reward_spaces`. Please update your code",
category=DeprecationWarning,
)
reward_spaces = rewards
return self.multistep(
actions=[action],
observation_spaces=observation_spaces,
reward_spaces=reward_spaces,
)
def multistep(
self,
actions: Iterable[ActionType],
observation_spaces: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
reward_spaces: Optional[Iterable[Union[str, Reward]]] = None,
observations: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
rewards: Optional[Iterable[Union[str, Reward]]] = None,
timeout: Optional[float] = 300,
):
if observations is not None:
warnings.warn(
"Argument `observations` of CompilerEnv.multistep has been "
"renamed `observation_spaces`. Please update your code",
category=DeprecationWarning,
)
observation_spaces = observations
if rewards is not None:
warnings.warn(
"Argument `rewards` of CompilerEnv.multistep has been renamed "
"`reward_spaces`. Please update your code",
category=DeprecationWarning,
)
reward_spaces = rewards
return self.env.multistep(
actions=actions,
observation_spaces=observation_spaces,
reward_spaces=reward_spaces,
)
def render(
self,
mode="human",
) -> Optional[str]:
return self.env.render(mode)
@property
def reward_range(self) -> Tuple[float, float]:
return self.env.reward_range
@reward_range.setter
def reward_range(self, value: Tuple[float, float]):
self.env.reward_range = value
@property
def observation_space(self):
return self.env.observation_space
@observation_space.setter
def observation_space(
self, observation_space: Optional[Union[str, ObservationSpaceSpec]]
) -> None:
self.env.observation_space = observation_space
@property
def observation(self) -> ObservationView:
return self.env.observation
@observation.setter
def observation(self, observation: ObservationView) -> None:
self.env.observation = observation
@property
def observation_space_spec(self):
return self.env.observation_space_spec
@observation_space_spec.setter
def observation_space_spec(
self, observation_space_spec: Optional[ObservationSpaceSpec]
) -> None:
self.env.observation_space_spec = observation_space_spec
@property
def reward_space_spec(self) -> Optional[Reward]:
return self.env.reward_space_spec
@reward_space_spec.setter
def reward_space_spec(self, val: Optional[Reward]):
self.env.reward_space_spec = val
@property
def reward_space(self) -> Optional[Reward]:
return self.env.reward_space
@reward_space.setter
def reward_space(self, reward_space: Optional[Union[str, Reward]]) -> None:
self.env.reward_space = reward_space
@property
def reward(self) -> RewardView:
return self.env.reward
@reward.setter
def reward(self, reward: RewardView) -> None:
self.env.reward = reward
@property
def action_space(self) -> Space:
return self.env.action_space
@action_space.setter
def action_space(self, action_space: Optional[str]):
self.env.action_space = action_space
@property
def action_spaces(self) -> List[str]:
return self.env.action_spaces
@action_spaces.setter
def action_spaces(self, action_spaces: List[str]):
self.env.action_spaces = action_spaces
@property
def spec(self) -> Any:
return self.env.spec
@property
def benchmark(self) -> Benchmark:
return self.env.benchmark
@benchmark.setter
def benchmark(self, benchmark: Optional[Union[str, Benchmark, BenchmarkUri]]):
self.env.benchmark = benchmark
@property
def datasets(self) -> Iterable[Dataset]:
return self.env.datasets
@datasets.setter
def datasets(self, datasets: Iterable[Dataset]):
self.env.datasets = datasets
@property
def episode_walltime(self) -> float:
return self.env.episode_walltime
@property
def in_episode(self) -> bool:
return self.env.in_episode
@property
def episode_reward(self) -> Optional[float]:
return self.env.episode_reward
@episode_reward.setter
def episode_reward(self, episode_reward: Optional[float]):
self.env.episode_reward = episode_reward
@property
def actions(self) -> List[ActionType]:
return self.env.actions
@property
def version(self) -> str:
return self.env.version
@property
def compiler_version(self) -> str:
return self.env.compiler_version
@property
def state(self) -> CompilerEnvState:
return self.env.state
@deprecated(
version="0.2.5", reason="Use env.action_space.to_string(env.actions) instead"
)
def commandline(self) -> str:
return self.env.commandline()
@deprecated(
version="0.2.5", reason='Use env.action_space.from_string("...") instead'
)
def commandline_to_actions(self, commandline: str) -> List[ActionType]:
return self.env.commandline_to_actions(commandline)
def apply(self, state: CompilerEnvState) -> None: # noqa
self.env.apply(state)
def validate(self, state: Optional[CompilerEnvState] = None) -> ValidationResult:
return self.env.validate(state)
class ActionWrapper(CompilerEnvWrapper):
"""Wraps a :class:`CompilerEnv <compiler_gym.envs.CompilerEnv>` environment
to allow an action space transformation.
"""
def multistep(
self,
actions: Iterable[ActionType],
observation_spaces: Optional[Iterable[ObservationSpaceSpec]] = None,
reward_spaces: Optional[Iterable[Reward]] = None,
observations: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
rewards: Optional[Iterable[Union[str, Reward]]] = None,
timeout: Optional[float] = 300,
):
return self.env.multistep(
[self.action(a) for a in actions],
observation_spaces=observation_spaces,
reward_spaces=reward_spaces,
observations=observations,
rewards=rewards,
)
def action(self, action: ActionType) -> ActionType:
"""Translate the action to the new space."""
raise NotImplementedError
def reverse_action(self, action: ActionType) -> ActionType:
"""Translate an action from the new space to the wrapped space."""
raise NotImplementedError
class ObservationWrapper(CompilerEnvWrapper, ABC):
"""Wraps a :class:`CompilerEnv <compiler_gym.envs.CompilerEnv>` environment
to allow an observation space transformation.
"""
def reset(self, *args, **kwargs):
observation = self.env.reset(*args, **kwargs)
return self.convert_observation(observation)
def multistep(
self,
actions: List[ActionType],
observation_spaces: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
reward_spaces: Optional[Iterable[Union[str, Reward]]] = None,
observations: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
rewards: Optional[Iterable[Union[str, Reward]]] = None,
timeout: Optional[float] = 300,
):
observation, reward, done, info = self.env.multistep(
actions,
observation_spaces=observation_spaces,
reward_spaces=reward_spaces,
observations=observations,
rewards=rewards,
)
return self.convert_observation(observation), reward, done, info
@abstractmethod
def convert_observation(self, observation: ObservationType) -> ObservationType:
"""Translate an observation to the new space."""
raise NotImplementedError
class RewardWrapper(CompilerEnvWrapper, ABC):
"""Wraps a :class:`CompilerEnv <compiler_gym.envs.CompilerEnv>` environment
    to allow a reward space transformation.
"""
def reset(self, *args, **kwargs):
return self.env.reset(*args, **kwargs)
def multistep(
self,
actions: List[ActionType],
observation_spaces: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
reward_spaces: Optional[Iterable[Union[str, Reward]]] = None,
observations: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
rewards: Optional[Iterable[Union[str, Reward]]] = None,
timeout: Optional[float] = 300,
):
observation, reward, done, info = self.env.multistep(
actions,
observation_spaces=observation_spaces,
reward_spaces=reward_spaces,
observations=observations,
rewards=rewards,
)
# Undo the episode_reward update and reapply it once we have transformed
# the reward.
#
# TODO(cummins): Refactor step() so that we don't have to do this
# recalculation of episode_reward, as this is prone to errors if, say,
# the base reward returns NaN or an invalid type.
if reward is not None and self.episode_reward is not None:
self.unwrapped.episode_reward -= reward
reward = self.convert_reward(reward)
self.unwrapped.episode_reward += reward
return observation, reward, done, info
@abstractmethod
def convert_reward(self, reward):
"""Translate a reward to the new space."""
raise NotImplementedError
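# --- Illustrative sketch (not part of the original module) ---
# A minimal RewardWrapper subclass that rescales every reward by a constant
# factor. The wrapper name and scale value are arbitrary examples.
class ScaledReward(RewardWrapper):
    """Multiply every reward by a fixed scale factor."""
    def __init__(self, env: CompilerEnv, scale: float = 0.5):
        super().__init__(env)
        self.scale = scale
    def convert_reward(self, reward):
        return reward * self.scale
    def fork(self) -> "ScaledReward":
        return ScaledReward(env=self.env.fork(), scale=self.scale)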
|
CompilerGym-development
|
compiler_gym/wrappers/core.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections.abc import Iterable as IterableType
from typing import Dict, Iterable, List, Optional, Union
from gym import Space
from compiler_gym.envs import CompilerEnv
from compiler_gym.spaces import Commandline, CommandlineFlag, Reward
from compiler_gym.util.gym_type_hints import ActionType, StepType
from compiler_gym.views import ObservationSpaceSpec
from compiler_gym.wrappers.core import ActionWrapper, CompilerEnvWrapper
class CommandlineWithTerminalAction(CompilerEnvWrapper):
"""Creates a new action space with a special "end of episode" terminal
    action appended at the end. If step() is called with it, the "done" flag is set.
"""
def __init__(
self,
env: CompilerEnv,
terminal=CommandlineFlag(
name="end-of-episode",
flag="# end-of-episode",
description="End the episode",
),
):
"""Constructor.
:param env: The environment to wrap.
:param terminal: The flag to use as the terminal action. Optional.
"""
super().__init__(env)
if not isinstance(env.action_space.wrapped, Commandline):
raise TypeError(
f"Unsupported action space: {type(env.action_space).__name__}"
)
        # Redefine the action space, appending the terminal action at the end.
self.action_space = Commandline(
items=[
CommandlineFlag(
name=name,
flag=flag,
description=description,
)
for name, flag, description in zip(
env.action_space.names,
env.action_space.flags,
env.action_space.descriptions,
)
]
+ [terminal],
name=f"{type(self).__name__}<{env.action_space.name}>",
)
def multistep(
self,
actions: List[ActionType],
observation_spaces: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
reward_spaces: Optional[Iterable[Union[str, Reward]]] = None,
observations: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
rewards: Optional[Iterable[Union[str, Reward]]] = None,
) -> StepType:
terminal_action: int = len(self.action_space.flags) - 1
try:
index_of_terminal = actions.index(terminal_action)
except ValueError:
index_of_terminal = -1
# Run only the actions up to the terminal action.
if index_of_terminal >= 0:
actions = actions[:index_of_terminal]
observation, reward, done, info = self.env.multistep(
actions,
observation_spaces=observation_spaces,
reward_spaces=reward_spaces,
observations=observations,
rewards=rewards,
)
# Communicate back to the frontend.
if index_of_terminal >= 0 and not done:
done = True
info["terminal_action"] = True
return observation, reward, done, info
@property
def action_space(self) -> Space:
return self._action_space
@action_space.setter
def action_space(self, action_space: Space):
self._action_space = action_space
class ConstrainedCommandline(ActionWrapper):
"""Constrains a Commandline action space to a subset of the original space's
flags.
"""
def __init__(
self, env: CompilerEnv, flags: Iterable[str], name: Optional[str] = None
):
"""Constructor.
:param env: The environment to wrap.
:param flags: A list of entries from :code:`env.action_space.flags`
denoting flags that are available in this wrapped environment.
:param name: The name of the new action space.
"""
super().__init__(env)
self._flags = flags
if not flags:
raise TypeError("No flags provided")
if not issubclass(type(env.action_space.wrapped), Commandline):
raise TypeError(
"Can only wrap Commandline action space. "
f"Received: {type(env.action_space.wrapped).__name__}"
)
self._forward_translation: List[int] = [self.action_space[f] for f in flags]
self._reverse_translation: Dict[int, int] = {
v: i for i, v in enumerate(self._forward_translation)
}
# Redefine the action space using this smaller set of flags.
self.action_space = Commandline(
items=[
CommandlineFlag(
name=env.action_space.names[a],
flag=env.action_space.flags[a],
description=env.action_space.descriptions[a],
)
for a in (env.action_space.flags.index(f) for f in flags)
],
name=f"{type(self).__name__}<{name or env.action_space.name}, {len(flags)}>",
)
def action(self, action: Union[int, List[int]]):
if isinstance(action, IterableType):
return [self._forward_translation[a] for a in action]
return self._forward_translation[action]
def reverse_action(self, action: Union[int, List[int]]):
if isinstance(action, IterableType):
return [self._reverse_translation[a] for a in action]
return self._reverse_translation[action]
@property
def actions(self) -> List[int]:
"""Reverse-translate actions back into the constrained space."""
return self.reverse_action(self.env.actions)
def fork(self) -> "ConstrainedCommandline":
return ConstrainedCommandline(
env=self.env.fork(), flags=self._flags, name=self.action_space.name
)
@property
def action_space(self) -> Space:
return self._action_space
@action_space.setter
def action_space(self, action_space: Space):
self._action_space = action_space
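# --- Illustrative sketch (not part of the original module) ---
# Constraining the LLVM action space to a handful of flags. The environment
# name and the flag names are assumptions; flags must be members of
# env.action_space.flags.
if __name__ == "__main__":
    import compiler_gym
    env = ConstrainedCommandline(
        env=compiler_gym.make("llvm-v0"),
        flags=["-mem2reg", "-simplifycfg", "-instcombine"],
    )
    env.reset()
    env.step(0)  # Index 0 now maps to the first constrained flag, -mem2reg.
    env.close()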
|
CompilerGym-development
|
compiler_gym/wrappers/commandline.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Wrapper classes for the LLVM environments."""
from typing import Callable, Iterable
import numpy as np
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.spaces import RuntimeReward
from compiler_gym.wrappers import CompilerEnvWrapper
class RuntimePointEstimateReward(CompilerEnvWrapper):
"""LLVM wrapper that uses a point estimate of program runtime as reward.
This class wraps an LLVM environment and registers a new runtime reward
space. Runtime is estimated from one or more runtime measurements, after
optionally running one or more warmup runs. At each step, reward is the
change in runtime estimate from the runtime estimate at the previous step.
"""
def __init__(
self,
env: LlvmEnv,
runtime_count: int = 30,
warmup_count: int = 0,
estimator: Callable[[Iterable[float]], float] = np.median,
):
"""Constructor.
:param env: The environment to wrap.
:param runtime_count: The number of times to execute the binary when
estimating the runtime.
:param warmup_count: The number of warmup runs of the binary to perform
before measuring the runtime.
:param estimator: A function that takes a list of runtime measurements
and produces a point estimate.
"""
super().__init__(env)
self.env.unwrapped.reward.add_space(
RuntimeReward(
runtime_count=runtime_count,
warmup_count=warmup_count,
estimator=estimator,
)
)
self.env.unwrapped.reward_space = "runtime"
self.env.unwrapped.runtime_observation_count = runtime_count
self.env.unwrapped.runtime_warmup_runs_count = warmup_count
def fork(self) -> "RuntimePointEstimateReward":
fkd = self.env.fork()
# Remove the original "runtime" space so that we that new
# RuntimePointEstimateReward wrapper instance does not attempt to
# redefine, raising a warning.
del fkd.unwrapped.reward.spaces["runtime"]
return RuntimePointEstimateReward(
env=fkd,
runtime_count=self.reward.spaces["runtime"].runtime_count,
warmup_count=self.reward.spaces["runtime"].warmup_count,
estimator=self.reward.spaces["runtime"].estimator,
)
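# --- Illustrative sketch (not part of the original module) ---
# Optimizing for runtime with the point-estimate reward. The benchmark must be
# runnable; a cbench program is assumed here.
if __name__ == "__main__":
    import compiler_gym
    env = RuntimePointEstimateReward(
        compiler_gym.make("llvm-v0"), runtime_count=10, warmup_count=1
    )
    env.reset(benchmark="benchmark://cbench-v1/qsort")
    _, reward, _, _ = env.step(env.action_space.sample())
    print(f"Change in estimated runtime: {reward}")
    env.close()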
|
CompilerGym-development
|
compiler_gym/wrappers/llvm.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Iterable, Optional
from compiler_gym.envs import CompilerEnv
from compiler_gym.util.gym_type_hints import ActionType
from compiler_gym.wrappers.core import CompilerEnvWrapper
class TimeLimit(CompilerEnvWrapper):
"""A step-limited wrapper that is compatible with CompilerGym.
Example usage:
>>> env = TimeLimit(env, max_episode_steps=3)
>>> env.reset()
>>> _, _, done, _ = env.step(0)
>>> _, _, done, _ = env.step(0)
>>> _, _, done, _ = env.step(0)
>>> done
True
"""
def __init__(self, env: CompilerEnv, max_episode_steps: Optional[int] = None):
super().__init__(env=env)
if max_episode_steps is None and self.env.spec is not None:
max_episode_steps = env.spec.max_episode_steps
if self.env.spec is not None:
self.env.spec.max_episode_steps = max_episode_steps
self._max_episode_steps = max_episode_steps
self._elapsed_steps = None
def multistep(self, actions: Iterable[ActionType], **kwargs):
actions = list(actions)
assert (
self._elapsed_steps is not None
), "Cannot call env.step() before calling reset()"
observation, reward, done, info = self.env.multistep(actions, **kwargs)
self._elapsed_steps += len(actions)
if self._elapsed_steps >= self._max_episode_steps:
info["TimeLimit.truncated"] = not done
done = True
return observation, reward, done, info
def reset(self, **kwargs):
self._elapsed_steps = 0
return self.env.reset(**kwargs)
def fork(self) -> "TimeLimit":
"""Fork the wrapped environment.
The time limit state of the forked environment is the same as the source
state.
"""
fkd = type(self)(env=self.env.fork(), max_episode_steps=self._max_episode_steps)
fkd._elapsed_steps = self._elapsed_steps # pylint: disable=protected-access
return fkd
|
CompilerGym-development
|
compiler_gym/wrappers/time_limit.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections.abc import Mapping
from copy import deepcopy
from math import factorial
from numbers import Integral
from typing import Iterable, Optional, Union
import numpy as np
from gym.spaces import Space
from compiler_gym.envs import CompilerEnv
from compiler_gym.spaces import Box
from compiler_gym.spaces import Dict as DictSpace
from compiler_gym.spaces import (
Discrete,
NamedDiscrete,
Permutation,
Reward,
Scalar,
SpaceSequence,
)
from compiler_gym.spaces import Tuple as TupleSpace
from compiler_gym.util.gym_type_hints import ActionType, ObservationType, StepType
from compiler_gym.util.permutation import convert_number_to_permutation
from compiler_gym.views import ObservationSpaceSpec
from compiler_gym.wrappers.core import (
ActionWrapper,
CompilerEnvWrapper,
ObservationWrapper,
)
def convert_permutation_to_discrete_space(permutation: Permutation) -> Discrete:
return Discrete(name=permutation.name, n=factorial(permutation.size_range[0]))
def get_tile_size_discrete_space(min: Integral) -> NamedDiscrete:
items = [str(min * 2**i) for i in range(11)]
return NamedDiscrete(items=items, name=None)
def convert_tile_sizes_space(box: Box) -> TupleSpace:
spaces = [get_tile_size_discrete_space(box.low[i]) for i in range(box.shape[0])]
return TupleSpace(spaces=spaces, name=box.name)
def convert_bool_to_discrete_space(x: Scalar) -> NamedDiscrete:
if x.min or not x.max:
raise ValueError(
f"Invalid scalar range [{x.min}, {x.max}. [False, True] expected."
)
return NamedDiscrete(name=x.name, items=["False", "True"])
def convert_action_space(
action_space: SpaceSequence, max_subactions: Optional[Integral]
) -> Space:
template_space = deepcopy(action_space.space)
template_space["tile_options"][
"interchange_vector"
] = convert_permutation_to_discrete_space(
template_space["tile_options"]["interchange_vector"]
)
template_space["tile_options"]["tile_sizes"] = convert_tile_sizes_space(
template_space["tile_options"]["tile_sizes"]
)
template_space["tile_options"]["promote"] = convert_bool_to_discrete_space(
template_space["tile_options"]["promote"]
)
template_space["tile_options"][
"promote_full_tile"
] = convert_bool_to_discrete_space(
template_space["tile_options"]["promote_full_tile"]
)
template_space["vectorize_options"][
"unroll_vector_transfers"
] = convert_bool_to_discrete_space(
template_space["vectorize_options"]["unroll_vector_transfers"]
)
res = TupleSpace(name=None, spaces=[])
for i in range(action_space.size_range[0]):
res.spaces.append(deepcopy(template_space))
if max_subactions is None:
loop_bound = action_space.size_range[1]
else:
if action_space.size_range[0] > max_subactions:
raise ValueError(
f"max_subactions {max_subactions} must be greater than the minimum the environment expects {action_space.size_range[0]}."
)
loop_bound = max_subactions
for i in range(action_space.size_range[0], loop_bound):
res.spaces.append(
DictSpace(
name=None,
spaces={
"space": deepcopy(template_space),
"is_present": NamedDiscrete(name=None, items=["False", "True"]),
},
)
)
return res
_tile_size_discrete_values = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
def convert_matmul_op_action(action: ActionType) -> ActionType:
res = deepcopy(action)
res["tile_options"]["interchange_vector"] = convert_number_to_permutation(
action["tile_options"]["interchange_vector"], permutation_size=3
)
tile_sizes = action["tile_options"]["tile_sizes"]
res["tile_options"]["tile_sizes"] = np.array(
        [_tile_size_discrete_values[tile_sizes[i]] for i in range(len(tile_sizes))],
dtype=int,
)
res["tile_options"]["promote"] = bool(action["tile_options"]["promote"])
res["tile_options"]["promote_full_tile"] = bool(
action["tile_options"]["promote_full_tile"]
)
res["vectorize_options"]["unroll_vector_transfers"] = bool(
action["vectorize_options"]["unroll_vector_transfers"]
)
return res
def convert_action(action: ActionType) -> ActionType:
res = []
for a in action:
if not isinstance(a, Mapping) or "is_present" not in a:
res.append(convert_matmul_op_action(a))
elif a["is_present"] != 0:
res.append(convert_matmul_op_action(a["space"]))
return res
def convert_observation_space(space: Space) -> Scalar:
return Box(
name=space.name,
shape=[1],
low=space.scalar_range.min,
high=space.scalar_range.max,
dtype=float,
)
def convert_observation(observation: ObservationType) -> ObservationType:
return (
None if observation is None else np.array([np.median(observation)], dtype=float)
)
class MlirRlObservationWrapperEnv(ObservationWrapper):
@property
def observation_space(self):
return convert_observation_space(self.env.observation_space)
@observation_space.setter
def observation_space(
self, observation_space: Optional[Union[str, ObservationSpaceSpec]]
) -> None:
self.env.observation_space = observation_space
def convert_observation(self, observation: ObservationType) -> ObservationType:
return convert_observation(observation)
class MlirRlActionWrapperEnv(ActionWrapper):
def __init__(
self,
env: CompilerEnv,
max_subactions: Optional[Integral] = None,
):
super().__init__(env)
self.max_subactions = max_subactions
@property
def action_space(self) -> Space:
return convert_action_space(
self.env.action_space, max_subactions=self.max_subactions
)
@action_space.setter
def action_space(self, action_space: Optional[str]):
self.env.action_space = action_space
def action(self, action: ActionType) -> ActionType:
return convert_action(action)
class MlirRlErrorWrapperEnv(CompilerEnvWrapper):
def multistep(
self,
actions: Iterable[ActionType],
observation_spaces: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
reward_spaces: Optional[Iterable[Union[str, Reward]]] = None,
observations: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
rewards: Optional[Iterable[Union[str, Reward]]] = None,
) -> StepType:
observation, reward, done, info = super().multistep(
actions,
observation_spaces=observation_spaces,
reward_spaces=reward_spaces,
observations=observations,
rewards=rewards,
)
if "error_type" in info:
raise RuntimeError(str(info))
return observation, reward, done, info
def step(
self,
action: ActionType,
observation_spaces: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
reward_spaces: Optional[Iterable[Union[str, Reward]]] = None,
observations: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
rewards: Optional[Iterable[Union[str, Reward]]] = None,
):
return self.multistep(
[action], observation_spaces, reward_spaces, observations, rewards
)
def make_mlir_rl_wrapper_env(
env: CompilerEnv, max_subactions: Optional[Integral] = None
):
"""Create a wrapper for the MLIR environment that is suitable to interface with
off-the-shelf RL frameworks.
"""
env.reward_space = "runtime"
env.observation_space = "Runtime"
res = MlirRlActionWrapperEnv(env, max_subactions=max_subactions)
res = MlirRlObservationWrapperEnv(res)
res = MlirRlErrorWrapperEnv(res)
return res
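# --- Illustrative sketch (not part of the original module) ---
# Building the RL-friendly wrapper stack around an MLIR environment. The
# environment id "mlir-v0" is an assumption and may differ in practice.
if __name__ == "__main__":
    import compiler_gym
    env = make_mlir_rl_wrapper_env(compiler_gym.make("mlir-v0"))
    env.reset()
    print(env.action_space)  # A fixed-shape Tuple space suited to RL frameworks.
    env.close()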
|
CompilerGym-development
|
compiler_gym/wrappers/mlir.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This module implements a wrapper that counts calls to operations.
"""
from typing import Dict
from compiler_gym.envs import CompilerEnv
from compiler_gym.wrappers import CompilerEnvWrapper
class Counter(CompilerEnvWrapper):
"""A wrapper that counts the number of calls to its operations.
The counters are _not_ reset by :meth:`env.reset()
<compiler_gym.envs.CompilerEnv.reset>`.
Example usage:
>>> env = Counter(compiler_gym.make("llvm-v0"))
>>> env.counters
{"close": 0, "reset": 0, "step": 0, "fork": 0}
    >>> env.step(0)
    >>> env.counters
    {"close": 0, "reset": 0, "step": 1, "fork": 0}
:ivar counters: A dictionary of counters for different operation types.
:vartype counters: Dict[str, int]
"""
def __init__(self, env: CompilerEnv):
"""Constructor.
:param env: The environment to wrap.
"""
super().__init__(env)
self.counters: Dict[str, int] = {
"close": 0,
"reset": 0,
"step": 0,
"fork": 0,
}
def close(self) -> None:
self.counters["close"] += 1
self.env.close()
def reset(self, *args, **kwargs):
self.counters["reset"] += 1
return self.env.reset(*args, **kwargs)
def step(self, *args, **kwargs):
self.counters["step"] += 1
return self.env.step(*args, **kwargs)
def fork(self):
self.counters["fork"] += 1
return self.env.fork()
|
CompilerGym-development
|
compiler_gym/wrappers/counter.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from compiler_gym.envs import CompilerEnv
from compiler_gym.util.gym_type_hints import ActionType
from compiler_gym.wrappers.core import CompilerEnvWrapper
class ValidateBenchmarkAfterEveryStep(CompilerEnvWrapper):
"""Run the benchmark validation routine after every step of the environment
and end the episode with a penalty reward if validation fails.
"""
def __init__(
self,
env: CompilerEnv,
reward_penalty: float = -1e3,
):
"""Constructor.
:param env: The environment to wrap.
:param reward_penalty: The reward value that is returned by
:code:`step()` if validation fails.
"""
super().__init__(env)
self.reward_penalty = reward_penalty
def multistep(
self,
actions: List[ActionType],
observation_spaces=None,
reward_spaces=None,
observations=None,
rewards=None,
):
observation, reward, done, info = self.env.multistep(
actions,
observation_spaces=observation_spaces,
reward_spaces=reward_spaces,
observations=observations,
rewards=rewards,
)
# Early exit if environment reaches terminal state.
if done:
return observation, reward, done, info
try:
# Try and get an error from the validation callback.
info["error_details"] = next(self.env.benchmark.ivalidate(self.env))
return observation, self.reward_penalty, True, info
except StopIteration:
# No error, we're good.
return observation, reward, done, info
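# --- Illustrative sketch (not part of the original module) ---
# Ending an episode with a penalty as soon as the benchmark fails validation.
# The environment, reward space, and benchmark names are assumptions.
if __name__ == "__main__":
    import compiler_gym
    env = ValidateBenchmarkAfterEveryStep(
        compiler_gym.make("llvm-v0", reward_space="IrInstructionCount"),
        reward_penalty=-100.0,
    )
    env.reset(benchmark="benchmark://cbench-v1/crc32")
    _, reward, done, info = env.step(0)
    if done and "error_details" in info:
        print("Validation failed:", info["error_details"])
    env.close()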
|
CompilerGym-development
|
compiler_gym/wrappers/validation.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
import gym
from gym.envs.registration import register as gym_register
# A list of gym environment names defined by CompilerGym.
COMPILER_GYM_ENVS: List[str] = []
def make(id: str, **kwargs):
"""Equivalent to :code:`gym.make()`."""
return gym.make(id, **kwargs)
def _parse_version_string(version):
"""Quick and dirty <major>.<minor>.<micro> parser. Very hacky."""
components = version.split(".")
if len(components) != 3:
return None
try:
return tuple([int(x) for x in components])
except (TypeError, ValueError):
return None
def register(id: str, order_enforce: bool = False, **kwargs):
COMPILER_GYM_ENVS.append(id)
# As of gym==0.21.0 a new OrderEnforcing wrapper is enabled by default. Turn
# this off as CompilerEnv already enforces this and the wrapper obscures the
# docstrings of the base class.
gym_version = _parse_version_string(gym.__version__)
if gym_version and gym_version >= (0, 21):
kwargs["order_enforce"] = order_enforce
gym_register(id=id, **kwargs)
|
CompilerGym-development
|
compiler_gym/util/registration.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union
# A JSON dictionary.
JsonDictType = Dict[str, Any]
# A default value for the reward_space parameter in env.reset() and env.observation_space() functions.
class OptionalArgumentValue(Enum):
UNCHANGED = 1
# Type hints for the values returned by gym.Env.step().
ObservationType = TypeVar("ObservationType")
ActionType = TypeVar("ActionType")
RewardType = float
DoneType = bool
InfoType = JsonDictType
StepType = Tuple[
Optional[Union[ObservationType, List[ObservationType]]],
Optional[Union[RewardType, List[RewardType]]],
DoneType,
InfoType,
]
|
CompilerGym-development
|
compiler_gym/util/gym_type_hints.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging as logging_
import sys
from typing import Optional
def init_logging(level: int = logging_.INFO, logger: Optional[logging_.Logger] = None):
logger = logger or logging_.getLogger()
logger.setLevel(level)
handler = logging_.StreamHandler(sys.stdout)
handler.setLevel(level)
formatter = logging_.Formatter(
fmt="%(asctime)s %(name)s] %(message)s", datefmt="%m%d %H:%M:%S"
)
handler.setFormatter(formatter)
logger.addHandler(handler)
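# --- Illustrative sketch (not part of the original module) ---
# Configuring the root logger so that CompilerGym log messages are printed to
# stdout.
if __name__ == "__main__":
    init_logging(level=logging_.DEBUG)
    logging_.getLogger(__name__).info("Logging initialized")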
|
CompilerGym-development
|
compiler_gym/util/logging.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import deque
from typing import Iterable
def truncate(
string: str,
max_line_len: int = 60,
max_lines: int = 1,
tail: bool = False,
) -> str:
"""Truncate a string using ellipsis.
For multi-line inputs, each line is truncated independently.
For example:
>>> truncate("abcdefghijklmnop\n1234", max_line_len=10)
"abcdefg...\n1234"
:param string: The string to truncate.
:param max_line_len: The maximum number of characters in each line.
:param max_lines: The maximum number of lines in the output string.
:return: A (possibly truncated) string.
"""
return truncate_lines(
str(string).split("\n"),
max_line_len=max_line_len,
max_lines=max_lines,
tail=tail,
)
def truncate_lines(
lines: Iterable[str],
max_line_len: int = 60,
max_lines: int = 1,
tail: bool = False,
) -> str:
"""Truncate a sequence of lines, one string per line, using ellipsis.
Each line is truncated independently and combined into a single multi-line
string.
For example:
>>> truncate_lines(["abcdefghijklmnop", "1234"], max_line_len=10)
"abcdefg...\n1234"
    :param lines: An iterable of lines to truncate.
:param max_line_len: The maximum number of characters in each line.
:param max_lines: The maximum number of lines in the output string.
:return: A (possibly truncated) string.
"""
if max_line_len <= 3:
raise ValueError("Lines must be greater than 3 characeters long.")
def _truncate_line(line: str):
if len(line) > max_line_len:
return f"{line[:max_line_len-3]}..."
return line
def _consume(iterable, n):
"""Consume fist or last `n` elements from iterable."""
if tail:
yield from deque(iterable, n)
else:
for _ in range(n):
try:
yield next(iterable)
except StopIteration:
return
lines = iter(lines)
truncated_lines = [_truncate_line(str(ln)) for ln in _consume(lines, max_lines)]
# Truncate the final line if required.
try:
next(lines)
truncated_lines[-1] = _truncate_line(f"{truncated_lines[-1]}...")
except StopIteration:
pass
return "\n".join(truncated_lines)
|
CompilerGym-development
|
compiler_gym/util/truncate.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import csv
from io import StringIO
from typing import Any, Iterable, Optional
from tabulate import tabulate as tabulate_lib
def tabulate(
rows: Iterable[Iterable[Any]],
headers: Iterable[str],
tablefmt: Optional[str] = "grid",
) -> str:
"""A wrapper around the third-party tabulate function that adds support
    for tab- and comma-separated formats.
:param rows: The data to tabulate.
:param headers: A list of table headers.
:param tablefmt: The format of tables to print. For a full list of options,
see: https://github.com/astanin/python-tabulate#table-format.
:return: A formatted table as a string.
"""
if tablefmt == "tsv" or tablefmt == "csv":
sep = {"tsv": "\t", "csv": ","}[tablefmt]
buf = StringIO()
writer = csv.writer(buf, delimiter=sep)
writer.writerow([str(x) for x in headers])
for row in rows:
writer.writerow([str(x) for x in row])
return buf.getvalue()
else:
return tabulate_lib(
rows,
headers=headers,
tablefmt=tablefmt,
)
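if __name__ == "__main__":
    # Minimal usage sketch: render the same rows in the default grid format and
    # as comma-separated values. The row contents are illustrative only.
    rows = [["crc32", 1.23], ["sha", 4.56]]
    print(tabulate(rows, headers=["benchmark", "reward"]))
    print(tabulate(rows, headers=["benchmark", "reward"], tablefmt="csv"))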
|
CompilerGym-development
|
compiler_gym/util/tabulate.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import cpu_count
from threading import Lock
_executor_lock = Lock()
_executor = None
def get_thread_pool_executor() -> ThreadPoolExecutor:
"""Return a singleton :code:`ThreadPoolExecutor`.
This executor is intended to be used for multithreaded parallelism. The
maximum number of threads in the pool is equal to the number of cores on the
machine. This is based on the assumption that CompilerGym workloads are
typically CPU bound and not I/O bound, so the number of active threads
should correspond to the number of available cores.
:returns: A thread pool executor.
"""
with _executor_lock:
global _executor
if _executor is None:
_executor = ThreadPoolExecutor(max_workers=cpu_count())
return _executor
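if __name__ == "__main__":
    # Minimal usage sketch: submit a few CPU-bound calls to the shared pool.
    # Repeated calls to get_thread_pool_executor() return the same instance.
    executor = get_thread_pool_executor()
    futures = [executor.submit(pow, 2, n) for n in range(4)]
    print([future.result() for future in futures])
    assert executor is get_thread_pool_executor()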
|
CompilerGym-development
|
compiler_gym/util/thread_pool.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Module for resolving a runfiles path."""
import os
from datetime import datetime
from getpass import getuser
from pathlib import Path
from threading import Lock
from time import sleep
from typing import Optional
# NOTE(cummins): Moving this file may require updating this relative path.
_PACKAGE_ROOT = Path(os.path.join(os.path.dirname(__file__), "../../")).resolve(
strict=True
)
_CREATE_LOGGING_DIR_LOCK = Lock()
def runfiles_path(relpath: str) -> Path:
"""Resolve the path to a runfiles data path.
    No checks are made to ensure that the path, or the containing directory,
    exists.
Use environment variable COMPILER_GYM_RUNFILES=/path/to/runfiles if running
outside of bazel.
:param relpath: The relative path within the runfiles tree.
:return: An absolute path.
"""
# There are three ways of determining a runfiles path:
# 1. Set the COMPILER_GYM_RUNFILES environment variable.
# 2. Using the rules_python library that is provided by bazel. This will
# fail if not being executed within a bazel sandbox.
# 3. Computing the path relative to the location of this file. This is the
# fallback approach that is used for when the code has been installed
# by setuptools.
runfiles_path = os.environ.get("COMPILER_GYM_RUNFILES")
if runfiles_path:
return Path(runfiles_path) / relpath
else:
try:
from rules_python.python.runfiles import runfiles
return Path(
runfiles.Create().Rlocation(
"CompilerGym" if relpath == "." else f"CompilerGym/{relpath}"
)
)
except (ModuleNotFoundError, TypeError):
return _PACKAGE_ROOT / relpath
def site_data_path(relpath: str) -> Path:
"""Return a path within the site data directory.
CompilerGym uses a directory to store persistent site data files in, such as
benchmark datasets. The default location is
:code:`~/.local/share/compiler_gym`. Set the environment variable
:code:`$COMPILER_GYM_SITE_DATA` to override this default location.
    No checks are made to ensure that the path, or the containing directory,
    exists.
Files in this directory are intended to be long lived (this is not a cache),
but it is safe to delete this directory, so long as no CompilerGym
environments are running.
:param relpath: The relative path within the site data tree.
:return: An absolute path.
"""
# NOTE(cummins): This function has a matching implementation in the C++
# sources, compiler_gym::service::getSiteDataPath(). Any change to behavior
# here must be reflected in the C++ version.
forced = os.environ.get("COMPILER_GYM_SITE_DATA")
if forced:
return Path(forced) / relpath
elif os.environ.get("HOME"):
return Path("~/.local/share/compiler_gym").expanduser() / relpath
else:
return Path(f"/tmp/compiler_gym_{getuser()}/site_data") / relpath
def cache_path(relpath: str) -> Path:
"""Return a path within the cache directory.
CompilerGym uses a directory to cache files in, such as downloaded content.
The default location for this cache is :code:`~/.local/cache/compiler_gym`.
Set the environment variable :code:`$COMPILER_GYM_CACHE` to override this
default location.
It is safe to delete this directory, so long as no CompilerGym environments
are running.
    No checks are made to ensure that the path, or the containing directory,
    exists.
:param relpath: The relative path within the cache tree.
:return: An absolute path.
"""
forced = os.environ.get("COMPILER_GYM_CACHE")
if forced:
return Path(forced) / relpath
elif os.environ.get("HOME"):
return Path("~/.local/cache/compiler_gym").expanduser() / relpath
else:
return Path(f"/tmp/compiler_gym_{getuser()}/cache") / relpath
def transient_cache_path(relpath: str) -> Path:
"""Return a path within the transient cache directory.
The transient cache is a directory used to store files that do not need to
persist beyond the lifetime of the current process. When available, the
temporary filesystem :code:`/dev/shm` will be used. Else,
:meth:`cache_path() <compiler_gym.cache_path>` is used as a fallback. Set
the environment variable :code:`$COMPILER_GYM_TRANSIENT_CACHE` to override
the default location.
Files in this directory are not meant to outlive the lifespan of the
CompilerGym environment that creates them. It is safe to delete this
directory, so long as no CompilerGym environments are running.
    No checks are made to ensure that the path, or the containing directory,
    exists.
:param relpath: The relative path within the cache tree.
:return: An absolute path.
"""
forced = os.environ.get("COMPILER_GYM_TRANSIENT_CACHE")
if forced:
return Path(forced) / relpath
elif Path("/dev/shm").is_dir():
return Path(f"/dev/shm/compiler_gym_{getuser()}") / relpath
else:
# Fallback to using the regular cache.
return cache_path(relpath)
def create_user_logs_dir(name: str, dir: Optional[Path] = None) -> Path:
"""Create a directory for writing logs to.
    Defaults to the ~/logs/compiler_gym base directory; set the
:code:`COMPILER_GYM_LOGS` environment variable to override this.
Example use:
>>> create_user_logs_dir("my_experiment")
Path("~/logs/compiler_gym/my_experiment/2020-11-03T11:00:00")
:param name: The grouping name for the logs.
:return: A unique timestamped directory for logging. This directory exists.
"""
base_dir = Path(
os.environ.get("COMPILER_GYM_LOGS", dir or "~/logs/compiler_gym")
).expanduser()
group_dir = base_dir / name
with _CREATE_LOGGING_DIR_LOCK:
# Require that logging directory timestamps are unique by waiting until
# a unique timestamp is generated.
while True:
now = datetime.now()
subdirs = now.strftime("%Y-%m-%d/%H-%M-%S")
logs_dir = group_dir / subdirs
if logs_dir.is_dir():
sleep(0.3)
continue
logs_dir.mkdir(parents=True, exist_ok=False)
# Create a symlink to the "latest" logs results.
if (group_dir / "latest").exists():
os.unlink(group_dir / "latest")
os.symlink(subdirs, group_dir / "latest")
return logs_dir
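if __name__ == "__main__":
    # Minimal usage sketch: resolve (but do not create) the standard storage
    # locations. The resolved paths depend on the environment variables
    # documented above; the relative paths below are illustrative only.
    print(site_data_path("llvm-v0"))
    print(cache_path("downloads"))
    print(transient_cache_path("."))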
|
CompilerGym-development
|
compiler_gym/util/runfiles_path.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from time import time
from typing import Callable, Optional
from absl.logging import skip_log_prefix
def humanize_duration(seconds: float) -> str:
"""Format a time for humans."""
value = abs(seconds)
sign = "-" if seconds < 0 else ""
if value < 1e-6:
return f"{sign}{value*1e9:.1f}ns"
elif value < 1e-3:
return f"{sign}{value*1e6:.1f}us"
if value < 1:
return f"{sign}{value*1e3:.1f}ms"
elif value < 60:
return f"{sign}{value:.3f}s"
else:
return f"{sign}{value:.1f}s"
def humanize_duration_hms(seconds: float) -> str:
"""Format a time in to :code:`hours:minutes:seconds` format."""
seconds = int(seconds)
return f"{seconds // 3600}:{(seconds % 3600) // 60:02d}:{seconds % 60:02d}"
class Timer:
"""A very simple scoped timer.
Example:
>>> with Timer() as timer:
time.sleep(10)
print(f"That took {timer}")
That took 10.0s
If you're feeling even more terse:
>>> with Timer("Did stuff"):
# do stuff ...
Did stuff in 5.6ms
You can control where the print out should be logged to:
>>> with Timer("Did stuff", logging.getLogger().info)
# do stuff ...
[log] Did stuff in 11us
"""
def __init__(
self, label: Optional[str] = None, print_fn: Callable[[str], None] = print
):
self._start_time = None
self._elapsed = None
self.label = label
self.print_fn = print_fn
def reset(self) -> "Timer":
self._start_time = time()
return self
def __enter__(self) -> "Timer":
return self.reset()
@property
def time(self) -> float:
if self._elapsed:
return self._elapsed
elif self._start_time:
return time() - self._start_time
else:
return 0
@skip_log_prefix
def __exit__(self, *args):
self._elapsed = time() - self._start_time
if self.label:
self.print_fn(f"{self.label} in {self}")
def __str__(self):
return humanize_duration(self.time)
|
CompilerGym-development
|
compiler_gym/util/timer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hashlib
import logging
from time import sleep
from typing import List, Optional, Union
import fasteners
import requests
import compiler_gym.errors
from compiler_gym.util.filesystem import atomic_file_write
from compiler_gym.util.runfiles_path import cache_path
from compiler_gym.util.truncate import truncate
logger = logging.getLogger(__name__)
# Deprecated since v0.2.4.
# This type is for backwards compatibility that will be removed in a future release.
# Please, use errors from `compiler_gym.errors`.
DownloadFailed = compiler_gym.errors.DownloadFailed
# Deprecated since v0.2.4.
# This type is for backwards compatibility that will be removed in a future release.
# Please, use errors from `compiler_gym.errors`.
TooManyRequests = compiler_gym.errors.TooManyRequests
def _get_url_data(url: str) -> bytes:
try:
req = requests.get(url)
except IOError as e:
# Re-cast an error raised by requests library to DownloadFailed type.
raise DownloadFailed(str(e)) from e
try:
if req.status_code == 429:
raise TooManyRequests("429 Too Many Requests")
elif req.status_code != 200:
raise DownloadFailed(f"GET returned status code {req.status_code}: {url}")
return req.content
finally:
req.close()
def _do_download_attempt(url: str, sha256: Optional[str]) -> bytes:
logger.info("Downloading %s ...", url)
content = _get_url_data(url)
if sha256:
# Validate the checksum.
checksum = hashlib.sha256()
checksum.update(content)
actual_sha256 = checksum.hexdigest()
if sha256 != actual_sha256:
raise DownloadFailed(
f"Checksum of download does not match:\n"
f"Url: {url}\n"
f"Expected: {sha256}\n"
f"Actual: {actual_sha256}"
)
# Cache the downloaded file.
path = cache_path(f"downloads/{sha256}")
path.parent.mkdir(parents=True, exist_ok=True)
with atomic_file_write(path, fileobj=True) as f:
f.write(content)
logger.debug(f"Downloaded {url}")
return content
def _download(urls: List[str], sha256: Optional[str], max_retries: int) -> bytes:
if not urls:
raise ValueError("No URLs to download")
# Cache hit.
if sha256 and cache_path(f"downloads/{sha256}").is_file():
with open(str(cache_path(f"downloads/{sha256}")), "rb") as f:
return f.read()
# A retry loop, and loop over all urls provided.
last_exception = None
wait_time = 10
for _ in range(max(max_retries, 1)):
for url in urls:
try:
return _do_download_attempt(url, sha256)
except TooManyRequests as e:
last_exception = e
logger.info(
"Download attempt failed with Too Many Requests error. "
"Watiting %.1f seconds",
wait_time,
)
sleep(wait_time)
wait_time *= 1.5
except DownloadFailed as e:
logger.info("Download attempt failed: %s", truncate(e))
last_exception = e
raise last_exception
def download(
urls: Union[str, List[str]], sha256: Optional[str] = None, max_retries: int = 5
) -> bytes:
"""Download a file and return its contents.
If :code:`sha256` is provided and the download succeeds, the file contents
are cached locally in :code:`$cache_path/downloads/$sha256`. See
:func:`compiler_gym.cache_path`.
An inter-process lock ensures that only a single call to this function may
execute at a time.
:param urls: Either a single URL of the file to download, or a list of URLs
to download.
:param sha256: The expected sha256 checksum of the file.
:return: The contents of the downloaded file.
    :raises IOError: If the download fails, or if the downloaded content does
        not match the expected :code:`sha256` checksum.
"""
# Convert a singular string into a list of strings.
urls = [urls] if not isinstance(urls, list) else urls
# Only a single process may download a file at a time. The idea here is to
# prevent redundant downloads when multiple simultaneous processes all try
# and download the same resource. If we don't have an ID for the resource
# then we just lock globally to reduce NIC thrashing.
if sha256:
with fasteners.InterProcessLock(cache_path(f"downloads/.{sha256}.lock")):
return _download(urls, sha256, max_retries)
else:
with fasteners.InterProcessLock(cache_path("downloads/.lock")):
return _download(urls, None, max_retries)
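if __name__ == "__main__":
    # Minimal usage sketch: fetch a file and report its size. The URL below is
    # a placeholder; substitute a real URL and, optionally, its sha256 hex
    # digest (which enables local caching) before running.
    data = download("https://example.com/archive.tar.bz2", sha256=None)
    print(f"Downloaded {len(data)} bytes")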
|
CompilerGym-development
|
compiler_gym/util/download.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Utilities for working with the filesystem."""
import os
import tempfile
from contextlib import contextmanager
from pathlib import Path
from typing import BinaryIO, List, TextIO, Union
from compiler_gym.util import runfiles_path
def get_storage_paths() -> List[Path]:
"""Return the list of paths used by CompilerGym for filesystem storage.
:return: A list of filesystem paths that CompilerGym uses to store files.
"""
return sorted(
{
runfiles_path.cache_path("."),
runfiles_path.transient_cache_path("."),
runfiles_path.site_data_path("."),
}
)
@contextmanager
def atomic_file_write(
path: Path, fileobj: bool = False, mode: str = "wb"
) -> Union[Path, TextIO, BinaryIO]:
"""A context manager for atomically writing to a file.
Provides a lock-free mechanism for ensuring concurrent safe writes to a
filesystem path. Use this to prevent filesystem races when multiple callers
may be writing to the same file. This is best suited for cases where the
    chance of a race is low, as it does not prevent redundant writes. It simply
guarantees that each write is atomic.
This relies on POSIX atomic file renaming.
Use it as a context manager that yields the path of a temporary file to
write to:
>>> outpath = Path("some_file.txt")
>>> with atomic_file_write(outpath) as tmp_path:
... with open(tmp_path, "w") as f:
... f.write("Hello\n")
>>> outpath.is_file()
True
It can also return a file object if passed the :code:`fileobj` argument:
>>> outpath = Path("some_file.txt")
>>> with atomic_file_write(outpath, fileobj=True) as f:
... f.write(file_data)
>>> outpath.is_file()
True
    :param path: The path to atomically write to.
:param fileobj: If :code:`True`, return a file object in the given
:code:`mode`.
:param mode: The file mode to use when returning a file object.
:returns: The path of a temporary file to write to.
"""
with tempfile.NamedTemporaryFile(dir=path.parent, delete=False, mode=mode) as tmp:
tmp_path = Path(tmp.name)
try:
yield tmp if fileobj else tmp_path
finally:
if tmp_path.is_file():
os.rename(tmp_path, path)
def is_in_memory(path: Path) -> bool:
"""Determine if a path's mountpoint is in-memory.
:param path: A filesystem path.
:returns: True if the path is in-memory.
"""
# TODO(cummins): This is totally hacky and intended to work only for the
# transient_cache_path() case. There will be false negatives, though not
# likely false positives.
return str(path).startswith("/dev/shm")
def is_within_directory(directory, target) -> bool:
abs_directory = os.path.abspath(directory)
abs_target = os.path.abspath(target)
prefix = os.path.commonprefix([abs_directory, abs_target])
return prefix == abs_directory
def extract_tar(tar, path=".", members=None, *, numeric_owner=False) -> None:
for member in tar.getmembers():
member_path = os.path.join(path, member.name)
if not is_within_directory(path, member_path):
raise Exception("Attempted Path Traversal in Tar File")
tar.extractall(path, members, numeric_owner=numeric_owner)
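if __name__ == "__main__":
    # Minimal usage sketch: atomically write a small text file into a temporary
    # directory, then print the storage paths used by CompilerGym.
    with tempfile.TemporaryDirectory() as d:
        out = Path(d) / "data.txt"
        with atomic_file_write(out, fileobj=True, mode="w") as f:
            f.write("hello\n")
        print(out.read_text())
    print(get_storage_paths())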
|
CompilerGym-development
|
compiler_gym/util/filesystem.py
|
CompilerGym-development
|
compiler_gym/util/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This module defines utilities for minimizing trajectories.
A trajectory is the sequence of actions applied to an environment. The goal of
trajectory minimization is to identify the shortest subregion of a trajectory
such that some hypothesis still holds. A hypothesis is a boolean test on an
environment, for example, a hypothesis could be that :code:`env.validate()`
returns an error.
"""
import logging
import random
from math import ceil, log
from typing import Callable, Iterable
from compiler_gym.util.truncate import truncate
logger = logging.getLogger(__name__)
class MinimizationError(OSError):
"""Error raised if trajectory minimization fails."""
# A hypothesis is a callback that accepts as input an environment in a given
# state and returns true if a particular hypothesis holds, else false.
Hypothesis = Callable[["CompilerEnv"], bool] # noqa: F821
def environment_validation_fails(env: "CompilerEnv") -> bool: # noqa: F821
"""A hypothesis that holds true if environment validation fails."""
validation_result = env.validate()
logger.debug(truncate(str(validation_result), max_lines=1, max_line_len=120))
return not validation_result.okay()
def _apply_and_test(env, actions, hypothesis, flakiness) -> bool:
"""Run specific actions on environment and return whether hypothesis holds."""
env.reset(benchmark=env.benchmark)
for _ in range(flakiness):
logger.debug("Applying %d actions ...", len(actions))
_, _, done, info = env.multistep(actions)
if done:
raise MinimizationError(
f"Failed to replay actions: {info.get('error_details', '')}"
)
logger.debug("Applied %d actions", len(actions))
if hypothesis(env):
return True
return False
def bisect_trajectory(
env: "CompilerEnv", # noqa: F821
hypothesis: Hypothesis = environment_validation_fails,
reverse: bool = False,
flakiness: int = 1,
) -> Iterable["CompilerEnv"]: # noqa: F821
"""Run a binary search to remove the suffix or prefix of a trjectory.
Requires worst-case O(log n) evaluation rounds, where n is the length of the
trajectory.
:param env: An environment whose action trajectory should be minimized.
:param hypothesis: The hypothesis that is used to determine if a trajectory
is valid. A callback that accepts as argument the :code:`env:`
instance and returns true if the hypothesis holds, else false. The
hypothesis must hold on the initial trajectory.
:param reverse: If :code:`True`, minimize the trajectory from the front
        (i.e. the prefix). Else, minimization occurs from the back (i.e. the
suffix).
:param flakiness: The maximum number of times the hypothesis is repeated
to check if it holds. If the hypothesis returns :code:`True` within this
many iterations, it is said to hold. It needs to only return
:code:`True` once.
:returns: A generator that yields the input environment every time the
trajectory is successfully reduced.
:raises MinimizationError: If the environment action replay fails, or if
the hypothesis does not hold on the initial trajectory.
"""
def apply_and_test(actions):
return _apply_and_test(env, actions, hypothesis, flakiness)
all_actions = env.actions.copy()
# No actions to minimize.
if not all_actions:
return env
logger.info(
"%sisecting sequence of %d actions",
"Reverse b" if reverse else "B",
len(all_actions),
)
if not apply_and_test(all_actions):
raise MinimizationError(
"Hypothesis failed on the initial state! The hypothesis must hold for the first state."
)
left = 0
right = len(all_actions) - 1
step = 0
while right >= left:
step += 1
remaining_steps = int(log(max(right - left, 1), 2))
mid = left + ((right - left) // 2)
logger.debug(
"Bisect step=%d, left=%d, right=%d, mid=%d", step, left, right, mid
)
actions = all_actions[mid:] if reverse else all_actions[:mid]
if apply_and_test(actions):
logger.info(
"π’ Hypothesis holds at num_actions=%d, remaining bisect steps=%d",
mid,
remaining_steps,
)
yield env
if reverse:
left = mid + 1
else:
right = mid - 1
else:
logger.info(
"π΄ Hypothesis does not hold at num_actions=%d, remaining bisect steps=%d",
mid,
remaining_steps,
)
if reverse:
right = mid - 1
else:
left = mid + 1
mid = max(left, right) - 1 if reverse else min(left, right) + 1
if (reverse and mid < 0) or (not reverse and mid >= len(all_actions)):
actions = all_actions
logger.info("Failed to reduce trajectory length using bisection")
else:
actions = all_actions[mid:] if reverse else all_actions[:mid]
logger.info(
"Determined that action %d of %d is the first at which the hypothesis holds: %s",
mid,
len(all_actions),
env.action_space.flags[all_actions[mid]],
)
if not apply_and_test(actions):
raise MinimizationError("Post-bisect sanity check failed!")
yield env
def random_minimization(
env: "CompilerEnv", # noqa: F821
hypothesis: Hypothesis = environment_validation_fails,
num_steps_ratio_multiplier: float = 5,
init_discard_ratio: float = 0.75,
discard_ratio_decay: float = 0.75,
min_trajectory_len: int = 5,
flakiness: int = 1,
) -> Iterable["CompilerEnv"]: # noqa: F821
"""Run an iterative process of randomly removing actions to minimize a
trajectory.
For each round of minimization, a number of actions are discarded randomly
and the hypothesis is tested. If the hypothesis still holds with those
actions removed, the minimization proceeds. Else the actions are re-inserted
    into the trajectory and a different subset of actions is removed. After a
    failure, the number of actions discarded in the next round is reduced by the
    :code:`discard_ratio_decay` factor.
Performs up to O(num_steps_ratio_multiplier * log n) evaluation rounds,
where n is the length of the trajectory.
:param env: An environment whose action trajectory should be minimized.
:param hypothesis: The hypothesis that is used to determine if a trajectory
is valid. A callback that accepts as argument the :code:`env:`
instance and returns true if the hypothesis holds, else false. The
hypothesis must hold on the initial trajectory.
:param num_steps_ratio_multiplier: A multiplier for the number of rounds of
minimization to perform, using log(n) the length of the trajectory as
the factor.
:param init_discard_ratio: The number of actions that will be randomly
discarded, as a multiplier of the length of the trajectory.
:param discard_ratio_decay: The ratio of decay for the discard ratio on
failure.
:param min_trajectory_len: The minimum number of actions in the trajectory
for minimization to run. If the trajectory contains fewer than this many
actions, minimization stops.
:param flakiness: The maximum number of times the hypothesis is repeated
to check if it holds. If the hypothesis returns :code:`True` within this
many iterations, it is said to hold. It needs to only return
:code:`True` once.
:returns: A generator that yields the input environment every time the
trajectory is successfully reduced.
:raises MinimizationError: If the environment action replay fails, or if
the hypothesis does not hold on the initial trajectory.
"""
def apply_and_test(actions):
return _apply_and_test(env, actions, hypothesis, flakiness)
actions = env.actions.copy()
if not apply_and_test(actions):
raise MinimizationError(
"Hypothesis failed on the initial state! The hypothesis must hold for the first state."
)
max_num_steps = int(log(len(actions), 2) * num_steps_ratio_multiplier)
num_steps = 0
discard_ratio = init_discard_ratio
while len(actions) >= min_trajectory_len and num_steps < max_num_steps:
num_steps += 1
num_to_remove = int(ceil(len(actions) * discard_ratio))
candidate_actions = actions.copy()
# Delete actions randomly.
for _ in range(num_to_remove):
del candidate_actions[random.randint(0, len(candidate_actions) - 1)]
if apply_and_test(candidate_actions):
logger.info(
"π’ Hypothesis holds with %s of %s actions randomly removed, continuing",
num_to_remove,
len(actions),
)
actions = candidate_actions
discard_ratio = init_discard_ratio
yield env
else:
logger.info(
"π΄ Hypothesis does not hold with %s of %s actions randomly removed, rolling back",
num_to_remove,
len(actions),
)
discard_ratio *= discard_ratio_decay
if num_to_remove == 1:
logger.info(
"Terminating random minimization after failing with only a single action removed"
)
break
if not apply_and_test(actions):
raise MinimizationError("Post-minimization sanity check failed!")
yield env
def minimize_trajectory_iteratively(
env: "CompilerEnv", # noqa: F821
hypothesis: Hypothesis = environment_validation_fails,
flakiness: int = 1,
) -> Iterable["CompilerEnv"]: # noqa: F821
"""Minimize a trajectory by remove actions, one at a time, until a minimal
trajectory is reached.
Performs up to O(n * n / 2) evaluation rounds, where n is the length of the
trajectory.
:param env: An environment whose action trajectory should be minimized.
:param hypothesis: The hypothesis that is used to determine if a trajectory
is valid. A callback that accepts as argument the :code:`env:`
instance and returns true if the hypothesis holds, else false. The
hypothesis must hold on the initial trajectory.
:param flakiness: The maximum number of times the hypothesis is repeated
to check if it holds. If the hypothesis returns :code:`True` within this
many iterations, it is said to hold. It needs to only return
:code:`True` once.
:returns: A generator that yields the input environment every time the
trajectory is successfully reduced.
:raises MinimizationError: If the environment action replay fails, or if
the hypothesis does not hold on the initial trajectory.
"""
def apply_and_test(actions):
return _apply_and_test(env, actions, hypothesis, flakiness)
all_actions = env.actions.copy()
init_num_actions = len(all_actions)
if not all_actions: # Nothing to minimize.
return
if not apply_and_test(all_actions):
raise MinimizationError(
"Hypothesis failed on the initial state! The hypothesis must hold for the first state."
)
pass_num = 0
actions_removed = 0
action_has_been_pruned = True
# Outer loop. Repeat iterative reduction until no change is made.
while action_has_been_pruned and len(all_actions) > 1:
pass_num += 1
action_has_been_pruned = False
action_mask = [True] * len(all_actions)
logger.info("Minimization pass on sequence of %d actions", len(all_actions))
# Inner loop. Go through every action and see if it can be removed.
for i in range(len(action_mask)):
action_mask[i] = False
action_name = env.action_space.flags[all_actions[i]]
actions = [action for action, mask in zip(all_actions, action_mask) if mask]
if apply_and_test(actions):
logger.info(
"π’ Hypothesis holds with action %s removed, %d actions remaining",
action_name,
sum(action_mask),
)
action_has_been_pruned = True
actions_removed += 1
yield env
else:
action_mask[i] = True
logger.info(
"π΄ Hypothesis does not hold with action %s removed, %d actions remaining",
action_name,
sum(action_mask),
)
all_actions = [action for action, mask in zip(all_actions, action_mask) if mask]
logger.info(
"Minimization halted after %d passes, %d of %d actions removed",
pass_num,
actions_removed,
init_num_actions,
)
if not apply_and_test(all_actions):
raise ValueError("Post-bisect sanity check failed!")
yield env
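if __name__ == "__main__":
    # Minimal usage sketch, assuming the LLVM environment is installed. The
    # always-true hypothesis below is a stand-in for a real predicate such as
    # environment_validation_fails; it exists only to demonstrate the generator
    # protocol, so the trajectory reduces to nothing.
    import compiler_gym

    with compiler_gym.make("llvm-v0") as env:
        env.reset(benchmark="benchmark://cbench-v1/crc32")
        for _ in range(8):
            env.step(env.action_space.sample())
        for _ in bisect_trajectory(env, hypothesis=lambda e: True):
            print(f"Trajectory reduced to {len(env.actions)} actions")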
|
CompilerGym-development
|
compiler_gym/util/minimize_trajectory.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""A context manager to set a temporary working directory."""
import os
import tempfile
from contextlib import contextmanager
from pathlib import Path
from typing import Optional, Union
@contextmanager
def temporary_working_directory(directory: Optional[Union[str, Path]] = None) -> Path:
"""Temporarily set the working directory.
This function provides a way to set the working directory within the
scope of a "with statement". Example usage:
.. code-block:: python
print(os.getcwd()) # /tmp/foo
with temporary_working_directory("/tmp/bar"):
# Now in scope of new working directory.
print(os.getcwd()) # /tmp/bar
# Return to original working directory.
print(os.getcwd()) # /tmp/foo
:param directory: A directory to set as the temporary working directory. If
not provided, a temporary directory is created and deleted once out of
scope.
:return: The temporary working directory.
"""
old_working_directory = os.getcwd()
try:
if directory:
os.chdir(directory)
yield Path(directory)
else:
with tempfile.TemporaryDirectory(prefix="compiler_gym-") as d:
os.chdir(d)
yield Path(d)
finally:
os.chdir(old_working_directory)
|
CompilerGym-development
|
compiler_gym/util/temporary_working_directory.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Utilities for parallelization / threading / concurrency."""
from itertools import tee
from threading import Lock
from typing import Any, Iterable
class _ThreadSafeTee:
"""An extension of :code:`itertools.tee()` that uses a lock to ensure
exclusive access to the iterator.
"""
def __init__(self, tee_obj, lock):
self.tee_obj = tee_obj
self.lock = lock
def __iter__(self):
return self
def __next__(self):
with self.lock:
return next(self.tee_obj)
def __copy__(self):
return _ThreadSafeTee(self.tee_obj.__copy__(), self.lock)
def thread_safe_tee(iterable: Iterable[Any], n: int = 2):
"""An extension of :code:`itertools.tee()` that yields thread-safe iterators."""
lock = Lock()
return tuple(_ThreadSafeTee(tee_obj, lock) for tee_obj in tee(iterable, n))
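if __name__ == "__main__":
    # Minimal usage sketch: two threads each consume their own tee of a single
    # shared generator; the lock serializes access to the underlying iterator.
    from threading import Thread

    def consume(iterator, out):
        out.extend(iterator)

    left, right = thread_safe_tee(iter(range(100)))
    left_items, right_items = [], []
    threads = [
        Thread(target=consume, args=(left, left_items)),
        Thread(target=consume, args=(right, right_items)),
    ]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
    assert left_items == right_items == list(range(100))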
|
CompilerGym-development
|
compiler_gym/util/parallelization.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from numbers import Integral
from typing import List
import numpy as np
def convert_number_to_permutation(
n: Integral, permutation_size: Integral
) -> List[Integral]:
m = n
res = np.zeros(permutation_size, dtype=type(permutation_size))
elements = np.arange(permutation_size, dtype=type(permutation_size))
for i in range(permutation_size):
j = m % (permutation_size - i)
m = m // (permutation_size - i)
res[i] = elements[j]
elements[j] = elements[permutation_size - i - 1]
return res
def convert_permutation_to_number(permutation: List[Integral]) -> Integral:
pos = np.arange(len(permutation), dtype=int)
elements = np.arange(len(permutation), dtype=int)
m = 1
res = 0
for i in range(len(permutation) - 1):
res += m * pos[permutation[i]]
m = m * (len(permutation) - i)
pos[elements[len(permutation) - i - 1]] = pos[permutation[i]]
elements[pos[permutation[i]]] = elements[len(permutation) - i - 1]
return res
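if __name__ == "__main__":
    # Minimal round-trip sketch: encode a permutation as a single integer and
    # decode it back. The permutation below is illustrative only.
    permutation = [2, 0, 3, 1]
    encoded = convert_permutation_to_number(permutation)
    decoded = convert_number_to_permutation(encoded, permutation_size=4)
    assert list(decoded) == permutation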
|
CompilerGym-development
|
compiler_gym/util/permutation.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import shlex
import sys
from typing import Any, Iterable
class ShellFormatCodes:
"""Shell escape codes for pretty-printing."""
PURPLE = "\033[95m"
CYAN = "\033[96m"
DARKCYAN = "\033[36m"
BLUE = "\033[94m"
GREEN = "\033[92m"
YELLOW = "\033[93m"
RED = "\033[91m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
END = "\033[0m"
def emph(stringable: Any) -> str:
"""Emphasize a string."""
return f"{ShellFormatCodes.BOLD}{ShellFormatCodes.BLUE}{stringable}{ShellFormatCodes.END}"
def plural(quantity: int, singular: str, plural: str) -> str:
"""Return the singular or plural word."""
return singular if quantity == 1 else plural
def indent(string: str, n=4) -> str:
"""Indent a multi-line string by given number of spaces."""
return "\n".join(" " * n + x for x in str(string).split("\n"))
def join_cmd(cmd: Iterable[str]) -> str:
"""Join a list of command line arguments into a single string.
This is intended for logging purposes only. It does not provide any safety
guarantees.
"""
if sys.version_info >= (3, 8, 0):
return shlex.join(cmd)
return " ".join(cmd)
|
CompilerGym-development
|
compiler_gym/util/shell_format.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
def geometric_mean(array_like):
"""Zero-length-safe geometric mean."""
values = np.asarray(array_like)
if not values.size:
return 0
# Shortcut to return 0 when any element of the input is not positive.
if not np.all(values > 0):
return 0
a = np.log(values)
return np.exp(a.sum() / len(a))
def arithmetic_mean(array_like):
"""Zero-length-safe arithmetic mean."""
values = np.asarray(array_like)
if not values.size:
return 0
return values.mean()
def stdev(array_like):
"""Zero-length-safe standard deviation."""
values = np.asarray(array_like)
if not values.size:
return 0
return values.std()
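if __name__ == "__main__":
    # Minimal usage sketch: the zero-length-safe statistics return 0 for empty
    # inputs instead of raising or propagating NaN.
    print(geometric_mean([1, 2, 4]))  # ~2.0
    print(arithmetic_mean([]))  # 0
    print(stdev([3, 3, 3]))  # 0.0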
|
CompilerGym-development
|
compiler_gym/util/statistics.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import subprocess
import sys
from contextlib import contextmanager
from signal import Signals
from subprocess import Popen as _Popen
from typing import List
def run_command(cmd: List[str], timeout: int):
with Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
) as process:
stdout, stderr = process.communicate(timeout=timeout)
if process.returncode:
returncode = process.returncode
try:
# Try and decode the name of a signal. Signal returncodes
# are negative.
returncode = f"{returncode} ({Signals(abs(returncode)).name})"
except ValueError:
pass
raise OSError(
f"Compilation job failed with returncode {returncode}\n"
f"Command: {' '.join(cmd)}\n"
f"Stderr: {stderr.strip()}"
)
return stdout
def communicate(process, input=None, timeout=None):
"""subprocess.communicate() which kills subprocess on timeout."""
try:
return process.communicate(input=input, timeout=timeout)
except subprocess.TimeoutExpired:
# kill() was added in Python 3.7.
if sys.version_info >= (3, 7, 0):
process.kill()
else:
process.terminate()
# Wait for shutdown to complete.
try:
process.communicate(timeout=timeout)
except subprocess.TimeoutExpired:
pass # Stubborn process won't die, nothing can be done.
raise
@contextmanager
def Popen(*args, **kwargs):
"""subprocess.Popen() with resilient process termination at end of scope."""
with _Popen(*args, **kwargs) as process:
try:
yield process
finally:
# Process has not yet terminated, kill it.
if process.poll() is None:
# kill() was added in Python 3.7.
if sys.version_info >= (3, 7, 0):
process.kill()
else:
process.terminate()
# Wait for shutdown to complete.
try:
process.communicate(timeout=60)
except subprocess.TimeoutExpired:
pass # Stubborn process won't die, nothing can be done.
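if __name__ == "__main__":
    # Minimal usage sketch: run a short command with a timeout and echo its
    # captured stdout. The command is illustrative; any argv list works.
    print(run_command(["echo", "hello"], timeout=10))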
|
CompilerGym-development
|
compiler_gym/util/commands.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import multiprocessing
from collections import deque
from contextlib import contextmanager
from enum import Enum
from itertools import islice
from os import cpu_count
from pathlib import Path
from threading import Lock
from typing import Optional
from pydantic import BaseModel, Field, validator
from pydantic.class_validators import root_validator
logger = logging.getLogger(__name__)
_executor_lock = Lock()
_executor = None
class Executor(BaseModel):
"""Defines an execution environment for jobs.
E.g. a node on a cluster, the local machine, etc. To create jobs,
    instantiate this class and submit functions using the executor API:
    >>> executor = Executor(type="local", block=True)
>>> with executor.get_executor() as executor:
... executor.submit(my_job, arg1, arg2)
... executor.submit(another_job)
"""
class Type(str, Enum):
"""Types of execution environments."""
SLURM = "slurm"
"""Submit jobs to a SLURM cluster scheduler."""
LOCAL = "local"
"""Submit jobs to run on the current machine."""
DEBUG = "debug"
"""Submit jobs to run synchronously on the current machine."""
NOOP = "noop"
"""Submitted jobs return immediately without executing. This can be
useful for debugging, where you want to validate the code and
configuration without performing any computation.
"""
type: Type = Field(allow_mutation=False)
"""The execution environment."""
slurm_partition: Optional[str] = Field(default=None, allow_mutation=False)
"""The name of the SLURM partition to submit jobs to.
Only used for :code:`Type.SLURM` executors.
"""
cpus: int = Field(default=1, allow_mutation=False, ge=-1)
"""The number of CPU threads to provision.
If the type of executor is :code:`Type.SLURM`, this is the number of CPU
threads to provision for each job. If the type of executor is
:code:`Type.LOCAL`, this is the number of parallel jobs to process in a
thread pool. If the value is -1 and the executor is :code:`Type.LOCAL`, the
number of physical cores on the machine is used. Has no effect for
:code:`Type.DEBUG` and :code:`Type.NOOP`.
"""
gpus: int = Field(default=0, allow_mutation=False, ge=0)
"""The number of GPUs to provision.
This is used only by the :code:`Type.SLURM` executor.
"""
timeout_hours: float = Field(default=12, allow_mutation=False, gt=0)
block: bool = Field(default=False, allow_mutation=False)
"""If :code:`True`, the :code:`get_executor()` context manager will block
until all jobs have completed when exiting scope. Jobs are still submitted
asynchronously for parallel execution.
"""
# === Start of public API. ===
@contextmanager
def get_executor(
self, logs_dir: Path, timeout_hours: Optional[float] = None, cpus=None
) -> "Executor":
cpus = cpus or self.cpus
timeout_hours = timeout_hours or self.timeout_hours
if self.type == self.Type.SLURM:
try:
from submitit import AutoExecutor
except ImportError as e:
raise OSError(
"Using the slurm executor requires the submitit library. "
"Install submitit using: python -m pip install submitit"
) from e
executor = AutoExecutor(folder=logs_dir)
executor.update_parameters(
timeout_min=int(round(timeout_hours * 60)),
nodes=1,
cpus_per_task=cpus,
gpus_per_node=self.gpus,
slurm_partition=self.slurm_partition,
)
name = self.slurm_partition or "slurm" # default value for logging
elif self.type == self.Type.LOCAL:
executor, name = (
LocalParallelExecutor(
cpus=cpus,
timeout_seconds=int(round(timeout_hours * 3600)),
),
"local",
)
elif self.type == self.Type.DEBUG:
executor, name = LocalSynchronousExecutor(), "local"
elif self.type == self.Type.NOOP:
executor, name = DummyExecutor(), "noop"
else:
assert False, f"Unknown executor: {self.type} ({type(self.type).__name__})"
executor = WrappedExecutor(executor, name=name)
yield executor
if self.type == self.Type.DEBUG or self.block:
wait_on_jobs(
executor.jobs,
executor_name=str(executor),
cancel_on_error=self.type == self.Type.SLURM,
)
if hasattr(executor.unwrapped, "close"):
executor.unwrapped.close()
@staticmethod
def get_default_local_executor():
"""Return a singleton :code:`Executor`.
:returns: An executor.
"""
with _executor_lock:
global _executor
if _executor is None:
_executor = Executor(type="local", cpus=cpu_count())
return _executor
# === Start of implementation details. ===
@validator("slurm_partition")
def validate_slurm_partition(cls, value, *, values, **kwargs):
del kwargs
if values["type"] == cls.Type.SLURM:
assert value, f"Must specify a partition for executor: {values['executor']}"
return value
@validator("cpus", pre=True)
def validate_cpus(cls, value, *, values, **kwargs):
del kwargs
# -1 CPU count defaults to CPU count.
if values["type"] == cls.Type.LOCAL and value == -1:
return cpu_count()
return value
@root_validator
def local_always_blocks(cls, values):
if values["type"] == cls.Type.LOCAL or values["type"] == cls.Type.NOOP:
values["block"] = True
return values
class Config:
validate_assignment = True
class WrappedExecutor:
"""An executor-like interface that records all jobs that are submitted."""
def __init__(self, executor, name: str):
self.unwrapped = executor
self.jobs = []
self.name = name
def submit(self, *args, **kwargs):
job = self.unwrapped.submit(*args, **kwargs)
logger.info("Submitting job %s to %s ...", job.job_id, self)
self.jobs.append(job)
return job
def __repr__(self) -> str:
return self.name
def wait_on_jobs(jobs, executor_name: str = "executor", cancel_on_error: bool = True):
njobs = len(jobs)
jobs = deque(jobs)
def cancel_all_jobs(jobs):
print(f"Cancelling {len(jobs)} {executor_name} jobs")
for job in jobs:
try:
job.cancel()
except: # noqa
pass
# Produce a list of the first few job IDs
max_num_job_ids_to_show = 8
job_ids = [j.job_id for j in islice(jobs, max_num_job_ids_to_show)]
job_ids = ", ".join(str(x) for x in job_ids)
job_ids = f"job ID: {job_ids}" if len(jobs) == 1 else f"job IDs: {job_ids}"
if len(jobs) > max_num_job_ids_to_show:
job_ids = f"{job_ids} ..."
logger.info(
f"Waiting for {len(jobs)} {executor_name} jobs to complete with {job_ids}"
)
completed = 0
while jobs:
job = jobs.popleft()
if cancel_on_error:
try:
job.result()
completed += 1
logger.info(f"Jobs completed = {completed} of {njobs} ...")
except Exception as e: # noqa Intentionally broad.
logger.error(f"Caught: {type(e).__name__}: {e}")
jobs.append(job)
return cancel_all_jobs(jobs)
else:
job.result()
completed += 1
logger.info(f"Jobs completed = {completed} of {njobs} ...")
logger.info("All done.")
class LocalParallelExecutor:
"""An executor which uses a process pool to process jobs in parallel on the
local machine.
"""
class LocalJob:
def __init__(self, job_id: int, async_result, timeout_seconds: int):
self._async_result = async_result
self.job_id = job_id
self.timeout_seconds = timeout_seconds
def result(self):
return self._async_result.get(timeout=self.timeout_seconds)
def cancel(self):
pass
def __init__(self, cpus: int, timeout_seconds: int):
self.last_job_id = 0
self.process_pool = multiprocessing.Pool(cpus)
self.timeout_seconds = timeout_seconds
self.futures = []
def submit(self, fn, *args, **kwargs):
self.last_job_id += 1
self.futures.append(self.process_pool.apply_async(fn, args, kwargs))
return self.LocalJob(
self.last_job_id,
self.futures[-1],
self.timeout_seconds,
)
def close(self):
# Block until all jobs have completed.
for future in self.futures:
future.get()
self.process_pool.close()
class LocalSynchronousExecutor:
"""An executor where each job is executed synchronously when result() is
called."""
class LocalJob:
def __init__(self, job_id: int, fn, *args, **kwargs):
self._callback = lambda: fn(*args, **kwargs)
self.job_id = job_id
def result(self):
return self._callback()
def cancel(self):
pass
def __init__(self):
self.last_job_id = 0
def submit(self, fn, *args, **kwargs):
self.last_job_id += 1
return self.LocalJob(self.last_job_id, fn, *args, **kwargs)
class DummyExecutor:
class DummyJob:
def __init__(self, job_id: int):
self.job_id = job_id
def result(self):
return None
def cancel(self):
pass
def __init__(self) -> None:
self.last_job_id = 0
def submit(self, fn, *args, **kwargs):
del fn
del args
del kwargs
self.last_job_id += 1
return self.DummyJob(self.last_job_id)
|
CompilerGym-development
|
compiler_gym/util/executor.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This module contains debugging helpers."""
import logging
import os
# Map for translating between COMPILER_GYM_DEBUG levels to python logging
# severity values.
_DEBUG_LEVEL_LOGGING_LEVEL_MAP = {
0: logging.ERROR,
1: logging.WARNING,
2: logging.INFO,
3: logging.DEBUG,
}
_LOGGING_LEVEL_DEBUG_LEVEL_MAP = {
v: k for k, v in _DEBUG_LEVEL_LOGGING_LEVEL_MAP.items()
}
def get_debug_level() -> int:
"""Get the debugging level.
The debug level is a non-negative integer that controls the verbosity of
logging messages and other debugging behavior. At each level, the types of
messages that are logged are:
* :code:`0` - only non-fatal errors are logged (default).
* :code:`1` - extra warnings message are logged.
* :code:`2` - enables purely informational logging messages.
* :code:`3` and above - extremely verbose logging messages are enabled that
may be useful for debugging.
The debugging level can be set using the :code:`$COMPILER_GYM_DEBUG`
environment variable, or by calling :func:`set_debug_level`.
:return: A non-negative integer.
"""
return max(int(os.environ.get("COMPILER_GYM_DEBUG", "0")), 0)
def get_logging_level() -> int:
"""Returns the logging level.
The logging level is not set directly, but as a result of setting the debug
level using :func:`set_debug_level`.
:return: An integer.
"""
return _DEBUG_LEVEL_LOGGING_LEVEL_MAP.get(get_debug_level(), logging.DEBUG)
def set_debug_level(level: int) -> None:
"""Set a new debugging level.
See :func:`get_debug_level` for a description of the debug levels.
The debugging level should be set first when interacting with CompilerGym as
many CompilerGym objects will check the debug level only at initialization
time and not throughout their lifetime.
Setting the debug level affects the entire process and is not thread safe.
:param level: The debugging level to use.
"""
os.environ["COMPILER_GYM_DEBUG"] = str(level)
logging.getLogger("compiler_gym").setLevel(
_DEBUG_LEVEL_LOGGING_LEVEL_MAP.get(level, logging.DEBUG)
)
def logging_level_to_debug_level(logging_level: int) -> int:
"""Convert a python logging level to a debug level.
See :func:`get_debug_level` for a description of the debug levels.
:param logging_level: A python logging level.
:returns: An integer logging level in the range :code:`[0,3]`.
"""
return max(_LOGGING_LEVEL_DEBUG_LEVEL_MAP.get(logging_level, 1) - 1, 0)
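if __name__ == "__main__":
    # Minimal usage sketch: raise the debug level so that informational logging
    # messages from the compiler_gym logger are emitted.
    set_debug_level(2)
    assert get_debug_level() == 2
    assert get_logging_level() == logging.INFO
    assert logging_level_to_debug_level(logging.WARNING) == 0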
|
CompilerGym-development
|
compiler_gym/util/debug_util.py
|