python_code | repo_name | file_path
---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from llvm_rl.model.training import Training
from omegaconf import OmegaConf
def test_parse_yaml():
cfg = Training(
**OmegaConf.create(
"""\
timeout_hours: 10
episodes: 1000
benchmarks:
- uris:
- benchmark://cbench-v1/qsort
- dataset: benchmark://cbench-v1
max_benchmarks: 2
validation:
benchmarks:
- uris:
- benchmark://cbench-v1/qsort
"""
)
)
assert cfg.timeout_hours == 10
|
CompilerGym-development
|
examples/llvm_rl/tests/training_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from llvm_rl.model.validation import Validation
from omegaconf import OmegaConf
import compiler_gym
from compiler_gym.datasets import Benchmark
def test_validation_benchmarks_uris_list():
cfg = Validation(
**OmegaConf.create(
"""\
benchmarks:
- uris:
- benchmark://cbench-v1/qsort
- dataset: benchmark://cbench-v1
max_benchmarks: 2
"""
)
)
with compiler_gym.make("llvm-v0") as env:
assert list(cfg.benchmarks_iterator(env)) == [
"benchmark://cbench-v1/qsort",
"benchmark://cbench-v1/adpcm",
"benchmark://cbench-v1/bitcount",
]
bm = list(cfg.benchmarks_iterator(env))[0]
print(type(bm).__name__)
assert isinstance(bm, Benchmark)
assert list(cfg.benchmark_uris_iterator(env)) == [
"benchmark://cbench-v1/qsort",
"benchmark://cbench-v1/adpcm",
"benchmark://cbench-v1/bitcount",
]
|
CompilerGym-development
|
examples/llvm_rl/tests/validation_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from llvm_rl.model.training import Benchmarks
from omegaconf import OmegaConf
from pydantic import ValidationError
import compiler_gym
from compiler_gym.datasets import Benchmark
def test_benchmarks_missing_dataset_and_uris():
with pytest.raises(ValidationError):
Benchmarks()
def test_benchmarks_uris_list():
cfg = Benchmarks(uris=["benchmark://cbench-v1/qsort"])
assert cfg.uris == ["benchmark://cbench-v1/qsort"]
with compiler_gym.make("llvm-v0") as env:
assert list(cfg.benchmarks_iterator(env)) == ["benchmark://cbench-v1/qsort"]
assert isinstance(list(cfg.benchmarks_iterator(env))[0], Benchmark)
assert list(cfg.benchmark_uris_iterator(env)) == ["benchmark://cbench-v1/qsort"]
def test_validation_benchmarks_uris_list_yaml():
cfg = Benchmarks(
**OmegaConf.create(
"""\
uris:
- benchmark://cbench-v1/qsort
"""
)
)
assert len(cfg.uris) == 1
|
CompilerGym-development
|
examples/llvm_rl/tests/benchmarks_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import sys
import warnings
from pathlib import Path
from llvm_rl.model.model import Model
from omegaconf import OmegaConf
def test_local_train(tmp_path: Path):
model = Model(
**OmegaConf.create(
f"""\
experiment: tiger
working_directory: {tmp_path}/outputs
executor:
type: local
cpus: 2
environment:
id: llvm-autophase-ic-v0
max_episode_steps: 3
agent:
type: PPOTrainer
args:
lr: 1.e-3
model:
fcnet_hiddens: [16]
fcnet_activation: relu
framework: torch
rollout_fragment_length: 8
train_batch_size: 8
sgd_minibatch_size: 8
training:
timeout_hours: 0.25
episodes: 32
benchmarks:
- dataset: benchmark://cbench-v1
max_benchmarks: 3
validation:
benchmarks:
- dataset: benchmark://cbench-v1
max_benchmarks: 3
testing:
timeout_hours: 0.25
benchmarks:
- dataset: benchmark://cbench-v1
max_benchmarks: 3
"""
)
)
warnings.filterwarnings("ignore", category=DeprecationWarning)
model.train()
print("Outputs", list((tmp_path / "outputs").iterdir()), file=sys.stderr)
assert (tmp_path / "outputs").is_dir()
with open(tmp_path / "outputs" / "training-model.json") as f:
assert json.load(f)
assert (tmp_path / "outputs" / "train").is_dir()
print("Outputs", list((tmp_path / "outputs" / "train").iterdir()), file=sys.stderr)
# Check that a checkpoint was created.
assert (
tmp_path
/ "outputs"
/ "train"
/ "tiger-C0-R0"
/ "checkpoint_000001"
/ "checkpoint-1"
).is_file()
# TODO(github.com/facebookresearch/CompilerGym/issues/487): Fix test on CI.
if os.environ.get("CI", "") != "":
return
model.test()
print(
"Trail files",
list((tmp_path / "outputs" / "train" / "tiger-C0-R0").iterdir()),
file=sys.stderr,
flush=True,
)
assert (tmp_path / "outputs" / "train" / "tiger-C0-R0" / "test-meta.json").is_file()
assert (
tmp_path / "outputs" / "train" / "tiger-C0-R0" / "test-results.json"
).is_file()
|
CompilerGym-development
|
examples/llvm_rl/tests/training_integration_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import List
import numpy as np
from pydantic import BaseModel, validator
from ray.rllib.agents.dqn import ApexTrainer, R2D2Trainer # noqa
from ray.rllib.agents.impala import ImpalaTrainer # noqa
from ray.rllib.agents.ppo import PPOTrainer # noqa
from compiler_gym.datasets import BenchmarkUri
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.util.timer import Timer
logger = logging.getLogger(__name__)
class InferenceResult(BaseModel):
"""Represents the result of running an RL agent on a problem."""
# The benchmark URI.
benchmark: str
inference_walltime_seconds: float
commandline: str
episode_len: int
instruction_count_init: int
instruction_count_final: int
instruction_count_oz: int
instruction_count_reduction: float
"""The final instruction count, normalized to -Oz."""
object_size_init: int
object_size_final: int
object_size_oz: int
object_size_reduction: float
"""The final object size, normalized to -Oz."""
runtimes_init: List[float]
runtimes_final: List[float]
runtimes_o3: List[float]
runtime_reduction: float
"""The final runtime, normalized to -Oz."""
@classmethod
def from_agent(
cls,
env: LlvmEnv,
agent,
runtime: bool = True,
runtimes_count: int = 30,
):
# We calculate our own reward at the end, no need for incremental
# rewards during inference.
env.reward_space = None
# Run inference on the environment.
observation, done = env.reset(), False
with Timer() as inference_timer:
while not done:
action = agent.compute_action(observation)
observation, _, done, _ = env.step(action)
instruction_count_init = env.unwrapped.observation["IrInstructionCountO0"]
instruction_count_final = env.unwrapped.observation["IrInstructionCount"]
instruction_count_oz = env.unwrapped.observation["IrInstructionCountOz"]
object_size_init = env.unwrapped.observation["ObjectTextSizeO0"]
object_size_final = env.unwrapped.observation["ObjectTextSizeBytes"]
object_size_oz = env.unwrapped.observation["ObjectTextSizeOz"]
runtimes_init = []
runtimes_o3 = []
runtimes_final = []
try:
if runtime and env.unwrapped.observation["IsRunnable"]:
env.send_param(
"llvm.set_runtimes_per_observation_count", str(runtimes_count)
)
env.unwrapped.observation["Runtime"] # warmup
runtimes_final = env.unwrapped.observation["Runtime"].tolist()
assert (
len(runtimes_final) == runtimes_count
), f"{len(runtimes_final)} != {runtimes_count}"
env.reset()
env.send_param(
"llvm.set_runtimes_per_observation_count", str(runtimes_count)
)
env.unwrapped.observation["Runtime"] # warmup
runtimes_init = env.unwrapped.observation["Runtime"].tolist()
assert (
len(runtimes_init) == runtimes_count
), f"{len(runtimes_init)} != {runtimes_count}"
env.send_param("llvm.apply_baseline_optimizations", "-O3")
env.unwrapped.observation["Runtime"] # warmup
runtimes_o3 = env.unwrapped.observation["Runtime"].tolist()
assert (
len(runtimes_o3) == runtimes_count
), f"{len(runtimes_o3)} != {runtimes_count}"
except Exception as e: # pylint: disable=broad-except
logger.warning("Failed to compute runtime: %s", e)
return cls(
benchmark=env.benchmark.uri,
inference_walltime_seconds=inference_timer.time,
commandline=env.action_space.to_string(env.actions),
episode_len=len(env.actions),
instruction_count_init=instruction_count_init,
instruction_count_final=instruction_count_final,
instruction_count_oz=instruction_count_oz,
instruction_count_reduction=instruction_count_oz
/ max(instruction_count_final, 1),
object_size_init=object_size_init,
object_size_final=object_size_final,
object_size_oz=object_size_oz,
object_size_reduction=object_size_oz / max(object_size_final, 1),
runtimes_init=runtimes_init,
runtimes_final=runtimes_final,
runtimes_o3=runtimes_o3,
runtime_reduction=np.median(runtimes_o3 or [0])
/ max(np.median(runtimes_final or [0]), 1),
)
@validator("benchmark", pre=True)
def validate_benchmark(cls, value):
if isinstance(value, BenchmarkUri):
return str(value)
return value
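# A worked example of how the reduction ratios above read (hypothetical
# numbers, not taken from a real run). Each ratio is baseline / final, so
# values greater than 1.0 mean the agent beat the baseline:
#
#   instruction_count_oz = 800, instruction_count_final = 1000
#   -> instruction_count_reduction = 800 / 1000 = 0.80  (worse than -Oz)
#
#   instruction_count_oz = 800, instruction_count_final = 500
#   -> instruction_count_reduction = 800 / 500 = 1.60   (better than -Oz)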
|
CompilerGym-development
|
examples/llvm_rl/model/inference_result.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .model import Model
__all__ = [
"Model",
]
|
CompilerGym-development
|
examples/llvm_rl/model/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import warnings
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional
import pandas as pd
import yaml
from pydantic import BaseModel, Field
from ray import tune
from compiler_gym.util.executor import Executor
from compiler_gym.util.shell_format import indent, plural
from compiler_gym.util.statistics import geometric_mean
from .agent import Agent
from .environment import Environment
from .inference_result import InferenceResult
from .testing import Testing
from .training import Training
# Ignore import deprecation warnings from ray.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import ray
logger = logging.getLogger(__name__)
class Model(BaseModel):
"""The composition of the full environment, agent, training / testing
regime, and execution environment. Provides the API for training / testing.
"""
# === Start of fields list. ===
executor: Executor
"""The execution environment to use for training / testing jobs."""
environment: Environment = Field(allow_mutation=False)
"""Description of the environment, which defines the particular optimization
problem, the reward signal for training, and the representation of state
that the agent receives.
"""
agent: Agent = Field(allow_mutation=False)
"""The agent describes the RLlib training algorithm that is used."""
training: Training = Field(allow_mutation=False)
"""Description of the training regime: the benchmarks to learn over, and how
long to learn for.
"""
testing: Testing = Field(allow_mutation=False)
"""The testing setup."""
working_directory: Path = Field(allow_mutation=False)
"""The working directory where logs and other artifacts are written to."""
experiment: str = Field(default="unnamed_experiment", allow_mutation=False)
"""A logical name for this experiment. This is used for naming RLlib
trials.
"""
num_replicas: int = Field(default=1, ge=1, allow_mutation=False)
"""The number of duplicate jobs to run. E.g. for training, this will train
:code:`n` independent models in trials that share the same working
directory.
"""
job_id: int = Field(default=0, allow_mutation=False)
"""An optional numeric job ID."""
seed: int = Field(default=0xCC, allow_mutation=False)
"""The numeric seed to use"""
compiler_gym_version: str = Field(default="", allow_mutation=False)
"""The compiler_gym.__version__ string."""
# === Start of public API. ===
def train(self) -> None:
"""Run the training job for this model."""
logger.info("Model:\n%s", indent(self.yaml(), 4))
logger.info("Starting training job in %s", self.working_directory)
# The working directory may already have been created by hydra, so we
# will check for the training-model.json file below to see if this
# directory has already been used for training.
self.working_directory.mkdir(parents=True, exist_ok=True)
# Dump the parsed config to file.
model_dump_path = self.working_directory / "training-model.json"
assert not model_dump_path.is_file(), (
f"Refusing to overwrite file: {model_dump_path}. "
"Is the working directory clean?"
)
with open(model_dump_path, "w") as f:
print(json.dumps(json.loads(self.json()), indent=2), file=f)
with self.executor.get_executor(
logs_dir=self.working_directory / "slurm",
# Provision an extra hour for RLlib overhead.
timeout_hours=self.training.timeout_hours + 1,
) as executor:
for i in range(self.num_replicas):
executor.submit(train_job, model=self, seed=self.seed + i, replica_id=i)
def test_checkpoints(
self, metric: str = "evaluation/episode_reward_mean"
) -> Iterable[Path]:
df = self.dataframe
if not len(df):
return
for logsdir in set(df["logsdir"].values):
sdf = df[(df["logsdir"] == logsdir) & df["checkpoint"]]
if not len(sdf):
continue
sdf = sdf.reset_index()
idx = sdf[metric].idxmax()
best = sdf.iloc[idx]
logger.info(
"Selected checkpoint %s with %s %f",
best["checkpoint_path"],
metric,
best[metric],
)
yield Path(best["checkpoint_path"])
def test(self) -> None:
"""Run the testing job for this model."""
# Gather all the jobs to run now. We will submit them all in a batch.
jobs = []
for checkpoint in self.test_checkpoints():
assert checkpoint.is_file(), f"Checkpoint not found: {checkpoint}"
# Go up two levels to the main directory
test_dir = checkpoint.parent.parent
assert (test_dir / "progress.csv").is_file()
# Try not to have to launch a job.
if (test_dir / "test-meta.json").is_file():
with open(test_dir / "test-meta.json") as f:
meta = json.load(f)
if meta.get("checkpoint") == checkpoint.name:
logger.info(
"Already have test results for %s, nothing to do",
checkpoint.name,
)
continue
jobs.append((checkpoint, test_dir))
# Submit all the jobs now.
with self.executor.get_executor(
logs_dir=self.working_directory / "slurm",
timeout_hours=self.testing.timeout_hours,
# Single threaded evaluation loop.
cpus=2,
) as executor:
for checkpoint, test_dir in jobs:
executor.submit(
test_job, model=self, checkpoint=checkpoint, outputs_dir=test_dir
)
def yaml(self) -> str:
"""Serialize the model configuration to a YAML string."""
# We can't directly dump the dict() representation because we need to
# simplify the types first, so we go via JSON.
simplified_data = json.loads(self.json())
return yaml.dump(simplified_data)
@property
def dataframe(self) -> pd.DataFrame:
if not (self.working_directory / "train").is_dir():
return pd.DataFrame([])
dfs = []
for subdir in (self.working_directory / "train").iterdir():
if not subdir.is_dir():
continue
df = self._trial_to_dataframe(subdir)
if df is not None:
dfs.append(df)
df.to_csv(subdir / "progress-redux.csv")
return pd.concat(dfs) if dfs else pd.DataFrame([])
def _trial_to_dataframe(self, directory: Path) -> Optional[pd.DataFrame]:
components = directory.name.split("-")
if len(components) < 3:
logger.warning(
"Directory name does not match expected "
"{experiment}-{config}-{replica} format: %s",
directory,
)
return
replica = components[-1]
config = components[-2]
experiment = "-".join(components[:-2])
if not (directory / "progress.csv").is_file():
logger.warning("File not found: %s", directory / "progress.csv")
return
try:
df = pd.read_csv(directory / "progress.csv")
except pd.errors.EmptyDataError:
return None
df.insert(0, "logsdir", str(directory))
df.insert(
0,
"experiment_timestamp",
" ".join(
[
self.working_directory.parent.parent.name,
self.working_directory.parent.name,
]
),
)
df.insert(0, "trial_name", directory.name)
df.insert(0, "replica", replica)
df.insert(0, "config", config)
df.insert(0, "experiment", experiment)
df["checkpoint"] = [
(directory / f"checkpoint_{i:06d}").is_dir()
for i in df["training_iteration"]
]
df["checkpoint_path"] = [
str(directory / f"checkpoint_{i:06d}" / f"checkpoint-{i}")
if (directory / f"checkpoint_{i:06d}").is_dir()
else None
for i in df["training_iteration"]
]
df["evaluation/episode_reward_geomean"] = [
geometric_mean(eval(x)) for x in df["evaluation/hist_stats/episode_reward"]
]
df["episode_reward_geomean"] = [
geometric_mean(eval(x)) for x in df["hist_stats/episode_reward"]
]
df["complete"] = [
min(d / self.training.episodes, 1) for d in df["episodes_total"]
]
df["cpus"] = self.executor.cpus
df["gpus"] = self.executor.gpus
df = df.set_index(["experiment", "config", "replica", "training_iteration"])
return df
@property
def test_dataframes(self) -> Dict[str, pd.DataFrame]:
"""Get a dictionary of test dataframes, keyed by trial name."""
results = {}
if not (self.working_directory / "train").is_dir():
return results
for subdir in (self.working_directory / "train").iterdir():
if not subdir.is_dir():
continue
if not (subdir / "test-results.json").is_file():
continue
if not (subdir / "test-meta.json").is_file():
continue
with open(subdir / "test-meta.json") as f:
meta = json.load(f)
df = pd.read_json(subdir / "test-results.json")
df["test_checkpoint"] = meta["checkpoint"]
df["test_timestamp"] = meta["timestamp"]
results[subdir.name] = df
return results
@classmethod
def from_logsdir(cls, working_directory: Path) -> List["Model"]:
"""Reconstruct models by recursively reading from logs directories."""
def find_models(dir: Path) -> Iterable[Path]:
"""Attempt to locate models recursively from logs directories."""
if (dir / "training-model.json").is_file():
yield dir / "training-model.json"
return
for entry in dir.iterdir():
if entry.is_dir():
yield from find_models(entry)
models: List[Model] = []
for model_file in find_models(working_directory):
with open(model_file) as f:
try:
model = json.load(f)
model["working_directory"] = model_file.parent
models.append(cls(**model))
except json.decoder.JSONDecodeError as e:
logger.warning(
"Failed to parse JSON for model file %s: %s", model_file, e
)
continue
return models
# === Start of implementation details. ===
def make_rllib_trainer_config(self, seed: int) -> Dict[str, Any]:
"""Coerce user preferences into a dictionary of arguments for RLlib
trainer class.
"""
with self.environment.make_env() as env:
evaluation_num_episodes = len(
list(self.training.validation.benchmark_uris_iterator(env))
)
logger.info(
"Calculated the number of episodes per evaluation to be %d",
evaluation_num_episodes,
)
if not evaluation_num_episodes:
raise ValueError("#. of validation episodes is 0!")
derived_args = {
"env": self.environment.rllib_id,
"seed": seed,
"horizon": self.environment.max_episode_steps,
# Reserve one CPU for the trainer, the rest for rollout workers.
"num_workers": self.executor.cpus - 1,
"num_cpus_per_worker": 1,
"num_gpus": self.executor.gpus,
# Set the number of evaluation episodes to the size of the
# validation set.
"evaluation_num_episodes": evaluation_num_episodes,
# 1 checkpoint = 1 evaluation.
"evaluation_interval": 1,
# Argument dictionary passed to make_env().
"env_config": {"type": "training"},
"evaluation_config": {
"env_config": {"type": "validation"},
},
}
# Merge with the user args. In case of conflict, the user's arg value
# overrides the derived arg value.
return merge(derived_args, self.agent.args)
class Config:
validate_assignment = True
def test_job(model: Model, checkpoint: Path, outputs_dir: Path) -> None:
logger.info(
"Initializing ray with 2 cpus and %d GPUs",
model.executor.gpus,
)
ray.init(
num_cpus=2,
num_gpus=model.executor.gpus,
include_dashboard=False,
)
tune.register_env(
model.environment.rllib_id, lambda _: model.environment.make_env()
)
agent = model.agent.make_agent(model.environment)
logger.info(
"Restoring %s agent with %s trainable params from %s",
model.agent.type,
f"{model.agent.trainable_parameters_count(agent):,}",
checkpoint,
)
agent.restore(str(checkpoint))
# Run inference on all of the test benchmarks.
results: List[InferenceResult] = []
with model.environment.make_env() as env:
test_benchmarks = list(model.testing.benchmark_uris_iterator(env))
for i, benchmark in enumerate(test_benchmarks, start=1):
env.reset(benchmark=benchmark)
result = InferenceResult.from_agent(
env, agent, runtime=model.environment.reward_space == "Runtime"
)
logger.info(
"Test %s of %s: %s",
f"{i:,d}",
f"{len(test_benchmarks):,d}",
result,
)
results.append(result)
# Do this once the actual work has been done so that failed jobs
# don't leave meta files lying around.
with open(outputs_dir / "test-results.json", "w") as f:
json.dump([r.dict() for r in results], f)
with open(outputs_dir / "test-meta.json", "w") as f:
json.dump(
{
"timestamp": datetime.now().isoformat(),
"checkpoint": checkpoint.name,
},
f,
)
# Explicit call to ray shutdown here so that multiple consecutive
# jobs can initialize ray with different resource requirements.
ray.shutdown()
def train_job(model: Model, seed: int, replica_id: int) -> None:
logger.info(
"Initializing ray with %d %s and %d %s",
model.executor.cpus,
plural(model.executor.cpus, "CPU", "CPUs"),
model.executor.gpus,
plural(model.executor.gpus, "GPU", "GPUs"),
)
ray.init(
num_cpus=model.executor.cpus,
num_gpus=model.executor.gpus,
include_dashboard=False,
)
logger.info("Registered RLlib environment %s", model.environment.rllib_id)
def make_env(env_config: Dict[str, Any]):
"""Construct a training or validation environment."""
env = model.environment.make_env()
if "type" not in env_config:
raise KeyError(f"No type in dict: {env_config}")
if env_config["type"] == "training":
return model.training.wrap_env(env)
elif env_config["type"] == "validation":
return model.training.validation.wrap_env(env)
raise ValueError(f"Unknown environment type: {env_config['type']}")
tune.register_env(
model.environment.rllib_id,
make_env,
)
def trial_name_creator(trial):
del trial # Unused
# NOTE(cummins): Only a single trial per instance.
return f"{model.experiment}-C{model.job_id}-R{replica_id}"
def trial_dirname_creator(trial):
del trial # Unused
return f"{model.experiment}-C{model.job_id}-R{replica_id}"
rllib_opts = {
"config": model.make_rllib_trainer_config(seed),
"time_budget_s": model.training.timeout_hours * 3600,
"stop": {
"episodes_total": model.training.episodes,
},
"reuse_actors": model.agent.reuse_actors,
"checkpoint_freq": model.agent.checkpoint_freq,
"checkpoint_at_end": model.agent.checkpoint_at_end,
# Write RLlib files to: "<working_directory>/train/<experiment>-C<job_id>-R<replica_id>".
"local_dir": str(model.working_directory),
"name": "train",
}
logger.info("RLlib options:\n%s", json.dumps(rllib_opts, indent=2))
tune.run(
model.agent.actual_type,
trial_name_creator=trial_name_creator,
trial_dirname_creator=trial_dirname_creator,
**rllib_opts,
)
# Explicit call to ray shutdown here so that multiple consecutive
# jobs can initialize ray with different resource requirements.
ray.shutdown()
def merge(a, b, path=None):
"Update values in `a` with values from `b`. Supported nested dicts."
if path is None:
path = []
for key in b:
if key in a and isinstance(a[key], dict) and isinstance(b[key], dict):
merge(a[key], b[key], path + [str(key)])
else:
a[key] = b[key]
return a
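# A minimal sketch of how merge() combines the derived trainer config with
# user-supplied agent args (the values below are illustrative, not defaults):
#
#   >>> a = {"num_workers": 7, "evaluation_config": {"env_config": {"type": "validation"}}}
#   >>> b = {"num_workers": 2, "evaluation_config": {"explore": False}}
#   >>> merge(a, b)
#   {'num_workers': 2, 'evaluation_config': {'env_config': {'type': 'validation'}, 'explore': False}}
#
# Nested dicts are merged recursively; conflicting scalars resolve in favor
# of `b`, which is why user args override the derived values in
# make_rllib_trainer_config().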
|
CompilerGym-development
|
examples/llvm_rl/model/model.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from typing import Any, Dict
import numpy as np
from omegaconf import DictConfig, ListConfig
from pydantic import BaseModel, Field, validator
# Ignore import deprecation warnings from ray.
warnings.filterwarnings("ignore", category=DeprecationWarning)
from ray.rllib.agents.a3c import A2CTrainer # noqa
from ray.rllib.agents.a3c import A3CTrainer # noqa
from ray.rllib.agents.dqn import ApexTrainer, R2D2Trainer # noqa
from ray.rllib.agents.impala import ImpalaTrainer # noqa
from ray.rllib.agents.ppo import PPOTrainer # noqa
from .environment import Environment # noqa: E402
class Agent(BaseModel):
"""Represents the RL algorithm used."""
# === Start of fields list. ===
type: str = Field(allow_mutation=False)
"""The name of the class used to instantiate the RL algorithm as a string,
e.g. :code:`"PPOTrainer"`. The class must be imported into this module to be
used.
"""
args: Dict[str, Any] = Field(default={}, allow_mutation=False)
"""A dictionary of arguments that are passed into the
:code:`type` constructor.
"""
checkpoint_freq: int = Field(default=1, ge=1, allow_mutation=False)
"""How frequently to checkpoint the agents progress, in rllib training
iterations.
"""
checkpoint_at_end: bool = Field(default=True, allow_mutation=False)
"""Whether to produce a final checkpoint at the end of training.
"""
reuse_actors: bool = Field(default=True, allow_mutation=False)
"""Whether to reuse workers between training iterations."""
# === Start of public API. ===
@property
def actual_type(self):
"""Get the trainer class type."""
return self._to_class(self.type)
@property
def rllib_trainer_config_dict(self):
"""Merge generated arguments with user trainer args dict."""
config = {
"log_level": "INFO",
}
config.update(self.args)
return config
def make_agent(self, environment: Environment):
"""Construct an agent object."""
try:
return self.actual_type(config=self.args, env=environment.rllib_id)
except TypeError as e:
raise TypeError(
"Error constructing RLlib trainer class "
f"{self.actual_type.__name__}: {e}"
) from e
def trainable_parameters_count(self, agent):
"""Given an agent instance (created by :code:`make_agent()`), compute
and return the number of trainable parameters.
"""
framework = self.args.get("framework")
model = agent.get_policy().model
if framework == "torch":
return np.sum([np.prod(var.shape) for var in model.trainable_variables()])
elif framework == "tf":
return np.sum(
[np.prod(v.get_shape().as_list()) for v in model.trainable_variables()]
)
raise ValueError(f"Unknown framework: {framework}")
# === Start of implementation details. ===
@staticmethod
def _to_class(value):
try:
return globals()[value]
except KeyError as e:
raise ValueError(
f"Unknown RLlib trainer class: {value}.\n"
"Make sure it is imported in rl/model/agent.py"
) from e
@validator("type")
def validate_type(cls, value):
cls._to_class(value)
return value
@validator("args", pre=True)
def validate_args(cls, value):
def omegaconf_to_py(x):
if isinstance(x, DictConfig):
return {k: omegaconf_to_py(v) for k, v in x.items()}
elif isinstance(x, ListConfig):
return [omegaconf_to_py(v) for v in x]
else:
return x
return omegaconf_to_py(value)
class Config:
validate_assignment = True
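# A minimal usage sketch (the argument values are illustrative): the "type"
# string is resolved through globals() by _to_class(), so it must name one
# of the trainer classes imported at the top of this module.
#
#   agent = Agent(type="PPOTrainer", args={"lr": 1e-3, "framework": "torch"})
#   trainer_cls = agent.actual_type  # -> ray.rllib.agents.ppo.PPOTrainer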
|
CompilerGym-development
|
examples/llvm_rl/model/agent.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Optional
from llvm_autotuning.just_keep_going_env import JustKeepGoingEnv
from llvm_rl.wrappers import * # noqa wrapper definition
from pydantic import BaseModel, Field, validator
from pydantic.class_validators import root_validator
import compiler_gym
from compiler_gym import CompilerEnv
from compiler_gym.wrappers import * # noqa wrapper definitions
from compiler_gym.wrappers import TimeLimit
class EnvironmentWrapperConfig(BaseModel):
"""Description of a CompilerEnvWrapper class."""
# === Start of fields list. ===
wrapper: str = Field(allow_mutation=False)
"""The name of the wrapper class. This class name must be imported into this
module.
"""
args: Dict[str, Any] = Field(default={}, allow_mutation=False)
""""A dictionary of arguments to pass to the wrapper constructor."""
# === Start of public API. ===
@property
def wrapper_class(self):
"""Return the wrapper class type."""
return self._to_class(self.wrapper)
def wrap(self, env: CompilerEnv) -> CompilerEnv:
"""Wrap the given environment."""
try:
return self.wrapper_class(env=env, **self.args)
except TypeError as e:
raise TypeError(
f"Error constructing CompilerEnv wrapper {self.wrapper_class.__name__}: {e}"
) from e
# === Start of implementation details. ===
@validator("wrapper")
def validate_wrapper(cls, value):
# Check that the class name can be resolved.
cls._to_class(value)
return value
@staticmethod
def _to_class(value: str):
try:
return globals()[value]
except KeyError as e:
raise ValueError(
f"Unknown wrapper class: {value}\n"
"Make sure it is imported in rl/model/environment.py"
) from e
class Config:
validate_assignment = True
class Environment(BaseModel):
"""Represents a CompilerEnv environment."""
id: str = Field(allow_mutation=False)
"""The environment ID, as passed to :code:`gym.make(...)`."""
reward_space: Optional[str] = Field(default=None, allow_mutation=False)
"""The reward space to use, as a string."""
observation_space: Optional[str] = Field(default=None, allow_mutation=False)
"""The observation space to use, as a string."""
max_episode_steps: int = Field(allow_mutation=False, gt=0)
"""The maximum number of steps in an episode of this environment. For the
sake of consistency this *must* be defined.
"""
wrappers: List[EnvironmentWrapperConfig] = Field(default=[], allow_mutation=False)
"""A list of wrapper classes to apply to the environment."""
rllib_id: Optional[str] = Field(allow_mutation=False)
"""The ID of the custom environment to register with RLlib. This shows up in
the logs but has no effect on behavior. Defaults to the `id` value.
"""
# === Start of public API. ===
def make_env(self) -> CompilerEnv:
"""Construct a compiler environment from the given config."""
env = compiler_gym.make(self.id)
if self.observation_space:
env.observation_space = self.observation_space
if self.reward_space:
env.reward_space = self.reward_space
for wrapper in self.wrappers:
env = wrapper.wrap(env)
# Wrap the env to ignore errors during search.
env = JustKeepGoingEnv(env)
env = TimeLimit(env, max_episode_steps=self.max_episode_steps)
return env
# === Start of implementation details. ===
@validator("id")
def validate_id(cls, value):
assert (
value in compiler_gym.COMPILER_GYM_ENVS
), f"Not a CompilerGym environment: {value}"
return value
@validator("wrappers", pre=True)
def validate_wrappers(cls, value) -> List[EnvironmentWrapperConfig]:
# Convert the omegaconf ListConfig into a list of
# EnvironmentWrapperConfig objects.
return [EnvironmentWrapperConfig(**v) for v in value]
@root_validator
def rllib_id_default_value(cls, values):
values["rllib_id"] = values["rllib_id"] or values["id"]
return values
class Config:
validate_assignment = True
arbitrary_types_allowed = True
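# A hedged sketch of the YAML fragment that this Environment model parses
# (the wrapper entry is illustrative; any class imported into this module
# via the wildcard imports above can be named):
#
#   environment:
#     id: llvm-autophase-ic-v0
#     max_episode_steps: 45
#     reward_space: IrInstructionCountOz
#     wrappers:
#       - wrapper: CommandlineWithTerminalAction
#         args: {}
#
# make_env() applies the wrappers in list order, then adds JustKeepGoingEnv
# and a TimeLimit of max_episode_steps.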
|
CompilerGym-development
|
examples/llvm_rl/model/environment.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from itertools import islice
from typing import Iterable, List
import numpy as np
from pydantic import BaseModel, Field, validator
from compiler_gym.datasets import Benchmark
from compiler_gym.envs import CompilerEnv
from .benchmarks import Benchmarks
logger = logging.getLogger(__name__)
class Testing(BaseModel):
"""The testing regime."""
__test__ = False # Prevent pytest from thinking that this class is a test.
# === Start of fields list. ===
timeout_hours: float = Field(allow_mutation=False, gt=0)
"""The timeout for test jobs, in hours."""
benchmarks: List[Benchmarks] = Field(allow_mutation=False)
"""The set of benchmarks to test on."""
runs_per_benchmark: int = Field(default=1, ge=1, allow_mutation=False)
"""The number of inference episodes to run on each benchmark. If the
environment and policy are deterministic then running multiple episodes per
benchmark is only useful for producing accurate aggregate measurements of
inference walltime.
"""
# === Start of public API. ===
def benchmarks_iterator(self, env: CompilerEnv) -> Iterable[Benchmark]:
"""Return an iterator over the test benchmarks."""
for _ in range(self.runs_per_benchmark):
for bm in self.benchmarks:
yield from bm.benchmarks_iterator(env)
def benchmark_uris_iterator(self, env: CompilerEnv) -> Iterable[str]:
"""Return an iterator over the test benchmark URIs."""
for _ in range(self.runs_per_benchmark):
for bm in self.benchmarks:
yield from bm.benchmark_uris_iterator(env)
# === Start of implementation details. ===
@validator("benchmarks", pre=True)
def validate_benchmarks(cls, value):
return [Benchmarks(**v) for v in value]
class Config:
validate_assignment = True
def get_testing_benchmarks(
env: CompilerEnv, max_benchmarks: int = 50, seed: int = 0
) -> Iterable[Benchmark]:
rng = np.random.default_rng(seed=seed)
for dataset in env.datasets:
if dataset.name == "generator://csmith-v0":
yield from islice(dataset.benchmarks(), 50)
elif not dataset.size or dataset.size > max_benchmarks:
logger.info(
"Selecting random %d benchmarks from dataset %s of size %d",
max_benchmarks,
dataset,
dataset.size,
)
for _ in range(max_benchmarks):
yield dataset.random_benchmark(rng)
else:
logger.info(
"Selecting all %d benchmarks from dataset %s", dataset.size, dataset
)
yield from dataset.benchmarks()
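# Iteration-order sketch for the Testing iterators above: with
# runs_per_benchmark=2 and benchmark sets that together yield [A, B], the
# iterators produce A, B, A, B -- the full suite is repeated, rather than
# each benchmark being run twice back-to-back.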
|
CompilerGym-development
|
examples/llvm_rl/model/testing.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from itertools import islice
from typing import Iterable, List, Union
from pydantic import BaseModel, Field, root_validator, validator
from compiler_gym.datasets import Benchmark, BenchmarkUri
from compiler_gym.envs import CompilerEnv
class Benchmarks(BaseModel):
"""Represents a set of benchmarks to use for training/validation/testing.
There are two ways of describing benchmarks, either as a list of benchmark
URIs:
benchmarks:
uris:
- benchmark://cbench-v1/adpcm
- benchmark://cbench-v1/ghostscript
Or as a dataset to iterate over:
benchmarks:
dataset: benchmark://cbench-v1
max_benchmarks: 20
"""
# === Start of fields list. ===
dataset: str = Field(default=None, allow_mutation=False)
"""The name of a dataset to iterate over. If set, benchmarks are produced
by iterating over this dataset in order. If not set, the :code:`uris` list
must be provided.
"""
uris: List[str] = Field(default=[], allow_mutation=False)
"""A list of URIs to iterate over."""
max_benchmarks: int = Field(default=0, ge=0, allow_mutation=False)
"""The maximum number of benchmarks to yield from the given dataset or URIs
list.
"""
benchmarks_start_at: int = Field(default=0, ge=0, allow_mutation=False)
"""An offset into the dataset or URIs list to start iterating from.
Note that using very large offsets will slow things down as the
implementation still has to iterate over the excluded benchmarks.
"""
# === Start of public API. ===
def benchmarks_iterator(self, env: CompilerEnv) -> Iterable[Benchmark]:
"""Return an iterator over the benchmarks."""
return self._benchmark_iterator(env)
def benchmark_uris_iterator(self, env: CompilerEnv) -> Iterable[str]:
"""Return an iterator over the URIs of the benchmarks."""
return self._benchmark_iterator(env, uris=True)
# === Start of implementation details. ===
@root_validator
def check_that_either_dataset_or_uris_is_set(cls, values):
assert values.get("dataset") or values.get(
"uris"
), "Neither dataset or uris given"
return values
@validator("uris", pre=True)
def validate_uris(cls, value, *, values, **kwargs):
del kwargs
for uri in value:
uri = BenchmarkUri.from_string(uri)
assert uri.scheme and uri.dataset, f"Invalid benchmark URI: {uri}"
return list(value)
def _benchmark_iterator(
self, env: CompilerEnv, uris: bool = False
) -> Union[Iterable[Benchmark], Iterable[str]]:
return (
self._uris_iterator(env, uris)
if self.uris
else self._dataset_iterator(env, uris)
)
def _uris_iterator(
self, env: CompilerEnv, uris: bool = False
) -> Union[Iterable[Benchmark], Iterable[str]]:
"""Iterate from a URIs list."""
start = self.benchmarks_start_at
n = len(self.uris)
if self.max_benchmarks:
n = min(self.max_benchmarks, n)
if uris:
# Shortcut in case we already have a list of URIs that we can slice
# rather than iterating over.
return iter(self.uris[start : start + n])
return islice((env.datasets.benchmark(u) for u in self.uris), start, start + n)
def _dataset_iterator(
self, env: CompilerEnv, uris: bool = False
) -> Union[Iterable[Benchmark], Iterable[str]]:
"""Iterate from a dataset name."""
dataset = env.datasets[self.dataset]
dataset.install()
n = dataset.size or self.max_benchmarks # dataset.size == 0 for inf
if self.max_benchmarks:
n = min(self.max_benchmarks, n)
start = self.benchmarks_start_at
iterator = dataset.benchmark_uris if uris else dataset.benchmarks
return islice(iterator(), start, start + n)
class Config:
validate_assignment = True
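# Two equivalent ways of constructing this config (a sketch; the dataset
# and URI names are real cBench identifiers, the combination is
# illustrative):
#
#   by_uri = Benchmarks(uris=["benchmark://cbench-v1/qsort"])
#   by_dataset = Benchmarks(
#       dataset="benchmark://cbench-v1",
#       max_benchmarks=2,
#       benchmarks_start_at=1,  # skip the first benchmark in the dataset
#   )
#
# Given an env, by_dataset.benchmark_uris_iterator(env) yields the second
# and third URIs of the dataset in enumeration order.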
|
CompilerGym-development
|
examples/llvm_rl/model/benchmarks.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Iterable, List
from pydantic import BaseModel, Field, validator
from compiler_gym.datasets import Benchmark
from compiler_gym.envs import CompilerEnv
from compiler_gym.wrappers import (
CycleOverBenchmarks,
CycleOverBenchmarksIterator,
IterateOverBenchmarks,
)
from .benchmarks import Benchmarks
from .validation import Validation
class Training(BaseModel):
"""The training regime."""
timeout_hours: float = Field(allow_mutation=False, gt=0)
"""The maximum runtime of the training job."""
episodes: int = Field(ge=1, allow_mutation=False)
"""The number of episodes to train for."""
benchmarks: List[Benchmarks] = Field(allow_mutation=False)
"""The programs to train over."""
validation: Validation = Field(allow_mutation=False)
"""The validation set."""
cycle_over_benchmarks: bool = Field(default=True, allow_mutation=False)
"""If :code:`True`, the benchmark iterator repeats itself once an entire
epoch has completed. Set this to :code:`False` to disable benchmarks from
being cached.
"""
cache_benchmarks: bool = Field(default=False, allow_mutation=False)
"""If :code:`True`, construct the actual benchmark objects during iteration.
This will make it faster to cycle over the same set of benchmarks multiple
times, but requires enough resources to hold all of the benchmark objects in
memory. If :code:`False`, just the benchmark URIs are cached in memory.
"""
# === Start of public API. ===
def benchmarks_iterator(self, env: CompilerEnv) -> Iterable[Benchmark]:
"""Return an iterator over the training benchmarks."""
for bm in self.benchmarks:
yield from bm.benchmarks_iterator(env)
def benchmark_uris_iterator(self, env: CompilerEnv) -> Iterable[str]:
"""Return an iterator over the training benchmark URIs."""
for bm in self.benchmarks:
yield from bm.benchmark_uris_iterator(env)
def wrap_env(self, env: CompilerEnv) -> CompilerEnv:
"""Wrap an environment for use in the training loop that is configured
to iterate over the training benchmarks on each call to :code:`reset()`.
"""
if self.cycle_over_benchmarks and self.cache_benchmarks:
wrapper = CycleOverBenchmarks
elif self.cycle_over_benchmarks:
return CycleOverBenchmarksIterator(
env=env,
make_benchmark_iterator=lambda: self.benchmark_uris_iterator(env),
)
else:
wrapper = IterateOverBenchmarks
iterator = (
self.benchmarks_iterator
if self.cache_benchmarks
else self.benchmark_uris_iterator
)
return wrapper(env=env, benchmarks=iterator(env))
# === Start of implementation details. ===
@validator("benchmarks", pre=True)
def validate_benchmarks(cls, value):
return [Benchmarks(**v) for v in value]
class Config:
validate_assignment = True
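# Summary of how wrap_env() selects a wrapper (derived from the logic
# above, no additional behavior implied):
#
#   cycle_over_benchmarks  cache_benchmarks  wrapper
#   True                   True              CycleOverBenchmarks (Benchmark objects)
#   True                   False             CycleOverBenchmarksIterator (URIs)
#   False                  True or False     IterateOverBenchmarks (objects if cached, else URIs)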
|
CompilerGym-development
|
examples/llvm_rl/model/training.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Iterable, List
from pydantic import BaseModel, Field, validator
from compiler_gym.datasets import Benchmark
from compiler_gym.envs import CompilerEnv
from compiler_gym.wrappers import CycleOverBenchmarks
from .benchmarks import Benchmarks
class Validation(BaseModel):
"""The validation set which is used for periodically evaluating agent
performance during training.
"""
# === Start of fields list. ===
benchmarks: List[Benchmarks] = Field(allow_mutation=False)
"""The benchmarks to evaluate agent performance on. These must be distinct
from the training and testing sets (this requirement is not enforced by the
API; you must enforce it yourself).
"""
# === Start of public API. ===
def benchmarks_iterator(self, env: CompilerEnv) -> Iterable[Benchmark]:
"""Return an iterator over the validation benchmarks."""
for bm in self.benchmarks:
yield from bm.benchmarks_iterator(env)
def benchmark_uris_iterator(self, env: CompilerEnv) -> Iterable[str]:
"""Return an iterator over the training benchmark URIs."""
for bm in self.benchmarks:
yield from bm.benchmark_uris_iterator(env)
def wrap_env(self, env: CompilerEnv) -> CompilerEnv:
"""Wrap an environment for use in the training loop that is configured
to iterate over the validation benchmarks on each call to
:code:`reset()`.
"""
return CycleOverBenchmarks(env=env, benchmarks=self.benchmarks_iterator(env))
# === Start of implementation details. ===
@validator("benchmarks", pre=True)
def validate_benchmarks(cls, value):
return [Benchmarks(**v) for v in value]
class Config:
validate_assignment = True
|
CompilerGym-development
|
examples/llvm_rl/model/validation.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Estimate the immediate reward of different actions using random trials.
This script estimates the immediate reward of running a specific action by
running trials. A trial is a random episode that ends with the action under
test.
Example Usage
-------------
Evaluate the impact of three passes on the codesize of the cBench-crc32
benchmark:
$ python -m sensitivity_analysis.action_sensitivity_analysis \
--env=llvm-v0 --reward=IrInstructionCountO3 \
--benchmark=cbench-v1/crc32 --num_action_sensitivity_trials=25 \
--action=-add-discriminators,-adce,-mem2reg
Evaluate the single-step immediate reward of all actions on LLVM codesize:
$ python -m sensitivity_analysis.action_sensitivity_analysis -- \
--env=llvm-v0 --reward=IrInstructionCountO3
"""
import random
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import List, Optional
import numpy as np
from absl import app, flags
from sensitivity_analysis.sensitivity_analysis_eval import (
SensitivityAnalysisResult,
run_sensitivity_analysis,
)
import compiler_gym.util.flags.nproc # noqa
from compiler_gym.envs import CompilerEnv
from compiler_gym.util.flags.benchmark_from_flags import benchmark_from_flags
from compiler_gym.util.flags.env_from_flags import env_from_flags
from compiler_gym.util.gym_type_hints import ActionType
from compiler_gym.util.runfiles_path import create_user_logs_dir
from compiler_gym.util.timer import Timer
flags.DEFINE_integer(
"num_action_sensitivity_trials",
100,
"The number of trials to perform when estimating the reward of each action. "
"A trial is a random episode that ends with the determined action. Increasing "
"this number increases the number of trials performed, leading to a higher "
"fidelity estimate of the reward of an action.",
)
flags.DEFINE_integer(
"max_warmup_steps",
25,
"The maximum number of random steps to make before determining the reward of an action.",
)
flags.DEFINE_list(
"action",
[],
"An optional list of actions to evaluate. If not specified, all actions will be evaluated.",
)
flags.DEFINE_integer(
"max_action_attempts_multiplier",
5,
"A trial may fail because the environment crashes, or an action produces an invalid state. "
"Limit the total number of trials performed for each action to "
"max_action_attempts_multiplier * num_trials.",
)
FLAGS = flags.FLAGS
def get_rewards(
action: int,
action_name: str,
reward_space: str,
num_trials: int,
max_warmup_steps: int,
max_attempts_multiplier: int = 5,
) -> SensitivityAnalysisResult:
"""Run random trials to get a list of num_trials immediate rewards."""
rewards, runtimes = [], []
benchmark = benchmark_from_flags()
num_attempts = 0
while (
num_attempts < max_attempts_multiplier * num_trials
and len(rewards) < num_trials
):
num_attempts += 1
with env_from_flags(benchmark=benchmark) as env:
env.observation_space = None
env.reward_space = None
env.reset(benchmark=benchmark)
with Timer() as t:
reward = run_one_trial(env, reward_space, action, max_warmup_steps)
if reward is not None:
rewards.append(reward)
runtimes.append(t.time)
return SensitivityAnalysisResult(
name=action_name, runtimes=np.array(runtimes), rewards=np.array(rewards)
)
def run_one_trial(
env: CompilerEnv, reward_space: str, action: int, max_warmup_steps: int
) -> Optional[float]:
"""Run a random number of "warmup" steps in an environment, then compute
the immediate reward of the given action.
:return: An immediate reward.
"""
num_warmup_steps = random.randint(0, max_warmup_steps)
warmup_actions = [env.action_space.sample() for _ in range(num_warmup_steps)]
env.reward_space = reward_space
_, _, done, _ = env.multistep(warmup_actions)
if done:
return None
_, (reward,), done, _ = env.step(action, reward_spaces=[reward_space])
return None if done else reward
def run_action_sensitivity_analysis(
actions: List[ActionType],
rewards_path: Path,
runtimes_path: Path,
reward_space: str,
num_trials: int,
max_warmup_steps: int,
nproc: int,
max_attempts_multiplier: int = 5,
):
"""Estimate the immediate reward of a given list of actions."""
with env_from_flags() as env:
action_names = env.action_space.names
with ThreadPoolExecutor(max_workers=nproc) as executor:
analysis_futures = {
executor.submit(
get_rewards,
action,
action_names[action],
reward_space,
num_trials,
max_warmup_steps,
max_attempts_multiplier,
)
for action in actions
}
return run_sensitivity_analysis(
analysis_futures=analysis_futures,
runtimes_path=runtimes_path,
rewards_path=rewards_path,
)
def main(argv):
"""Main entry point."""
argv = FLAGS(argv)
if len(argv) != 1:
raise app.UsageError(f"Unknown command line arguments: {argv[1:]}")
with env_from_flags() as env:
action_names = env.action_space.names
print(action_names)
if FLAGS.action:
actions = [env.action_space[a] for a in FLAGS.action]
else:
actions = list(range(len(action_names)))
logs_dir = Path(
FLAGS.output_dir or create_user_logs_dir("action_sensitivity_analysis")
)
rewards_path = logs_dir / f"actions_{FLAGS.reward}.rewards.csv"
runtimes_path = logs_dir / f"actions_{FLAGS.reward}.runtimes.csv"
run_action_sensitivity_analysis(
rewards_path=rewards_path,
runtimes_path=runtimes_path,
actions=actions,
reward_space=FLAGS.reward,
num_trials=FLAGS.num_action_sensitivity_trials,
max_warmup_steps=FLAGS.max_warmup_steps,
nproc=FLAGS.nproc,
max_attempts_multiplier=FLAGS.max_action_attempts_multiplier,
)
if __name__ == "__main__":
app.run(main)
|
CompilerGym-development
|
examples/sensitivity_analysis/action_sensitivity_analysis.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""End-to-end test of //compiler_gym/bin:action_sensitivity_analysis."""
import tempfile
from pathlib import Path
from absl.flags import FLAGS
from flaky import flaky
from sensitivity_analysis.action_sensitivity_analysis import (
run_action_sensitivity_analysis,
)
from sensitivity_analysis.sensitivity_analysis_eval import run_sensitivity_analysis_eval
@flaky
def test_run_action_sensitivity_analysis():
actions = [0, 1]
env = "llvm-v0"
reward = "IrInstructionCountO3"
benchmark = "cbench-v1/crc32"
FLAGS.unparse_flags()
FLAGS(["argv0", f"--env={env}", f"--benchmark={benchmark}"])
with tempfile.TemporaryDirectory() as tmp:
tmp = Path(tmp)
run_action_sensitivity_analysis(
actions=actions,
rewards_path=tmp / "rewards.txt",
runtimes_path=tmp / "runtimes.txt",
reward_space=reward,
num_trials=2,
max_warmup_steps=5,
nproc=1,
)
assert (tmp / "rewards.txt").is_file()
assert (tmp / "runtimes.txt").is_file()
run_sensitivity_analysis_eval(
rewards_path=tmp / "rewards.txt",
runtimes_path=tmp / "runtimes.txt",
)
|
CompilerGym-development
|
examples/sensitivity_analysis/action_sensitivity_analysis_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Evaluate logs generated by sensitivity analysis.
Usage:
$ bazel run -c opt //compiler_gym/bin:sensitivity_analysis_eval -- \
--output_dir=/path/to/generated/logs \
--analysis=actions_IrInstructionCountO3
"""
import sys
from concurrent.futures import Future, as_completed
from pathlib import Path
from typing import Iterable, NamedTuple
import numpy as np
from absl import app, flags
import compiler_gym.util.flags.output_dir # noqa Flag definition.
from compiler_gym.util.tabulate import tabulate
from compiler_gym.util.timer import humanize_duration
flags.DEFINE_string(
"analysis", None, "The name of the sensitivity analysis logs to read"
)
FLAGS = flags.FLAGS
class SensitivityAnalysisResult(NamedTuple):
"""The result of running a sensitivity analysis."""
# The name of the thing being analyzed (e.g. an action or a benchmark).
name: str
# A list of runtimes, one per observation.
runtimes: np.ndarray
# A list of reward deltas, one per observation.
rewards: np.ndarray
def run_sensitivity_analysis(
analysis_futures: Iterable[Future],
rewards_path: Path,
runtimes_path: Path,
) -> None:
"""Run the provided sensitivity analyses to completion and log the results.
:param analysis_futures: A sequence of future analysis results. The future
should return a SensitivityAnalysisResult.
:param rewards_path: The path of the CSV file to write rewards to.
:param runtimes_path: The path of the CSV file to write runtimes to.
"""
rewards_path.parent.mkdir(parents=True, exist_ok=True)
runtimes_path.parent.mkdir(parents=True, exist_ok=True)
print(f"Writing rewards to {rewards_path}", file=sys.stderr)
print(f"Writing runtimes to {runtimes_path}", file=sys.stderr)
print("Waiting for first result ... ", end="", flush=True, file=sys.stderr)
with open(str(rewards_path), "w") as rewards_f, open(
str(runtimes_path), "w"
) as runtimes_f:
for i, future in enumerate(as_completed(analysis_futures), start=1):
result: SensitivityAnalysisResult = future.result()
print(
f"\r\033[KCompleted {i} of {len(analysis_futures)} analyses. "
f"Latest: {result.name}, "
f"avg_delta={result.rewards.mean():.5%}, "
f"avg_runtime={humanize_duration(result.runtimes.mean())} ... ",
end="",
flush=True,
file=sys.stderr,
)
print(
result.name,
",".join(str(a) for a in result.rewards),
sep=",",
flush=True,
file=rewards_f,
)
print(
result.name,
",".join(str(a) for a in result.runtimes),
sep=",",
flush=True,
file=runtimes_f,
)
print(flush=True, file=sys.stderr)
return run_sensitivity_analysis_eval(rewards_path, runtimes_path)
def run_sensitivity_analysis_eval(rewards_path: Path, runtimes_path: Path) -> None:
"""Print a summary of sensitivity analysis logs."""
with open(str(rewards_path)) as f:
rewards_in = f.read().rstrip().split("\n")
with open(str(runtimes_path)) as f:
runtimes_in = f.read().rstrip().split("\n")
rows = []
for rewards_row, runtimes_row in zip(rewards_in, runtimes_in):
name, *rewards = rewards_row.split(",")
_, *runtimes = runtimes_row.split(",")
if rewards == [""]:
rows.append((name, "-", "-", "-", "-", "-"))
continue
rewards = np.array([float(v) for v in rewards])
runtimes = np.array([float(v) for v in runtimes])
rows.append(
(
name,
humanize_duration(runtimes.mean()),
f"{rewards.mean():.5%}",
f"{np.median(rewards):.5%}",
f"{rewards.max():.5%}",
f"{rewards.std():.5%}",
)
)
print(
tabulate(
sorted(rows),
headers=(
"Name",
"Time (avg)",
"Δ (avg)",
"Δ (median)",
"Δ (max)",
"Δ (std.)",
),
)
)
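# Expected row format for both CSV files (one row per analysis target; the
# values shown are illustrative): a name followed by one comma-separated
# value per trial, which is exactly what run_sensitivity_analysis() writes.
#
#   -mem2reg,0.01,0.025,0.0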
def main(argv):
"""Main entry point."""
argv = FLAGS(argv)
if len(argv) != 1:
raise app.UsageError(f"Unknown command line arguments: {argv[1:]}")
assert FLAGS.output_dir, "Required argument --output_dir not set"
assert FLAGS.analysis, "Required argument --analysis not set"
output_dir = Path(FLAGS.output_dir)
rewards_path = output_dir / f"{FLAGS.analysis}.rewards.csv"
runtimes_path = output_dir / f"{FLAGS.analysis}.runtimes.csv"
run_sensitivity_analysis_eval(
rewards_path=rewards_path, runtimes_path=runtimes_path
)
if __name__ == "__main__":
app.run(main)
|
CompilerGym-development
|
examples/sensitivity_analysis/sensitivity_analysis_eval.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Estimate the cumulative reward of random episodes on benchmarks.
This script estimates the cumulative reward for a random episode on a benchmark
by running trials. A trial is an episode in which a random number of random
actions are performed and the total cumulative reward is recorded.
Example Usage
-------------
Evaluate the impact on LLVM codesize of random actions on the cBench-crc32
benchmark:
$ python -m sensitivity_analysis.benchmark_sensitivity_analysis \
--env=llvm-v0 --reward=IrInstructionCountO3 \
--benchmark=cbench-v1/crc32 --num_benchmark_sensitivity_trials=25
Evaluate the LLVM codesize episode reward on all benchmarks:
$ python -m sensitivity_analysis.benchmark_sensitivity_analysis \
--env=llvm-v0 --reward=IrInstructionCountO3
"""
import random
from concurrent.futures import ThreadPoolExecutor
from itertools import islice
from pathlib import Path
from typing import List, Optional, Union
import numpy as np
from absl import app, flags
from sensitivity_analysis.sensitivity_analysis_eval import (
SensitivityAnalysisResult,
run_sensitivity_analysis,
)
import compiler_gym.util.flags.nproc # noqa
from compiler_gym.envs import CompilerEnv
from compiler_gym.service.proto import Benchmark
from compiler_gym.util.flags.benchmark_from_flags import benchmark_from_flags
from compiler_gym.util.flags.env_from_flags import env_from_flags
from compiler_gym.util.runfiles_path import create_user_logs_dir
from compiler_gym.util.timer import Timer
flags.DEFINE_integer(
"num_benchmark_sensitivity_trials",
100,
"The number of trials to perform when estimating the episode reward of each benchmark. "
"A trial is a random episode of a benchmark. Increasing this number increases the "
"number of trials performed, leading to a higher fidelity estimate of the reward "
"potential for a benchmark.",
)
flags.DEFINE_integer(
"min_steps",
10,
"The minimum number of random steps to make in a single trial.",
)
flags.DEFINE_integer(
"max_steps",
100,
"The maximum number of random steps to make in a single trial.",
)
flags.DEFINE_integer(
"max_benchmark_attempts_multiplier",
5,
"A trial may fail because the environment crashes, or an action produces an invalid state. "
"Limit the total number of trials performed for each action to "
"max_benchmark_attempts_multiplier * num_trials.",
)
FLAGS = flags.FLAGS
def get_rewards(
benchmark: Union[Benchmark, str],
reward_space: str,
num_trials: int,
min_steps: int,
max_steps: int,
max_attempts_multiplier: int = 5,
) -> SensitivityAnalysisResult:
"""Run random trials to get a list of num_trials episode rewards."""
rewards, runtimes = [], []
num_attempts = 0
while (
num_attempts < max_attempts_multiplier * num_trials
and len(rewards) < num_trials
):
num_attempts += 1
with env_from_flags(benchmark=benchmark) as env:
env.observation_space = None
env.reward_space = None
env.reset(benchmark=benchmark)
benchmark = env.benchmark
with Timer() as t:
reward = run_one_trial(env, reward_space, min_steps, max_steps)
if reward is not None:
rewards.append(reward)
runtimes.append(t.time)
return SensitivityAnalysisResult(
name=env.benchmark, runtimes=np.array(runtimes), rewards=np.array(rewards)
)
def run_one_trial(
env: CompilerEnv, reward_space: str, min_steps: int, max_steps: int
) -> Optional[float]:
"""Run a random number of random steps in an environment and return the
cumulative reward.
:return: A cumulative reward.
"""
num_steps = random.randint(min_steps, max_steps)
warmup_actions = [env.action_space.sample() for _ in range(num_steps)]
env.reward_space = reward_space
_, _, done, _ = env.multistep(warmup_actions)
if done:
return None
return env.episode_reward
def run_benchmark_sensitivity_analysis(
benchmarks: List[Union[Benchmark, str]],
rewards_path: Path,
runtimes_path: Path,
reward: str,
num_trials: int,
min_steps: int,
max_steps: int,
nproc: int,
max_attempts_multiplier: int = 5,
):
"""Estimate the cumulative reward of random walks on a list of benchmarks."""
with ThreadPoolExecutor(max_workers=nproc) as executor:
analysis_futures = [
executor.submit(
get_rewards,
benchmark,
reward,
num_trials,
min_steps,
max_steps,
max_attempts_multiplier,
)
for benchmark in benchmarks
]
return run_sensitivity_analysis(
analysis_futures=analysis_futures,
runtimes_path=runtimes_path,
rewards_path=rewards_path,
)
def main(argv):
"""Main entry point."""
argv = FLAGS(argv)
if len(argv) != 1:
raise app.UsageError(f"Unknown command line arguments: {argv[1:]}")
# Determine the benchmark that is being analyzed, or use all of them.
benchmark = benchmark_from_flags()
if benchmark:
benchmarks = [benchmark]
else:
with env_from_flags() as env:
benchmarks = islice(env.benchmarks, 100)
logs_dir = Path(
FLAGS.output_dir or create_user_logs_dir("benchmark_sensitivity_analysis")
)
rewards_path = logs_dir / f"benchmarks_{FLAGS.reward}.csv"
runtimes_path = logs_dir / f"benchmarks_{FLAGS.reward}_runtimes.csv"
run_benchmark_sensitivity_analysis(
rewards_path=rewards_path,
runtimes_path=runtimes_path,
benchmarks=benchmarks,
reward=FLAGS.reward,
num_trials=FLAGS.num_benchmark_sensitivity_trials,
min_steps=FLAGS.min_steps,
max_steps=FLAGS.max_steps,
nproc=FLAGS.nproc,
max_attempts_multiplier=FLAGS.max_benchmark_attempts_multiplier,
)
if __name__ == "__main__":
app.run(main)
|
CompilerGym-development
|
examples/sensitivity_analysis/benchmark_sensitivity_analysis.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""End-to-end test of //compiler_gym/bin:benchmark_sensitivity_analysis."""
import tempfile
from pathlib import Path
from absl.flags import FLAGS
from sensitivity_analysis.benchmark_sensitivity_analysis import (
run_benchmark_sensitivity_analysis,
)
from sensitivity_analysis.sensitivity_analysis_eval import run_sensitivity_analysis_eval
def test_run_benchmark_sensitivity_analysis():
env = "llvm-v0"
reward = "IrInstructionCountO3"
benchmarks = ["cbench-v1/crc32"]
FLAGS.unparse_flags()
FLAGS(["argv0", f"--env={env}"])
with tempfile.TemporaryDirectory() as tmp:
tmp = Path(tmp)
run_benchmark_sensitivity_analysis(
benchmarks=benchmarks,
rewards_path=tmp / "rewards.txt",
runtimes_path=tmp / "runtimes.txt",
reward=reward,
num_trials=2,
min_steps=3,
max_steps=5,
nproc=1,
)
assert (tmp / "rewards.txt").is_file()
assert (tmp / "runtimes.txt").is_file()
run_sensitivity_analysis_eval(
rewards_path=tmp / "rewards.txt",
runtimes_path=tmp / "runtimes.txt",
)
|
CompilerGym-development
|
examples/sensitivity_analysis/benchmark_sensitivity_analysis_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Smoke test for //benchmarks:parallelization_load_test."""
from pathlib import Path
from absl import flags
from benchmarks.parallelization_load_test import main as load_test
from compiler_gym.util.capture_output import capture_output
from tests.pytest_plugins.common import set_command_line_flags, skip_on_ci
from tests.test_main import main
FLAGS = flags.FLAGS
pytest_plugins = ["tests.pytest_plugins.llvm", "tests.pytest_plugins.common"]
@skip_on_ci
def test_load_test(env, tmpwd):
del env # Unused.
del tmpwd # Unused.
set_command_line_flags(
[
"arv0",
"--env=llvm-v0",
"--benchmark=cbench-v1/crc32",
"--max_nproc=3",
"--nproc_increment=1",
"--num_steps=2",
"--num_episodes=2",
]
)
with capture_output() as out:
load_test(["argv0"])
assert "Run 1 threaded workers in " in out.stdout
assert "Run 1 process workers in " in out.stdout
assert "Run 2 threaded workers in " in out.stdout
assert "Run 2 process workers in " in out.stdout
assert "Run 3 threaded workers in " in out.stdout
assert "Run 3 process workers in " in out.stdout
assert Path("parallelization_load_test.csv").is_file()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
benchmarks/parallelization_load_test_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""A load test for measuring parallelization scalability.
This benchmark runs random episodes with varying numbers of parallel threads and
processes and records the time taken for each. The objective is to compare the
performance of a simple random search when parallelized using thread-level
parallelism vs. process-based parallelism.
This load test aims to provide a worst-case scenario for multithreading
performance testing: there is no communication or synchronization between
threads and the benchmark is entirely compute bound.
"""
from multiprocessing import Process, cpu_count
from threading import Thread
from absl import app, flags
from compiler_gym.util.flags.benchmark_from_flags import benchmark_from_flags
from compiler_gym.util.flags.env_from_flags import env_from_flags
from compiler_gym.util.timer import Timer
flags.DEFINE_integer("max_nproc", 2 * cpu_count(), "The maximum number of threads.")
flags.DEFINE_integer(
"nproc_increment",
cpu_count() // 4,
"The number of workers to change at each step of the load test.",
)
flags.DEFINE_integer(
"num_episodes", 50, "The number of episodes to run in each worker."
)
flags.DEFINE_integer("num_steps", 50, "The number of steps in each episode.")
flags.DEFINE_string(
"logfile",
"parallelization_load_test.csv",
"The path of the file to write results to.",
)
FLAGS = flags.FLAGS
def run_random_search(num_episodes, num_steps) -> None:
"""The inner loop of a load test benchmark."""
with env_from_flags(benchmark=benchmark_from_flags()) as env:
for _ in range(num_episodes):
env.reset()
for _ in range(num_steps):
_, _, done, _ = env.step(env.action_space.sample())
if done:
break
def main(argv):
assert len(argv) == 1, f"Unknown arguments: {argv[1:]}"
with open(FLAGS.logfile, "w") as f:
print(
"nproc",
"episodes_per_worker",
"steps_per_episode",
"total_episodes",
"thread_steps_per_second",
"process_steps_per_second",
"thread_walltime",
"process_walltime",
sep=",",
file=f,
)
for nproc in [1] + list(
range(FLAGS.nproc_increment, FLAGS.max_nproc + 1, FLAGS.nproc_increment)
):
# Perform the same `nproc * num_episodes` random trajectories first
# using threads, then using processes.
threads = [
Thread(
target=run_random_search,
args=(FLAGS.num_episodes, FLAGS.num_steps),
)
for _ in range(nproc)
]
with Timer(f"Run {nproc} threaded workers") as thread_time:
for thread in threads:
thread.start()
for thread in threads:
thread.join()
processes = [
Process(
target=run_random_search,
args=(FLAGS.num_episodes, FLAGS.num_steps),
)
for _ in range(nproc)
]
with Timer(f"Run {nproc} process workers") as process_time:
for process in processes:
process.start()
for process in processes:
process.join()
print(
nproc,
FLAGS.num_episodes,
FLAGS.num_steps,
FLAGS.num_episodes * nproc,
(FLAGS.num_episodes * FLAGS.num_steps * nproc) / thread_time.time,
(FLAGS.num_episodes * FLAGS.num_steps * nproc) / process_time.time,
thread_time.time,
process_time.time,
sep=",",
file=f,
flush=True,
)
if __name__ == "__main__":
app.run(main)
|
CompilerGym-development
|
benchmarks/parallelization_load_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Microbenchmarks for CompilerGym environments.
To run these benchmarks an optimized build using bazel:
$ bazel test -c opt --test_output=streamed //benchmarks:bench_test
A record of the benchmark results is stored in
/tmp/compiler_gym_<user>/pytest_benchmark/<device>/<run_id>_bench_test.json. Compare
multiple runs using:
$ pytest-benchmark compare --group-by=name --sort=fullname \
/tmp/compiler_gym_<user>/pytest_benchmark/*/*_bench_test.json
"""
from getpass import getuser
import gym
import pytest
import examples.example_compiler_gym_service as dummy
from compiler_gym.envs import CompilerEnv, LlvmEnv, llvm
from compiler_gym.service import CompilerGymServiceConnection
from compiler_gym.service.client_service_compiler_env import ClientServiceCompilerEnv
from tests.pytest_plugins.llvm import OBSERVATION_SPACE_NAMES, REWARD_SPACE_NAMES
from tests.test_main import main
@pytest.fixture(
params=["llvm-v0", "example-cc-v0", "example-py-v0"],
ids=["llvm", "dummy-cc", "dummy-py"],
)
def env_id(request) -> str:
yield request.param
@pytest.fixture(
params=["llvm-v0", "example-cc-v0", "example-py-v0"],
ids=["llvm", "dummy-cc", "dummy-py"],
)
def env(request) -> CompilerEnv:
yield request.param
@pytest.mark.parametrize(
"env_id",
["llvm-v0", "example-cc-v0", "example-py-v0"],
ids=["llvm", "dummy-cc", "dummy-py"],
)
def test_make_local(benchmark, env_id):
benchmark(lambda: gym.make(env_id).close())
@pytest.mark.parametrize(
"args",
[
(llvm.LLVM_SERVICE_BINARY, LlvmEnv),
(dummy.EXAMPLE_CC_SERVICE_BINARY, ClientServiceCompilerEnv),
(dummy.EXAMPLE_PY_SERVICE_BINARY, ClientServiceCompilerEnv),
],
ids=["llvm", "dummy-cc", "dummy-py"],
)
def test_make_service(benchmark, args):
service_binary, env_class = args
service = CompilerGymServiceConnection(service_binary)
try:
benchmark(lambda: env_class(service=service.connection.url).close())
finally:
service.close()
@pytest.mark.parametrize(
"make_env",
[
lambda: gym.make("llvm-autophase-ic-v0", benchmark="cbench-v1/crc32"),
lambda: gym.make("llvm-autophase-ic-v0", benchmark="cbench-v1/jpeg-d"),
lambda: gym.make("example-cc-v0"),
lambda: gym.make("example-py-v0"),
],
ids=["llvm;fast-benchmark", "llvm;slow-benchmark", "dummy-cc", "dummy-py"],
)
def test_reset(benchmark, make_env: CompilerEnv):
with make_env() as env:
benchmark(env.reset)
@pytest.mark.parametrize(
"args",
[
(
lambda: gym.make("llvm-autophase-ic-v0", benchmark="cbench-v1/crc32"),
"-globaldce",
),
(lambda: gym.make("llvm-autophase-ic-v0", benchmark="cbench-v1/crc32"), "-gvn"),
(
lambda: gym.make("llvm-autophase-ic-v0", benchmark="cbench-v1/jpeg-d"),
"-globaldce",
),
(
lambda: gym.make("llvm-autophase-ic-v0", benchmark="cbench-v1/jpeg-d"),
"-gvn",
),
(lambda: gym.make("example-cc-v0"), "a"),
(lambda: gym.make("example-py-v0"), "a"),
],
ids=[
"llvm;fast-benchmark;fast-action",
"llvm;fast-benchmark;slow-action",
"llvm;slow-benchmark;fast-action",
"llvm;slow-benchmark;slow-action",
"dummy-cc",
"dummy-py",
],
)
def test_step(benchmark, args):
make_env, action_name = args
with make_env() as env:
env.reset()
action = env.action_space[action_name]
benchmark(env.step, action)
_args = dict(
{
f"llvm;{obs}": (lambda: gym.make("llvm-v0", benchmark="cbench-v1/qsort"), obs)
for obs in OBSERVATION_SPACE_NAMES
},
**{
"dummy-cc": (lambda: gym.make("example-cc-v0"), "ir"),
"dummy-py": (lambda: gym.make("example-py-v0"), "features"),
},
)
@pytest.mark.parametrize("args", _args.values(), ids=_args.keys())
def test_observation(benchmark, args):
make_env, observation_space = args
with make_env() as env:
env.reset()
benchmark(lambda: env.observation[observation_space])
_args = dict(
{
f"llvm;{reward}": (
lambda: gym.make("llvm-v0", benchmark="cbench-v1/qsort"),
reward,
)
for reward in REWARD_SPACE_NAMES
},
**{
"dummy-cc": (lambda: gym.make("example-cc-v0"), "runtime"),
"dummy-py": (lambda: gym.make("example-py-v0"), "runtime"),
},
)
@pytest.mark.parametrize("args", _args.values(), ids=_args.keys())
def test_reward(benchmark, args):
make_env, reward_space = args
with make_env() as env:
env.reset()
benchmark(lambda: env.reward[reward_space])
@pytest.mark.parametrize(
"make_env",
[
lambda: gym.make("llvm-autophase-ic-v0", benchmark="cbench-v1/crc32"),
lambda: gym.make("llvm-autophase-ic-v0", benchmark="cbench-v1/jpeg-d"),
# TODO: Example service does not yet support fork() operator.
# lambda: gym.make("example-cc-v0"),
# lambda: gym.make("example-py-v0"),
],
ids=["llvm;fast-benchmark", "llvm;slow-benchmark"],
)
def test_fork(benchmark, make_env):
with make_env() as env:
env.reset()
benchmark(lambda: env.fork().close())
if __name__ == "__main__":
main(
extra_pytest_args=[
f"--benchmark-storage=/tmp/compiler_gym_{getuser()}/pytest_benchmark",
"--benchmark-save=bench_test",
"--benchmark-sort=name",
"-x",
],
debug_level=0,
)
|
CompilerGym-development
|
benchmarks/bench_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Based on the 'util/collect_env.py' script from PyTorch.
# <https://github.com/pytorch/pytorch>
#
# From PyTorch:
#
# Copyright (c) 2016- Facebook, Inc (Adam Paszke)
# Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
# Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
# Copyright (c) 2011-2013 NYU (Clement Farabet)
# Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
# Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
# Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
#
# From Caffe2:
#
# Copyright (c) 2016-present, Facebook Inc. All rights reserved.
#
# All contributions by Facebook:
# Copyright (c) 2016 Facebook Inc.
#
# All contributions by Google:
# Copyright (c) 2015 Google Inc.
# All rights reserved.
#
# All contributions by Yangqing Jia:
# Copyright (c) 2015 Yangqing Jia
# All rights reserved.
#
# All contributions by Kakao Brain:
# Copyright 2019-2020 Kakao Brain
#
# All contributions by Cruise LLC:
# Copyright (c) 2022 Cruise LLC.
# All rights reserved.
#
# All contributions from Caffe:
# Copyright(c) 2013, 2014, 2015, the respective contributors
# All rights reserved.
#
# All other contributions:
# Copyright(c) 2015, 2016 the respective contributors
# All rights reserved.
#
# Caffe2 uses a copyright model similar to Caffe: each contributor holds
# copyright over their contributions to Caffe2. The project versioning records
# all such contribution and copyright details. If a contributor wants to further
# mark their specific copyright on a particular contribution, they should
# indicate their copyright solely in the commit message of the change when it is
# committed.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
# and IDIAP Research Institute nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import print_function
# Unlike the rest of PyTorch, this file must be Python 2 compliant.
# This script outputs relevant system environment info.
# Run it with `python collect_env.py`.
import locale
import os
import re
import subprocess
import sys
from collections import namedtuple
try:
import compiler_gym
COMPILER_GYM_AVAILABLE = True
except (ImportError, NameError, AttributeError, OSError):
COMPILER_GYM_AVAILABLE = False
# System Environment Information
SystemEnv = namedtuple(
"SystemEnv",
[
"compiler_gym_version",
"is_debug_build",
"gcc_version",
"clang_version",
"cmake_version",
"os",
"libc_version",
"python_version",
"python_platform",
"pip_version", # 'pip' or 'pip3'
"pip_packages",
"conda_packages",
],
)
def run(command):
"""Returns (return-code, stdout, stderr)"""
p = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True
)
raw_output, raw_err = p.communicate()
rc = p.returncode
if get_platform() == "win32":
enc = "oem"
else:
enc = locale.getpreferredencoding()
output = raw_output.decode(enc)
err = raw_err.decode(enc)
return rc, output.strip(), err.strip()
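# Illustrative example (not part of the original script): on a POSIX system,
# run("echo hello") returns (0, "hello", "").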
def run_and_read_all(run_lambda, command):
"""Runs command using run_lambda; reads and returns entire output if rc is 0"""
rc, out, _ = run_lambda(command)
if rc != 0:
return None
return out
def run_and_parse_first_match(run_lambda, command, regex):
"""Runs command using run_lambda, returns the first regex match if it exists"""
rc, out, _ = run_lambda(command)
if rc != 0:
return None
match = re.search(regex, out)
if match is None:
return None
return match.group(1)
def run_and_return_first_line(run_lambda, command):
"""Runs command using run_lambda and returns first line if output is not empty"""
rc, out, _ = run_lambda(command)
if rc != 0:
return None
return out.split("\n")[0]
def get_conda_packages(run_lambda):
conda = os.environ.get("CONDA_EXE", "conda")
out = run_and_read_all(run_lambda, conda + " list")
if out is None:
return out
# Comment starting at beginning of line
comment_regex = re.compile(r"^#.*\n")
return re.sub(comment_regex, "", out)
def get_gcc_version(run_lambda):
return run_and_parse_first_match(run_lambda, "gcc --version", r"gcc (.*)")
def get_clang_version(run_lambda):
return run_and_parse_first_match(
run_lambda, "clang --version", r"clang version (.*)"
)
def get_cmake_version(run_lambda):
return run_and_parse_first_match(run_lambda, "cmake --version", r"cmake (.*)")
def get_platform():
if sys.platform.startswith("linux"):
return "linux"
elif sys.platform.startswith("win32"):
return "win32"
elif sys.platform.startswith("cygwin"):
return "cygwin"
elif sys.platform.startswith("darwin"):
return "darwin"
else:
return sys.platform
def get_mac_version(run_lambda):
return run_and_parse_first_match(run_lambda, "sw_vers -productVersion", r"(.*)")
def get_windows_version(run_lambda):
system_root = os.environ.get("SYSTEMROOT", "C:\\Windows")
wmic_cmd = os.path.join(system_root, "System32", "Wbem", "wmic")
findstr_cmd = os.path.join(system_root, "System32", "findstr")
return run_and_read_all(
run_lambda, "{} os get Caption | {} /v Caption".format(wmic_cmd, findstr_cmd)
)
def get_lsb_version(run_lambda):
return run_and_parse_first_match(
run_lambda, "lsb_release -a", r"Description:\t(.*)"
)
def check_release_file(run_lambda):
return run_and_parse_first_match(
run_lambda, "cat /etc/*-release", r'PRETTY_NAME="(.*)"'
)
def get_os(run_lambda):
from platform import machine
platform = get_platform()
if platform == "win32" or platform == "cygwin":
return get_windows_version(run_lambda)
if platform == "darwin":
version = get_mac_version(run_lambda)
if version is None:
return None
return "macOS {} ({})".format(version, machine())
if platform == "linux":
# Ubuntu/Debian based
desc = get_lsb_version(run_lambda)
if desc is not None:
return "{} ({})".format(desc, machine())
# Try reading /etc/*-release
desc = check_release_file(run_lambda)
if desc is not None:
return "{} ({})".format(desc, machine())
return "{} ({})".format(platform, machine())
# Unknown platform
return platform
def get_python_platform():
import platform
return platform.platform()
def get_libc_version():
import platform
if get_platform() != "linux":
return "N/A"
return "-".join(platform.libc_ver())
def indent(s):
return " " + "\n ".join(s.split("\n"))
def get_pip_packages(run_lambda):
"""Returns `pip list` output. Note: will also find conda-installed pytorch
and numpy packages."""
    # People generally have `pip` installed as `pip` or `pip3`,
    # but here it is invoked as `python -mpip`.
def run_with_pip(pip):
return run_and_read_all(run_lambda, pip + " list --format=freeze")
pip_version = "pip3" if sys.version[0] == "3" else "pip"
out = run_with_pip(sys.executable + " -mpip")
return pip_version, out
def get_cachingallocator_config():
ca_config = os.environ.get("PYTORCH_CUDA_ALLOC_CONF", "")
return ca_config
def get_env_info():
run_lambda = run
pip_version, pip_list_output = get_pip_packages(run_lambda)
if COMPILER_GYM_AVAILABLE:
version_str = compiler_gym.__version__
# NOTE(cummins): CompilerGym does not yet have a debug string.
debug_mode_str = "N/A"
else:
version_str = debug_mode_str = "N/A"
sys_version = sys.version.replace("\n", " ")
return SystemEnv(
compiler_gym_version=version_str,
is_debug_build=debug_mode_str,
python_version="{} ({}-bit runtime)".format(
sys_version, sys.maxsize.bit_length() + 1
),
python_platform=get_python_platform(),
pip_version=pip_version,
pip_packages=pip_list_output,
conda_packages=get_conda_packages(run_lambda),
os=get_os(run_lambda),
libc_version=get_libc_version(),
gcc_version=get_gcc_version(run_lambda),
clang_version=get_clang_version(run_lambda),
cmake_version=get_cmake_version(run_lambda),
)
env_info_fmt = """
CompilerGym: {compiler_gym_version}
Is debug build: {is_debug_build}
Python version: {python_version}
Python platform: {python_platform}
OS: {os}
GCC version: {gcc_version}
Clang version: {clang_version}
CMake version: {cmake_version}
Libc version: {libc_version}
Versions of all installed libraries:
{pip_packages}
{conda_packages}
""".strip()
def pretty_str(envinfo):
def replace_nones(dct, replacement="Could not collect"):
for key in dct.keys():
if dct[key] is not None:
continue
dct[key] = replacement
return dct
def replace_bools(dct, true="Yes", false="No"):
for key in dct.keys():
if dct[key] is True:
dct[key] = true
elif dct[key] is False:
dct[key] = false
return dct
def prepend(text, tag="[prepend]"):
lines = text.split("\n")
updated_lines = [tag + line for line in lines]
return "\n".join(updated_lines)
def replace_if_empty(text, replacement="No relevant packages"):
if text is not None and len(text) == 0:
return replacement
return text
def maybe_start_on_next_line(string):
        # If `string` is multiline, wrap it in newlines.
if string is not None and len(string.split("\n")) > 1:
return "\n{}\n".format(string)
return string
mutable_dict = envinfo._asdict()
# Replace True with Yes, False with No
mutable_dict = replace_bools(mutable_dict)
# Replace all None objects with 'Could not collect'
mutable_dict = replace_nones(mutable_dict)
# If either of these are '', replace with 'No relevant packages'
mutable_dict["pip_packages"] = replace_if_empty(mutable_dict["pip_packages"])
mutable_dict["conda_packages"] = replace_if_empty(mutable_dict["conda_packages"])
    # Tag conda and pip packages with a prefix.
    # If they were previously None, they'll show up as e.g. '[conda] Could not collect'.
if mutable_dict["pip_packages"]:
mutable_dict["pip_packages"] = prepend(
mutable_dict["pip_packages"], " [{}] ".format(envinfo.pip_version)
)
if mutable_dict["conda_packages"]:
mutable_dict["conda_packages"] = prepend(
mutable_dict["conda_packages"], " [conda] "
)
return env_info_fmt.format(**mutable_dict)
def get_pretty_env_info():
return pretty_str(get_env_info())
def main():
print("Collecting environment information...")
print()
print(pretty_str(get_env_info()))
if __name__ == "__main__":
main()
|
CompilerGym-development
|
build_tools/collect_env.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Configuration for building an action space from a list of LLVM passes."""
from common import Pass
# A set of headers that must be included to use the generated pass list.
LLVM_ACTION_INCLUDES = {
"llvm/LinkAllPasses.h",
    # A handful of coroutine utility passes are not pulled in by the
# LinkAllPasses.h header.
"llvm/Transforms/Coroutines.h",
}
# A mapping from the name of a pass as defined in an INITIALIZE_PASS(name, ...)
# macro invocation to the name of the pass as defined in the createPASS()
# factory function. Not all passes are named consistently.
CREATE_PASS_NAME_MAP = {
"ADCELegacyPass": "AggressiveDCEPass",
"AddDiscriminatorsLegacyPass": "AddDiscriminatorsPass",
"AggressiveInstCombinerLegacyPass": "AggressiveInstCombinerPass",
"AlignmentFromAssumptions": "AlignmentFromAssumptionsPass",
"ArgPromotion": "ArgumentPromotionPass",
"BarrierNoop": "BarrierNoopPass",
"BDCELegacyPass": "BitTrackingDCEPass",
"BlockExtractor": "BlockExtractorPass",
"BreakCriticalEdges": "BreakCriticalEdgesPass",
"CalledValuePropagationLegacyPass": "CalledValuePropagationPass",
"CallSiteSplittingLegacyPass": "CallSiteSplittingPass",
"CanonicalizeAliasesLegacyPass": "CanonicalizeAliasesPass",
"CFGSimplifyPass": "CFGSimplificationPass",
"CFGuard": ["CFGuardCheckPass", "CFGuardDispatchPass"],
"ConstantHoistingLegacyPass": "ConstantHoistingPass",
"ConstantMergeLegacyPass": "ConstantMergePass",
"ConstantPropagation": "ConstantPropagationPass",
"CoroCleanupLegacy": "CoroCleanupLegacyPass",
"CoroEarlyLegacy": "CoroEarlyLegacyPass",
"CoroElideLegacy": "CoroElideLegacyPass",
"CoroSplitLegacy": "CoroSplitLegacyPass",
"CorrelatedValuePropagation": "CorrelatedValuePropagationPass",
"CrossDSOCFI": "CrossDSOCFIPass",
"DAE": "DeadArgEliminationPass",
"DataFlowSanitizer": "DataFlowSanitizerPass",
"DCELegacyPass": "DeadCodeEliminationPass",
"DeadInstElimination": "DeadInstEliminationPass",
"DivRemPairsLegacyPass": "DivRemPairsPass",
"DSELegacyPass": "DeadStoreEliminationPass",
"EarlyCSELegacyPass": "EarlyCSEPass",
"EarlyCSEMemSSALegacyPass": "EarlyCSEMemSSAPass",
"EliminateAvailableExternallyLegacyPass": "EliminateAvailableExternallyPass",
"EntryExitInstrumenter": "EntryExitInstrumenterPass",
"Float2IntLegacyPass": "Float2IntPass",
"FunctionImportLegacyPass": "FunctionImportPass",
"GCOVProfilerLegacyPass": "GCOVProfilerPass",
"GlobalDCELegacyPass": "GlobalDCEPass",
"GlobalOptLegacyPass": "GlobalOptimizerPass",
"GlobalSplit": "GlobalSplitPass",
"GuardWideningLegacyPass": "GuardWideningPass",
"GVNHoistLegacyPass": "GVNHoistPass",
"GVNLegacyPass": "GVNPass",
"GVNSinkLegacyPass": "GVNSinkPass",
"HotColdSplittingLegacyPass": "HotColdSplittingPass",
"ICPPass": "IPConstantPropagationPass",
"IndVarSimplifyLegacyPass": "IndVarSimplifyPass",
"InferAddressSpaces": "InferAddressSpacesPass",
"InjectTLIMappingsLegacy": "InjectTLIMappingsLegacyPass",
"InstNamer": "InstructionNamerPass",
"InstrOrderFileLegacyPass": "InstrOrderFilePass",
"InternalizeLegacyPass": "InternalizePass",
"IPCP": "IPConstantPropagationPass",
"IPSCCPLegacyPass": "IPSCCPPass",
"IRCELegacyPass": "InductiveRangeCheckEliminationPass",
"JumpThreading": "JumpThreadingPass",
"LCSSAWrapperPass": "LCSSAPass",
"LegacyLICMPass": "LICMPass",
"LegacyLoopSinkPass": "LoopSinkPass",
"LibCallsShrinkWrapLegacyPass": "LibCallsShrinkWrapPass",
"LoadStoreVectorizerLegacyPass": "LoadStoreVectorizerPass",
"LoopDataPrefetchLegacyPass": "LoopDataPrefetchPass",
"LoopDeletionLegacyPass": "LoopDeletionPass",
"LoopDistributeLegacy": "LoopDistributePass",
"LoopExtractor": "LoopExtractorPass",
"LoopFuseLegacy": "LoopFusePass",
"LoopGuardWideningLegacyPass": "LoopGuardWideningPass",
"LoopIdiomRecognizeLegacyPass": "LoopIdiomPass",
"LoopInstSimplifyLegacyPass": "LoopInstSimplifyPass",
"LoopInterchange": "LoopInterchangePass",
"LoopLoadElimination": "LoopLoadEliminationPass",
"LoopPredicationLegacyPass": "LoopPredicationPass",
"LoopReroll": "LoopRerollPass",
"LoopRotateLegacyPass": "LoopRotatePass",
"LoopSimplify": "LoopSimplifyPass",
"LoopSimplifyCFGLegacyPass": "LoopSimplifyCFGPass",
"LoopStrengthReduce": "LoopStrengthReducePass",
"LoopUnroll": "LoopUnrollPass",
"LoopUnrollAndJam": "LoopUnrollAndJamPass",
"LoopUnswitch": "LoopUnswitchPass",
"LoopVectorize": "LoopVectorizePass",
"LoopVersioningLICM": "LoopVersioningLICMPass",
"LowerAtomicLegacyPass": "LowerAtomicPass",
"LowerConstantIntrinsics": "LowerConstantIntrinsicsPass",
"LowerExpectIntrinsic": "LowerExpectIntrinsicPass",
"LowerGuardIntrinsicLegacyPass": "LowerGuardIntrinsicPass",
"LowerInvokeLegacyPass": "LowerInvokePass",
"LowerMatrixIntrinsicsLegacyPass": "LowerMatrixIntrinsicsPass",
"LowerSwitch": "LowerSwitchPass",
"LowerWidenableConditionLegacyPass": "LowerWidenableConditionPass",
"MemCpyOptLegacyPass": "MemCpyOptPass",
"MemorySanitizerLegacyPass": "MemorySanitizerLegacyPassPass",
"MergedLoadStoreMotionLegacyPass": "MergedLoadStoreMotionPass",
"MergeFunctionsLegacyPass": "MergeFunctionsPass",
"MetaRenamer": "MetaRenamerPass",
"ModuleAddressSanitizerLegacyPass": "ModuleAddressSanitizerLegacyPassPass",
"ModuleSanitizerCoverageLegacyPass": "ModuleSanitizerCoverageLegacyPassPass",
"NameAnonGlobalLegacyPass": "NameAnonGlobalPass",
"NaryReassociateLegacyPass": "NaryReassociatePass",
"NewGVNLegacyPass": "NewGVNPass",
"ObjCARCAPElim": "ObjCARCAPElimPass",
"ObjCARCContract": "ObjCARCContractPass",
"ObjCARCExpand": "ObjCARCExpandPass",
"ObjCARCOpt": "ObjCARCOptPass",
"PAEval": "PAEvalPass",
"PartialInlinerLegacyPass": "PartialInliningPass",
"PartiallyInlineLibCallsLegacyPass": "PartiallyInlineLibCallsPass",
"PlaceSafepoints": "PlaceSafepointsPass",
"PostInlineEntryExitInstrumenter": "PostInlineEntryExitInstrumenterPass",
"PromoteLegacyPass": "PromoteMemoryToRegisterPass",
"PruneEH": "PruneEHPass",
"ReassociateLegacyPass": "ReassociatePass",
"RedundantDbgInstElimination": "RedundantDbgInstEliminationPass",
"RegToMem": "DemoteRegisterToMemoryPass",
"ReversePostOrderFunctionAttrsLegacyPass": "ReversePostOrderFunctionAttrsPass",
"RewriteSymbolsLegacyPass": "RewriteSymbolsPass",
"SampleProfileLoaderLegacyPass": "SampleProfileLoaderPass",
"ScalarizerLegacyPass": "ScalarizerPass",
"SCCPLegacyPass": "SCCPPass",
"SeparateConstOffsetFromGEP": "SeparateConstOffsetFromGEPPass",
"SimpleInliner": "FunctionInliningPass",
"SingleLoopExtractor": "SingleLoopExtractorPass",
"SinkingLegacyPass": "SinkingPass",
"SLPVectorizer": "SLPVectorizerPass",
"SpeculativeExecutionLegacyPass": "SpeculativeExecutionPass",
"SROALegacyPass": "SROAPass",
"StraightLineStrengthReduce": "StraightLineStrengthReducePass",
"StripDeadDebugInfo": "StripDeadDebugInfoPass",
"StripDeadPrototypesLegacyPass": "StripDeadPrototypesPass",
"StripDebugDeclare": "StripDebugDeclarePass",
"StripNonDebugSymbols": "StripNonDebugSymbolsPass",
"StripNonLineTableDebugInfo": "StripNonLineTableDebugInfoPass",
"StripSymbols": "StripSymbolsPass",
"StructurizeCFG": "StructurizeCFGPass",
"TailCallElim": "TailCallEliminationPass",
"ThreadSanitizerLegacyPass": "ThreadSanitizerLegacyPassPass",
"UnifyFunctionExitNodes": "UnifyFunctionExitNodesPass",
}
# A list of pass names that should be excluded from the action space.
_EXCLUDED_PASSES = {
# Irrelevant garbage collection passes.
"StripGCRelocates",
"PlaceBackedgeSafepointsImpl",
"PlaceSafepointsPass",
"RewriteStatepointsForGclegacyPass",
# Irrelevant Objective-C Automatic Reference Counting passes.
"ObjCARCAAWrapperPass",
"ObjCARCAPElim",
"ObjCARCAPElimPass",
"ObjCARCContractPass",
"ObjCARCExpandPass",
"ObjCARCOptPass",
# Doesn't use legacy pass constructor API, or requires additional
# constructor arguments that are not available.
"WholeProgramDevirt",
"MakeGuardsExplicitLegacyPass",
"LowerTypeTests",
# Unneeded debugging passes.
"WriteThinLTOBitcode",
"PredicateInfoPrinterLegacyPass",
"WarnMissedTransformationsLegacy",
"DAH", # Bugpoint only.
"MetaRenamerPass",
"PAEvalPass",
"BarrierNoop", # Used for debugging pass manager.
"StripNonLineTableDebugInfoPass", # Debug stripping.
"StripDeadDebugInfoPass", # Debug stripping.
"LoopExtractorPass", # Pulls out loops into functions. Changes semantics.
"SingleLoopExtractorPass", # Pulls out loops into functions. Changes semantics.
"BlockExtractorPass", # Pulls out blocks into functions. Changes semantics.
# Unwanted instrumentation passes.
"BoundsCheckingLegacyPass", # Inserts traps on illegal access. Changes semantics.
"ASanGlobalsMetadataWrapperPass",
"AddressSanitizerLegacyPass",
"HWAddressSanitizerLegacyPass",
"SampleProfileLoaderPass",
"MemorySanitizerLegacyPassPass",
"ThreadSanitizerLegacyPassPass",
"ModuleAddressSanitizerLegacyPassPass",
"FunctionImportPass",
"DataFlowSanitizerPass",
"InstrOrderFilePass",
"PostInlineEntryExitInstrumenter",
# Profile-guided optimization or profiling.
"PGOIndirectCallPromotionLegacyPass",
"PGOInstrumentationUseLegacyPass",
"PGOInstrumentationGenCreateVarLegacyPass",
"PGOInstrumentationGenLegacyPass",
"PGOInstrumentationUseLegacyPass",
"PGOMemOpsizeOptLegacyPass",
"PgomemOpsizeOptLegacyPass",
"InstrProfilingLegacyPass",
"ControlHeightReductionLegacyPass",
# Unneeded symbol rewriting pass.
"RewriteSymbolsPass",
# Microsoft's Control Flow Guard checks on Windows targets.
# https://llvm.org/doxygen/CFGuard_8cpp.html
"CFGuardCheckPass",
"CFGuardDispatchPass",
# We don't want to change the visibility of symbols.
"InternalizePass",
# NOTE(github.com/facebookresearch/CompilerGym/issues/103): The
# -structurizecg has been found to break the semantics of cBench benchmarks
# ghostscript and tiff2bw.
"StructurizeCFGPass",
# NOTE(github.com/facebookresearch/CompilerGym/issues/46): The -gvn-sink
# pass has been found to produce different states when run multiple times
# on the same input.
"GVNSinkPass",
}
# The name of the LLVM target to extract architecture-specific transforms for.
_TARGET = "X86"
def include_pass(pass_: Pass) -> bool:
"""Determine whether the pass should be included in the generated C++ sources."""
if pass_.name in _EXCLUDED_PASSES:
return False
return "lib/Transforms" in pass_.source or f"Targets/{_TARGET}" in pass_.source
|
CompilerGym-development
|
build_tools/llvm/legacy_pass_manager/config.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import NamedTuple, Optional
class Pass(NamedTuple):
"""The declaration of an LLVM pass."""
# The name of the pass, e.g. "AddDiscriminatorsPass".
name: str
# The opt commandline flag which turns this pass on, e.g. "-add-discriminators".
flag: str
# The docstring for this pass, as reported by `opt -help`. E.g. "Add DWARF path discriminators".
description: str
# The path of the C++ file which defines this pass, relative to the LLVM source tree root.
source: str
# The path of the C++ header which declares this pass, relative to the LLVM source tree root.
# If the header path could not be inferred, this is None.
header: Optional[str]
# Boolean flags set in INITIALIZE_PASS().
cfg: bool
is_analysis: bool
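# Illustrative construction (the field values are invented for demonstration):
#
#   Pass(
#       name="AggressiveDCEPass",
#       flag="-adce",
#       description="Aggressive Dead Code Elimination",
#       source="lib/Transforms/Scalar/ADCE.cpp",
#       header="llvm/Transforms/Scalar/ADCE.h",
#       cfg=False,
#       is_analysis=False,
#   )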
|
CompilerGym-development
|
build_tools/llvm/legacy_pass_manager/common.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Filter the list of LLVM passes to use as an action space.
This script reads a list of passes from stdin and, for each, calls
config.include_pass() to determine whether it should be printed to stdout.
"""
import csv
import logging
import sys
from typing import Iterable
import config
from common import Pass
logger = logging.getLogger(__name__)
def filter_passes(pass_iterator: Iterable[Pass]) -> Iterable[Pass]:
"""Apply config.include_pass() to an input sequence of passes.
:param pass_iterator: An iterator over Pass objects.
:returns: A subset of the input Pass iterator.
"""
total_count = 0
selected_count = 0
for pass_ in pass_iterator:
total_count += 1
if config.include_pass(pass_):
selected_count += 1
logger.debug(
f"Selected {pass_.name} pass ({pass_.flag}) from {pass_.source}",
)
yield pass_
print(
f"Selected {selected_count} of {total_count} LLVM passes to use as actions",
file=sys.stderr,
)
def main(argv):
"""Main entry point."""
del argv
reader = csv.reader(sys.stdin, delimiter=",", quotechar='"')
next(reader)
pass_iterator = (Pass(*row) for row in reader)
filtered_passes = filter_passes(pass_iterator)
writer = csv.writer(sys.stdout, delimiter=",", quotechar='"')
writer.writerow(Pass._fields)
writer.writerows(sorted(list(filtered_passes), key=lambda r: r.name))
if __name__ == "__main__":
main(sys.argv)
|
CompilerGym-development
|
build_tools/llvm/legacy_pass_manager/filter_action_space.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Generate specifications for the LLVM service capabilities.
Usage: make_specs.py <service_binary> <flag_descriptions> <output_path>.
"""
import signal
# TODO: As we add support for more compilers we could generalize this script
# to work with other compiler services rather than hardcoding to LLVM.
import sys
from pathlib import Path
from compiler_gym.envs.llvm.llvm_env import LlvmEnv
# The maximum number of seconds to wait before timing out.
TIMEOUT_SECONDS = 300
def timeout_handler(signum, frame):
del signum # unused
del frame # unused
print(f"error: Timeout reached after {TIMEOUT_SECONDS:,d} seconds", file=sys.stderr)
sys.exit(1)
def main(argv):
assert (
len(argv) == 3
), "Usage: make_specs.py <service_binary> <flag_descriptions> <output_path>"
service_path, flag_descriptions, output_path = argv[1:]
signal.signal(signal.SIGALRM, timeout_handler)
signal.alarm(TIMEOUT_SECONDS)
with open(flag_descriptions) as f:
flag_descriptions = [ln.rstrip() for ln in f.readlines()]
with LlvmEnv(Path(service_path)) as env:
with open(output_path, "w") as f:
print("from enum import Enum", file=f)
print(file=f)
print("class observation_spaces(Enum):", file=f)
for name in env.observation.spaces:
print(f' {name} = "{name}"', file=f)
print(file=f)
print("class reward_spaces(Enum):", file=f)
for name in env.reward.spaces:
print(f' {name} = "{name}"', file=f)
print(file=f)
print("class actions(Enum):", file=f)
for name in env.action_space.names:
enum_name = "".join([x.capitalize() for x in name[1:].split("-")])
print(f' {enum_name} = "{name}"', file=f)
print(file=f)
print("class action_descriptions(Enum):", file=f)
for name, description in zip(env.action_space.names, flag_descriptions):
enum_name = "".join([x.capitalize() for x in name[1:].split("-")])
sanitized_description = description.replace('" "', "")
sanitized_description = sanitized_description.replace('"', "")
print(f' {enum_name} = "{sanitized_description}"', file=f)
if __name__ == "__main__":
main(sys.argv)
|
CompilerGym-development
|
build_tools/llvm/legacy_pass_manager/make_specs.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Build generated files from a list of passes.
This script reads from stdin a list of passes and generates files so that these
passes can be used as an action space.
Usage:
$ make_action_space_genfiles.py <output-directory> < <pass-list>
The following files are generated:
<outdir>/ActionHeaders.h
------------------------
Example:
#pragma once
#include "llvm/LinkAllPasses.h"
#include "llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h"
...
This file includes the set of LLVM headers that must be included to use the
passes.
<outdir>/ActionEnum.h
---------------------
Example:
enum class LlvmAction {
  ADD_DISCRIMINATORS,
  ADCE,
...
}
This defines an enum that names all of the passes.
<outdir>/ActionSwitch.h
-----------------------
Example:
#define HANDLE_ACTION(action, handlePass) \
switch (action) { \
case LlvmAction::ADD_DISCRIMINATORS: \
handlePass(llvm::createAddDiscriminatorsPass()); \
break; \
case LlvmAction::ADCE: \
handlePass(llvm::createAggressiveDCEPass()); \
break; \
...
}
To use the generated switch, call the HANDLE_ACTION() macro using an
LlvmAction enum value and a handlePass function which accepts a pass
instance as input.
<outdir>/flags.txt
-------------------------
Example:
-add-discriminators
-adce
...
A list of names for each pass.
<outdir>/flag_descriptions.txt
---------------------------------
Example:
Add DWARF path discriminators
Aggressive Dead Code Elimination
...
A list of descriptions of each pass.
"""
import csv
import logging
import sys
from pathlib import Path
from common import Pass
from config import LLVM_ACTION_INCLUDES
logger = logging.getLogger(__name__)
def process_pass(pass_, headers, enum_f, switch_f):
"""Extract and process transform passes in header."""
if pass_.header:
# Strip a leading "include/" from the header path.
header = pass_.header
if header.startswith("include/"):
header = header[len("include/") :]
headers.add(header)
    # The name of the pass in UPPER_SNAKE_CASE, derived from its flag.
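    # e.g. the flag "-add-discriminators" becomes "ADD_DISCRIMINATORS".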
enum_name = pass_.flag[1:].replace("-", "_").upper()
print(f" {enum_name},", file=enum_f)
print(f" case LlvmAction::{enum_name}: \\", file=switch_f)
print(f" handlePass(llvm::create{pass_.name}()); \\", file=switch_f)
print(" break; \\", file=switch_f)
def make_action_sources(pass_iterator, outpath: Path):
"""Generate the enum and switch content."""
total_passes = 0
headers = set(LLVM_ACTION_INCLUDES)
passes = sorted(list(pass_iterator), key=lambda p: p.name)
switch_path = Path(outpath / "ActionSwitch.h")
enum_path = Path(outpath / "ActionEnum.h")
include_path = Path(outpath / "ActionHeaders.h")
flags_path = Path(outpath / "flags.txt")
descriptions_path = Path(outpath / "flag_descriptions.txt")
with open(switch_path, "w", encoding="utf-8") as switch_f, open(
enum_path, "w", encoding="utf-8"
) as enum_f:
print("enum class LlvmAction {", file=enum_f)
print("#define HANDLE_ACTION(action, handlePass) \\", file=switch_f)
print(" switch (action) { \\", file=switch_f)
for pass_ in passes:
total_passes += 1
process_pass(pass_, headers, enum_f, switch_f)
print("};", file=enum_f)
print(" }", file=switch_f)
logger.debug("Generated %s", switch_path.name)
logger.debug("Generated %s", enum_path.name)
with open(include_path, "w", encoding="utf-8") as f:
print("#pragma once", file=f)
for header in sorted(headers):
print(f'#include "{header}"', file=f)
logger.debug("Generated %s", include_path.name)
with open(flags_path, "w", encoding="utf-8") as f:
print("\n".join(p.flag for p in passes), file=f)
logger.debug("Generated %s", flags_path.name)
with open(descriptions_path, "w", encoding="utf-8") as f:
print("\n".join(p.description for p in passes), file=f)
logger.debug("Generated %s", descriptions_path.name)
logger.debug("Created genfiles for %s pass actions", total_passes)
def main(argv):
"""Main entry point."""
outpath = Path(argv[1])
assert outpath.is_dir(), f"Output directory not found: {outpath}"
reader = csv.reader(sys.stdin, delimiter=",", quotechar='"')
next(reader)
outpath = Path(outpath).absolute().resolve()
pass_iterator = (Pass(*row) for row in reader)
make_action_sources(pass_iterator, outpath)
if __name__ == "__main__":
main(sys.argv)
|
CompilerGym-development
|
build_tools/llvm/legacy_pass_manager/make_action_space_genfiles.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Extract a list of passes form the LLVM source tree.
Usage:
$ python extract_passes_from_llvm_source_tree.py /path/to/llvm-project/llvm
Optionally accepts a list of specific files to examine:
$ python extract_passes_from_llvm_source_tree.py \
/path/to/llvm-project/llvm /path/to/llvm/source/file
Implementation notes
--------------------
This implements a not-very-good parser for the INITIALIZE_PASS() family of
macros, which are used in the LLVM sources to declare a pass using its name,
flag, and docstring. Parsing known macros like this is fragile and likely to
break as the LLVM sources evolve. Currently only tested on LLVM 10.0 and 13.0.1.
A more robust solution would be to parse the C++ sources and extract all classes
which inherit from ModulePass etc.
"""
import codecs
import csv
import logging
import os
import re
import shlex
import subprocess
import sys
from pathlib import Path
from typing import Dict, Iterable, List, Optional
from common import Pass
from config import CREATE_PASS_NAME_MAP
logger = logging.getLogger(__name__)
# A regular expression to match the start of an invocation of one of the
# InitializePass helper macros.
INITIALIZE_PASS_RE = r"(INITIALIZE_PASS|INITIALIZE_PASS_BEGIN|INITIALIZE_PASS_WITH_OPTIONS|INITIALIZE_PASS_WITH_OPTIONS_BEGIN)\("
# A regular expression to match static const string definitions.
CONST_CHAR_RE = r'^\s*static\s+const\s+char(\s+(?P<name>[a-zA-Z_]+)\s*\[\s*\]|\s*\*\s*(?P<ptr_name>[a-zA-Z_]+))\s*=\s*(?P<value>".+")\s*;'
class ParseError(ValueError):
def __init__(self, message: str, source: str, components: List[str]):
self.message = message
self.source = source
self.components = components
def parse_initialize_pass(
source_path: Path, header: Optional[str], input_source: str, defines: Dict[str, str]
) -> Iterable[Pass]:
"""A shitty parser for INITIALIZE_PASS() macro invocations.."""
# ****************************************************
# __ _
# _/ \ _(\(o
# / \ / _ ^^^o
# / ! \/ ! '!!!v'
# ! ! \ _' ( \____
# ! . \ _!\ \===^\)
# \ \_! / __!
# \! / \
# (\_ _/ _\ )
# \ ^^--^^ __-^ /(__
# ^^----^^ "^--v'
#
# HERE BE DRAGONS!
#
# TODO(cummins): Take this code out back and shoot it.
# ****************************************************
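    # Illustrative input/output (names taken from CREATE_PASS_NAME_MAP in
    # config.py; the source line itself is hypothetical):
    #
    #   INITIALIZE_PASS(ADCELegacyPass, "adce",
    #                   "Aggressive Dead Code Elimination", false, false)
    #
    # yields a Pass with name="AggressiveDCEPass" (after mapping through
    # CREATE_PASS_NAME_MAP), flag="-adce", and
    # description="Aggressive Dead Code Elimination".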
    # Squish down to a single line.
    source = re.sub(r"\n\s*", " ", input_source, flags=re.MULTILINE)
    # Normalize comma spacing.
    source = re.sub(r",", ", ", source)
    # Contract runs of whitespace to a single space.
    source = re.sub(r"\s+", " ", source)
    source = re.sub(r"\(\s+", "(", source)
    source = re.sub(r"\)\s+", ")", source)
# Strip the INITIALIZE_PASS(...) macro.
match = re.match(rf"^\s*{INITIALIZE_PASS_RE}(?P<args>.+)\)", source)
if not match:
raise ParseError("Failed to match INITIALIZE_PASS regex", source, [])
source = match.group("args")
components = []
start = 0
in_quotes = False
in_comment = False
substr = ""
for i in range(len(source)):
if (
not in_comment
and source[i] == "/"
and i < len(source) - 1
and source[i + 1] == "*"
):
in_comment = True
substr += source[start:i].strip()
if (
in_comment
and source[i] == "*"
and i < len(source) - 1
and source[i + 1] == "/"
):
in_comment = False
start = i + 2
if source[i] == '"':
in_quotes = not in_quotes
if not in_quotes and source[i] == ",":
substr += source[start:i].strip()
components.append(substr)
substr = ""
start = i + 2
components.append(substr + source[start:].strip())
if len(components) != 5:
raise ParseError(
f"Expected 5 components, found {len(components)}", source, components
)
pass_name, arg, name, cfg, analysis = components
# Strip quotation marks in arg and name.
if not arg:
raise ParseError(f"Empty arg: `{arg}`", source, components)
if not name:
raise ParseError(f"Empty name: `{name}`", source, components)
# Dodgy code to combine adjacent strings with macro expansion. For example,
# 'DEBUG_TYPE "-foo"'.
arg_components = shlex.split(arg)
for i, _ in enumerate(arg_components):
while arg_components[i] in defines:
arg_components[i] = defines[arg_components[i]]
arg = " ".join(arg_components)
if arg[0] == '"' and arg[-1] == '"':
arg = arg[1:-1]
while name in defines:
name = defines[name]
if not (name[0] == '"' and name[-1] == '"'):
raise ParseError(f"Could not interpret name `{name}`", source, components)
name = name[1:-1]
# Convert cfg and analysis to bool.
if cfg not in {"true", "false"}:
raise ParseError(
f"Could not interpret bool cfg argument `{cfg}`", source, components
)
if analysis not in {"true", "false"}:
raise ParseError(
f"Could not interpret bool analysis argument `{analysis}`",
source,
components,
)
cfg = cfg == "true"
analysis = analysis == "true"
opts = {
"source": source_path,
"header": header,
"name": pass_name,
"flag": f"-{arg}",
"description": name,
"cfg": cfg,
"is_analysis": analysis,
}
pass_name_or_list = CREATE_PASS_NAME_MAP.get(pass_name, pass_name)
if isinstance(pass_name_or_list, str):
opts["name"] = pass_name_or_list
yield Pass(**opts)
else:
for name in pass_name_or_list:
opts["name"] = name
yield Pass(**opts)
def build_defines(source: str) -> Dict[str, str]:
"""A quick-and-dirty technique to build a translation table from #defines
and string literals to their values."""
defines = {}
lines = source.split("\n")
for i in range(len(lines)):
line = lines[i].strip()
if line.startswith("#define"):
# Match #define strings.
components = line[len("#define ") :].split()
name = components[0]
value = " ".join(components[1:]).strip()
if value == "\\":
value = lines[i + 1].strip()
defines[name] = value
else:
# Match string literals.
match = re.match(CONST_CHAR_RE, line)
if match:
defines[match.group("name") or match.group("ptr_name")] = match.group(
"value"
)
return defines
def handle_file(source_path: Path) -> List[Pass]:
"""Parse the passes declared in a file."""
assert str(source_path).endswith(".cpp"), f"Unexpected file type: {source_path}"
header = Path("llvm/" + str(source_path)[len("lib") : -len("cpp")] + "h")
if not header.is_file():
header = ""
with codecs.open(source_path, "r", "utf-8") as f:
source = f.read()
defines = build_defines(source)
passes: List[Pass] = []
for match in re.finditer(INITIALIZE_PASS_RE, source):
start = match.start()
first_bracket = source.find("(", start)
bracket_depth = 1
end = first_bracket
for end in range(first_bracket + 1, len(source)):
if source[end] == "(":
bracket_depth += 1
elif source[end] == ")":
bracket_depth -= 1
if not bracket_depth:
break
try:
passes += list(
parse_initialize_pass(
source_path, header, source[start : end + 1], defines
)
)
except ParseError as e:
print(f"Parsing error: {e.message}", file=sys.stderr)
print(f"Parsed components: {e.components}", file=sys.stderr)
print(f"In line: {e.source}", file=sys.stderr)
print(f"In file: {source_path}", file=sys.stderr)
print("Fatal error. Aborting now.", file=sys.stderr)
sys.exit(1)
if passes:
logger.debug(
f"Extracted {len(passes)} {'passes' if len(passes) - 1 else 'pass'} from {source_path}",
)
else:
logger.debug(f"Found no passes in {source_path}")
return passes
def main(argv):
root = Path(argv[1])
assert root.is_dir(), f"Not a directory: {root}"
os.chdir(root)
if len(argv) > 2:
paths = [Path(path) for path in argv[2:]]
else:
# Get the names of all files which contain a pass definition.
matching_paths = []
try:
grep = subprocess.check_output(
["grep", "-l", "-E", rf"^\s*{INITIALIZE_PASS_RE}", "-R", "lib/"],
universal_newlines=True,
)
except subprocess.CalledProcessError:
print(
f"fatal: Failed to find any LLVM pass declarations in {root}",
file=sys.stderr,
)
sys.exit(1)
matching_paths += grep.strip().split("\n")
logger.debug("Processing %s files ...", len(matching_paths))
paths = [Path(path) for path in matching_paths]
# Build a list of pass entries.
rows = []
for path in sorted(paths):
passes = handle_file(path)
if passes:
rows += passes
writer = csv.writer(sys.stdout, delimiter=",", quotechar='"')
writer.writerow(Pass._fields)
writer.writerows(sorted(rows, key=lambda r: r.name))
if __name__ == "__main__":
main(sys.argv)
|
CompilerGym-development
|
build_tools/llvm/legacy_pass_manager/extract_passes_from_llvm_source_tree.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This module defines the validation result tuple."""
import itertools
import re
from collections import Counter
from typing import Iterable, List
from pydantic import BaseModel, validator
from compiler_gym.compiler_env_state import CompilerEnvState
from compiler_gym.errors import ValidationError
from compiler_gym.util.shell_format import plural
from compiler_gym.util.truncate import truncate
class ValidationResult(BaseModel):
"""A tuple that represents the result of validating a compiler environment state."""
state: CompilerEnvState
"""The compiler environment state that was validated."""
walltime: float
"""The wall time in seconds that the validation took."""
reward_validated: bool = False
"""Whether the reward that was recorded in the original state was validated."""
actions_replay_failed: bool = False
"""Whether the commandline was unable to be reproduced."""
reward_validation_failed: bool = False
"""Whether the validated reward differed from the original state."""
benchmark_semantics_validated: bool = False
"""Whether the semantics of the benchmark were validated."""
benchmark_semantics_validation_failed: bool = False
"""Whether the semantics of the benchmark were found to have changed."""
errors: List[ValidationError] = []
"""A list of :class:`ValidationError <compiler_gym.ValidationError>` """
@validator("walltime")
def walltime_nonnegative(cls, v):
assert v >= 0, "Walltime cannot be negative"
return v
def __eq__(self, rhs):
"""Equality comparison.
Validation results are *not* compared on walltime, and are insensitive
to the order of errors.
"""
if not isinstance(rhs, ValidationResult):
return False
return (
self.state == rhs.state
and self.reward_validated == rhs.reward_validated
and self.actions_replay_failed == rhs.actions_replay_failed
and self.reward_validation_failed == rhs.reward_validation_failed
and self.benchmark_semantics_validated == rhs.benchmark_semantics_validated
and self.benchmark_semantics_validation_failed
== rhs.benchmark_semantics_validation_failed
and sorted(self.errors) == sorted(rhs.errors)
)
def __ne__(self, rhs):
return not self == rhs
@property
def error_details(self) -> str:
"""A summary description of the validation errors."""
if not self.errors:
return ""
msg = []
error_types = [e.type for e in self.errors]
freq = sorted(Counter(error_types).items(), key=lambda x: -x[1])
# Shortcut for when there is just a single message to aggregate. Use
# format: "${error_msg}" if there is a single error or "${n}×
# ${error_msg}" if there are multiple copies of the same error.
if len(freq) == 1:
message = str(error_types[0])
if len(error_types) == 1:
return message
return f"{len(error_types)}× {message}"
# If there are multiple error messages, number them using the format:
# "[${i}/${j}] ${n}× ${error_msg}". E.g. "[1/3] 18× Memory leak".
for j, (message, count) in enumerate(freq, start=1):
if count > 1:
msg.append(f"[{j}/{len(freq)}] {count}× {message}")
else:
msg.append(f"[{j}/{len(freq)}] {message}")
remaining = len(freq) - j
if j >= 3 and remaining > 3:
msg.append(
f"... ({remaining} more {plural(remaining, 'error', 'errors')})"
)
break
return ", ".join(msg)
def okay(self) -> bool:
"""Whether validation succeeded."""
return not (
self.actions_replay_failed
or self.reward_validation_failed
or self.benchmark_semantics_validation_failed
)
def __repr__(self):
# Remove default-scheme prefix to improve output readability.
benchmark = re.sub(r"^benchmark://", "", str(self.state.benchmark))
if not self.okay():
msg = ", ".join(self.error_details.strip().split("\n"))
return f"❌ {benchmark} {truncate(msg, max_lines=1, max_line_len=50)}"
elif self.state.reward is None:
return f"✅ {benchmark}"
else:
return f"✅ {benchmark} {self.state.reward:.4f}"
def __str__(self):
return repr(self)
@classmethod
def join(cls, results: Iterable["ValidationResult"]):
"""Create a validation result that is the union join of multiple results."""
results = list(results)
if not results:
raise ValueError("No states to join")
if any(r.state != results[0].state for r in results[1:]):
raise ValueError("All states must be the same")
return cls(
state=results[0].state,
walltime=sum(r.walltime for r in results),
reward_validated=any(r.reward_validated for r in results),
actions_replay_failed=any(r.actions_replay_failed for r in results),
reward_validation_failed=any(r.reward_validation_failed for r in results),
benchmark_semantics_validated=any(
r.benchmark_semantics_validated for r in results
),
benchmark_semantics_validation_failed=any(
r.benchmark_semantics_validation_failed for r in results
),
errors=list(itertools.chain.from_iterable(r.errors for r in results)),
)
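# A minimal usage sketch (the values are hypothetical; the CompilerEnvState
# fields are assumed from compiler_gym.compiler_env_state):
#
#   state = CompilerEnvState(
#       benchmark="benchmark://cbench-v1/crc32",
#       commandline="opt -adce input.bc -o output.bc",
#       walltime=3.0,
#       reward=1.5,
#   )
#   result = ValidationResult(state=state, walltime=1.0, reward_validated=True)
#   assert result.okay()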
|
CompilerGym-development
|
compiler_gym/validation_result.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import compiler_gym.errors
# Deprecated since v0.2.4.
# This alias is kept for backwards compatibility and will be removed in a
# future release. Please use the errors from `compiler_gym.errors` instead.
ValidationError = compiler_gym.errors.ValidationError
|
CompilerGym-development
|
compiler_gym/validation_error.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
from argparse import ArgumentParser
def make_config(argv):
parser = ArgumentParser()
parser.add_argument(
"--out-file-path", type=str, required=True, help="Path to the generated config."
)
parser.add_argument("--enable-llvm-env", action="store_true")
parser.add_argument("--enable-mlir-env", action="store_true")
args = parser.parse_args(args=argv[1:])
with open(args.out_file_path, "w") as f:
f.write(f"enable_llvm_env = {args.enable_llvm_env}\n")
f.write(f"enable_mlir_env = {args.enable_mlir_env}\n")
if __name__ == "__main__":
make_config(sys.argv)
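# Example invocation (illustrative; the output path is an assumption):
#   python make_config.py --out-file-path=compiler_gym/config.py --enable-llvm-env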
|
CompilerGym-development
|
compiler_gym/make_config.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""CompilerGym is a set of compiler optimization environments for reinforcement learning.
After importing this module, the :class:`CompilerGym environments <compiler_gym.envs.CompilerEnv>`
will be available through the :code:`gym.make(...)` interface:
>>> import gym
>>> import compiler_gym
>>> gym.make("llvm-v0")
The list of CompilerGym environments that can be passed to :code:`gym.make(...)`
is available through :code:`compiler_gym.COMPILER_GYM_ENVS`:
>>> import compiler_gym
>>> compiler_gym.COMPILER_GYM_ENVS
['llvm-v0', 'llvm-ic-v0', 'llvm-autophase-ic-v0', 'llvm-ir-ic-v0']
"""
try:
from compiler_gym.util.version import __version__ # isort:skip
except ModuleNotFoundError as e:
# NOTE(https://github.com/facebookresearch/CompilerGym/issues/76): Handler
# for a particularly unhelpful error message.
raise ModuleNotFoundError(
f"{e}.\nAre you running in the root of the CompilerGym repository?\n"
"If so, please change to a different directory so that `import "
"compiler_gym` will work."
) from e
from compiler_gym.compiler_env_state import (
CompilerEnvState,
CompilerEnvStateReader,
CompilerEnvStateWriter,
)
from compiler_gym.envs import COMPILER_GYM_ENVS, CompilerEnv
from compiler_gym.errors import ValidationError
from compiler_gym.random_search import random_search
from compiler_gym.util.debug_util import (
get_debug_level,
get_logging_level,
set_debug_level,
)
from compiler_gym.util.download import download
from compiler_gym.util.registration import make
from compiler_gym.util.runfiles_path import (
cache_path,
site_data_path,
transient_cache_path,
)
from compiler_gym.validate import validate_states
from compiler_gym.validation_result import ValidationResult
# The top-level compiler_gym API.
__all__ = [
"__version__",
"cache_path",
"COMPILER_GYM_ENVS",
"make",
"CompilerEnv",
"CompilerEnvState",
"CompilerEnvStateWriter",
"CompilerEnvStateReader",
"download",
"get_debug_level",
"get_logging_level",
"random_search",
"set_debug_level",
"site_data_path",
"transient_cache_path",
"validate_states",
"ValidationError",
"ValidationResult",
]
|
CompilerGym-development
|
compiler_gym/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Simple parallelized random search."""
import json
import os
from multiprocessing import cpu_count
from pathlib import Path
from threading import Thread
from time import sleep, time
from typing import Callable, List, NamedTuple, Optional, Union
import humanize
from compiler_gym import config
from compiler_gym.envs import CompilerEnv
from compiler_gym.errors import ServiceError
from compiler_gym.util.gym_type_hints import ActionType
from compiler_gym.util.runfiles_path import create_user_logs_dir
from compiler_gym.util.tabulate import tabulate
if config.enable_llvm_env:
from compiler_gym.envs.llvm import LlvmEnv
class RandomSearchProgressLogEntry(NamedTuple):
"""A snapshot of incremental search progress."""
runtime_seconds: float
total_episode_count: int
total_step_count: int
num_passes: int
reward: float
def to_csv(self) -> str:
return ",".join(
[
f"{self.runtime_seconds:.3f}",
str(self.total_episode_count),
str(self.total_step_count),
str(self.num_passes),
str(self.reward),
]
)
@classmethod
def from_csv(cls, line: str) -> "RandomSearchProgressLogEntry":
(
runtime_seconds,
total_episode_count,
total_step_count,
num_passes,
reward,
) = line.split(",")
return RandomSearchProgressLogEntry(
float(runtime_seconds),
int(total_episode_count),
int(total_step_count),
int(num_passes),
float(reward),
)
class RandomAgentWorker(Thread):
"""Worker thread to run a repeating agent.
To stop the agent, set the :code:`alive` attribute of this thread to False.
"""
def __init__(
self,
make_env: Callable[[], CompilerEnv],
patience: int,
):
super().__init__()
self._make_env = make_env
self._patience = patience
# Incremental progress.
self.total_environment_count = 0
self.total_episode_count = 0
self.total_step_count = 0
self.best_returns = -float("inf")
self.best_actions: List[ActionType] = []
        self.best_commandline: str = ""
self.best_found_at_time = time()
self.alive = True # Set this to False to signal the thread to stop.
@property
def should_run_one_episode(self) -> bool:
"""Whether to run an episode."""
return self.alive or not self.total_episode_count
def run(self) -> None:
"""Run episodes in an infinite loop."""
while self.should_run_one_episode:
self.total_environment_count += 1
with self._make_env() as env:
self._patience = self._patience or env.action_space.n
self.run_one_environment(env)
def run_one_environment(self, env: CompilerEnv) -> None:
"""Run random walks in an infinite loop. Returns if the environment ends."""
while self.should_run_one_episode:
self.total_episode_count += 1
if not self.run_one_episode(env):
return
def run_one_episode(self, env: CompilerEnv) -> bool:
"""Run a single random episode.
:param env: An environment.
:return: True if the episode ended gracefully, else False.
"""
observation = env.reset()
actions: List[ActionType] = []
patience = self._patience
total_returns = 0
while patience >= 0:
patience -= 1
self.total_step_count += 1
# === Your agent here! ===
action = env.action_space.sample()
# === End of agent. ===
actions.append(action)
observation, reward, done, _ = env.step(action)
if done:
return False
total_returns += reward
if total_returns > self.best_returns:
patience = self._patience
self.best_returns = total_returns
self.best_actions = actions.copy()
try:
self.best_commandline = env.action_space.to_string(env.actions)
except NotImplementedError:
self.best_commandline = ""
self.best_found_at_time = time()
return True
def random_search(
make_env: Callable[[], CompilerEnv],
outdir: Optional[Union[str, Path]] = None,
total_runtime: Optional[float] = 600,
patience: int = 0,
nproc: int = cpu_count(),
skip_done: bool = False,
) -> CompilerEnv:
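    """Run a parallel random search over the actions of an environment and
    return an environment in which the best sequence of actions found has been
    replayed.
    :param make_env: A callback that constructs a new compiler environment.
    :param outdir: The directory to write logs to. A new logs directory is
        created if not provided.
    :param total_runtime: The maximum number of seconds to search for, or
        :code:`None` to run until interrupted.
    :param patience: The number of steps without an improvement in reward
        before restarting the episode. If zero, the size of the action space
        is used.
    :param nproc: The number of parallel worker threads to use.
    :param skip_done: If :code:`True`, return immediately if a log of a
        previous search exists in :code:`outdir`.
    """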
with make_env() as env:
env.reset()
if not isinstance(env.unwrapped, CompilerEnv):
raise TypeError(
f"random_search() requires CompilerEnv. Called with: {type(env).__name__}"
)
benchmark_uri = env.benchmark.uri
if not outdir:
outdir = create_user_logs_dir(
os.path.normpath(f"random/{benchmark_uri.scheme}/{benchmark_uri.path}")
)
outdir = Path(outdir)
if not env.reward_space:
raise ValueError("A reward space must be specified for random search")
reward_space_name = env.reward_space.name
action_space_names = list(env.action_space.names)
metadata_path = outdir / "random_search.json"
progress_path = outdir / "random_search_progress.csv"
best_actions_path = outdir / "random_search_best_actions.txt"
best_commandline_path = outdir / "random_search_best_actions_commandline.txt"
if skip_done and metadata_path.is_file():
# TODO(cummins): Return best reward.
return 0
# Write a metadata file.
metadata = {
"env": env.spec.id if env.spec else "",
"benchmark": str(benchmark_uri),
"reward": reward_space_name,
"patience": patience,
}
with open(str(metadata_path), "w") as f:
json.dump(metadata, f, sort_keys=True, indent=2)
workers = [RandomAgentWorker(make_env, patience) for _ in range(nproc)]
for worker in workers:
worker.start()
best_actions = []
best_commandline = ""
started = time()
last_best_returns = -float("inf")
print(
f"Started {len(workers)} worker threads for {benchmark_uri} "
f"using reward {reward_space_name}."
)
print(f"Writing logs to {outdir}")
end_time = time() + total_runtime if total_runtime else None
if end_time:
print(f"=== Running for {humanize.naturaldelta(total_runtime)} ===")
else:
print("=== WARNING: This will loop forever! Use C-c to terminate. ===")
print() # Blank line gets filled below when the cursor moves up one line.
try:
with open(str(progress_path), "w") as f:
print(
"runtime_seconds",
"total_episode_count",
"total_step_count",
"num_passes",
"reward",
sep=",",
file=f,
flush=True,
)
while not end_time or time() < end_time:
sleep(0.5)
total_episode_count = sum(
worker.total_episode_count for worker in workers
)
total_step_count = sum(worker.total_step_count for worker in workers)
total_environment_count = sum(
worker.total_environment_count for worker in workers
)
best_worker = max(workers, key=lambda worker: worker.best_returns)
best_returns = best_worker.best_returns
best_actions = best_worker.best_actions
best_commandline = best_worker.best_commandline
runtime = time() - started
print(
"\r\033[1A"
"\033[K"
f"Runtime: {humanize.naturaldelta(runtime)}. "
f"Num steps: {humanize.intcomma(total_step_count)} "
f"({humanize.intcomma(int(total_step_count / runtime))} / sec). "
f"Num episodes: {humanize.intcomma(total_episode_count)} "
f"({humanize.intcomma(int(total_episode_count / runtime))} / sec). "
f"Num restarts: {humanize.intcomma(total_environment_count - nproc)}.\n"
"\033[K"
f"Best reward: {best_returns:.4f} "
f"({len(best_actions)} passes, "
f"found after {humanize.naturaldelta(best_worker.best_found_at_time - started)})",
end="",
flush=True,
)
# Log the incremental progress improvements.
if best_returns > last_best_returns:
entry = RandomSearchProgressLogEntry(
runtime_seconds=runtime,
total_episode_count=total_episode_count,
total_step_count=total_step_count,
num_passes=len(best_actions),
reward=best_returns,
)
print(entry.to_csv(), file=f, flush=True)
last_best_returns = best_returns
except KeyboardInterrupt:
print("\nkeyboard interrupt", end="", flush=True)
best_action_names = [action_space_names[a] for a in best_actions]
with open(str(best_actions_path), "w") as f:
f.write("\n".join(best_action_names))
f.write("\n")
with open(str(best_commandline_path), "w") as f:
print(best_commandline, file=f)
print("\n", flush=True)
print("Ending worker threads ... ", end="", flush=True)
for worker in workers:
worker.alive = False
for worker in workers:
try:
worker.join()
except ServiceError:
# Service error can be raised on abrupt service termination causing
# RPC errors.
pass
print("done")
print("Replaying actions from best solution found:")
with make_env() as env:
env.reset()
replay_actions(env, best_action_names, outdir)
return env
def replay_actions(env: CompilerEnv, action_names: List[str], outdir: Path):
logs_path = outdir / "random_search_best_actions_progress.csv"
start_time = time()
if config.enable_llvm_env:
if isinstance(env, LlvmEnv):
env.write_bitcode(outdir / "unoptimized.bc")
with open(str(logs_path), "w") as f:
ep_reward = 0
for i, action in enumerate(action_names, start=1):
_, reward, done, _ = env.step(env.action_space.names.index(action))
assert not done
ep_reward += reward
print(
f"Step [{i:03d} / {len(action_names):03d}]: reward={reward:.4f} \t"
f"episode={ep_reward:.4f} \taction={action}"
)
progress = RandomSearchProgressLogEntry(
runtime_seconds=time() - start_time,
total_episode_count=1,
total_step_count=i,
num_passes=i,
reward=reward,
)
print(progress.to_csv(), action, file=f, sep=",")
if config.enable_llvm_env:
if isinstance(env, LlvmEnv):
env.write_bitcode(outdir / "optimized.bc")
print(
tabulate(
[
(
"IR instruction count",
env.observation["IrInstructionCountO0"],
env.observation["IrInstructionCountOz"],
env.observation["IrInstructionCount"],
),
(
"Object .text size (bytes)",
env.observation["ObjectTextSizeO0"],
env.observation["ObjectTextSizeOz"],
env.observation["ObjectTextSizeBytes"],
),
],
headers=("", "-O0", "-Oz", "final"),
)
)
def replay_actions_from_logs(env: CompilerEnv, logdir: Path, benchmark=None) -> None:
best_actions_path = logdir / "random_search_best_actions.txt"
meta_path = logdir / "random_search.json"
assert best_actions_path.is_file(), f"File not found: {best_actions_path}"
assert meta_path.is_file(), f"File not found: {meta_path}"
with open(meta_path, "rb") as f:
meta = json.load(f)
with open(best_actions_path) as f:
actions = [ln.strip() for ln in f.readlines() if ln.strip()]
benchmark = benchmark or meta["benchmark"]
env.reward_space = meta["reward"]
env.reset(benchmark=benchmark)
replay_actions(env, actions, logdir)
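# A minimal usage sketch (an assumption, not part of the library): run a short
# search over a runnable cBench benchmark using the instruction-count reward.
if __name__ == "__main__":
    import compiler_gym
    env = random_search(
        make_env=lambda: compiler_gym.make(
            "llvm-ic-v0", benchmark="benchmark://cbench-v1/qsort"
        ),
        total_runtime=60,
        nproc=2,
    )
    env.close()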
|
CompilerGym-development
|
compiler_gym/random_search.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Validate environment states."""
import random
from concurrent.futures import as_completed
from typing import Callable, Iterable, Optional
from compiler_gym.compiler_env_state import CompilerEnvState
from compiler_gym.envs.compiler_env import CompilerEnv
from compiler_gym.util import thread_pool
from compiler_gym.validation_result import ValidationResult
def _validate_states_worker(
make_env: Callable[[], CompilerEnv], state: CompilerEnvState
) -> ValidationResult:
with make_env() as env:
result = env.validate(state)
return result
def validate_states(
make_env: Callable[[], CompilerEnv],
states: Iterable[CompilerEnvState],
nproc: Optional[int] = None,
inorder: bool = False,
) -> Iterable[ValidationResult]:
"""A parallelized implementation of
:meth:`env.validate() <compiler_gym.envs.CompilerEnv.validate>` for batched
validation.
:param make_env: A callback which instantiates a compiler environment.
:param states: A sequence of compiler environment states to validate.
:param nproc: The number of parallel worker processes to run.
:param inorder: Whether to return results in the order they were provided,
or in the order that they are available.
:return: An iterator over validation results. The order of results may
differ from the input states.
"""
    executor = thread_pool.get_thread_pool_executor()
    # Materialize the states into a list so that len(states) below works even
    # when a generator is passed.
    states = list(states)
    if nproc == 1:
        map_func = map
    elif inorder:
        map_func = executor.map
    else:
        # The validation function of benchmarks can vary wildly in computational
        # demands. Shuffle the order of states (unless they were explicitly
        # asked to be kept in order) as crude load balancing for the case where
        # multiple states are provided for each benchmark.
        random.shuffle(states)
def map_func(func, envs, states):
futures = (
executor.submit(func, env, state) for env, state in zip(envs, states)
)
return (r.result() for r in as_completed(futures))
yield from map_func(_validate_states_worker, [make_env] * len(states), states)
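# A minimal usage sketch (an assumption; "results.csv" is a hypothetical file
# of states written by compiler_gym.CompilerEnvStateWriter):
if __name__ == "__main__":
    import compiler_gym
    from compiler_gym.compiler_env_state import CompilerEnvStateReader
    with open("results.csv") as f:
        states = list(CompilerEnvStateReader(f))
    for result in validate_states(
        lambda: compiler_gym.make("llvm-v0"), states, nproc=4
    ):
        print(result)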
|
CompilerGym-development
|
compiler_gym/validate.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This module defines a class to represent a compiler environment state."""
import csv
import re
import sys
from io import StringIO
from typing import Iterable, List, Optional, TextIO
import requests
from pydantic import BaseModel, Field, validator
from compiler_gym.datasets.uri import BenchmarkUri
from compiler_gym.util.truncate import truncate
class CompilerEnvState(BaseModel):
"""The representation of a compiler environment state.
The state of an environment is defined as a benchmark and a sequence of
actions that has been applied to it. For a given environment, the state
contains the information required to reproduce the result.
"""
benchmark: str = Field(
allow_mutation=False,
examples=[
"benchmark://cbench-v1/crc32",
"generator://csmith-v0/0",
],
)
"""The URI of the benchmark used for this episode."""
commandline: str
"""The list of actions that produced this state, as a commandline."""
walltime: float
"""The walltime of the episode in seconds. Must be non-negative."""
reward: Optional[float] = Field(
required=False,
default=None,
allow_mutation=True,
)
"""The cumulative reward for this episode. Optional."""
@validator("walltime")
def walltime_nonnegative(cls, v):
if v is not None:
assert v >= 0, "Walltime cannot be negative"
return v
@validator("benchmark", pre=True)
def validate_benchmark(cls, value):
if isinstance(value, BenchmarkUri):
return str(value)
return value
@property
def has_reward(self) -> bool:
"""Return whether the state has a reward value."""
return self.reward is not None
def __eq__(self, rhs) -> bool:
if not isinstance(rhs, CompilerEnvState):
return False
epsilon = 1e-5
# Only compare reward if both states have it.
if not (self.has_reward and rhs.has_reward):
reward_equal = True
else:
reward_equal = abs(self.reward - rhs.reward) < epsilon
# Note that walltime is excluded from equivalence checks as two states
# are equivalent if they define the same point in the optimization space
# irrespective of how long it took to get there.
return (
self.benchmark == rhs.benchmark
and reward_equal
and self.commandline == rhs.commandline
)
def __ne__(self, rhs) -> bool:
return not self == rhs
class Config:
validate_assignment = True
class CompilerEnvStateWriter:
"""Serialize compiler environment states to CSV.
Example use:
>>> with CompilerEnvStateWriter(open("results.csv", "wb")) as writer:
... writer.write_state(env.state)
"""
def __init__(self, f: TextIO, header: bool = True):
"""Constructor.
:param f: The file to write to.
:param header: Whether to include a header row.
"""
self.f = f
self.writer = csv.writer(self.f, lineterminator="\n")
self.header = header
def write_state(self, state: CompilerEnvState, flush: bool = False) -> None:
"""Write the state to file.
:param state: A compiler environment state.
:param flush: Write to file immediately.
"""
if self.header:
self.writer.writerow(("benchmark", "reward", "walltime", "commandline"))
self.header = False
self.writer.writerow(
(state.benchmark, state.reward, state.walltime, state.commandline)
)
if flush:
self.f.flush()
def __enter__(self):
"""Support with-statement for the writer."""
return self
def __exit__(self, *args):
"""Support with-statement for the writer."""
self.f.close()
class CompilerEnvStateReader:
"""Read states from a CSV file.
Example usage:
>>> with CompilerEnvStateReader(open("results.csv", "rb")) as reader:
... for state in reader:
... print(state)
"""
def __init__(self, f: TextIO):
"""Constructor.
:param f: The file to read.
"""
self.f = f
self.reader = csv.reader(self.f)
def __iter__(self) -> Iterable[CompilerEnvState]:
"""Read the states from the file."""
columns_in_order = ["benchmark", "reward", "walltime", "commandline"]
# Read the CSV and coerce the columns into the expected order.
for (
benchmark,
reward,
walltime,
commandline,
) in self._iterate_columns_in_order(self.reader, columns_in_order):
yield CompilerEnvState(
benchmark=benchmark,
reward=None if reward == "" else float(reward),
walltime=0 if walltime == "" else float(walltime),
commandline=commandline,
)
@staticmethod
def _iterate_columns_in_order(
reader: csv.reader, columns: List[str]
) -> Iterable[List[str]]:
"""Read the input CSV and return each row in the given column order.
Supports CSVs both with and without a header. If no header, columns are
expected to be in the correct order. Else the header row is used to
determine column order.
Header row detection is case insensitive.
:param reader: The CSV file to read.
:param columns: A list of column names in the order that they are
expected.
:return: An iterator over rows.
"""
try:
row = next(reader)
except StopIteration:
# Empty file.
return
if len(row) != len(columns):
raise ValueError(
f"Expected {len(columns)} columns in the first row of CSV: {truncate(row)}"
)
# Convert the maybe-header columns to lowercase for case-insensitive
# comparison.
maybe_header = [v.lower() for v in row]
if set(maybe_header) == set(columns):
# The first row matches the expected columns names, so use it to
# determine the column order.
column_order = [maybe_header.index(v) for v in columns]
yield from ([row[v] for v in column_order] for row in reader)
else:
# The first row isn't a header, so assume that all rows are in
# expected column order.
yield row
yield from reader
def __enter__(self):
"""Support with-statement for the reader."""
return self
def __exit__(self, *args):
"""Support with-statement for the reader."""
self.f.close()
@staticmethod
def read_paths(paths: Iterable[str]) -> Iterable[CompilerEnvState]:
"""Read a states from a list of file paths.
Read states from stdin using a special path :code:`"-"`.
:param: A list of paths.
:return: A generator of compiler env states.
"""
for path in paths:
if path == "-":
yield from iter(CompilerEnvStateReader(sys.stdin))
elif (
re.match(r"^(http|https)://[a-zA-Z0-9.-_/]+(\.csv)$", path) is not None
):
response: requests.Response = requests.get(path)
if response.status_code == 200:
yield from iter(CompilerEnvStateReader(StringIO(response.text)))
else:
raise requests.exceptions.InvalidURL(
f"Url {path} content could not be obtained"
)
else:
with open(path) as f:
yield from iter(CompilerEnvStateReader(f))
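# A round-trip sketch (an assumption; the commandline value is illustrative):
# write a state to an in-memory CSV and read it back.
if __name__ == "__main__":
    buf = StringIO()
    state = CompilerEnvState(
        benchmark="benchmark://cbench-v1/crc32",
        walltime=1.5,
        commandline="opt  input.bc -o output.bc",
        reward=0.5,
    )
    CompilerEnvStateWriter(buf).write_state(state, flush=True)
    buf.seek(0)
    assert next(iter(CompilerEnvStateReader(buf))) == state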
|
CompilerGym-development
|
compiler_gym/compiler_env_state.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This module implements a wrapper that logs state transitions to an sqlite
database.
"""
import logging
import pickle
import sqlite3
import zlib
from pathlib import Path
from time import time
from typing import Iterable, Optional, Union
import numpy as np
from compiler_gym.envs import LlvmEnv
from compiler_gym.spaces import Reward
from compiler_gym.util.gym_type_hints import ActionType
from compiler_gym.util.timer import Timer, humanize_duration
from compiler_gym.views import ObservationSpaceSpec
from compiler_gym.wrappers import CompilerEnvWrapper
DB_CREATION_SCRIPT = """
CREATE TABLE IF NOT EXISTS States (
benchmark_uri TEXT NOT NULL, -- The URI of the benchmark.
done INTEGER NOT NULL, -- 0 = False, 1 = True.
ir_instruction_count_oz_reward REAL NULLABLE,
state_id TEXT NOT NULL, -- 40-char sha1.
actions TEXT NOT NULL, -- Decode: [int(x) for x in field.split()]
PRIMARY KEY (benchmark_uri, actions),
FOREIGN KEY (state_id) REFERENCES Observations(state_id) ON UPDATE CASCADE
);
CREATE TABLE IF NOT EXISTS Observations (
state_id TEXT NOT NULL, -- 40-char sha1.
ir_instruction_count INTEGER NOT NULL,
compressed_llvm_ir BLOB NOT NULL, -- Decode: zlib.decompress(...)
pickled_compressed_programl BLOB NOT NULL, -- Decode: pickle.loads(zlib.decompress(...))
autophase TEXT NOT NULL, -- Decode: np.array([int(x) for x in field.split()], dtype=np.int64)
instcount TEXT NOT NULL, -- Decode: np.array([int(x) for x in field.split()], dtype=np.int64)
PRIMARY KEY (state_id)
);
"""
class SynchronousSqliteLogger(CompilerEnvWrapper):
"""A wrapper for an LLVM environment that logs all transitions to an sqlite
database.
Wrap an existing LLVM environment and then use it as per normal:
>>> env = SynchronousSqliteLogger(
... env=gym.make("llvm-autophase-ic-v0"),
... db_path="example.db",
... )
Connect to the database file you specified:
.. code-block::
$ sqlite3 example.db
There are two tables:
1. States: records every unique combination of benchmark + actions. For each
entry, records an identifying state ID, the episode reward, and whether
the episode is terminated:
.. code-block::
sqlite> .mode markdown
sqlite> .headers on
sqlite> select * from States limit 5;
| benchmark_uri | done | ir_instruction_count_oz_reward | state_id | actions |
|--------------------------|------|--------------------------------|------------------------------------------|----------------|
| generator://csmith-v0/99 | 0 | 0.0 | d625b874e58f6d357b816e21871297ac5c001cf0 | |
| generator://csmith-v0/99 | 0 | 0.0 | d625b874e58f6d357b816e21871297ac5c001cf0 | 31 |
| generator://csmith-v0/99 | 0 | 0.0 | 52f7142ef606d8b1dec2ff3371c7452c8d7b81ea | 31 116 |
| generator://csmith-v0/99 | 0 | 0.268005818128586 | d8c05bd41b7a6c6157b6a8f0f5093907c7cc7ecf | 31 116 103 |
| generator://csmith-v0/99 | 0 | 0.288621664047241 | c4d7ecd3807793a0d8bc281104c7f5a8aa4670f9 | 31 116 103 109 |
2. Observations: records pickled, compressed, and text observation values
for each unique state.
Caveats of this implementation:
1. Only :class:`LlvmEnv <compiler_gym.envs.LlvmEnv>` environments may be
wrapped.
2. The wrapped environment must have an observation space and reward space
set.
3. The observation spaces and reward spaces that are logged to database
are hardcoded. To change what is recorded, you must copy and modify this
implementation.
4. Writing to the database is synchronous and adds significant overhead to
the compute cost of the environment.
"""
def __init__(
self,
env: LlvmEnv,
db_path: Path,
commit_frequency_in_seconds: int = 300,
max_step_buffer_length: int = 5000,
):
"""Constructor.
:param env: The environment to wrap.
:param db_path: The path of the database to log to. This file may
            already exist. If it does, new entries are appended. If the file
            does not exist, it is created.
:param commit_frequency_in_seconds: The maximum amount of time to elapse
before writing pending logs to the database.
:param max_step_buffer_length: The maximum number of calls to
:code:`step()` before writing pending logs to the database.
"""
super().__init__(env)
if not hasattr(env, "unwrapped"):
raise TypeError("Requires LlvmEnv base environment")
if not isinstance(self.unwrapped, LlvmEnv):
raise TypeError("Requires LlvmEnv base environment")
db_path.parent.mkdir(exist_ok=True, parents=True)
self.connection = sqlite3.connect(str(db_path))
self.cursor = self.connection.cursor()
self.commit_frequency = commit_frequency_in_seconds
self.max_step_buffer_length = max_step_buffer_length
self.cursor.executescript(DB_CREATION_SCRIPT)
self.connection.commit()
self.last_commit = time()
self.observations_buffer = {}
self.step_buffer = []
        # Housekeeping note: keep these lists in sync with _record().
self._observations = [
self.env.observation.spaces["IrSha1"],
self.env.observation.spaces["Ir"],
self.env.observation.spaces["Programl"],
self.env.observation.spaces["Autophase"],
self.env.observation.spaces["InstCount"],
self.env.observation.spaces["IrInstructionCount"],
]
self._rewards = [
self.env.reward.spaces["IrInstructionCountOz"],
self.env.reward.spaces["IrInstructionCount"],
]
self._reward_totals = np.zeros(len(self._rewards))
def flush(self) -> None:
"""Flush the buffered steps and observations to database."""
n_steps, n_observations = len(self.step_buffer), len(self.observations_buffer)
# Nothing to flush.
if not n_steps:
return
with Timer() as flush_time:
            # Housekeeping note: keep these statements in sync with _record().
self.cursor.executemany(
"INSERT OR IGNORE INTO States VALUES (?, ?, ?, ?, ?)",
self.step_buffer,
)
self.cursor.executemany(
"INSERT OR IGNORE INTO Observations VALUES (?, ?, ?, ?, ?, ?)",
((k, *v) for k, v in self.observations_buffer.items()),
)
self.step_buffer = []
self.observations_buffer = {}
self.connection.commit()
logging.info(
"Wrote %d state records and %d observations in %s. Last flush %s ago",
n_steps,
n_observations,
flush_time,
humanize_duration(time() - self.last_commit),
)
self.last_commit = time()
def reset(self, *args, **kwargs):
observation = self.env.reset(*args, **kwargs)
observations, rewards, done, info = self.env.multistep(
actions=[],
observation_spaces=self._observations,
reward_spaces=self._rewards,
)
assert not done, f"reset() failed! {info}"
        self._reward_totals = np.array(rewards, dtype=np.float32)
self._record(
actions=self.actions,
observations=observations,
rewards=self._reward_totals,
done=False,
)
return observation
def step(
self,
action: ActionType,
observation_spaces: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
reward_spaces: Optional[Iterable[Union[str, Reward]]] = None,
observations: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
rewards: Optional[Iterable[Union[str, Reward]]] = None,
):
assert self.observation_space, "No observation space set"
assert self.reward_space, "No reward space set"
assert (
observation_spaces is None
), "SynchronousSqliteLogger does not support observation_spaces"
assert (
reward_spaces is None
), "SynchronousSqliteLogger does not support reward_spaces"
assert (
observations is None
), "SynchronousSqliteLogger does not support observations"
assert rewards is None, "SynchronousSqliteLogger does not support rewards"
observations, rewards, done, info = self.env.step(
action=action,
observation_spaces=self._observations + [self.observation_space_spec],
reward_spaces=self._rewards + [self.reward_space],
)
self._reward_totals += rewards[:-1]
self._record(
actions=self.actions,
observations=observations[:-1],
rewards=self._reward_totals,
done=done,
)
return observations[-1], rewards[-1], done, info
def _record(self, actions, observations, rewards, done) -> None:
state_id, ir, programl, autophase, instcount, instruction_count = observations
instruction_count_reward = float(rewards[0])
self.step_buffer.append(
(
str(self.benchmark.uri),
1 if done else 0,
instruction_count_reward,
state_id,
" ".join(str(x) for x in actions),
)
)
self.observations_buffer[state_id] = (
instruction_count,
zlib.compress(ir.encode("utf-8")),
zlib.compress(pickle.dumps(programl)),
" ".join(str(x) for x in autophase),
" ".join(str(x) for x in instcount),
)
if (
len(self.step_buffer) >= self.max_step_buffer_length
or time() - self.last_commit >= self.commit_frequency
):
self.flush()
def close(self):
self.flush()
self.env.close()
def fork(self):
raise NotImplementedError
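# A decoding sketch (an assumption; "example.db" must already exist): read
# back one logged observation using the decode hints in DB_CREATION_SCRIPT.
if __name__ == "__main__":
    connection = sqlite3.connect("example.db")
    row = connection.execute(
        "SELECT autophase, compressed_llvm_ir FROM Observations LIMIT 1"
    ).fetchone()
    if row:
        autophase = np.array([int(x) for x in row[0].split()], dtype=np.int64)
        llvm_ir = zlib.decompress(row[1]).decode("utf-8")
        print(autophase.shape, len(llvm_ir))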
|
CompilerGym-development
|
compiler_gym/wrappers/sqlite_logger.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This module implements fork wrappers."""
from typing import List
from compiler_gym.envs import CompilerEnv
from compiler_gym.wrappers import CompilerEnvWrapper
class ForkOnStep(CompilerEnvWrapper):
"""A wrapper that creates a fork of the environment before every step.
This wrapper creates a new fork of the environment before every call to
    :meth:`env.step() <compiler_gym.envs.CompilerEnv.step>`. Because of this,
this environment supports an additional :meth:`env.undo()
<compiler_gym.wrappers.ForkOnStep.undo>` method that can be used to
backtrack.
Example usage:
>>> env = ForkOnStep(compiler_gym.make("llvm-v0"))
>>> env.step(0)
>>> env.actions
[0]
>>> env.undo()
>>> env.actions
[]
:ivar stack: A fork of the environment before every previous call to
        :meth:`env.step() <compiler_gym.envs.CompilerEnv.step>`, ordered
oldest to newest.
:vartype stack: List[CompilerEnv]
"""
def __init__(self, env: CompilerEnv):
"""Constructor.
:param env: The environment to wrap.
"""
super().__init__(env)
self.stack: List[CompilerEnv] = []
    def undo(self) -> CompilerEnv:
        """Undo the previous step, restoring the environment to its prior state.
        :returns: The restored environment.
        """
        if not self.stack:
            return self.env
self.env.close()
self.env = self.stack.pop()
return self.env
def close(self) -> None:
for env in self.stack:
env.close()
self.stack: List[CompilerEnv] = []
self.env.close()
self.custom_close = True
    def reset(self, *args, **kwargs):
        observation = self.env.reset(*args, **kwargs)
        for env in self.stack:
            env.close()
        self.stack = []
        return observation
def step(self, *args, **kwargs):
self.stack.append(self.env.fork())
return self.env.step(*args, **kwargs)
def fork(self):
raise NotImplementedError
|
CompilerGym-development
|
compiler_gym/wrappers/fork.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from itertools import cycle
from typing import Callable, Iterable, Optional, Union
import numpy as np
from compiler_gym.datasets import Benchmark
from compiler_gym.envs import CompilerEnv
from compiler_gym.util.parallelization import thread_safe_tee
from compiler_gym.wrappers.core import CompilerEnvWrapper
BenchmarkLike = Union[str, Benchmark]
class IterateOverBenchmarks(CompilerEnvWrapper):
"""Iterate over a (possibly infinite) sequence of benchmarks on each call to
reset(). Will raise :code:`StopIteration` on :meth:`reset()
<compiler_gym.envs.CompilerEnv.reset>` once the iterator is exhausted. Use
:class:`CycleOverBenchmarks` or :class:`RandomOrderBenchmarks` for wrappers
which will loop over the benchmarks.
"""
def __init__(
self,
env: CompilerEnv,
benchmarks: Iterable[BenchmarkLike],
fork_shares_iterator: bool = False,
):
"""Constructor.
:param env: The environment to wrap.
:param benchmarks: An iterable sequence of benchmarks.
:param fork_shares_iterator: If :code:`True`, the :code:`benchmarks`
            iterator will be shared by a forked environment created by
:meth:`env.fork() <compiler_gym.envs.CompilerEnv.fork>`. This means
that calling :meth:`env.reset()
<compiler_gym.envs.CompilerEnv.reset>` with one environment will
advance the iterator in the other. If :code:`False`, forked
environments will use :code:`itertools.tee()` to create a copy of
the iterator so that each iterator may advance independently.
However, this requires shared buffers between the environments which
can lead to memory overheads if :meth:`env.reset()
<compiler_gym.envs.CompilerEnv.reset>` is called many times more in
one environment than the other.
"""
super().__init__(env)
self.benchmarks = iter(benchmarks)
self.fork_shares_iterator = fork_shares_iterator
def reset(self, benchmark: Optional[BenchmarkLike] = None, **kwargs):
if benchmark is not None:
raise TypeError("Benchmark passed to IterateOverBenchmarks.reset()")
benchmark: BenchmarkLike = next(self.benchmarks)
return self.env.reset(benchmark=benchmark)
def fork(self) -> "IterateOverBenchmarks":
if self.fork_shares_iterator:
other_benchmarks_iterator = self.benchmarks
else:
self.benchmarks, other_benchmarks_iterator = thread_safe_tee(
self.benchmarks
)
return IterateOverBenchmarks(
env=self.env.fork(),
benchmarks=other_benchmarks_iterator,
fork_shares_iterator=self.fork_shares_iterator,
)
class CycleOverBenchmarks(IterateOverBenchmarks):
"""Cycle through a list of benchmarks on each call to :meth:`reset()
<compiler_gym.envs.CompilerEnv.reset>`. Same as
:class:`IterateOverBenchmarks` except the list of benchmarks repeats once
exhausted.
"""
def __init__(
self,
env: CompilerEnv,
benchmarks: Iterable[BenchmarkLike],
fork_shares_iterator: bool = False,
):
"""Constructor.
:param env: The environment to wrap.
:param benchmarks: An iterable sequence of benchmarks.
:param fork_shares_iterator: If :code:`True`, the :code:`benchmarks`
iterator will be shared by a forked environment created by
:meth:`env.fork() <compiler_gym.envs.CompilerEnv.fork>`. This means
that calling :meth:`env.reset()
<compiler_gym.envs.CompilerEnv.reset>` with one environment will
advance the iterator in the other. If :code:`False`, forked
environments will use :code:`itertools.tee()` to create a copy of
the iterator so that each iterator may advance independently.
However, this requires shared buffers between the environments which
can lead to memory overheads if :meth:`env.reset()
<compiler_gym.envs.CompilerEnv.reset>` is called many times more in
one environment than the other.
"""
super().__init__(
env, benchmarks=cycle(benchmarks), fork_shares_iterator=fork_shares_iterator
)
class CycleOverBenchmarksIterator(CompilerEnvWrapper):
"""Same as :class:`CycleOverBenchmarks
<compiler_gym.wrappers.CycleOverBenchmarks>` except that the user generates
the iterator.
"""
def __init__(
self,
env: CompilerEnv,
make_benchmark_iterator: Callable[[], Iterable[BenchmarkLike]],
):
"""Constructor.
:param env: The environment to wrap.
:param make_benchmark_iterator: A callback that returns an iterator over
a sequence of benchmarks. Once the iterator is exhausted, this
callback is called to produce a new iterator.
"""
super().__init__(env)
self.make_benchmark_iterator = make_benchmark_iterator
self.benchmarks = iter(self.make_benchmark_iterator())
def reset(self, benchmark: Optional[BenchmarkLike] = None, **kwargs):
if benchmark is not None:
raise TypeError("Benchmark passed toIterateOverBenchmarks.reset()")
try:
benchmark: BenchmarkLike = next(self.benchmarks)
except StopIteration:
self.benchmarks = iter(self.make_benchmark_iterator())
benchmark: BenchmarkLike = next(self.benchmarks)
return self.env.reset(benchmark=benchmark)
def fork(self) -> "CycleOverBenchmarksIterator":
return CycleOverBenchmarksIterator(
env=self.env.fork(),
make_benchmark_iterator=self.make_benchmark_iterator,
)
class RandomOrderBenchmarks(IterateOverBenchmarks):
"""Select randomly from a list of benchmarks on each call to :meth:`reset()
<compiler_gym.envs.CompilerEnv.reset>`.
.. note::
Uniform random selection is provided by evaluating the input benchmarks
iterator into a list and sampling randomly from the list. For very large
and infinite iterables of benchmarks you must use the
:class:`IterateOverBenchmarks
<compiler_gym.wrappers.IterateOverBenchmarks>` wrapper with your own
random sampling iterator.
"""
def __init__(
self,
env: CompilerEnv,
benchmarks: Iterable[BenchmarkLike],
rng: Optional[np.random.Generator] = None,
):
"""Constructor.
:param env: The environment to wrap.
:param benchmarks: An iterable sequence of benchmarks. The entirety of
this input iterator is evaluated during construction.
:param rng: A random number generator to use for random benchmark
selection.
"""
self._all_benchmarks = list(benchmarks)
rng = rng or np.random.default_rng()
super().__init__(
env,
            # `iter(int, 1)` is an infinite iterator: int() == 0 never equals 1.
            benchmarks=(rng.choice(self._all_benchmarks) for _ in iter(int, 1)),
fork_shares_iterator=True,
)
def fork(self) -> "IterateOverBenchmarks":
"""Fork the random order benchmark wrapper.
Note that RNG state is not copied to forked environments.
"""
return IterateOverBenchmarks(
env=self.env.fork(), benchmarks=self._all_benchmarks
)
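# A usage sketch (an assumption; the benchmark URIs are illustrative): cycle
# through two benchmarks across successive calls to reset().
if __name__ == "__main__":
    import compiler_gym
    env = CycleOverBenchmarks(
        compiler_gym.make("llvm-v0"),
        benchmarks=[
            "benchmark://cbench-v1/crc32",
            "benchmark://cbench-v1/qsort",
        ],
    )
    for _ in range(3):
        env.reset()
        print(env.benchmark)  # crc32, qsort, crc32.
    env.close()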
|
CompilerGym-development
|
compiler_gym/wrappers/datasets.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""The :code:`compiler_gym.wrappers` module provides a set of classes that can
be used to transform an environment in a modular way.
For example:
>>> env = compiler_gym.make("llvm-v0")
>>> env = TimeLimit(env, n=10)
>>> env = CycleOverBenchmarks(
... env,
... benchmarks=[
... "benchmark://cbench-v1/crc32",
... "benchmark://cbench-v1/qsort",
... ],
... )
.. warning::
CompilerGym environments are incompatible with the `OpenAI Gym wrappers
<https://github.com/openai/gym/tree/master/gym/wrappers>`_. This is because
CompilerGym extends the environment API with additional arguments and
methods. You must use the wrappers from this module when wrapping
CompilerGym environments. We provide a set of base wrappers that are
equivalent to those in OpenAI Gym that you can use to write your own
wrappers.
"""
from compiler_gym import config
from compiler_gym.wrappers.commandline import (
CommandlineWithTerminalAction,
ConstrainedCommandline,
)
from compiler_gym.wrappers.core import (
ActionWrapper,
CompilerEnvWrapper,
ObservationWrapper,
RewardWrapper,
)
from compiler_gym.wrappers.counter import Counter
from compiler_gym.wrappers.datasets import (
CycleOverBenchmarks,
CycleOverBenchmarksIterator,
IterateOverBenchmarks,
RandomOrderBenchmarks,
)
from compiler_gym.wrappers.fork import ForkOnStep
if config.enable_llvm_env:
from compiler_gym.wrappers.llvm import RuntimePointEstimateReward # noqa: F401
from compiler_gym.wrappers.sqlite_logger import ( # noqa: F401
SynchronousSqliteLogger,
)
from compiler_gym.wrappers.time_limit import TimeLimit
from .validation import ValidateBenchmarkAfterEveryStep
__all__ = [
"ActionWrapper",
"CommandlineWithTerminalAction",
"CompilerEnvWrapper",
"ConstrainedCommandline",
"Counter",
"CycleOverBenchmarks",
"CycleOverBenchmarksIterator",
"ForkOnStep",
"IterateOverBenchmarks",
"ObservationWrapper",
"RandomOrderBenchmarks",
"RewardWrapper",
"TimeLimit",
"ValidateBenchmarkAfterEveryStep",
]
if config.enable_llvm_env:
__all__.append("RuntimePointEstimateReward")
__all__.append("SynchronousSqliteLogger")
|
CompilerGym-development
|
compiler_gym/wrappers/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from abc import ABC, abstractmethod
from collections.abc import Iterable as IterableType
from typing import Any, Iterable, List, Optional, Tuple, Union
from deprecated.sphinx import deprecated
from gym import Wrapper
from gym.spaces import Space
from compiler_gym.compiler_env_state import CompilerEnvState
from compiler_gym.datasets import Benchmark, BenchmarkUri, Dataset
from compiler_gym.envs import CompilerEnv
from compiler_gym.spaces.reward import Reward
from compiler_gym.util.gym_type_hints import ActionType, ObservationType
from compiler_gym.validation_result import ValidationResult
from compiler_gym.views import ObservationSpaceSpec, ObservationView, RewardView
class CompilerEnvWrapper(CompilerEnv, Wrapper):
"""Wraps a :class:`CompilerEnv <compiler_gym.envs.CompilerEnv>` environment
to allow a modular transformation.
This class is the base class for all wrappers. This class must be used
rather than :code:`gym.Wrapper` to support the CompilerGym API extensions
such as the :code:`fork()` method.
"""
def __init__(self, env: CompilerEnv): # pylint: disable=super-init-not-called
"""Constructor.
:param env: The environment to wrap.
:raises TypeError: If :code:`env` is not a :class:`CompilerEnv
<compiler_gym.envs.CompilerEnv>`.
"""
# No call to gym.Wrapper superclass constructor here because we need to
# avoid setting the observation_space member variable, which in the
# CompilerEnv class is a property with a custom setter. Instead we set
# the observation_space_spec directly.
self.env = env
def close(self):
self.env.close()
def reset(self, *args, **kwargs) -> Optional[ObservationType]:
return self.env.reset(*args, **kwargs)
def fork(self) -> CompilerEnv:
return type(self)(env=self.env.fork())
def step( # pylint: disable=arguments-differ
self,
action: ActionType,
observation_spaces: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
reward_spaces: Optional[Iterable[Union[str, Reward]]] = None,
observations: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
rewards: Optional[Iterable[Union[str, Reward]]] = None,
timeout: Optional[float] = 300,
):
if isinstance(action, IterableType):
warnings.warn(
"Argument `action` of CompilerEnv.step no longer accepts a list "
" of actions. Please use CompilerEnv.multistep instead",
category=DeprecationWarning,
)
return self.multistep(
action,
observation_spaces=observation_spaces,
reward_spaces=reward_spaces,
observations=observations,
rewards=rewards,
)
if observations is not None:
warnings.warn(
"Argument `observations` of CompilerEnv.multistep has been "
"renamed `observation_spaces`. Please update your code",
category=DeprecationWarning,
)
observation_spaces = observations
if rewards is not None:
warnings.warn(
"Argument `rewards` of CompilerEnv.multistep has been renamed "
"`reward_spaces`. Please update your code",
category=DeprecationWarning,
)
reward_spaces = rewards
return self.multistep(
actions=[action],
observation_spaces=observation_spaces,
reward_spaces=reward_spaces,
)
def multistep(
self,
actions: Iterable[ActionType],
observation_spaces: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
reward_spaces: Optional[Iterable[Union[str, Reward]]] = None,
observations: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
rewards: Optional[Iterable[Union[str, Reward]]] = None,
timeout: Optional[float] = 300,
):
if observations is not None:
warnings.warn(
"Argument `observations` of CompilerEnv.multistep has been "
"renamed `observation_spaces`. Please update your code",
category=DeprecationWarning,
)
observation_spaces = observations
if rewards is not None:
warnings.warn(
"Argument `rewards` of CompilerEnv.multistep has been renamed "
"`reward_spaces`. Please update your code",
category=DeprecationWarning,
)
reward_spaces = rewards
return self.env.multistep(
actions=actions,
observation_spaces=observation_spaces,
reward_spaces=reward_spaces,
)
def render(
self,
mode="human",
) -> Optional[str]:
return self.env.render(mode)
@property
def reward_range(self) -> Tuple[float, float]:
return self.env.reward_range
@reward_range.setter
def reward_range(self, value: Tuple[float, float]):
self.env.reward_range = value
@property
def observation_space(self):
return self.env.observation_space
@observation_space.setter
def observation_space(
self, observation_space: Optional[Union[str, ObservationSpaceSpec]]
) -> None:
self.env.observation_space = observation_space
@property
def observation(self) -> ObservationView:
return self.env.observation
@observation.setter
def observation(self, observation: ObservationView) -> None:
self.env.observation = observation
@property
def observation_space_spec(self):
return self.env.observation_space_spec
@observation_space_spec.setter
def observation_space_spec(
self, observation_space_spec: Optional[ObservationSpaceSpec]
) -> None:
self.env.observation_space_spec = observation_space_spec
@property
def reward_space_spec(self) -> Optional[Reward]:
return self.env.reward_space_spec
@reward_space_spec.setter
def reward_space_spec(self, val: Optional[Reward]):
self.env.reward_space_spec = val
@property
def reward_space(self) -> Optional[Reward]:
return self.env.reward_space
@reward_space.setter
def reward_space(self, reward_space: Optional[Union[str, Reward]]) -> None:
self.env.reward_space = reward_space
@property
def reward(self) -> RewardView:
return self.env.reward
@reward.setter
def reward(self, reward: RewardView) -> None:
self.env.reward = reward
@property
def action_space(self) -> Space:
return self.env.action_space
@action_space.setter
def action_space(self, action_space: Optional[str]):
self.env.action_space = action_space
@property
def action_spaces(self) -> List[str]:
return self.env.action_spaces
@action_spaces.setter
def action_spaces(self, action_spaces: List[str]):
self.env.action_spaces = action_spaces
@property
def spec(self) -> Any:
return self.env.spec
@property
def benchmark(self) -> Benchmark:
return self.env.benchmark
@benchmark.setter
def benchmark(self, benchmark: Optional[Union[str, Benchmark, BenchmarkUri]]):
self.env.benchmark = benchmark
@property
def datasets(self) -> Iterable[Dataset]:
return self.env.datasets
@datasets.setter
def datasets(self, datasets: Iterable[Dataset]):
self.env.datasets = datasets
@property
def episode_walltime(self) -> float:
return self.env.episode_walltime
@property
def in_episode(self) -> bool:
return self.env.in_episode
@property
def episode_reward(self) -> Optional[float]:
return self.env.episode_reward
@episode_reward.setter
def episode_reward(self, episode_reward: Optional[float]):
self.env.episode_reward = episode_reward
@property
def actions(self) -> List[ActionType]:
return self.env.actions
@property
def version(self) -> str:
return self.env.version
@property
def compiler_version(self) -> str:
return self.env.compiler_version
@property
def state(self) -> CompilerEnvState:
return self.env.state
@deprecated(
version="0.2.5", reason="Use env.action_space.to_string(env.actions) instead"
)
def commandline(self) -> str:
return self.env.commandline()
@deprecated(
version="0.2.5", reason='Use env.action_space.from_string("...") instead'
)
def commandline_to_actions(self, commandline: str) -> List[ActionType]:
return self.env.commandline_to_actions(commandline)
def apply(self, state: CompilerEnvState) -> None: # noqa
self.env.apply(state)
def validate(self, state: Optional[CompilerEnvState] = None) -> ValidationResult:
return self.env.validate(state)
class ActionWrapper(CompilerEnvWrapper):
"""Wraps a :class:`CompilerEnv <compiler_gym.envs.CompilerEnv>` environment
to allow an action space transformation.
"""
def multistep(
self,
actions: Iterable[ActionType],
observation_spaces: Optional[Iterable[ObservationSpaceSpec]] = None,
reward_spaces: Optional[Iterable[Reward]] = None,
observations: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
rewards: Optional[Iterable[Union[str, Reward]]] = None,
timeout: Optional[float] = 300,
):
return self.env.multistep(
[self.action(a) for a in actions],
observation_spaces=observation_spaces,
reward_spaces=reward_spaces,
observations=observations,
rewards=rewards,
)
def action(self, action: ActionType) -> ActionType:
"""Translate the action to the new space."""
raise NotImplementedError
def reverse_action(self, action: ActionType) -> ActionType:
"""Translate an action from the new space to the wrapped space."""
raise NotImplementedError
class ObservationWrapper(CompilerEnvWrapper, ABC):
"""Wraps a :class:`CompilerEnv <compiler_gym.envs.CompilerEnv>` environment
to allow an observation space transformation.
"""
def reset(self, *args, **kwargs):
observation = self.env.reset(*args, **kwargs)
return self.convert_observation(observation)
def multistep(
self,
actions: List[ActionType],
observation_spaces: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
reward_spaces: Optional[Iterable[Union[str, Reward]]] = None,
observations: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
rewards: Optional[Iterable[Union[str, Reward]]] = None,
timeout: Optional[float] = 300,
):
observation, reward, done, info = self.env.multistep(
actions,
observation_spaces=observation_spaces,
reward_spaces=reward_spaces,
observations=observations,
rewards=rewards,
)
return self.convert_observation(observation), reward, done, info
@abstractmethod
def convert_observation(self, observation: ObservationType) -> ObservationType:
"""Translate an observation to the new space."""
raise NotImplementedError
class RewardWrapper(CompilerEnvWrapper, ABC):
"""Wraps a :class:`CompilerEnv <compiler_gym.envs.CompilerEnv>` environment
    to allow a reward space transformation.
"""
def reset(self, *args, **kwargs):
return self.env.reset(*args, **kwargs)
def multistep(
self,
actions: List[ActionType],
observation_spaces: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
reward_spaces: Optional[Iterable[Union[str, Reward]]] = None,
observations: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
rewards: Optional[Iterable[Union[str, Reward]]] = None,
timeout: Optional[float] = 300,
):
observation, reward, done, info = self.env.multistep(
actions,
observation_spaces=observation_spaces,
reward_spaces=reward_spaces,
observations=observations,
rewards=rewards,
)
# Undo the episode_reward update and reapply it once we have transformed
# the reward.
#
# TODO(cummins): Refactor step() so that we don't have to do this
# recalculation of episode_reward, as this is prone to errors if, say,
# the base reward returns NaN or an invalid type.
if reward is not None and self.episode_reward is not None:
self.unwrapped.episode_reward -= reward
reward = self.convert_reward(reward)
self.unwrapped.episode_reward += reward
return observation, reward, done, info
@abstractmethod
def convert_reward(self, reward):
"""Translate a reward to the new space."""
raise NotImplementedError
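# A minimal custom wrapper sketch (an assumption, not part of the library):
# a RewardWrapper subclass that scales every step reward by a constant factor.
class ScaledReward(RewardWrapper):
    """Scale each step reward by a constant factor."""
    def __init__(self, env: CompilerEnv, scale: float = 10.0):
        super().__init__(env)
        self.scale = scale
    def convert_reward(self, reward):
        # Defensive: pass None rewards through unchanged.
        return None if reward is None else reward * self.scale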
|
CompilerGym-development
|
compiler_gym/wrappers/core.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections.abc import Iterable as IterableType
from typing import Dict, Iterable, List, Optional, Union
from gym import Space
from compiler_gym.envs import CompilerEnv
from compiler_gym.spaces import Commandline, CommandlineFlag, Reward
from compiler_gym.util.gym_type_hints import ActionType, StepType
from compiler_gym.views import ObservationSpaceSpec
from compiler_gym.wrappers.core import ActionWrapper, CompilerEnvWrapper
class CommandlineWithTerminalAction(CompilerEnvWrapper):
"""Creates a new action space with a special "end of episode" terminal
action at the start. If step() is called with it, the "done" flag is set.
"""
def __init__(
self,
env: CompilerEnv,
terminal=CommandlineFlag(
name="end-of-episode",
flag="# end-of-episode",
description="End the episode",
),
):
"""Constructor.
:param env: The environment to wrap.
:param terminal: The flag to use as the terminal action. Optional.
"""
super().__init__(env)
if not isinstance(env.action_space.wrapped, Commandline):
raise TypeError(
f"Unsupported action space: {type(env.action_space).__name__}"
)
# Redefine the action space, inserting the terminal action at the start.
self.action_space = Commandline(
items=[
CommandlineFlag(
name=name,
flag=flag,
description=description,
)
for name, flag, description in zip(
env.action_space.names,
env.action_space.flags,
env.action_space.descriptions,
)
]
+ [terminal],
name=f"{type(self).__name__}<{env.action_space.name}>",
)
def multistep(
self,
actions: List[ActionType],
observation_spaces: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
reward_spaces: Optional[Iterable[Union[str, Reward]]] = None,
observations: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
rewards: Optional[Iterable[Union[str, Reward]]] = None,
) -> StepType:
terminal_action: int = len(self.action_space.flags) - 1
try:
index_of_terminal = actions.index(terminal_action)
except ValueError:
index_of_terminal = -1
# Run only the actions up to the terminal action.
if index_of_terminal >= 0:
actions = actions[:index_of_terminal]
observation, reward, done, info = self.env.multistep(
actions,
observation_spaces=observation_spaces,
reward_spaces=reward_spaces,
observations=observations,
rewards=rewards,
)
# Communicate back to the frontend.
if index_of_terminal >= 0 and not done:
done = True
info["terminal_action"] = True
return observation, reward, done, info
@property
def action_space(self) -> Space:
return self._action_space
@action_space.setter
def action_space(self, action_space: Space):
self._action_space = action_space
class ConstrainedCommandline(ActionWrapper):
"""Constrains a Commandline action space to a subset of the original space's
flags.
"""
def __init__(
self, env: CompilerEnv, flags: Iterable[str], name: Optional[str] = None
):
"""Constructor.
:param env: The environment to wrap.
:param flags: A list of entries from :code:`env.action_space.flags`
denoting flags that are available in this wrapped environment.
:param name: The name of the new action space.
"""
super().__init__(env)
self._flags = flags
if not flags:
raise TypeError("No flags provided")
if not issubclass(type(env.action_space.wrapped), Commandline):
raise TypeError(
"Can only wrap Commandline action space. "
f"Received: {type(env.action_space.wrapped).__name__}"
)
self._forward_translation: List[int] = [self.action_space[f] for f in flags]
self._reverse_translation: Dict[int, int] = {
v: i for i, v in enumerate(self._forward_translation)
}
# Redefine the action space using this smaller set of flags.
self.action_space = Commandline(
items=[
CommandlineFlag(
name=env.action_space.names[a],
flag=env.action_space.flags[a],
description=env.action_space.descriptions[a],
)
for a in (env.action_space.flags.index(f) for f in flags)
],
name=f"{type(self).__name__}<{name or env.action_space.name}, {len(flags)}>",
)
def action(self, action: Union[int, List[int]]):
if isinstance(action, IterableType):
return [self._forward_translation[a] for a in action]
return self._forward_translation[action]
def reverse_action(self, action: Union[int, List[int]]):
if isinstance(action, IterableType):
return [self._reverse_translation[a] for a in action]
return self._reverse_translation[action]
@property
def actions(self) -> List[int]:
"""Reverse-translate actions back into the constrained space."""
return self.reverse_action(self.env.actions)
def fork(self) -> "ConstrainedCommandline":
return ConstrainedCommandline(
env=self.env.fork(), flags=self._flags, name=self.action_space.name
)
@property
def action_space(self) -> Space:
return self._action_space
@action_space.setter
def action_space(self, action_space: Space):
self._action_space = action_space
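# A usage sketch (an assumption; the flags are standard LLVM passes that are
# expected to be in the llvm-v0 action space): constrain to three passes.
if __name__ == "__main__":
    import compiler_gym
    env = ConstrainedCommandline(
        compiler_gym.make("llvm-v0"), flags=["-mem2reg", "-sroa", "-gvn"]
    )
    env.reset()
    print(env.action_space.flags)  # ['-mem2reg', '-sroa', '-gvn']
    env.close()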
|
CompilerGym-development
|
compiler_gym/wrappers/commandline.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Wrapper classes for the LLVM environments."""
from typing import Callable, Iterable
import numpy as np
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.spaces import RuntimeReward
from compiler_gym.wrappers import CompilerEnvWrapper
class RuntimePointEstimateReward(CompilerEnvWrapper):
"""LLVM wrapper that uses a point estimate of program runtime as reward.
This class wraps an LLVM environment and registers a new runtime reward
space. Runtime is estimated from one or more runtime measurements, after
optionally running one or more warmup runs. At each step, reward is the
change in runtime estimate from the runtime estimate at the previous step.
"""
def __init__(
self,
env: LlvmEnv,
runtime_count: int = 30,
warmup_count: int = 0,
estimator: Callable[[Iterable[float]], float] = np.median,
):
"""Constructor.
:param env: The environment to wrap.
:param runtime_count: The number of times to execute the binary when
estimating the runtime.
:param warmup_count: The number of warmup runs of the binary to perform
before measuring the runtime.
:param estimator: A function that takes a list of runtime measurements
and produces a point estimate.
"""
super().__init__(env)
self.env.unwrapped.reward.add_space(
RuntimeReward(
runtime_count=runtime_count,
warmup_count=warmup_count,
estimator=estimator,
)
)
self.env.unwrapped.reward_space = "runtime"
self.env.unwrapped.runtime_observation_count = runtime_count
self.env.unwrapped.runtime_warmup_runs_count = warmup_count
def fork(self) -> "RuntimePointEstimateReward":
fkd = self.env.fork()
# Remove the original "runtime" space so that the new
# RuntimePointEstimateReward wrapper instance does not attempt to
# redefine it, which would raise a warning.
del fkd.unwrapped.reward.spaces["runtime"]
return RuntimePointEstimateReward(
env=fkd,
runtime_count=self.reward.spaces["runtime"].runtime_count,
warmup_count=self.reward.spaces["runtime"].warmup_count,
estimator=self.reward.spaces["runtime"].estimator,
)
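# --- Illustrative usage (added for exposition; not part of the original file).
# A minimal sketch, assuming the "llvm-v0" environment and a benchmark whose
# compiled binary can be executed to measure runtime.
if __name__ == "__main__":
    import compiler_gym

    env = RuntimePointEstimateReward(compiler_gym.make("llvm-v0"), runtime_count=10)
    try:
        env.reset()
        # Reward is the change in the median of 10 runtime measurements.
        _, reward, _, _ = env.step(env.action_space.sample())
        print(reward)
    finally:
        env.close()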
|
CompilerGym-development
|
compiler_gym/wrappers/llvm.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Iterable, Optional
from compiler_gym.envs import CompilerEnv
from compiler_gym.util.gym_type_hints import ActionType
from compiler_gym.wrappers.core import CompilerEnvWrapper
class TimeLimit(CompilerEnvWrapper):
"""A step-limited wrapper that is compatible with CompilerGym.
Example usage:
>>> env = TimeLimit(env, max_episode_steps=3)
>>> env.reset()
>>> _, _, done, _ = env.step(0)
>>> _, _, done, _ = env.step(0)
>>> _, _, done, _ = env.step(0)
>>> done
True
"""
def __init__(self, env: CompilerEnv, max_episode_steps: Optional[int] = None):
super().__init__(env=env)
if max_episode_steps is None and self.env.spec is not None:
max_episode_steps = env.spec.max_episode_steps
if self.env.spec is not None:
self.env.spec.max_episode_steps = max_episode_steps
self._max_episode_steps = max_episode_steps
self._elapsed_steps = None
def multistep(self, actions: Iterable[ActionType], **kwargs):
actions = list(actions)
assert (
self._elapsed_steps is not None
), "Cannot call env.step() before calling reset()"
observation, reward, done, info = self.env.multistep(actions, **kwargs)
self._elapsed_steps += len(actions)
if self._max_episode_steps is not None and self._elapsed_steps >= self._max_episode_steps:
info["TimeLimit.truncated"] = not done
done = True
return observation, reward, done, info
def reset(self, **kwargs):
self._elapsed_steps = 0
return self.env.reset(**kwargs)
def fork(self) -> "TimeLimit":
"""Fork the wrapped environment.
The time limit state of the forked environment is the same as the source
state.
"""
fkd = type(self)(env=self.env.fork(), max_episode_steps=self._max_episode_steps)
fkd._elapsed_steps = self._elapsed_steps # pylint: disable=protected-access
return fkd
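# --- Illustrative check (added for exposition; not part of the original file):
# fork() copies the elapsed-step count, so forks truncate at the same point.
if __name__ == "__main__":
    import compiler_gym

    env = TimeLimit(compiler_gym.make("llvm-v0"), max_episode_steps=3)
    try:
        env.reset()
        env.step(0)
        fkd = env.fork()
        assert fkd._elapsed_steps == env._elapsed_steps  # pylint: disable=protected-access
        fkd.close()
    finally:
        env.close()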
|
CompilerGym-development
|
compiler_gym/wrappers/time_limit.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections.abc import Mapping
from copy import deepcopy
from math import factorial
from numbers import Integral
from typing import Iterable, Optional, Union
import numpy as np
from gym.spaces import Space
from compiler_gym.envs import CompilerEnv
from compiler_gym.spaces import Box
from compiler_gym.spaces import Dict as DictSpace
from compiler_gym.spaces import (
Discrete,
NamedDiscrete,
Permutation,
Reward,
Scalar,
SpaceSequence,
)
from compiler_gym.spaces import Tuple as TupleSpace
from compiler_gym.util.gym_type_hints import ActionType, ObservationType, StepType
from compiler_gym.util.permutation import convert_number_to_permutation
from compiler_gym.views import ObservationSpaceSpec
from compiler_gym.wrappers.core import (
ActionWrapper,
CompilerEnvWrapper,
ObservationWrapper,
)
def convert_permutation_to_discrete_space(permutation: Permutation) -> Discrete:
return Discrete(name=permutation.name, n=factorial(permutation.size_range[0]))
def get_tile_size_discrete_space(min: Integral) -> NamedDiscrete:
items = [str(min * 2**i) for i in range(11)]
return NamedDiscrete(items=items, name=None)
def convert_tile_sizes_space(box: Box) -> TupleSpace:
spaces = [get_tile_size_discrete_space(box.low[i]) for i in range(box.shape[0])]
return TupleSpace(spaces=spaces, name=box.name)
def convert_bool_to_discrete_space(x: Scalar) -> NamedDiscrete:
if x.min or not x.max:
raise ValueError(
f"Invalid scalar range [{x.min}, {x.max}. [False, True] expected."
)
return NamedDiscrete(name=x.name, items=["False", "True"])
def convert_action_space(
action_space: SpaceSequence, max_subactions: Optional[Integral]
) -> Space:
template_space = deepcopy(action_space.space)
template_space["tile_options"][
"interchange_vector"
] = convert_permutation_to_discrete_space(
template_space["tile_options"]["interchange_vector"]
)
template_space["tile_options"]["tile_sizes"] = convert_tile_sizes_space(
template_space["tile_options"]["tile_sizes"]
)
template_space["tile_options"]["promote"] = convert_bool_to_discrete_space(
template_space["tile_options"]["promote"]
)
template_space["tile_options"][
"promote_full_tile"
] = convert_bool_to_discrete_space(
template_space["tile_options"]["promote_full_tile"]
)
template_space["vectorize_options"][
"unroll_vector_transfers"
] = convert_bool_to_discrete_space(
template_space["vectorize_options"]["unroll_vector_transfers"]
)
res = TupleSpace(name=None, spaces=[])
for i in range(action_space.size_range[0]):
res.spaces.append(deepcopy(template_space))
if max_subactions is None:
loop_bound = action_space.size_range[1]
else:
if action_space.size_range[0] > max_subactions:
raise ValueError(
f"max_subactions {max_subactions} must be greater than the minimum the environment expects {action_space.size_range[0]}."
)
loop_bound = max_subactions
for i in range(action_space.size_range[0], loop_bound):
res.spaces.append(
DictSpace(
name=None,
spaces={
"space": deepcopy(template_space),
"is_present": NamedDiscrete(name=None, items=["False", "True"]),
},
)
)
return res
_tile_size_discrite_values = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
def convert_matmul_op_action(action: ActionType) -> ActionType:
res = deepcopy(action)
res["tile_options"]["interchange_vector"] = convert_number_to_permutation(
action["tile_options"]["interchange_vector"], permutation_size=3
)
tile_sizes = action["tile_options"]["tile_sizes"]
res["tile_options"]["tile_sizes"] = np.array(
[_tile_size_discrite_values[tile_sizes[i]] for i in range(len(tile_sizes))],
dtype=int,
)
res["tile_options"]["promote"] = bool(action["tile_options"]["promote"])
res["tile_options"]["promote_full_tile"] = bool(
action["tile_options"]["promote_full_tile"]
)
res["vectorize_options"]["unroll_vector_transfers"] = bool(
action["vectorize_options"]["unroll_vector_transfers"]
)
return res
def convert_action(action: ActionType) -> ActionType:
res = []
for a in action:
if not isinstance(a, Mapping) or "is_present" not in a:
res.append(convert_matmul_op_action(a))
elif a["is_present"] != 0:
res.append(convert_matmul_op_action(a["space"]))
return res
def convert_observation_space(space: Space) -> Scalar:
return Box(
name=space.name,
shape=[1],
low=space.scalar_range.min,
high=space.scalar_range.max,
dtype=float,
)
def convert_observation(observation: ObservationType) -> ObservationType:
return (
None if observation is None else np.array([np.median(observation)], dtype=float)
)
class MlirRlObservationWrapperEnv(ObservationWrapper):
@property
def observation_space(self):
return convert_observation_space(self.env.observation_space)
@observation_space.setter
def observation_space(
self, observation_space: Optional[Union[str, ObservationSpaceSpec]]
) -> None:
self.env.observation_space = observation_space
def convert_observation(self, observation: ObservationType) -> ObservationType:
return convert_observation(observation)
class MlirRlActionWrapperEnv(ActionWrapper):
def __init__(
self,
env: CompilerEnv,
max_subactions: Optional[Integral] = None,
):
super().__init__(env)
self.max_subactions = max_subactions
@property
def action_space(self) -> Space:
return convert_action_space(
self.env.action_space, max_subactions=self.max_subactions
)
@action_space.setter
def action_space(self, action_space: Optional[str]):
self.env.action_space = action_space
def action(self, action: ActionType) -> ActionType:
return convert_action(action)
class MlirRlErrorWrapperEnv(CompilerEnvWrapper):
def multistep(
self,
actions: Iterable[ActionType],
observation_spaces: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
reward_spaces: Optional[Iterable[Union[str, Reward]]] = None,
observations: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
rewards: Optional[Iterable[Union[str, Reward]]] = None,
) -> StepType:
observation, reward, done, info = super().multistep(
actions,
observation_spaces=observation_spaces,
reward_spaces=reward_spaces,
observations=observations,
rewards=rewards,
)
if "error_type" in info:
raise RuntimeError(str(info))
return observation, reward, done, info
def step(
self,
action: ActionType,
observation_spaces: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
reward_spaces: Optional[Iterable[Union[str, Reward]]] = None,
observations: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None,
rewards: Optional[Iterable[Union[str, Reward]]] = None,
):
return self.multistep(
[action], observation_spaces, reward_spaces, observations, rewards
)
def make_mlir_rl_wrapper_env(
env: CompilerEnv, max_subactions: Optional[Integral] = None
):
"""Create a wrapper for the MLIR environment that is suitable to interface with
off-the-shelf RL frameworks.
"""
env.reward_space = "runtime"
env.observation_space = "Runtime"
res = MlirRlActionWrapperEnv(env, max_subactions=max_subactions)
res = MlirRlObservationWrapperEnv(res)
res = MlirRlErrorWrapperEnv(res)
return res
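# --- Illustrative usage (added for exposition; not part of the original file).
# A minimal sketch; the "mlir-v0" environment id is an assumption and requires
# the MLIR compiler service to be available.
if __name__ == "__main__":
    import compiler_gym

    env = make_mlir_rl_wrapper_env(compiler_gym.make("mlir-v0"))
    try:
        env.reset()
        # Sample a tuple-structured action and take a single step.
        env.step(env.action_space.sample())
    finally:
        env.close()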
|
CompilerGym-development
|
compiler_gym/wrappers/mlir.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This module implements a wrapper that counts calls to operations.
"""
from typing import Dict
from compiler_gym.envs import CompilerEnv
from compiler_gym.wrappers import CompilerEnvWrapper
class Counter(CompilerEnvWrapper):
"""A wrapper that counts the number of calls to its operations.
The counters are _not_ reset by :meth:`env.reset()
<compiler_gym.envs.CompilerEnv.reset>`.
Example usage:
>>> env = Counter(compiler_gym.make("llvm-v0"))
>>> env.counters
{"close": 0, "reset": 0, "step": 0, "fork": 0}
>>> env.step(0)
>>> env.counters
{"close": 0, "reset": 0, "step": 1, "fork": 0}
:ivar counters: A dictionary of counters for different operation types.
:vartype counters: Dict[str, int]
"""
def __init__(self, env: CompilerEnv):
"""Constructor.
:param env: The environment to wrap.
"""
super().__init__(env)
self.counters: Dict[str, int] = {
"close": 0,
"reset": 0,
"step": 0,
"fork": 0,
}
def close(self) -> None:
self.counters["close"] += 1
self.env.close()
def reset(self, *args, **kwargs):
self.counters["reset"] += 1
return self.env.reset(*args, **kwargs)
def step(self, *args, **kwargs):
self.counters["step"] += 1
return self.env.step(*args, **kwargs)
def fork(self):
self.counters["fork"] += 1
return self.env.fork()
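# --- Illustrative usage (added for exposition; not part of the original file).
if __name__ == "__main__":
    import compiler_gym

    env = Counter(compiler_gym.make("llvm-v0"))
    try:
        env.reset()
        env.step(0)
        env.step(0)
        print(env.counters)  # {"close": 0, "reset": 1, "step": 2, "fork": 0}
    finally:
        env.close()  # Increments the "close" counter.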
|
CompilerGym-development
|
compiler_gym/wrappers/counter.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from compiler_gym.envs import CompilerEnv
from compiler_gym.util.gym_type_hints import ActionType
from compiler_gym.wrappers.core import CompilerEnvWrapper
class ValidateBenchmarkAfterEveryStep(CompilerEnvWrapper):
"""Run the benchmark validation routine after every step of the environment
and end the episode with a penalty reward if validation fails.
"""
def __init__(
self,
env: CompilerEnv,
reward_penalty: float = -1e3,
):
"""Constructor.
:param env: The environment to wrap.
:param reward_penalty: The reward value that is returned by
:code:`step()` if validation fails.
"""
super().__init__(env)
self.reward_penalty = reward_penalty
def multistep(
self,
actions: List[ActionType],
observation_spaces=None,
reward_spaces=None,
observations=None,
rewards=None,
):
observation, reward, done, info = self.env.multistep(
actions,
observation_spaces=observation_spaces,
reward_spaces=reward_spaces,
observations=observations,
rewards=rewards,
)
# Early exit if environment reaches terminal state.
if done:
return observation, reward, done, info
try:
# Try and get an error from the validation callback.
info["error_details"] = next(self.env.benchmark.ivalidate(self.env))
return observation, self.reward_penalty, True, info
except StopIteration:
# No error, we're good.
return observation, reward, done, info
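# --- Illustrative usage (added for exposition; not part of the original file).
# A minimal sketch, assuming the "llvm-v0" environment and the cBench qsort
# benchmark, which supports semantics validation.
if __name__ == "__main__":
    import compiler_gym

    env = ValidateBenchmarkAfterEveryStep(
        compiler_gym.make("llvm-v0", benchmark="cbench-v1/qsort")
    )
    try:
        env.reset()
        _, reward, done, info = env.step(0)
        if done and "error_details" in info:
            print("Validation failed:", info["error_details"], "reward:", reward)
    finally:
        env.close()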
|
CompilerGym-development
|
compiler_gym/wrappers/validation.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
import gym
from gym.envs.registration import register as gym_register
# A list of gym environment names defined by CompilerGym.
COMPILER_GYM_ENVS: List[str] = []
def make(id: str, **kwargs):
"""Equivalent to :code:`gym.make()`."""
return gym.make(id, **kwargs)
def _parse_version_string(version):
"""Quick and dirty <major>.<minor>.<micro> parser. Very hacky."""
components = version.split(".")
if len(components) != 3:
return None
try:
return tuple([int(x) for x in components])
except (TypeError, ValueError):
return None
def register(id: str, order_enforce: bool = False, **kwargs):
COMPILER_GYM_ENVS.append(id)
# As of gym==0.21.0 a new OrderEnforcing wrapper is enabled by default. Turn
# this off as CompilerEnv already enforces this and the wrapper obscures the
# docstrings of the base class.
gym_version = _parse_version_string(gym.__version__)
if gym_version and gym_version >= (0, 21):
kwargs["order_enforce"] = order_enforce
gym_register(id=id, **kwargs)
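# --- Illustrative behavior of the version parser (added for exposition; not
# part of the original file).
if __name__ == "__main__":
    assert _parse_version_string("0.21.0") == (0, 21, 0)
    assert _parse_version_string("0.21") is None  # Not <major>.<minor>.<micro>.
    assert _parse_version_string("a.b.c") is None  # Non-integer components.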
|
CompilerGym-development
|
compiler_gym/util/registration.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union
# A JSON dictionary.
JsonDictType = Dict[str, Any]
# A default value for the reward_space parameter in env.reset() and env.observation_space() functions.
class OptionalArgumentValue(Enum):
UNCHANGED = 1
# Type hints for the values returned by gym.Env.step().
ObservationType = TypeVar("ObservationType")
ActionType = TypeVar("ActionType")
RewardType = float
DoneType = bool
InfoType = JsonDictType
StepType = Tuple[
Optional[Union[ObservationType, List[ObservationType]]],
Optional[Union[RewardType, List[RewardType]]],
DoneType,
InfoType,
]
|
CompilerGym-development
|
compiler_gym/util/gym_type_hints.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging as logging_
import sys
from typing import Optional
def init_logging(level: int = logging_.INFO, logger: Optional[logging_.Logger] = None):
logger = logger or logging_.getLogger()
logger.setLevel(level)
handler = logging_.StreamHandler(sys.stdout)
handler.setLevel(level)
formatter = logging_.Formatter(
fmt="%(asctime)s %(name)s] %(message)s", datefmt="%m%d %H:%M:%S"
)
handler.setFormatter(formatter)
logger.addHandler(handler)
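# --- Illustrative usage (added for exposition; not part of the original file).
if __name__ == "__main__":
    init_logging(level=logging_.DEBUG)
    logging_.getLogger(__name__).info("stdout logging is now configured")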
|
CompilerGym-development
|
compiler_gym/util/logging.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import deque
from typing import Iterable
def truncate(
string: str,
max_line_len: int = 60,
max_lines: int = 1,
tail: bool = False,
) -> str:
"""Truncate a string using ellipsis.
For multi-line inputs, each line is truncated independently.
For example:
>>> truncate("abcdefghijklmnop\n1234", max_line_len=10)
"abcdefg...\n1234"
:param string: The string to truncate.
:param max_line_len: The maximum number of characters in each line.
:param max_lines: The maximum number of lines in the output string.
:param tail: If :code:`True`, keep the last lines instead of the first.
:return: A (possibly truncated) string.
"""
return truncate_lines(
str(string).split("\n"),
max_line_len=max_line_len,
max_lines=max_lines,
tail=tail,
)
def truncate_lines(
lines: Iterable[str],
max_line_len: int = 60,
max_lines: int = 1,
tail: bool = False,
) -> str:
"""Truncate a sequence of lines, one string per line, using ellipsis.
Each line is truncated independently and combined into a single multi-line
string.
For example:
>>> truncate_lines(["abcdefghijklmnop", "1234"], max_line_len=10)
"abcdefg...\n1234"
:param lines: The lines to truncate.
:param max_line_len: The maximum number of characters in each line.
:param max_lines: The maximum number of lines in the output string.
:param tail: If :code:`True`, keep the last lines instead of the first.
:return: A (possibly truncated) string.
"""
if max_line_len <= 3:
raise ValueError("Lines must be greater than 3 characeters long.")
def _truncate_line(line: str):
if len(line) > max_line_len:
return f"{line[:max_line_len-3]}..."
return line
def _consume(iterable, n):
"""Consume fist or last `n` elements from iterable."""
if tail:
yield from deque(iterable, n)
else:
for _ in range(n):
try:
yield next(iterable)
except StopIteration:
return
lines = iter(lines)
truncated_lines = [_truncate_line(str(ln)) for ln in _consume(lines, max_lines)]
# Truncate the final line if required.
try:
next(lines)
truncated_lines[-1] = _truncate_line(f"{truncated_lines[-1]}...")
except StopIteration:
pass
return "\n".join(truncated_lines)
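# --- Illustrative behavior (added for exposition; not part of the original
# file). These asserts follow directly from the implementation above.
if __name__ == "__main__":
    # Each line is capped at max_line_len characters, ellipsis included.
    assert truncate("abcdefghijklmnop", max_line_len=10) == "abcdefg..."
    # With the default max_lines=1, a dropped second line is signalled by an
    # ellipsis folded into the final kept line.
    assert truncate("abcdefghijklmnop\n1234", max_line_len=10) == "abcdefg..."
    # Raising max_lines keeps both lines.
    expected = "abcdefg...\n1234"
    assert truncate("abcdefghijklmnop\n1234", max_line_len=10, max_lines=2) == expected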
|
CompilerGym-development
|
compiler_gym/util/truncate.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import csv
from io import StringIO
from typing import Any, Iterable, Optional
from tabulate import tabulate as tabulate_lib
def tabulate(
rows: Iterable[Iterable[Any]],
headers: Iterable[str],
tablefmt: Optional[str] = "grid",
) -> str:
"""A wrapper around the third-party tabulate function that adds support
for tab- and comma-separate formats.
:param rows: The data to tabulate.
:param headers: A list of table headers.
:param tablefmt: The format of tables to print. For a full list of options,
see: https://github.com/astanin/python-tabulate#table-format.
:return: A formatted table as a string.
"""
if tablefmt == "tsv" or tablefmt == "csv":
sep = {"tsv": "\t", "csv": ","}[tablefmt]
buf = StringIO()
writer = csv.writer(buf, delimiter=sep)
writer.writerow([str(x) for x in headers])
for row in rows:
writer.writerow([str(x) for x in row])
return buf.getvalue()
else:
return tabulate_lib(
rows,
headers=headers,
tablefmt=tablefmt,
)
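# --- Illustrative usage (added for exposition; not part of the original file).
if __name__ == "__main__":
    table = tabulate([["qsort", 1.05]], headers=["benchmark", "speedup"], tablefmt="csv")
    # csv.writer terminates rows with \r\n by default.
    assert table == "benchmark,speedup\r\nqsort,1.05\r\n"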
|
CompilerGym-development
|
compiler_gym/util/tabulate.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import cpu_count
from threading import Lock
_executor_lock = Lock()
_executor = None
def get_thread_pool_executor() -> ThreadPoolExecutor:
"""Return a singleton :code:`ThreadPoolExecutor`.
This executor is intended to be used for multithreaded parallelism. The
maximum number of threads in the pool is equal to the number of cores on the
machine. This is based on the assumption that CompilerGym workloads are
typically CPU bound and not I/O bound, so the number of active threads
should correspond to the number of available cores.
:returns: A thread pool executor.
"""
with _executor_lock:
global _executor
if _executor is None:
_executor = ThreadPoolExecutor(max_workers=cpu_count())
return _executor
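# --- Illustrative usage (added for exposition; not part of the original file).
if __name__ == "__main__":
    executor = get_thread_pool_executor()
    futures = [executor.submit(pow, 2, i) for i in range(4)]
    assert [f.result() for f in futures] == [1, 2, 4, 8]
    # Repeated calls return the same singleton instance.
    assert get_thread_pool_executor() is executor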
|
CompilerGym-development
|
compiler_gym/util/thread_pool.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Module for resolving a runfiles path."""
import os
from datetime import datetime
from getpass import getuser
from pathlib import Path
from threading import Lock
from time import sleep
from typing import Optional
# NOTE(cummins): Moving this file may require updating this relative path.
_PACKAGE_ROOT = Path(os.path.join(os.path.dirname(__file__), "../../")).resolve(
strict=True
)
_CREATE_LOGGING_DIR_LOCK = Lock()
def runfiles_path(relpath: str) -> Path:
"""Resolve the path to a runfiles data path.
No checks are made to ensure that the path, or the containing directory,
exist.
Use environment variable COMPILER_GYM_RUNFILES=/path/to/runfiles if running
outside of bazel.
:param relpath: The relative path within the runfiles tree.
:return: An absolute path.
"""
# There are three ways of determining a runfiles path:
# 1. Set the COMPILER_GYM_RUNFILES environment variable.
# 2. Using the rules_python library that is provided by bazel. This will
# fail if not being executed within a bazel sandbox.
# 3. Computing the path relative to the location of this file. This is the
# fallback approach that is used when the code has been installed
# by setuptools.
runfiles_path = os.environ.get("COMPILER_GYM_RUNFILES")
if runfiles_path:
return Path(runfiles_path) / relpath
else:
try:
from rules_python.python.runfiles import runfiles
return Path(
runfiles.Create().Rlocation(
"CompilerGym" if relpath == "." else f"CompilerGym/{relpath}"
)
)
except (ModuleNotFoundError, TypeError):
return _PACKAGE_ROOT / relpath
def site_data_path(relpath: str) -> Path:
"""Return a path within the site data directory.
CompilerGym uses a directory to store persistent site data files in, such as
benchmark datasets. The default location is
:code:`~/.local/share/compiler_gym`. Set the environment variable
:code:`$COMPILER_GYM_SITE_DATA` to override this default location.
No checks are made to ensure that the path, or the containing directory,
exist.
Files in this directory are intended to be long lived (this is not a cache),
but it is safe to delete this directory, so long as no CompilerGym
environments are running.
:param relpath: The relative path within the site data tree.
:return: An absolute path.
"""
# NOTE(cummins): This function has a matching implementation in the C++
# sources, compiler_gym::service::getSiteDataPath(). Any change to behavior
# here must be reflected in the C++ version.
forced = os.environ.get("COMPILER_GYM_SITE_DATA")
if forced:
return Path(forced) / relpath
elif os.environ.get("HOME"):
return Path("~/.local/share/compiler_gym").expanduser() / relpath
else:
return Path(f"/tmp/compiler_gym_{getuser()}/site_data") / relpath
def cache_path(relpath: str) -> Path:
"""Return a path within the cache directory.
CompilerGym uses a directory to cache files in, such as downloaded content.
The default location for this cache is :code:`~/.local/cache/compiler_gym`.
Set the environment variable :code:`$COMPILER_GYM_CACHE` to override this
default location.
It is safe to delete this directory, so long as no CompilerGym environments
are running.
No checks are made to ensure that the path, or the containing directory,
exist.
:param relpath: The relative path within the cache tree.
:return: An absolute path.
"""
forced = os.environ.get("COMPILER_GYM_CACHE")
if forced:
return Path(forced) / relpath
elif os.environ.get("HOME"):
return Path("~/.local/cache/compiler_gym").expanduser() / relpath
else:
return Path(f"/tmp/compiler_gym_{getuser()}/cache") / relpath
def transient_cache_path(relpath: str) -> Path:
"""Return a path within the transient cache directory.
The transient cache is a directory used to store files that do not need to
persist beyond the lifetime of the current process. When available, the
temporary filesystem :code:`/dev/shm` will be used. Else,
:meth:`cache_path() <compiler_gym.cache_path>` is used as a fallback. Set
the environment variable :code:`$COMPILER_GYM_TRANSIENT_CACHE` to override
the default location.
Files in this directory are not meant to outlive the lifespan of the
CompilerGym environment that creates them. It is safe to delete this
directory, so long as no CompilerGym environments are running.
No checks are made to ensure that the path, or the containing directory,
exist.
:param relpath: The relative path within the cache tree.
:return: An absolute path.
"""
forced = os.environ.get("COMPILER_GYM_TRANSIENT_CACHE")
if forced:
return Path(forced) / relpath
elif Path("/dev/shm").is_dir():
return Path(f"/dev/shm/compiler_gym_{getuser()}") / relpath
else:
# Fallback to using the regular cache.
return cache_path(relpath)
def create_user_logs_dir(name: str, dir: Optional[Path] = None) -> Path:
"""Create a directory for writing logs to.
Defaults to the ~/logs/compiler_gym base directory; set the
:code:`COMPILER_GYM_LOGS` environment variable to override this.
Example use:
>>> create_user_logs_dir("my_experiment")
Path("~/logs/compiler_gym/my_experiment/2020-11-03/11-00-00")
:param name: The grouping name for the logs.
:param dir: An optional base directory to use instead of the default.
:return: A unique timestamped directory for logging. This directory exists.
"""
base_dir = Path(
os.environ.get("COMPILER_GYM_LOGS", dir or "~/logs/compiler_gym")
).expanduser()
group_dir = base_dir / name
with _CREATE_LOGGING_DIR_LOCK:
# Require that logging directory timestamps are unique by waiting until
# a unique timestamp is generated.
while True:
now = datetime.now()
subdirs = now.strftime("%Y-%m-%d/%H-%M-%S")
logs_dir = group_dir / subdirs
if logs_dir.is_dir():
sleep(0.3)
continue
logs_dir.mkdir(parents=True, exist_ok=False)
# Create a symlink to the "latest" logs results.
if (group_dir / "latest").exists():
os.unlink(group_dir / "latest")
os.symlink(subdirs, group_dir / "latest")
return logs_dir
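# --- Illustrative behavior (added for exposition; not part of the original
# file): environment variables take precedence over the default locations.
if __name__ == "__main__":
    os.environ["COMPILER_GYM_SITE_DATA"] = "/tmp/my_site_data"
    assert site_data_path("llvm-v0") == Path("/tmp/my_site_data/llvm-v0")
    del os.environ["COMPILER_GYM_SITE_DATA"]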
|
CompilerGym-development
|
compiler_gym/util/runfiles_path.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from time import time
from typing import Callable, Optional
from absl.logging import skip_log_prefix
def humanize_duration(seconds: float) -> str:
"""Format a time for humans."""
value = abs(seconds)
sign = "-" if seconds < 0 else ""
if value < 1e-6:
return f"{sign}{value*1e9:.1f}ns"
elif value < 1e-3:
return f"{sign}{value*1e6:.1f}us"
if value < 1:
return f"{sign}{value*1e3:.1f}ms"
elif value < 60:
return f"{sign}{value:.3f}s"
else:
return f"{sign}{value:.1f}s"
def humanize_duration_hms(seconds: float) -> str:
"""Format a time in to :code:`hours:minutes:seconds` format."""
seconds = int(seconds)
return f"{seconds // 3600}:{(seconds % 3600) // 60:02d}:{seconds % 60:02d}"
class Timer:
"""A very simple scoped timer.
Example:
>>> with Timer() as timer:
...     time.sleep(10)
>>> print(f"That took {timer}")
That took 10.0s
If you're feeling even more terse:
>>> with Timer("Did stuff"):
# do stuff ...
Did stuff in 5.6ms
You can control where the print out should be logged to:
>>> with Timer("Did stuff", logging.getLogger().info)
# do stuff ...
[log] Did stuff in 11us
"""
def __init__(
self, label: Optional[str] = None, print_fn: Callable[[str], None] = print
):
self._start_time = None
self._elapsed = None
self.label = label
self.print_fn = print_fn
def reset(self) -> "Timer":
self._start_time = time()
return self
def __enter__(self) -> "Timer":
return self.reset()
@property
def time(self) -> float:
if self._elapsed:
return self._elapsed
elif self._start_time:
return time() - self._start_time
else:
return 0
@skip_log_prefix
def __exit__(self, *args):
self._elapsed = time() - self._start_time
if self.label:
self.print_fn(f"{self.label} in {self}")
def __str__(self):
return humanize_duration(self.time)
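# --- Illustrative usage (added for exposition; not part of the original file).
if __name__ == "__main__":
    from time import sleep

    assert humanize_duration(0.0005) == "500.0us"
    assert humanize_duration_hms(3661) == "1:01:01"
    with Timer("Slept") as timer:  # Prints "Slept in ..." on scope exit.
        sleep(0.01)
    assert timer.time >= 0.01  # Elapsed time is frozen after scope exit.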
|
CompilerGym-development
|
compiler_gym/util/timer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hashlib
import logging
from time import sleep
from typing import List, Optional, Union
import fasteners
import requests
import compiler_gym.errors
from compiler_gym.util.filesystem import atomic_file_write
from compiler_gym.util.runfiles_path import cache_path
from compiler_gym.util.truncate import truncate
logger = logging.getLogger(__name__)
# Deprecated since v0.2.4.
# This type is for backwards compatibility that will be removed in a future release.
# Please, use errors from `compiler_gym.errors`.
DownloadFailed = compiler_gym.errors.DownloadFailed
# Deprecated since v0.2.4.
# This type is for backwards compatibility that will be removed in a future release.
# Please, use errors from `compiler_gym.errors`.
TooManyRequests = compiler_gym.errors.TooManyRequests
def _get_url_data(url: str) -> bytes:
try:
req = requests.get(url)
except IOError as e:
# Re-cast an error raised by requests library to DownloadFailed type.
raise DownloadFailed(str(e)) from e
try:
if req.status_code == 429:
raise TooManyRequests("429 Too Many Requests")
elif req.status_code != 200:
raise DownloadFailed(f"GET returned status code {req.status_code}: {url}")
return req.content
finally:
req.close()
def _do_download_attempt(url: str, sha256: Optional[str]) -> bytes:
logger.info("Downloading %s ...", url)
content = _get_url_data(url)
if sha256:
# Validate the checksum.
checksum = hashlib.sha256()
checksum.update(content)
actual_sha256 = checksum.hexdigest()
if sha256 != actual_sha256:
raise DownloadFailed(
f"Checksum of download does not match:\n"
f"Url: {url}\n"
f"Expected: {sha256}\n"
f"Actual: {actual_sha256}"
)
# Cache the downloaded file.
path = cache_path(f"downloads/{sha256}")
path.parent.mkdir(parents=True, exist_ok=True)
with atomic_file_write(path, fileobj=True) as f:
f.write(content)
logger.debug(f"Downloaded {url}")
return content
def _download(urls: List[str], sha256: Optional[str], max_retries: int) -> bytes:
if not urls:
raise ValueError("No URLs to download")
# Cache hit.
if sha256 and cache_path(f"downloads/{sha256}").is_file():
with open(str(cache_path(f"downloads/{sha256}")), "rb") as f:
return f.read()
# A retry loop, and loop over all urls provided.
last_exception = None
wait_time = 10
for _ in range(max(max_retries, 1)):
for url in urls:
try:
return _do_download_attempt(url, sha256)
except TooManyRequests as e:
last_exception = e
logger.info(
"Download attempt failed with Too Many Requests error. "
"Watiting %.1f seconds",
wait_time,
)
sleep(wait_time)
wait_time *= 1.5
except DownloadFailed as e:
logger.info("Download attempt failed: %s", truncate(e))
last_exception = e
raise last_exception
def download(
urls: Union[str, List[str]], sha256: Optional[str] = None, max_retries: int = 5
) -> bytes:
"""Download a file and return its contents.
If :code:`sha256` is provided and the download succeeds, the file contents
are cached locally in :code:`$cache_path/downloads/$sha256`. See
:func:`compiler_gym.cache_path`.
An inter-process lock ensures that only a single call to this function may
execute at a time.
:param urls: Either a single URL of the file to download, or a list of URLs
to download.
:param sha256: The expected sha256 checksum of the file.
:return: The contents of the downloaded file.
:raises IOError: If the download fails, or if the downloaded content does
not match the expected :code:`sha256` checksum.
"""
# Convert a singular string into a list of strings.
urls = [urls] if not isinstance(urls, list) else urls
# Only a single process may download a file at a time. The idea here is to
# prevent redundant downloads when multiple simultaneous processes all try
# and download the same resource. If we don't have an ID for the resource
# then we just lock globally to reduce NIC thrashing.
if sha256:
with fasteners.InterProcessLock(cache_path(f"downloads/.{sha256}.lock")):
return _download(urls, sha256, max_retries)
else:
with fasteners.InterProcessLock(cache_path("downloads/.lock")):
return _download(urls, None, max_retries)
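# --- Illustrative usage (added for exposition; not part of the original file).
# The URL and checksum below are hypothetical placeholders, not real resources.
if __name__ == "__main__":
    data = download(
        urls=["https://example.com/dataset.tar.bz2"],  # hypothetical URL
        sha256="<expected-sha256-hex-digest>",  # hypothetical checksum
        max_retries=3,
    )
    print(f"downloaded {len(data)} bytes")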
|
CompilerGym-development
|
compiler_gym/util/download.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Utilities for working with the filesystem."""
import os
import tempfile
from contextlib import contextmanager
from pathlib import Path
from typing import BinaryIO, List, TextIO, Union
from compiler_gym.util import runfiles_path
def get_storage_paths() -> List[Path]:
"""Return the list of paths used by CompilerGym for filesystem storage.
:return: A list of filesystem paths that CompilerGym uses to store files.
"""
return sorted(
{
runfiles_path.cache_path("."),
runfiles_path.transient_cache_path("."),
runfiles_path.site_data_path("."),
}
)
@contextmanager
def atomic_file_write(
path: Path, fileobj: bool = False, mode: str = "wb"
) -> Union[Path, TextIO, BinaryIO]:
"""A context manager for atomically writing to a file.
Provides a lock-free mechanism for ensuring concurrent safe writes to a
filesystem path. Use this to prevent filesystem races when multiple callers
may be writing to the same file. This is best suited for cases where the
chance of a race are low, as it does not prevent redundant writes. It simply
guarantees that each write is atomic.
This relies on POSIX atomic file renaming.
Use it as a context manager that yields the path of a temporary file to
write to:
>>> outpath = Path("some_file.txt")
>>> with atomic_file_write(outpath) as tmp_path:
... with open(tmp_path, "w") as f:
... f.write("Hello\n")
>>> outpath.is_file()
True
It can also return a file object if passed the :code:`fileobj` argument:
>>> outpath = Path("some_file.txt")
>>> with atomic_file_write(outpath, fileobj=True) as f:
... f.write(file_data)
>>> outpath.is_file()
True
:param path: The path to atomically write to.
:param fileobj: If :code:`True`, return a file object in the given
:code:`mode`.
:param mode: The file mode to use when returning a file object.
:returns: The path of a temporary file to write to.
"""
with tempfile.NamedTemporaryFile(dir=path.parent, delete=False, mode=mode) as tmp:
tmp_path = Path(tmp.name)
try:
yield tmp if fileobj else tmp_path
finally:
if tmp_path.is_file():
os.rename(tmp_path, path)
def is_in_memory(path: Path) -> bool:
"""Determine if a path's mountpoint is in-memory.
:param path: A filesystem path.
:returns: True if the path is in-memory.
"""
# TODO(cummins): This is totally hacky and intended to work only for the
# transient_cache_path() case. There will be false negatives, though not
# likely false positives.
return str(path).startswith("/dev/shm")
def is_within_directory(directory, target) -> bool:
abs_directory = os.path.abspath(directory)
abs_target = os.path.abspath(target)
prefix = os.path.commonprefix([abs_directory, abs_target])
return prefix == abs_directory
def extract_tar(tar, path=".", members=None, *, numeric_owner=False) -> None:
for member in tar.getmembers():
member_path = os.path.join(path, member.name)
if not is_within_directory(path, member_path):
raise Exception("Attempted Path Traversal in Tar File")
tar.extractall(path, members, numeric_owner=numeric_owner)
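# --- Illustrative usage of the traversal-safe extractor (added for exposition;
# not part of the original file). "archive.tar.gz" is a hypothetical path.
if __name__ == "__main__":
    import tarfile

    with tarfile.open("archive.tar.gz") as tar:
        # Raises if any member would escape the destination directory
        # (e.g. via "../" names) before extracting.
        extract_tar(tar, path="out")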
|
CompilerGym-development
|
compiler_gym/util/filesystem.py
|
CompilerGym-development
|
compiler_gym/util/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This module defines utilities for minimizing trajectories.
A trajectory is the sequence of actions applied to an environment. The goal of
trajectory minimization is to identify the shortest subregion of a trajectory
such that some hypothesis still holds. A hypothesis is a boolean test on an
environment, for example, a hypothesis could be that :code:`env.validate()`
returns an error.
"""
import logging
import random
from math import ceil, log
from typing import Callable, Iterable
from compiler_gym.util.truncate import truncate
logger = logging.getLogger(__name__)
class MinimizationError(OSError):
"""Error raised if trajectory minimization fails."""
# A hypothesis is a callback that accepts as input an environment in a given
# state and returns true if a particular hypothesis holds, else false.
Hypothesis = Callable[["CompilerEnv"], bool] # noqa: F821
def environment_validation_fails(env: "CompilerEnv") -> bool: # noqa: F821
"""A hypothesis that holds true if environment validation fails."""
validation_result = env.validate()
logger.debug(truncate(str(validation_result), max_lines=1, max_line_len=120))
return not validation_result.okay()
def _apply_and_test(env, actions, hypothesis, flakiness) -> bool:
"""Run specific actions on environment and return whether hypothesis holds."""
env.reset(benchmark=env.benchmark)
for _ in range(flakiness):
logger.debug("Applying %d actions ...", len(actions))
_, _, done, info = env.multistep(actions)
if done:
raise MinimizationError(
f"Failed to replay actions: {info.get('error_details', '')}"
)
logger.debug("Applied %d actions", len(actions))
if hypothesis(env):
return True
return False
def bisect_trajectory(
env: "CompilerEnv", # noqa: F821
hypothesis: Hypothesis = environment_validation_fails,
reverse: bool = False,
flakiness: int = 1,
) -> Iterable["CompilerEnv"]: # noqa: F821
"""Run a binary search to remove the suffix or prefix of a trjectory.
Requires worst-case O(log n) evaluation rounds, where n is the length of the
trajectory.
:param env: An environment whose action trajectory should be minimized.
:param hypothesis: The hypothesis that is used to determine if a trajectory
is valid. A callback that accepts as argument the :code:`env:`
instance and returns true if the hypothesis holds, else false. The
hypothesis must hold on the initial trajectory.
:param reverse: If :code:`True`, minimize the trajectory from the front
(i.e. the prefix). Else, minimization occurs from the back (i.e. the
suffix).
:param flakiness: The maximum number of times the hypothesis is repeated
to check if it holds. If the hypothesis returns :code:`True` within this
many iterations, it is said to hold. It needs to only return
:code:`True` once.
:returns: A generator that yields the input environment every time the
trajectory is successfully reduced.
:raises MinimizationError: If the environment action replay fails, or if
the hypothesis does not hold on the initial trajectory.
"""
def apply_and_test(actions):
return _apply_and_test(env, actions, hypothesis, flakiness)
all_actions = env.actions.copy()
# No actions to minimize.
if not all_actions:
return env
logger.info(
"%sisecting sequence of %d actions",
"Reverse b" if reverse else "B",
len(all_actions),
)
if not apply_and_test(all_actions):
raise MinimizationError(
"Hypothesis failed on the initial state! The hypothesis must hold for the first state."
)
left = 0
right = len(all_actions) - 1
step = 0
while right >= left:
step += 1
remaining_steps = int(log(max(right - left, 1), 2))
mid = left + ((right - left) // 2)
logger.debug(
"Bisect step=%d, left=%d, right=%d, mid=%d", step, left, right, mid
)
actions = all_actions[mid:] if reverse else all_actions[:mid]
if apply_and_test(actions):
logger.info(
"🟢 Hypothesis holds at num_actions=%d, remaining bisect steps=%d",
mid,
remaining_steps,
)
yield env
if reverse:
left = mid + 1
else:
right = mid - 1
else:
logger.info(
"🔴 Hypothesis does not hold at num_actions=%d, remaining bisect steps=%d",
mid,
remaining_steps,
)
if reverse:
right = mid - 1
else:
left = mid + 1
mid = max(left, right) - 1 if reverse else min(left, right) + 1
if (reverse and mid < 0) or (not reverse and mid >= len(all_actions)):
actions = all_actions
logger.info("Failed to reduce trajectory length using bisection")
else:
actions = all_actions[mid:] if reverse else all_actions[:mid]
logger.info(
"Determined that action %d of %d is the first at which the hypothesis holds: %s",
mid,
len(all_actions),
env.action_space.flags[all_actions[mid]],
)
if not apply_and_test(actions):
raise MinimizationError("Post-bisect sanity check failed!")
yield env
def random_minimization(
env: "CompilerEnv", # noqa: F821
hypothesis: Hypothesis = environment_validation_fails,
num_steps_ratio_multiplier: float = 5,
init_discard_ratio: float = 0.75,
discard_ratio_decay: float = 0.75,
min_trajectory_len: int = 5,
flakiness: int = 1,
) -> Iterable["CompilerEnv"]: # noqa: F821
"""Run an iterative process of randomly removing actions to minimize a
trajectory.
For each round of minimization, a number of actions are discarded randomly
and the hypothesis is tested. If the hypothesis still holds with those
actions removed, the minimization proceeds. Else the actions are re-inserted
into the trajectory and a new set of actions are removed. After a failure,
the discard ratio is decayed so that fewer actions are removed in subsequent
rounds.
Performs up to O(num_steps_ratio_multiplier * log n) evaluation rounds,
where n is the length of the trajectory.
:param env: An environment whose action trajectory should be minimized.
:param hypothesis: The hypothesis that is used to determine if a trajectory
is valid. A callback that accepts as argument the :code:`env:`
instance and returns true if the hypothesis holds, else false. The
hypothesis must hold on the initial trajectory.
:param num_steps_ratio_multiplier: A multiplier for the number of rounds of
minimization to perform, using log(n) the length of the trajectory as
the factor.
:param init_discard_ratio: The number of actions that will be randomly
discarded, as a multiplier of the length of the trajectory.
:param discard_ratio_decay: The ratio of decay for the discard ratio on
failure.
:param min_trajectory_len: The minimum number of actions in the trajectory
for minimization to run. If the trajectory contains fewer than this many
actions, minimization stops.
:param flakiness: The maximum number of times the hypothesis is repeated
to check if it holds. If the hypothesis returns :code:`True` within this
many iterations, it is said to hold. It needs to only return
:code:`True` once.
:returns: A generator that yields the input environment every time the
trajectory is successfully reduced.
:raises MinimizationError: If the environment action replay fails, or if
the hypothesis does not hold on the initial trajectory.
"""
def apply_and_test(actions):
return _apply_and_test(env, actions, hypothesis, flakiness)
actions = env.actions.copy()
if not apply_and_test(actions):
raise MinimizationError(
"Hypothesis failed on the initial state! The hypothesis must hold for the first state."
)
max_num_steps = int(log(len(actions), 2) * num_steps_ratio_multiplier)
num_steps = 0
discard_ratio = init_discard_ratio
while len(actions) >= min_trajectory_len and num_steps < max_num_steps:
num_steps += 1
num_to_remove = int(ceil(len(actions) * discard_ratio))
candidate_actions = actions.copy()
# Delete actions randomly.
for _ in range(num_to_remove):
del candidate_actions[random.randint(0, len(candidate_actions) - 1)]
if apply_and_test(candidate_actions):
logger.info(
"🟢 Hypothesis holds with %s of %s actions randomly removed, continuing",
num_to_remove,
len(actions),
)
actions = candidate_actions
discard_ratio = init_discard_ratio
yield env
else:
logger.info(
"🔴 Hypothesis does not hold with %s of %s actions randomly removed, rolling back",
num_to_remove,
len(actions),
)
discard_ratio *= discard_ratio_decay
if num_to_remove == 1:
logger.info(
"Terminating random minimization after failing with only a single action removed"
)
break
if not apply_and_test(actions):
raise MinimizationError("Post-minimization sanity check failed!")
yield env
def minimize_trajectory_iteratively(
env: "CompilerEnv", # noqa: F821
hypothesis: Hypothesis = environment_validation_fails,
flakiness: int = 1,
) -> Iterable["CompilerEnv"]: # noqa: F821
"""Minimize a trajectory by remove actions, one at a time, until a minimal
trajectory is reached.
Performs up to O(n * n / 2) evaluation rounds, where n is the length of the
trajectory.
:param env: An environment whose action trajectory should be minimized.
:param hypothesis: The hypothesis that is used to determine if a trajectory
is valid. A callback that accepts as argument the :code:`env:`
instance and returns true if the hypothesis holds, else false. The
hypothesis must hold on the initial trajectory.
:param flakiness: The maximum number of times the hypothesis is repeated
to check if it holds. If the hypothesis returns :code:`True` within this
many iterations, it is said to hold. It needs to only return
:code:`True` once.
:returns: A generator that yields the input environment every time the
trajectory is successfully reduced.
:raises MinimizationError: If the environment action replay fails, or if
the hypothesis does not hold on the initial trajectory.
"""
def apply_and_test(actions):
return _apply_and_test(env, actions, hypothesis, flakiness)
all_actions = env.actions.copy()
init_num_actions = len(all_actions)
if not all_actions: # Nothing to minimize.
return
if not apply_and_test(all_actions):
raise MinimizationError(
"Hypothesis failed on the initial state! The hypothesis must hold for the first state."
)
pass_num = 0
actions_removed = 0
action_has_been_pruned = True
# Outer loop. Repeat iterative reduction until no change is made.
while action_has_been_pruned and len(all_actions) > 1:
pass_num += 1
action_has_been_pruned = False
action_mask = [True] * len(all_actions)
logger.info("Minimization pass on sequence of %d actions", len(all_actions))
# Inner loop. Go through every action and see if it can be removed.
for i in range(len(action_mask)):
action_mask[i] = False
action_name = env.action_space.flags[all_actions[i]]
actions = [action for action, mask in zip(all_actions, action_mask) if mask]
if apply_and_test(actions):
logger.info(
"🟢 Hypothesis holds with action %s removed, %d actions remaining",
action_name,
sum(action_mask),
)
action_has_been_pruned = True
actions_removed += 1
yield env
else:
action_mask[i] = True
logger.info(
"🔴 Hypothesis does not hold with action %s removed, %d actions remaining",
action_name,
sum(action_mask),
)
all_actions = [action for action, mask in zip(all_actions, action_mask) if mask]
logger.info(
"Minimization halted after %d passes, %d of %d actions removed",
pass_num,
actions_removed,
init_num_actions,
)
if not apply_and_test(all_actions):
raise ValueError("Post-bisect sanity check failed!")
yield env
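# --- Illustrative usage (added for exposition; not part of the original file).
# A sketch of the minimization loop, using a trivial always-true hypothesis
# purely to demonstrate the control flow; a real use would pass a hypothesis
# such as environment_validation_fails.
if __name__ == "__main__":
    import compiler_gym

    env = compiler_gym.make("llvm-v0", benchmark="cbench-v1/qsort")
    try:
        env.reset()
        env.multistep([0, 1, 2, 3])  # A stand-in trajectory.
        for _ in bisect_trajectory(env, hypothesis=lambda env: True):
            print(f"reduced to {len(env.actions)} actions")
    finally:
        env.close()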
|
CompilerGym-development
|
compiler_gym/util/minimize_trajectory.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""A context manager to set a temporary working directory."""
import os
import tempfile
from contextlib import contextmanager
from pathlib import Path
from typing import Optional, Union
@contextmanager
def temporary_working_directory(directory: Optional[Union[str, Path]] = None) -> Path:
"""Temporarily set the working directory.
This function provides a way to set the working directory within the
scope of a "with statement". Example usage:
.. code-block:: python
print(os.getcwd()) # /tmp/foo
with temporary_working_directory("/tmp/bar"):
# Now in scope of new working directory.
print(os.getcwd()) # /tmp/bar
# Return to original working directory.
print(os.getcwd()) # /tmp/foo
:param directory: A directory to set as the temporary working directory. If
not provided, a temporary directory is created and deleted once out of
scope.
:return: The temporary working directory.
"""
old_working_directory = os.getcwd()
try:
if directory:
os.chdir(directory)
yield Path(directory)
else:
with tempfile.TemporaryDirectory(prefix="compiler_gym-") as d:
os.chdir(d)
yield Path(d)
finally:
os.chdir(old_working_directory)
|
CompilerGym-development
|
compiler_gym/util/temporary_working_directory.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Utilities for parallelization / threading / concurrency."""
from itertools import tee
from threading import Lock
from typing import Any, Iterable
class _ThreadSafeTee:
"""An extension of :code:`itertools.tee()` that uses a lock to ensure
exclusive access to the iterator.
"""
def __init__(self, tee_obj, lock):
self.tee_obj = tee_obj
self.lock = lock
def __iter__(self):
return self
def __next__(self):
with self.lock:
return next(self.tee_obj)
def __copy__(self):
return _ThreadSafeTee(self.tee_obj.__copy__(), self.lock)
def thread_safe_tee(iterable: Iterable[Any], n: int = 2):
"""An extension of :code:`itertools.tee()` that yields thread-safe iterators."""
lock = Lock()
return tuple(_ThreadSafeTee(tee_obj, lock) for tee_obj in tee(iterable, n))
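# --- Illustrative usage (added for exposition; not part of the original file).
if __name__ == "__main__":
    a, b = thread_safe_tee(range(4))
    # Both iterators yield the full sequence; each may be drained from a
    # different thread without racing on the shared tee object.
    assert list(a) == [0, 1, 2, 3]
    assert list(b) == [0, 1, 2, 3]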
|
CompilerGym-development
|
compiler_gym/util/parallelization.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from numbers import Integral
from typing import List
import numpy as np
def convert_number_to_permutation(
n: Integral, permutation_size: Integral
) -> List[Integral]:
m = n
res = np.zeros(permutation_size, dtype=type(permutation_size))
elements = np.arange(permutation_size, dtype=type(permutation_size))
for i in range(permutation_size):
j = m % (permutation_size - i)
m = m // (permutation_size - i)
res[i] = elements[j]
elements[j] = elements[permutation_size - i - 1]
return res
def convert_permutation_to_number(permutation: List[Integral]) -> Integral:
pos = np.arange(len(permutation), dtype=int)
elements = np.arange(len(permutation), dtype=int)
m = 1
res = 0
for i in range(len(permutation) - 1):
res += m * pos[permutation[i]]
m = m * (len(permutation) - i)
pos[elements[len(permutation) - i - 1]] = pos[permutation[i]]
elements[pos[permutation[i]]] = elements[len(permutation) - i - 1]
return res
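# --- Illustrative roundtrip check (added for exposition; not part of the
# original file), assuming the encoding is a bijection over [0, k!).
if __name__ == "__main__":
    from math import factorial

    k = 4
    for n in range(factorial(k)):
        perm = convert_number_to_permutation(n, permutation_size=k)
        assert convert_permutation_to_number(perm) == n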
|
CompilerGym-development
|
compiler_gym/util/permutation.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import shlex
import sys
from typing import Any, Iterable
class ShellFormatCodes:
"""Shell escape codes for pretty-printing."""
PURPLE = "\033[95m"
CYAN = "\033[96m"
DARKCYAN = "\033[36m"
BLUE = "\033[94m"
GREEN = "\033[92m"
YELLOW = "\033[93m"
RED = "\033[91m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
END = "\033[0m"
def emph(stringable: Any) -> str:
"""Emphasize a string."""
return f"{ShellFormatCodes.BOLD}{ShellFormatCodes.BLUE}{stringable}{ShellFormatCodes.END}"
def plural(quantity: int, singular: str, plural: str) -> str:
"""Return the singular or plural word."""
return singular if quantity == 1 else plural
def indent(string: str, n=4) -> str:
"""Indent a multi-line string by given number of spaces."""
return "\n".join(" " * n + x for x in str(string).split("\n"))
def join_cmd(cmd: Iterable[str]) -> str:
"""Join a list of command line arguments into a single string.
This is intended for logging purposes only. It does not provide any safety
guarantees.
"""
if sys.version_info >= (3, 8, 0):
return shlex.join(cmd)
return " ".join(cmd)
|
CompilerGym-development
|
compiler_gym/util/shell_format.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
def geometric_mean(array_like):
"""Zero-length-safe geometric mean."""
values = np.asarray(array_like)
if not values.size:
return 0
# Shortcut to return 0 when any element of the input is not positive.
if not np.all(values > 0):
return 0
a = np.log(values)
return np.exp(a.sum() / len(a))
def arithmetic_mean(array_like):
"""Zero-length-safe arithmetic mean."""
values = np.asarray(array_like)
if not values.size:
return 0
return values.mean()
def stdev(array_like):
"""Zero-length-safe standard deviation."""
values = np.asarray(array_like)
if not values.size:
return 0
return values.std()
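# --- Illustrative behavior (added for exposition; not part of the original file).
if __name__ == "__main__":
    assert abs(geometric_mean([1, 2, 4]) - 2.0) < 1e-9
    assert geometric_mean([1, 0, 4]) == 0  # Non-positive values short-circuit.
    assert geometric_mean([]) == 0  # Zero-length-safe.
    assert arithmetic_mean([1, 2, 3]) == 2.0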
|
CompilerGym-development
|
compiler_gym/util/statistics.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import subprocess
import sys
from contextlib import contextmanager
from signal import Signals
from subprocess import Popen as _Popen
from typing import List
def run_command(cmd: List[str], timeout: int):
with Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
) as process:
stdout, stderr = process.communicate(timeout=timeout)
if process.returncode:
returncode = process.returncode
try:
# Try and decode the name of a signal. Signal returncodes
# are negative.
returncode = f"{returncode} ({Signals(abs(returncode)).name})"
except ValueError:
pass
raise OSError(
f"Compilation job failed with returncode {returncode}\n"
f"Command: {' '.join(cmd)}\n"
f"Stderr: {stderr.strip()}"
)
return stdout
def communicate(process, input=None, timeout=None):
"""subprocess.communicate() which kills subprocess on timeout."""
try:
return process.communicate(input=input, timeout=timeout)
except subprocess.TimeoutExpired:
# kill() was added in Python 3.7.
if sys.version_info >= (3, 7, 0):
process.kill()
else:
process.terminate()
# Wait for shutdown to complete.
try:
process.communicate(timeout=timeout)
except subprocess.TimeoutExpired:
pass # Stubborn process won't die, nothing can be done.
raise
@contextmanager
def Popen(*args, **kwargs):
"""subprocess.Popen() with resilient process termination at end of scope."""
with _Popen(*args, **kwargs) as process:
try:
yield process
finally:
# Process has not yet terminated, kill it.
if process.poll() is None:
# kill() was added in Python 3.7.
if sys.version_info >= (3, 7, 0):
process.kill()
else:
process.terminate()
# Wait for shutdown to complete.
try:
process.communicate(timeout=60)
except subprocess.TimeoutExpired:
pass # Stubborn process won't die, nothing can be done.
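# --- Illustrative usage (added for exposition; not part of the original file).
# Assumes a POSIX system where "echo" and "sleep" are available.
if __name__ == "__main__":
    assert run_command(["echo", "hello"], timeout=10) == "hello\n"
    with Popen(["sleep", "60"]) as process:
        pass  # The child is killed when this scope exits.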
|
CompilerGym-development
|
compiler_gym/util/commands.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import multiprocessing
from collections import deque
from contextlib import contextmanager
from enum import Enum
from itertools import islice
from os import cpu_count
from pathlib import Path
from threading import Lock
from typing import Optional
from pydantic import BaseModel, Field, validator
from pydantic.class_validators import root_validator
logger = logging.getLogger(__name__)
_executor_lock = Lock()
_executor = None
class Executor(BaseModel):
"""Defines an execution environment for jobs.
E.g. a node on a cluster, the local machine, etc. To create jobs,
instantiate this class and submit functions using the executor API:
>>> executor = Executor(type="local", block=True)
>>> with executor.get_executor(logs_dir=Path("logs")) as executor:
... executor.submit(my_job, arg1, arg2)
... executor.submit(another_job)
"""
class Type(str, Enum):
"""Types of execution environments."""
SLURM = "slurm"
"""Submit jobs to a SLURM cluster scheduler."""
LOCAL = "local"
"""Submit jobs to run on the current machine."""
DEBUG = "debug"
"""Submit jobs to run synchronously on the current machine."""
NOOP = "noop"
"""Submitted jobs return immediately without executing. This can be
useful for debugging, where you want to validate the code and
configuration without performing any computation.
"""
type: Type = Field(allow_mutation=False)
"""The execution environment."""
slurm_partition: Optional[str] = Field(default=None, allow_mutation=False)
"""The name of the SLURM partition to submit jobs to.
Only used for :code:`Type.SLURM` executors.
"""
cpus: int = Field(default=1, allow_mutation=False, ge=-1)
"""The number of CPU threads to provision.
If the type of executor is :code:`Type.SLURM`, this is the number of CPU
threads to provision for each job. If the type of executor is
    :code:`Type.LOCAL`, this is the number of parallel jobs to process in a
    process pool. If the value is -1 and the executor is :code:`Type.LOCAL`, the
    number of CPUs reported by :code:`os.cpu_count()` is used. Has no effect for
:code:`Type.DEBUG` and :code:`Type.NOOP`.
"""
gpus: int = Field(default=0, allow_mutation=False, ge=0)
"""The number of GPUs to provision.
This is used only by the :code:`Type.SLURM` executor.
"""
timeout_hours: float = Field(default=12, allow_mutation=False, gt=0)
block: bool = Field(default=False, allow_mutation=False)
"""If :code:`True`, the :code:`get_executor()` context manager will block
until all jobs have completed when exiting scope. Jobs are still submitted
asynchronously for parallel execution.
"""
# === Start of public API. ===
@contextmanager
def get_executor(
self, logs_dir: Path, timeout_hours: Optional[float] = None, cpus=None
) -> "Executor":
cpus = cpus or self.cpus
timeout_hours = timeout_hours or self.timeout_hours
if self.type == self.Type.SLURM:
try:
from submitit import AutoExecutor
except ImportError as e:
raise OSError(
"Using the slurm executor requires the submitit library. "
"Install submitit using: python -m pip install submitit"
) from e
executor = AutoExecutor(folder=logs_dir)
executor.update_parameters(
timeout_min=int(round(timeout_hours * 60)),
nodes=1,
cpus_per_task=cpus,
gpus_per_node=self.gpus,
slurm_partition=self.slurm_partition,
)
name = self.slurm_partition or "slurm" # default value for logging
elif self.type == self.Type.LOCAL:
executor, name = (
LocalParallelExecutor(
cpus=cpus,
timeout_seconds=int(round(timeout_hours * 3600)),
),
"local",
)
elif self.type == self.Type.DEBUG:
executor, name = LocalSynchronousExecutor(), "local"
elif self.type == self.Type.NOOP:
executor, name = DummyExecutor(), "noop"
else:
assert False, f"Unknown executor: {self.type} ({type(self.type).__name__})"
executor = WrappedExecutor(executor, name=name)
yield executor
if self.type == self.Type.DEBUG or self.block:
wait_on_jobs(
executor.jobs,
executor_name=str(executor),
cancel_on_error=self.type == self.Type.SLURM,
)
if hasattr(executor.unwrapped, "close"):
executor.unwrapped.close()
@staticmethod
def get_default_local_executor():
"""Return a singleton :code:`Executor`.
:returns: An executor.
"""
with _executor_lock:
global _executor
if _executor is None:
_executor = Executor(type="local", cpus=cpu_count())
return _executor
# === Start of implementation details. ===
@validator("slurm_partition")
def validate_slurm_partition(cls, value, *, values, **kwargs):
del kwargs
if values["type"] == cls.Type.SLURM:
assert value, f"Must specify a partition for executor: {values['executor']}"
return value
@validator("cpus", pre=True)
def validate_cpus(cls, value, *, values, **kwargs):
del kwargs
# -1 CPU count defaults to CPU count.
if values["type"] == cls.Type.LOCAL and value == -1:
return cpu_count()
return value
@root_validator
def local_always_blocks(cls, values):
if values["type"] == cls.Type.LOCAL or values["type"] == cls.Type.NOOP:
values["block"] = True
return values
class Config:
validate_assignment = True
class WrappedExecutor:
"""An executor-like interface that records all jobs that are submitted."""
def __init__(self, executor, name: str):
self.unwrapped = executor
self.jobs = []
self.name = name
def submit(self, *args, **kwargs):
job = self.unwrapped.submit(*args, **kwargs)
logger.info("Submitting job %s to %s ...", job.job_id, self)
self.jobs.append(job)
return job
def __repr__(self) -> str:
return self.name
def wait_on_jobs(jobs, executor_name: str = "executor", cancel_on_error: bool = True):
njobs = len(jobs)
jobs = deque(jobs)
def cancel_all_jobs(jobs):
print(f"Cancelling {len(jobs)} {executor_name} jobs")
for job in jobs:
try:
job.cancel()
except: # noqa
pass
# Produce a list of the first few job IDs
max_num_job_ids_to_show = 8
job_ids = [j.job_id for j in islice(jobs, max_num_job_ids_to_show)]
job_ids = ", ".join(str(x) for x in job_ids)
job_ids = f"job ID: {job_ids}" if len(jobs) == 1 else f"job IDs: {job_ids}"
if len(jobs) > max_num_job_ids_to_show:
job_ids = f"{job_ids} ..."
logger.info(
f"Waiting for {len(jobs)} {executor_name} jobs to complete with {job_ids}"
)
completed = 0
while jobs:
job = jobs.popleft()
if cancel_on_error:
try:
job.result()
completed += 1
logger.info(f"Jobs completed = {completed} of {njobs} ...")
except Exception as e: # noqa Intentionally broad.
logger.error(f"Caught: {type(e).__name__}: {e}")
jobs.append(job)
return cancel_all_jobs(jobs)
else:
job.result()
completed += 1
logger.info(f"Jobs completed = {completed} of {njobs} ...")
logger.info("All done.")
class LocalParallelExecutor:
"""An executor which uses a process pool to process jobs in parallel on the
local machine.
"""
class LocalJob:
def __init__(self, job_id: int, async_result, timeout_seconds: int):
self._async_result = async_result
self.job_id = job_id
self.timeout_seconds = timeout_seconds
def result(self):
return self._async_result.get(timeout=self.timeout_seconds)
def cancel(self):
pass
def __init__(self, cpus: int, timeout_seconds: int):
self.last_job_id = 0
self.process_pool = multiprocessing.Pool(cpus)
self.timeout_seconds = timeout_seconds
self.futures = []
def submit(self, fn, *args, **kwargs):
self.last_job_id += 1
self.futures.append(self.process_pool.apply_async(fn, args, kwargs))
return self.LocalJob(
self.last_job_id,
self.futures[-1],
self.timeout_seconds,
)
def close(self):
# Block until all jobs have completed.
for future in self.futures:
future.get()
self.process_pool.close()
class LocalSynchronousExecutor:
"""An executor where each job is executed synchronously when result() is
called."""
class LocalJob:
def __init__(self, job_id: int, fn, *args, **kwargs):
self._callback = lambda: fn(*args, **kwargs)
self.job_id = job_id
def result(self):
return self._callback()
def cancel(self):
pass
def __init__(self):
self.last_job_id = 0
def submit(self, fn, *args, **kwargs):
self.last_job_id += 1
return self.LocalJob(self.last_job_id, fn, *args, **kwargs)
class DummyExecutor:
class DummyJob:
def __init__(self, job_id: int):
self.job_id = job_id
def result(self):
return None
def cancel(self):
pass
def __init__(self) -> None:
self.last_job_id = 0
def submit(self, fn, *args, **kwargs):
del fn
del args
del kwargs
self.last_job_id += 1
return self.DummyJob(self.last_job_id)
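# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Submits a few jobs to a blocking local executor. "logs" is an assumed
# scratch directory for executor logs, and the builtin pow() is used as a
# picklable stand-in job.
if __name__ == "__main__":
    executor = Executor(type="local", cpus=2)  # local executors always block
    with executor.get_executor(logs_dir=Path("logs")) as pool:
        jobs = [pool.submit(pow, i, 2) for i in range(4)]
    print([job.result() for job in jobs])  # -> [0, 1, 4, 9]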
|
CompilerGym-development
|
compiler_gym/util/executor.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This module contains debugging helpers."""
import logging
import os
# Map for translating between COMPILER_GYM_DEBUG levels to python logging
# severity values.
_DEBUG_LEVEL_LOGGING_LEVEL_MAP = {
0: logging.ERROR,
1: logging.WARNING,
2: logging.INFO,
3: logging.DEBUG,
}
_LOGGING_LEVEL_DEBUG_LEVEL_MAP = {
v: k for k, v in _DEBUG_LEVEL_LOGGING_LEVEL_MAP.items()
}
def get_debug_level() -> int:
"""Get the debugging level.
The debug level is a non-negative integer that controls the verbosity of
logging messages and other debugging behavior. At each level, the types of
messages that are logged are:
* :code:`0` - only non-fatal errors are logged (default).
    * :code:`1` - extra warning messages are logged.
* :code:`2` - enables purely informational logging messages.
* :code:`3` and above - extremely verbose logging messages are enabled that
may be useful for debugging.
The debugging level can be set using the :code:`$COMPILER_GYM_DEBUG`
environment variable, or by calling :func:`set_debug_level`.
:return: A non-negative integer.
"""
return max(int(os.environ.get("COMPILER_GYM_DEBUG", "0")), 0)
def get_logging_level() -> int:
"""Returns the logging level.
The logging level is not set directly, but as a result of setting the debug
level using :func:`set_debug_level`.
:return: An integer.
"""
return _DEBUG_LEVEL_LOGGING_LEVEL_MAP.get(get_debug_level(), logging.DEBUG)
def set_debug_level(level: int) -> None:
"""Set a new debugging level.
See :func:`get_debug_level` for a description of the debug levels.
    The debugging level should be set before interacting with CompilerGym, as
    many CompilerGym objects check the debug level only at initialization time
    and not throughout their lifetime.
Setting the debug level affects the entire process and is not thread safe.
:param level: The debugging level to use.
"""
os.environ["COMPILER_GYM_DEBUG"] = str(level)
logging.getLogger("compiler_gym").setLevel(
_DEBUG_LEVEL_LOGGING_LEVEL_MAP.get(level, logging.DEBUG)
)
def logging_level_to_debug_level(logging_level: int) -> int:
"""Convert a python logging level to a debug level.
See :func:`get_debug_level` for a description of the debug levels.
:param logging_level: A python logging level.
    :returns: An integer debug level in the range :code:`[0,3]`.
"""
return max(_LOGGING_LEVEL_DEBUG_LEVEL_MAP.get(logging_level, 1) - 1, 0)
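# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Round-trips a debug level through the environment variable and the logger.
if __name__ == "__main__":
    set_debug_level(2)
    assert get_debug_level() == 2
    assert get_logging_level() == logging.INFO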
|
CompilerGym-development
|
compiler_gym/util/debug_util.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import sys
from io import StringIO
from typing import Iterator
class CapturedOutput:
def __init__(self):
self.stdout = StringIO()
self.stderr = StringIO()
@contextlib.contextmanager
def capture_output() -> Iterator[CapturedOutput]:
"""Context manager to temporarily capture stdout/stderr."""
stdout, stderr = sys.stdout, sys.stderr
try:
captured = CapturedOutput()
sys.stdout, sys.stderr = captured.stdout, captured.stderr
yield captured
finally:
sys.stdout, sys.stderr = stdout, stderr
captured.stdout = captured.stdout.getvalue()
captured.stderr = captured.stderr.getvalue()
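# --- Illustrative usage (editor's sketch, not part of the original module) ---
# After the context exits, the stdout/stderr attributes hold plain strings.
if __name__ == "__main__":
    with capture_output() as out:
        print("hello")
    assert out.stdout == "hello\n" and out.stderr == ""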
|
CompilerGym-development
|
compiler_gym/util/capture_output.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
from typing import Any, Callable
def memoized_property(func: Callable[..., Any]) -> Callable[..., Any]:
"""A property decorator that memoizes the result.
This is used to memoize the results of class properties, to be used when
computing the property value is expensive.
    :param func: The function which should be made into a property.
:returns: The decorated property function.
"""
attribute_name = "_memoized_property_" + func.__name__
@property
@functools.wraps(func)
def decorator(self):
if not hasattr(self, attribute_name):
setattr(self, attribute_name, func(self))
return getattr(self, attribute_name)
return decorator
def frozen_class(cls):
"""Prevents setting attributes on a class after construction.
    Wrap a class definition to declare it frozen:
        @frozen_class
        class MyClass:
            def __init__(self):
                self.foo = 0
    Any attempt to set a *new* attribute outside of construction will then
    raise an error:
    >>> c = MyClass()
    >>> c.foo = 5  # OK: "foo" already exists.
    >>> c.bar = 10
    TypeError: Cannot set attribute bar on frozen class MyClass
"""
cls._frozen = False
def frozen_setattr(self, key, value):
if self._frozen and not hasattr(self, key):
raise TypeError(
f"Cannot set attribute {key} on frozen class {cls.__name__}"
)
object.__setattr__(self, key, value)
def init_decorator(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
func(self, *args, **kwargs)
            self._frozen = True  # Name must match the "_frozen" check in frozen_setattr.
return wrapper
cls.__setattr__ = frozen_setattr
cls.__init__ = init_decorator(cls.__init__)
return cls
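# --- Illustrative usage (editor's sketch, not part of the original module) ---
# The property body runs once; later accesses read the cached attribute.
if __name__ == "__main__":

    class Expensive:
        @memoized_property
        def value(self):
            print("computing ...")  # printed only on the first access
            return 42

    instance = Expensive()
    assert instance.value == 42  # prints "computing ..."
    assert instance.value == 42  # served from the cache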
|
CompilerGym-development
|
compiler_gym/util/decorators.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""A consistent way to interpret a user-specified environment from commandline flags."""
import sys
from contextlib import contextmanager
from pathlib import Path
from typing import Optional, Union
import gym
from absl import app, flags
from compiler_gym.envs import CompilerEnv
from compiler_gym.service import ConnectionOpts
from compiler_gym.service.proto import Benchmark
from compiler_gym.util.registration import COMPILER_GYM_ENVS
flags.DEFINE_string(
"env",
None,
"The name of an environment to use. The environment must be registered with "
"gym.",
)
flags.DEFINE_string(
"service",
None,
"If set, this specifies the hostname and port of a service to connect to, "
"rather than creating a new environment. Use the format <hostname>:<port>. "
"Supersedes --local_service_binary.",
)
flags.DEFINE_string(
"local_service_binary",
None,
"If set, this specifies the path of a local service binary to run to "
"provide the environment service, rather than the default service binary.",
)
flags.DEFINE_string(
"observation",
None,
"The name of a observation space to use. If set, this overrides any "
"default set by the environment.",
)
flags.DEFINE_string(
"reward",
None,
"The name of a reward space to use. If set, this overrides any default "
"set by the environment.",
)
flags.DEFINE_boolean(
"ls_env",
False,
"Print the list of available environments that can be passed to --env and exit.",
)
# Connection settings.
flags.DEFINE_float(
"service_rpc_call_max_seconds",
300,
"Service configuration option. Limits the maximum number of seconds to wait "
"for a service RPC to return a response.",
)
flags.DEFINE_float(
"service_init_max_seconds",
10,
"Service configuration option. Limits the maximum number of seconds to wait "
"to establish a connection to a service.",
)
flags.DEFINE_integer(
"service_init_max_attempts",
5,
"Service configuration option. Limits the maximum number of attempts to "
"initialize a service.",
)
flags.DEFINE_float(
"local_service_port_init_max_seconds",
10,
"Service configuration option. Limits the maximum number of seconds to wait "
"for a local service to write a port.txt file on initialization.",
)
flags.DEFINE_float(
"local_service_exit_max_seconds",
10,
"Service configuration option. Limits the maximum number of seconds to wait "
"for a local service to terminate on close.",
)
flags.DEFINE_float(
"service_rpc_init_max_seconds",
3,
"Service configuration option. Limits the number of seconds to wait for an "
"RPC connection to establish on initialization.",
)
FLAGS = flags.FLAGS
def connection_settings_from_flags(
service_url: str = None, local_service_binary: Path = None
) -> ConnectionOpts:
"""Returns either the name of the benchmark, or a Benchmark message."""
return ConnectionOpts(
rpc_call_max_seconds=FLAGS.service_rpc_call_max_seconds,
init_max_seconds=FLAGS.service_init_max_seconds,
init_max_attempts=FLAGS.service_init_max_attempts,
local_service_port_init_max_seconds=FLAGS.local_service_port_init_max_seconds,
local_service_exit_max_seconds=FLAGS.local_service_exit_max_seconds,
rpc_init_max_seconds=FLAGS.service_rpc_init_max_seconds,
)
def env_from_flags(benchmark: Optional[Union[str, Benchmark]] = None) -> CompilerEnv:
if FLAGS.ls_env:
print("\n".join(sorted(COMPILER_GYM_ENVS)))
sys.exit(0)
connection_settings = connection_settings_from_flags()
if not FLAGS.env:
raise app.UsageError("--env must be set")
init_opts = {
"benchmark": benchmark,
"connection_settings": connection_settings,
}
if FLAGS.local_service_binary:
init_opts["service"] = Path(FLAGS.service)
if FLAGS.service:
init_opts["service"] = FLAGS.service
env = gym.make(FLAGS.env, **init_opts)
if FLAGS.observation:
env.observation_space = FLAGS.observation
if FLAGS.reward:
env.reward_space = FLAGS.reward
return env
@contextmanager
def env_session_from_flags(
benchmark: Optional[Union[str, Benchmark]] = None
) -> CompilerEnv:
with env_from_flags(benchmark=benchmark) as env:
yield env
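# --- Illustrative usage (editor's sketch, not part of the original module) ---
# A minimal absl entry point; run as: python my_script.py --env=llvm-v0
# (assumes the llvm-v0 environment is installed).
if __name__ == "__main__":

    def _main(argv):
        assert len(argv) == 1, f"Unrecognized flags: {argv[1:]}"
        with env_session_from_flags() as env:
            env.reset()
            print(env.benchmark)

    app.run(_main)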
|
CompilerGym-development
|
compiler_gym/util/flags/env_from_flags.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from absl import flags
flags.DEFINE_integer("episode_length", 5, "The number of steps in each episode.")
|
CompilerGym-development
|
compiler_gym/util/flags/episode_length.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from absl import flags
flags.DEFINE_string(
"output_dir",
None,
"The directory to read and write files to.",
)
|
CompilerGym-development
|
compiler_gym/util/flags/output_dir.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from multiprocessing import cpu_count
from absl import flags
flags.DEFINE_integer("nproc", cpu_count(), "The number of parallel processes to run.")
|
CompilerGym-development
|
compiler_gym/util/flags/nproc.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from absl import flags
flags.DEFINE_float("learning_rate", 0.008, "The learning rate for training.")
|
CompilerGym-development
|
compiler_gym/util/flags/learning_rate.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from absl import flags
flags.DEFINE_integer("seed", 0xCC, "Random state initializer.")
|
CompilerGym-development
|
compiler_gym/util/flags/seed.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""A consistent way to interpret a user-specified benchmark from commandline flags."""
from pathlib import Path
from typing import Optional, Union
from absl import flags
from compiler_gym.datasets import Benchmark
flags.DEFINE_string(
"benchmark",
None,
"The URI of the benchmark to use. Use the benchmark:// scheme to "
"reference named benchmarks, or the file:/// scheme to reference paths "
"to program data. If no scheme is specified, benchmark:// is implied.",
)
FLAGS = flags.FLAGS
def benchmark_from_flags() -> Optional[Union[Benchmark, str]]:
"""Returns either the name of the benchmark, or a Benchmark message."""
if FLAGS.benchmark:
if FLAGS.benchmark.startswith("file:///"):
path = Path(FLAGS.benchmark[len("file:///") :])
uri = f"benchmark://user-v0/{path}"
return Benchmark.from_file(uri=uri, path=path)
else:
return FLAGS.benchmark
else:
# No benchmark was specified.
return None
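# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Parses flags directly, then resolves the benchmark. A named benchmark is
# returned as a string; a file:/// URI is wrapped in a user-v0 Benchmark.
if __name__ == "__main__":
    FLAGS(["argv0", "--benchmark=cbench-v1/crc32"])
    print(benchmark_from_flags())  # -> cbench-v1/crc32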
|
CompilerGym-development
|
compiler_gym/util/flags/benchmark_from_flags.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from absl import flags
flags.DEFINE_integer("episodes", 2000, "The number of episodes to run.")
|
CompilerGym-development
|
compiler_gym/util/flags/episodes.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This program can be used to query and run the CompilerGym services.
Listing available environments
------------------------------
List the environments that are available using:
.. code-block::
$ python -m compiler_gym.bin.service --ls_env
Querying the capabilities of a service
--------------------------------------
Query the capabilities of a service using:
.. code-block::
$ python -m compiler_gym.bin.service --env=<env>
For example:
.. code-block::
$ python -m compiler_gym.bin.service --env=llvm-v0
Datasets
--------
+----------------------------+--------------------------+------------------------------+
| Dataset | Num. Benchmarks [#f1]_ | Description |
+============================+==========================+==============================+
| benchmark://anghabench-v0 | 1,042,976 | Compile-only C/C++ functions |
+----------------------------+--------------------------+------------------------------+
| benchmark://blas-v0 | 300 | Basic linear algebra kernels |
+----------------------------+--------------------------+------------------------------+
...
Observation Spaces
------------------
+--------------------------+----------------------------------------------+
| Observation space | Shape |
+==========================+==============================================+
| Autophase | `Box(0, 9223372036854775807, (56,), int64)` |
+--------------------------+----------------------------------------------+
| AutophaseDict | `Dict(ArgsPhi:int<0,inf>, BB03Phi:int<0,...` |
+--------------------------+----------------------------------------------+
| BitcodeFile | `str_list<>[0,4096.0])` |
+--------------------------+----------------------------------------------+
...
The output is tabular summaries of the environment's datasets, observation
spaces, reward spaces, and action spaces, using reStructuredText syntax
(https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html#tables).
To query the capabilities of an unmanaged service, use :code:`--service`. For
example, query a service running the :code:`llvm-v0` environment at
:code:`localhost:8080` using:
.. code-block::
$ python -m compiler_gym.bin.service --env=llvm-v0 --service=localhost:8080
To query the capability of a binary that implements the RPC service interface,
use the :code:`--local_service_binary` flag:
.. code-block::
$ python -m compiler_gym.bin.service --env=llvm-v0 --local_service_binary=/path/to/service/binary
Running a Service
-----------------
This module can also be used to launch a service that can then be connected to
by other environments. Start a service by specifying a port number using:
.. code-block::
$ python -m compiler_gym.bin.service --env=llvm-v0 --run_on_port=7777
Environments can connect to this service by passing the :code:`<hostname>:<port>`
address during environment initialization time. For example, in python:
>>> env = compiler_gym.make("llvm-v0", service="localhost:7777")
Or at the command line:
.. code-block::
$ python -m compiler_gym.bin.random_search --env=llvm-v0 --service=localhost:7777
"""
import signal
import sys
from typing import Iterable
import gym
from absl import app, flags
from compiler_gym.datasets import Dataset
from compiler_gym.envs import CompilerEnv
from compiler_gym.service.connection import ConnectionOpts
from compiler_gym.spaces import Commandline, NamedDiscrete
from compiler_gym.util.flags.env_from_flags import env_from_flags
from compiler_gym.util.tabulate import tabulate
from compiler_gym.util.truncate import truncate
flags.DEFINE_string(
"heading_underline_char",
"-",
"The character to repeat to underline headings.",
)
flags.DEFINE_integer(
"run_on_port",
None,
"Is specified, serve an instance of the service on the requested port. "
"This never terminates.",
)
FLAGS = flags.FLAGS
def header(message: str):
underline = FLAGS.heading_underline_char * (
len(message) // len(FLAGS.heading_underline_char)
)
return f"\n\n{message}\n{underline}\n"
def shape2str(shape, n: int = 80):
string = str(shape)
if len(string) > n:
return f"`{string[:n-4]}` ..."
return f"`{string}`"
def summarize_datasets(datasets: Iterable[Dataset]) -> str:
rows = []
# Override the default iteration order of datasets.
for dataset in sorted(datasets, key=lambda d: d.name):
# Raw numeric values here, formatted below.
description = truncate(dataset.description, max_line_len=60)
links = ", ".join(
f"`{name} <{url}>`__" for name, url in sorted(dataset.references.items())
)
if links:
description = f"{description} [{links}]"
rows.append(
(
dataset.name,
dataset.size,
description,
dataset.validatable,
)
)
rows.append(("Total", sum(r[1] for r in rows), "", ""))
return (
tabulate(
[
(
n,
# A size of zero means infinite.
f"{f:,d}" if f > 0 else "∞",
l,
v,
)
for n, f, l, v in rows
],
headers=(
"Dataset",
"Num. Benchmarks [#f1]_",
"Description",
"Validatable [#f2]_",
),
)
+ f"""
.. [#f1] Values obtained on {sys.platform}. Datasets are platform-specific.
.. [#f2] A **validatable** dataset is one where the behavior of the benchmarks
can be checked by compiling the programs to binaries and executing
them. If the benchmarks crash, or are found to have different behavior,
then validation fails. This type of validation is used to check that
the compiler has not broken the semantics of the program.
See :mod:`compiler_gym.bin.validate`.
"""
)
def print_service_capabilities(env: CompilerEnv):
"""Discover and print the capabilities of a CompilerGym service.
:param env: An environment.
"""
print(header("Datasets"))
print(
summarize_datasets(env.datasets),
)
print(header("Observation Spaces"))
print(
tabulate(
sorted(
[
(space, f"`{truncate(shape.space, max_line_len=80)}`")
for space, shape in env.observation.spaces.items()
]
),
headers=("Observation space", "Shape"),
)
)
print(header("Reward Spaces"))
print(
tabulate(
[
(
name,
space.range,
space.success_threshold,
"Yes" if space.deterministic else "No",
"Yes" if space.platform_dependent else "No",
)
for name, space in sorted(env.reward.spaces.items())
],
headers=(
"Reward space",
"Range",
"Success threshold",
"Deterministic?",
"Platform dependent?",
),
)
)
for action_space in env.action_spaces:
print(header(f"{action_space.name} Action Space"))
# Special handling for commandline action spaces to print additional
# information.
if isinstance(action_space, Commandline):
table = tabulate(
[
(f"`{n}`", d)
for n, d in zip(
action_space.names,
action_space.descriptions,
)
],
headers=("Action", "Description"),
)
print(table)
elif isinstance(action_space, NamedDiscrete):
table = tabulate(
[(a,) for a in sorted(action_space.names)],
headers=("Action",),
)
print(table)
else:
print(f'Action space "{action_space}" is not supported.')
def main(argv):
"""Main entry point."""
assert len(argv) == 1, f"Unrecognized flags: {argv[1:]}"
if FLAGS.run_on_port:
assert FLAGS.env, "Must specify an --env to run"
settings = ConnectionOpts(script_args=["--port", str(FLAGS.run_on_port)])
with gym.make(FLAGS.env, connection_settings=settings) as env:
print(
f"=== Started a service on port {FLAGS.run_on_port}. Use C-c to terminate. ==="
)
signal.pause()
with env_from_flags() as env:
print_service_capabilities(env)
if __name__ == "__main__":
app.run(main)
|
CompilerGym-development
|
compiler_gym/bin/service.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Run a parallelized random search of an environment's action space.
.. code-block::
$ python -m compiler_gym.bin.random_search --env=<env> --benchmark=<name> [--runtime=<sec>]
This program runs a simple random agent on the action space of a single
benchmark. The best reward, and sequence of actions that produced this, are
logged to file.
For example, run a random search of the LLVM instruction count optimization
space on a Dijkstra benchmark for 60 seconds using:
.. code-block::
$ python -m compiler_gym.bin.random_search --env=llvm-ic-v0 --benchmark=cbench-v1/dijkstra --runtime=60
Started 16 worker threads for benchmark benchmark://cbench-v1/dijkstra (410 instructions) using reward IrInstructionCountOz.
=== Running for a minute ===
Runtime: a minute. Num steps: 470,407 (7,780 / sec). Num episodes: 4,616 (76 / sec). Num restarts: 0.
Best reward: 101.59% (96 passes, found after 35 seconds)
Ending jobs ... done
Step [000 / 096]: reward=0.621951
Step [001 / 096]: reward=0.621951, change=0.000000, action=AlwaysInlinerLegacyPass
...
Step [094 / 096]: reward=1.007905, change=0.066946, action=CfgsimplificationPass
Step [095 / 096]: reward=1.007905, change=0.000000, action=LoopVersioningPass
Step [096 / 096]: reward=1.015936, change=0.008031, action=NewGvnpass
Search strategy
---------------
At each step, the agent selects an action randomly and records the
reward. After a number of steps without improving reward (the "patience" of the
agent), the agent terminates, and the environment resets. The number of steps
to take without making progress can be configured using the
:code:`--patience=<num>` flag.
Use :code:`--runtime` to limit the total runtime of the search. If not provided,
the search will run indefinitely. Use :code:`C-c` to cancel an in-progress
search.
Execution strategy
------------------
The results of the search are logged to files. Control the location of these
logs using the :code:`--output_dir=/path` flag.
Multiple agents are run in parallel. By default, the number of agents is equal
to the number of processors on the host machine. Set a different value using
:code:`--nproc`.
"""
import sys
from pathlib import Path
from absl import app, flags
import compiler_gym.util.flags.nproc # noqa Flag definition.
import compiler_gym.util.flags.output_dir # noqa Flag definition.
from compiler_gym.random_search import random_search
from compiler_gym.util.flags.benchmark_from_flags import benchmark_from_flags
from compiler_gym.util.flags.env_from_flags import env_from_flags
flags.DEFINE_boolean("ls_reward", False, "List the available reward spaces and exit.")
flags.DEFINE_integer(
"patience",
0,
"The number of steps that a random agent makes without improvement before terminating. "
"If 0, use the size of the action space for the patience value.",
)
flags.DEFINE_float("runtime", None, "If set, limit the search to this many seconds.")
flags.DEFINE_boolean(
"skip_done",
False,
"If set, don't overwrite existing experimental results.",
)
flags.DEFINE_float(
"fail_threshold",
None,
"If set, define a minimum threshold for reward. The script will exit with return code 1 "
"if this threshold is not reached.",
)
FLAGS = flags.FLAGS
def main(argv):
"""Main entry point."""
argv = FLAGS(argv)
if len(argv) != 1:
raise app.UsageError(f"Unknown command line arguments: {argv[1:]}")
if FLAGS.ls_reward:
with env_from_flags() as env:
print("\n".join(sorted(env.reward.indices.keys())))
return
assert FLAGS.patience >= 0, "--patience must be >= 0"
# Create an environment now to catch a startup time error before we launch
# a bunch of workers.
with env_from_flags() as env:
env.reset(benchmark=benchmark_from_flags())
env = random_search(
make_env=lambda: env_from_flags(benchmark=benchmark_from_flags()),
outdir=Path(FLAGS.output_dir) if FLAGS.output_dir else None,
patience=FLAGS.patience,
total_runtime=FLAGS.runtime,
nproc=FLAGS.nproc,
skip_done=FLAGS.skip_done,
)
try:
# Exit with error if --fail_threshold was set and the best reward does not
# meet this value.
if (
FLAGS.fail_threshold is not None
and env.episode_reward < FLAGS.fail_threshold
):
print(
f"Best reward {env.episode_reward:.3f} below threshold of {FLAGS.fail_threshold}",
file=sys.stderr,
)
sys.exit(1)
finally:
env.close()
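def _example_programmatic_search():
    """Editor's sketch: mirror what main() does without absl flags.
    This hypothetical helper is illustrative only and is never called by
    main(). It assumes the llvm-ic-v0 environment and the cbench-v1 dataset
    are available.
    """
    import compiler_gym  # Local import: keeps the sketch self-contained.

    env = random_search(
        make_env=lambda: compiler_gym.make(
            "llvm-ic-v0", benchmark="cbench-v1/dijkstra"
        ),
        patience=25,
        total_runtime=60,
        nproc=4,
    )
    try:
        print("Best reward:", env.episode_reward)
    finally:
        env.close()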
if __name__ == "__main__":
app.run(main)
|
CompilerGym-development
|
compiler_gym/bin/random_search.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Validate environment states.
Example usage:
.. code-block::
$ cat << EOF |
benchmark,reward,walltime,commandline
cbench-v1/crc32,0,1.2,opt input.bc -o output.bc
EOF
python -m compiler_gym.bin.validate --env=llvm-ic-v0 -
Use this script to validate environment states. Environment states are read from
stdin as a comma-separated list of benchmark names, walltimes, episode rewards,
and commandlines. Each state is validated by replaying the commandline and
validating that the reward matches the expected value. Further, some benchmarks
allow for validation of program semantics. When available, those additional
checks will be automatically run.
Input Format
------------
The correct format for generating input states can be generated using
:class:`CompilerEnvStateWriter <compiler_gym.CompilerEnvStateWriter>`. For
example:
>>> env = gym.make("llvm-autophase-ic-v0")
>>> env.reset()
>>> env.step(env.action_space.sample())
>>> with CompilerEnvStateWriter(open("results.csv", "wb")) as writer:
... writer.write_state(env.state)
Output Format
-------------
This script prints one line per input state. The order of input states is not
preserved. A successfully validated state has the format:
.. code-block::
✅ <benchmark_name> <reproduced_reward>
Else if validation fails, the output is:
.. code-block::
❌ <benchmark_name> <error_details>
"""
import json
import re
import sys
import numpy as np
from absl import app, flags
import compiler_gym.util.flags.nproc # noqa Flag definition.
from compiler_gym.compiler_env_state import CompilerEnvState, CompilerEnvStateReader
from compiler_gym.util.flags.env_from_flags import env_from_flags
from compiler_gym.util.shell_format import emph, plural
from compiler_gym.util.statistics import arithmetic_mean, geometric_mean, stdev
from compiler_gym.validate import ValidationResult, validate_states
flags.DEFINE_boolean(
"inorder",
False,
"Whether to print results in the order they are provided. "
"The default is to print results as soon as they are available.",
)
flags.DEFINE_string(
"reward_aggregation",
"geomean",
"The aggregation method to use for rewards. Allowed values are 'mean' for "
"arithmetic mean and 'geomean' for geometric mean.",
)
flags.DEFINE_boolean(
"debug_force_valid",
False,
"Debugging flags. Skips the validation and prints output as if all states "
"were succesfully validated.",
)
flags.DEFINE_boolean(
"summary_only",
False,
"Do not print individual validation results, print only the summary at the " "end.",
)
flags.DEFINE_string(
"validation_logfile",
"validation.log.json",
"The path of a file to write a JSON validation log to.",
)
FLAGS = flags.FLAGS
def state_name(state: CompilerEnvState) -> str:
"""Get the string name for a state."""
return re.sub(r"^benchmark://", "", state.benchmark)
def to_string(result: ValidationResult, name_col_width: int) -> str:
"""Format a validation result for printing."""
name = state_name(result.state)
if not result.okay():
msg = ", ".join(result.error_details.strip().split("\n"))
return f"❌ {name} {msg}"
elif result.state.reward is None:
return f"✅ {name}"
else:
return f"✅ {name:<{name_col_width}} {result.state.reward:9.4f}"
def main(argv):
"""Main entry point."""
try:
states = list(CompilerEnvStateReader.read_paths(argv[1:]))
except ValueError as e:
print(e, file=sys.stderr)
sys.exit(1)
if not states:
print(
"No inputs to validate. Pass a CSV file path as an argument, or "
"use - to read from stdin.",
file=sys.stderr,
)
sys.exit(1)
# Send the states off for validation
if FLAGS.debug_force_valid:
validation_results = (
ValidationResult(
state=state,
reward_validated=True,
actions_replay_failed=False,
reward_validation_failed=False,
benchmark_semantics_validated=False,
benchmark_semantics_validation_failed=False,
walltime=0,
)
for state in states
)
else:
validation_results = validate_states(
env_from_flags,
states,
nproc=FLAGS.nproc,
inorder=FLAGS.inorder,
)
# Determine the name of the reward space.
with env_from_flags() as env:
if FLAGS.reward_aggregation == "geomean":
def reward_aggregation(a):
return geometric_mean(np.clip(a, 0, None))
reward_aggregation_name = "Geometric mean"
elif FLAGS.reward_aggregation == "mean":
reward_aggregation = arithmetic_mean
reward_aggregation_name = "Mean"
else:
raise app.UsageError(
f"Unknown aggregation type: '{FLAGS.reward_aggregation}'"
)
if env.reward_space:
reward_name = f"{reward_aggregation_name} {env.reward_space.name}"
else:
reward_name = ""
# Determine the maximum column width required for printing tabular output.
max_state_name_length = max(
len(s)
for s in [state_name(s) for s in states]
+ [
"Mean inference walltime",
reward_name,
]
)
name_col_width = min(max_state_name_length + 2, 78)
error_count = 0
rewards = []
walltimes = []
if FLAGS.summary_only:
def intermediate_print(*args, **kwargs):
del args
del kwargs
else:
intermediate_print = print
def progress_message(i):
intermediate_print(
f"{i} remaining {plural(i, 'state', 'states')} to validate ... ",
end="",
flush=True,
)
progress_message(len(states))
result_dicts = []
def dump_result_dicts_to_json():
with open(FLAGS.validation_logfile, "w") as f:
json.dump(result_dicts, f)
for i, result in enumerate(validation_results, start=1):
intermediate_print("\r\033[K", to_string(result, name_col_width), sep="")
progress_message(len(states) - i)
result_dicts.append(result.dict())
if not result.okay():
error_count += 1
elif result.reward_validated and not result.reward_validation_failed:
rewards.append(result.state.reward)
walltimes.append(result.state.walltime)
if not i % 10:
dump_result_dicts_to_json()
dump_result_dicts_to_json()
# Print a summary footer.
intermediate_print("\r\033[K----", "-" * name_col_width, "-----------", sep="")
print(f"Number of validated results: {emph(len(walltimes))} of {len(states)}")
walltime_mean = f"{arithmetic_mean(walltimes):.3f}s"
walltime_std = f"{stdev(walltimes):.3f}s"
print(
f"Mean walltime per benchmark: {emph(walltime_mean)} "
f"(std: {emph(walltime_std)})"
)
reward = f"{reward_aggregation(rewards):.3f}"
reward_std = f"{stdev(rewards):.3f}"
print(f"{reward_name}: {emph(reward)} (std: {emph(reward_std)})")
if error_count:
sys.exit(1)
if __name__ == "__main__":
app.run(main)
|
CompilerGym-development
|
compiler_gym/bin/validate.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Note that the tutorial is extracted from the doc string using the repeated *
# signs as delimiters. So, please keep them as they are.
"""Run a CompilerGym environment with text interface controls.
.. code-block::
$ python -m compiler_gym.bin.manual_env --env=<env> [--benchmark=<name>] [--observation=<space>] [--reward=<space>]
The benchmark to use can be specified using :code:`--benchmark=<name>`.
**************************
CompilerGym Shell Tutorial
**************************
This program gives a basic shell through which many of the commands from CompilerGym
can be executed. CompilerGym provides a simple Python interface to various
compiler functions, enabling programs to be compiled in different ways and to
make queries about those programs. The goal is to have a simple system for
machine learning in compilers.
Setting a Benchmark, Reward and Observation
-------------------------------------------
CompilerGym operates on a program or benchmark. If not set on the command
line, the benchmark can be specified in the shell with:
.. code-block::
compiler_gym:cbench-v1/qsort> set_benchmark <benchmark-name>
When a benchmark is set, the prompt will update with the name of the benchmark.
Supposing that is "bench", then the prompt would be:
.. code-block::
compiler_gym:bench>
The list of available benchmarks can be shown with the following command,
though this is limited to the first 200 benchmarks:
.. code-block::
compiler_gym:bench> list_benchmarks
You can also see what datasets are available with this command:
.. code-block::
compiler_gym:cbench-v1/qsort> list_datasets
The default reward and observation can be similarly set with:
.. code-block::
compiler_gym:bench> set_default_reward <reward-name>
compiler_gym:bench> set_default_observation <observation-name>
And lists of the choices are available with:
.. code-block::
compiler_gym:bench> list_rewards
compiler_gym:bench> list_observations
The default rewards and observations will be reported every time an action is
taken. So, if, for example, you want to see how the instruction count of the
benchmark program is affected by your actions, set the default reward to
"IrInstructionCount". Then the change in instruction count for each action will
be reported.
Additionally, some of the search techniques require the default reward to be
set, since they will try to optimise that reward.
Actions and the Action Stack
----------------------------
In CompilerGym an action corresponds to invoking a compiler operation
(currently an LLVM opt pass) on the intermediate representation of the program.
Each action acts on the result of the previous action and so on.
So, for example, to first apply the 'tail call elimination' pass, then the
'loop unrolling' pass, we call two actions:
.. code-block::
compiler_gym:bench> action -tailcallelim
compiler_gym:bench> action -loop-unroll
Each action will report its default reward. Note that multiple actions can be
placed on a single line, so that the above is equivalent to:
.. code-block::
compiler_gym:bench> action -tailcallelim -loop-unroll
You can choose a random action, by using just a '-' as the action name:
.. code-block::
compiler_gym:bench> action -
Since an empty line on the shell repeats the last action, you can execute many
random actions by typing that line first then holding down return.
The actions are recorded in a stack, with the latest action on the top of the
stack. You can view the action stack with stack command:
.. code-block::
compiler_gym:bench> stack
This will show, for each action, whether it had an effect (as computed by the
underlying compiler), whether it ended the episode, and what the per-action
and cumulative rewards are.
The last action can be undone by:
.. code-block::
compiler_gym:bench> undo
All actions in the stack can be undone at once by:
.. code-block::
compiler_gym:bench> reset
You can find out what the effect of each action would be by calling this
command:
.. code-block::
compiler_gym:bench> try_all_actions
This will show a table with the reward for each action, sorted by best first.
If you have a large stack of actions, many of which are not profitable, you can
simplify the stack with this command:
.. code-block::
compiler_gym:bench> simplify_stack
This will redo the entire stack, keeping only those actions which previously
gave good rewards. (Note this doesn't mean that the simplified stack will only
have positive rewards; some negative actions may be necessary to set up a later
positive reward.)
Current Status
--------------
For the current state of the program - after whatever actions have been called
on it - you can make several queries.
The first is to get a reward. This might not be the same as the current default
reward:
.. code-block::
compiler_gym:bench> reward <reward-name>
You can see various observations with:
.. code-block::
compiler_gym:bench> observation <observation-name>
Finally, you can print the equivalent command line for achieving the same
behaviour as the actions through the standard system shell:
.. code-block::
compiler_gym:bench> commandline
Searching
---------
Some very basic search capabilities are supported, directly in the shell. Each
of them just looks for another action to add.
First, is the random search through this command:
.. code-block::
compiler_gym:bench> action -
Multiple steps can be taken by holding down the return key.
A hill-climbing search tries an action, but will only accept it if it yields a
positive reward:
.. code-block::
compiler_gym:bench> hill_climb <num-steps>
A simple greedy search tries all possible actions and takes the one with the
highest reward, stopping when no action has a positive reward:
.. code-block::
compiler_gym:bench> greedy <num-steps>
Miscellaneous
-------------
One useful command is:
.. code-block::
compiler_gym:bench> breakpoint
This drops into the Python debugger. It is very useful if you want to see
what is going on internally. There is a 'self.env' object that represents the
environment that is definitely worth exploring.
And finally:
.. code-block::
compiler_gym:bench> exit
Drops out of the shell. :code:`Ctrl-D` should have the same effect.
"""
import cmd
import random
import readline
import sys
from itertools import islice
from absl import app, flags
from compiler_gym.envs import CompilerEnv
from compiler_gym.util.flags.benchmark_from_flags import benchmark_from_flags
from compiler_gym.util.flags.env_from_flags import env_from_flags
from compiler_gym.util.shell_format import emph
from compiler_gym.util.tabulate import tabulate
from compiler_gym.util.timer import Timer
FLAGS = flags.FLAGS
# Extract the tutorial from the doc string
tutorial = "**************************".join(
__doc__.split("**************************")[1:]
)
class ActionHistoryElement:
"""The compiler gym shell records a list of actions taken. This class represent those elements."""
def __init__(self, action_name, action_index, observation, reward, done, info):
"""Arguments are the returns from env.step"""
self.action_name = action_name
self.action_index = action_index
self.observation = observation
self.reward = reward
self.done = done
self.info = info
def has_no_effect(self):
"""Determine if the service thinks this action had no effect"""
return self.info.get("action_had_no_effect")
def has_effect(self):
"""Determine if the service thinks this action had an effect"""
return not self.has_no_effect()
class CompilerGymShell(cmd.Cmd):
"""Run an environment manually.
The manual environment allows the user to step through the environment,
selection observations, rewards, and actions to run as they see fit. This is
useful for debugging.
"""
intro = """Welcome to the CompilerGym Shell!
---------------------------------
Type help or ? for more information.
The 'tutorial' command will give a step by step guide."""
def __init__(self, env: CompilerEnv):
"""Initialise with an environment.
:param env: The environment to run.
"""
super().__init__()
self.env = env
# Get the benchmarks
self.benchmarks = []
for dataset in self.env.datasets:
self.benchmarks += islice(dataset.benchmark_uris(), 50)
self.benchmarks.sort()
# Strip default benchmark:// scheme.
for i, benchmark in enumerate(self.benchmarks):
if benchmark.startswith("benchmark://"):
self.benchmarks[i] = benchmark[len("benchmark://") :]
# Get the observations
self.observations = sorted(self.env.observation.spaces.keys())
# Get the rewards
self.rewards = sorted(self.env.reward.spaces.keys())
# Set up the stack.
self.stack = []
self.set_prompt()
def __del__(self):
"""Tidy up in case postloop() is not called."""
if self.env:
self.env.close()
self.env = None
def do_tutorial(self, arg):
"""Print the turorial"""
print(tutorial)
def preloop(self):
self.old_completer_delims = readline.get_completer_delims()
readline.set_completer_delims(" \t\n")
def postloop(self):
readline.set_completer_delims(self.old_completer_delims)
# Clear the stack
self.stack.clear()
self.env.close()
self.env = None
def set_prompt(self):
"""Set the prompt - shows the benchmark name"""
uri = self.env.benchmark.uri
benchmark_name = (
f"{uri.dataset}{uri.path}"
if uri.scheme == "benchmark"
else f"{uri.scheme}://{uri.dataset}{uri.path}"
)
prompt = f"compiler_gym:{benchmark_name}>"
self.prompt = f"\n{emph(prompt)} "
def simple_complete(self, text, options):
"""Return a list of options that match the text prefix"""
if text:
return [opt for opt in options if opt.startswith(text)]
else:
return options
def get_datasets(self):
"""Get the list of datasets"""
return sorted([k.name for k in self.env.datasets.datasets()])
def do_list_datasets(self, arg):
"""List all of the datasets"""
print(", ".join(self.get_datasets()))
def do_list_benchmarks(self, arg):
"""List the benchmarks"""
print(", ".join(self.benchmarks))
def complete_set_benchmark(self, text, line, begidx, endidx):
"""Complete the set_benchmark argument"""
return self.simple_complete(text, self.benchmarks)
def do_set_benchmark(self, arg):
"""Set the current benchmark.
set_benchmark <name> - set the benchmark
The name should come from the list of benchmarks printed by the command list_benchmarks.
Tab completion will be used if available.
This command will delete the action history.
Use '-' for a random benchmark.
"""
if arg == "-":
arg = self.env.datasets.random_benchmark().uri
print(f"set_benchmark {arg}")
try:
benchmark = self.env.datasets.benchmark(arg)
self.stack.clear()
# Set the current benchmark
with Timer() as timer:
observation = self.env.reset(benchmark=benchmark)
print(f"Reset {self.env.benchmark} environment in {timer}")
if self.env.observation_space and observation is not None:
print(
f"Observation: {self.env.observation_space_spec.to_string(observation)}"
)
self.set_prompt()
except LookupError:
print("Unknown benchmark, '" + arg + "'")
print("Benchmarks are listed with command, list_benchmarks")
def get_actions(self):
"""Get the list of actions"""
return self.env.action_space.names
def do_list_actions(self, arg):
"""List all of the available actions"""
actions = self.get_actions()
print(", ".join(actions))
def complete_action(self, text, line, begidx, endidx):
"""Complete the action argument"""
return self.simple_complete(text, self.get_actions())
def do_action(self, arg):
"""Take a single action step.
action <name> - take the named action
The name should come from the list of actions printed by the command list_actions.
Tab completion will be used if available.
Use '-' for a random action.
"""
if self.stack and self.stack[-1].done:
print(
"No action possible, last action ended by the environment with error:",
self.stack[-1].info["error_details"],
)
print("Consider commands, back or reset")
return
# Determine which action to apply
actions = self.get_actions()
# Allow for multiple actions at once
args = arg.split()
if not args:
print("No action given")
print("Actions are listed with command, list_actions")
print("Use '-' for a random action")
return
# Check each action before executing
for arg in args:
if arg != "-" and actions.count(arg) == 0:
print("Unknown action, '" + arg + "'")
print("Actions are listed with command, list_actions")
print("Use '-' for a random action")
return
# Replace random actions
for i in range(len(args)):
if args[i] == "-":
args[i] = actions[random.randrange(self.env.action_space.n)]
# Now do the actions
cum_reward = 0
actions_taken = []
with Timer() as timer:
for a in args:
print(f"Action {a}")
index = actions.index(a)
observation, reward, done, info = self.env.step(index)
# Print the observation, if available.
if self.env.observation_space and observation is not None:
print(
f"Observation: {self.env.observation_space_spec.to_string(observation)}"
)
# Print the reward, if available.
if self.env.reward_space and reward is not None:
print(f"Reward: {reward:.6f}")
cum_reward += reward
# Append the history element
hist = ActionHistoryElement(
self.env.action_space.names[index],
index,
observation,
reward,
done,
info,
)
self.stack.append(hist)
if hist.has_no_effect():
print("No effect")
actions_taken.append(a)
if hist.done:
print("Episode ended by environment: ", info["error_details"])
print("No further actions will be possible")
break
print(
f"Actions {' '.join(actions_taken)} in {timer} with reward {cum_reward}.",
flush=True,
)
def rerun_stack(self, check_rewards=True):
"""Rerun all the actions on the stack."""
self.env.reset()
old_stack = self.stack
self.stack = []
for i, old_hist in enumerate(old_stack):
observation, reward, done, info = self.env.step(old_hist.action_index)
hist = ActionHistoryElement(
old_hist.action_name,
old_hist.action_index,
observation,
reward,
done,
info,
)
self.stack.append(hist)
if check_rewards and reward != old_hist.reward:
print(
f"Warning previous reward at {i}: {hist.action_name} was {hist.reward:.6f} now {reward:.6f}"
)
def do_hill_climb(self, arg):
"""Do some steps of hill climbing.
A random action is taken, but only accepted if it has a positive reward.
An argument, if given, should be the number of steps to take.
The search will try to improve the default reward. Please call set_default_reward if needed.
"""
if not self.env.reward_space:
print("No default reward set. Call set_default_reward")
return
try:
num_steps = max(1, int(arg))
except ValueError:
num_steps = 1
num_accepted = 0
cum_reward = 0
with Timer() as timer:
for i in range(num_steps):
index = random.randrange(self.env.action_space.n)
action = self.env.action_space.names[index]
observation, reward, done, info = self.env.step(index)
accept = not done and (reward is not None) and (reward > 0)
if accept:
# Append the history element
hist = ActionHistoryElement(
action, index, observation, reward, done, info
)
self.stack.append(hist)
num_accepted += 1
cum_reward += reward
else:
# Basically undo
self.rerun_stack()
print(
f"Step: {i+1} Action: {action} Reward: {reward:.6f} Accept: {accept}"
)
if done:
print("Episode ended by environment: ", info["error_details"])
print(
f"Hill climb complete in {timer}. Accepted {num_accepted} of {num_steps} steps for total reward of {cum_reward}."
)
def get_action_rewards(self):
"""Get all the rewards for the possible actions at this point"""
items = []
for index, action in enumerate(self.env.action_space.names):
self.rerun_stack()
observation, reward, done, info = self.env.step(index)
hist = ActionHistoryElement(action, index, observation, reward, done, info)
items.append(hist)
print(f"Action: {action} Reward: {reward:.6f}")
self.rerun_stack()
items.sort(key=lambda h: h.reward, reverse=True)
return items
def do_try_all_actions(self, args):
"""Tries all actions from this position and reports the results in sorted order by reward"""
if not self.env.reward_space:
print("No default reward set. Call set_default_reward")
return
with Timer("Got actions"):
items = self.get_action_rewards()
def row(item):
return (
item.action_name,
item.has_effect(),
item.done,
f"{item.reward:.6f}",
)
rows = [row(item) for item in items]
headers = ["Action", "Effect", "Done", "Reward"]
print(tabulate(rows, headers=headers, tablefmt="presto"))
def do_greedy(self, arg):
"""Do some greedy steps.
All actions are tried and the one with the biggest positive reward is accepted.
An argument, if given, should be the number of steps to take.
The search will try to improve the default reward. Please call set_default_reward if needed.
"""
if not self.env.reward_space:
print("No default reward set. Call set_default_reward")
return
try:
num_steps = max(1, int(arg))
except ValueError:
num_steps = 1
with Timer() as timer:
for i in range(num_steps):
best = self.get_action_rewards()[0]
if (not best.done) and (best.reward is not None) and (best.reward > 0):
self.env.step(best.action_index)
self.stack.append(best)
print(
f"Step: {i+1} Selected action: {best.action_name} Reward: {best.reward:.6f}"
)
else:
print(f"Step: {i+1} Selected no action.")
if i + 1 < num_steps:
print("Greedy search stopping early.")
break
print(f"Greedy {i+1} steps in {timer}")
def do_list_observations(self, arg):
"""List the available observations"""
print(", ".join(self.observations))
def complete_observation(self, text, line, begidx, endidx):
"""Complete the observation argument"""
return self.simple_complete(text, self.observations)
def do_observation(self, arg):
"""Show an observation value
observation <name> - show the named observation
The name should come from the list of observations printed by the command list_observations.
Tab completion will be used if available.
"""
if arg == "" and self.env.observation_space:
arg = self.env.observation_space_spec.id
if self.observations.count(arg):
with Timer() as timer:
value = self.env.observation[arg]
print(self.env.observation.spaces[arg].to_string(value))
print(f"Observation {arg} in {timer}")
else:
print("Unknown observation, '" + arg + "'")
print("Observations are listed with command, list_observations")
def complete_set_default_observation(self, text, line, begidx, endidx):
"""Complete the set_default_observation argument"""
return self.simple_complete(text, self.observations)
def do_set_default_observation(self, arg):
"""Set the default observation space
set_default_observation <name> - set the named observation
The name should come from the list of observations printed by the command list_observations.
Tab completion will be used if available.
With no argument it will set to None.
This command will rerun the actions on the stack.
"""
arg = arg.strip()
if not arg or self.observations.count(arg):
with Timer() as timer:
self.env.observation_space = arg if arg else None
self.rerun_stack(check_rewards=False)
print(f"Observation {arg} in {timer}")
else:
print("Unknown observation, '" + (arg if arg else "None") + "'")
print("Observations are listed with command, list_observations")
def do_list_rewards(self, arg):
"""List the available rewards"""
print(", ".join(self.rewards))
def complete_reward(self, text, line, begidx, endidx):
"""Complete the reward argument"""
return self.simple_complete(text, self.rewards)
def do_reward(self, arg):
"""Show an reward value
reward <name> - show the named reward
The name should come from the list of rewards printed by the command list_rewards.
Tab completion will be used if available.
"""
if arg == "" and self.env.reward_space:
arg = self.env.reward_space.name
        if arg in self.rewards:
with Timer(f"Reward {arg}"):
print(f"{self.env.reward[arg]:.6f}")
else:
print(f"Unknown reward, '{arg}'")
print("Rewards are listed with command, list_rewards")
def complete_set_default_reward(self, text, line, begidx, endidx):
"""Complete the set_default_reward argument"""
return self.simple_complete(text, self.rewards)
def do_set_default_reward(self, arg):
"""Set the default reward space
set_default_reward <name> - set the named reward
The name should come from the list of rewards printed by the command list_rewards.
Tab completion will be used if available.
With no argument it will set to None.
This command will rerun the actions on the stack.
"""
arg = arg.strip()
        if not arg or arg in self.rewards:
with Timer(f"Reward {arg}"):
self.env.reward_space = arg if arg else None
self.rerun_stack(check_rewards=False)
else:
print("Unknown reward, '" + (arg if arg else "None") + "'")
print("Rewards are listed with command, list_rewards")
def do_commandline(self, arg):
"""Show the command line equivalent of the actions taken so far"""
print("$", self.env.action_space.to_string(self.env.actions), flush=True)
def do_stack(self, arg):
"""Show the environments on the stack. The current environment is the first shown."""
rows = []
total = 0
for i, hist in enumerate(self.stack):
name = hist.action_name
effect = hist.has_effect()
done = hist.done
reward = f"{hist.reward:.6f}" if hist.reward is not None else "-"
total += hist.reward or 0
row = (i + 1, name, effect, done, reward, f"{total:.6f}")
rows.append(row)
rows.reverse()
rows.append((0, "<init>", False, False, 0, 0))
headers = ["Depth", "Action", "Effect", "Done", "Reward", "Cumulative Reward"]
print(tabulate(rows, headers=headers, tablefmt="presto"))
def do_simplify_stack(self, arg):
"""Simplify the stack
        There may be many actions on the stack which have no effect or created a negative reward.
        This command makes a basic attempt to remove them. It reruns the stack, keeping only the
        actions which appeared to have an effect and a positive reward. If the reward is None,
        e.g. because no default reward was set, then it only checks whether the action had an effect.
        Note that the new rewards are not checked, so there may be odd effects: an action that is
        removed because of its negative reward may have been necessary for a later action to gain a
        positive reward. This means you might see non-positive rewards on the stack afterwards.
"""
self.env.reset()
old_stack = self.stack
self.stack = []
for i, old_hist in enumerate(old_stack):
if old_hist.has_effect() and (
old_hist.reward is None or old_hist.reward > 0
):
observation, reward, done, info = self.env.step(old_hist.action_index)
hist = ActionHistoryElement(
old_hist.action_name,
old_hist.action_index,
observation,
reward,
done,
info,
)
self.stack.append(hist)
                if old_hist.reward is not None and reward != old_hist.reward:
                    print(
                        f"Warning: previous reward at {i}: {hist.action_name} was {old_hist.reward:.6f}, now {reward:.6f}"
                    )
def do_reset(self, arg):
"""Clear the stack of any actions and reset"""
self.stack.clear()
with Timer("Reset"):
self.env.reset()
self.set_prompt()
def do_back(self, arg):
"""Undo the last action, if any"""
if self.stack:
top = self.stack.pop()
with Timer(f"Undid {top.action_name}"):
self.rerun_stack()
else:
print("No actions to undo")
def do_exit(self, arg):
"""Exit"""
print("Exiting")
return True
def do_breakpoint(self, arg):
"""Enter the debugger.
If you suddenly want to do something funky with self.env, or the self.stack, this is your way in!
"""
breakpoint()
def default(self, line):
"""Override default to quit on end of file"""
if line == "EOF":
return self.do_exit(line)
return super().default(line)
def main(argv):
"""Main entry point."""
argv = FLAGS(argv)
if len(argv) != 1:
raise app.UsageError(f"Unknown command line arguments: {argv[1:]}")
with Timer("Initialized environment"):
benchmark = benchmark_from_flags()
env = env_from_flags(benchmark)
shell = CompilerGymShell(env)
shell.cmdloop()
if __name__ == "__main__":
try:
main(sys.argv)
except app.UsageError as err:
print("Usage Error: " + str(err))
|
CompilerGym-development
|
compiler_gym/bin/manual_env.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Replay the best solution found from a random search.
.. code-block::
$ python -m compiler_gym.bin.random_replay --env=llvm-ic-v0 --output_dir=/path/to/logs
Given a set of :mod:`compiler_gym.bin.random_search` logs generated from a
prior search, replay the best sequence of actions found and record the
incremental reward of each action.
"""
from pathlib import Path
from absl import app, flags
import compiler_gym.util.flags.output_dir # noqa Flag definition.
from compiler_gym.random_search import replay_actions_from_logs
from compiler_gym.util.flags.benchmark_from_flags import benchmark_from_flags
from compiler_gym.util.flags.env_from_flags import env_from_flags
FLAGS = flags.FLAGS
def main(argv):
"""Main entry point."""
argv = FLAGS(argv)
if len(argv) != 1:
raise app.UsageError(f"Unknown command line arguments: {argv[1:]}")
output_dir = Path(FLAGS.output_dir).expanduser().resolve().absolute()
assert (
output_dir / "random_search.json"
).is_file(), f"Invalid --output_dir: {output_dir}"
with env_from_flags() as env:
benchmark = benchmark_from_flags()
replay_actions_from_logs(env, output_dir, benchmark=benchmark)
if __name__ == "__main__":
app.run(main)
|
CompilerGym-development
|
compiler_gym/bin/random_replay.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Evaluate the logs of a random run."""
import json
from pathlib import Path
import humanize
import numpy as np
from absl import app, flags
import compiler_gym.util.flags.output_dir # noqa Flag definition.
from compiler_gym.random_search import RandomSearchProgressLogEntry
from compiler_gym.util.statistics import geometric_mean
from compiler_gym.util.tabulate import tabulate
FLAGS = flags.FLAGS
def eval_logs(outdir: Path) -> None:
rows = []
totals = {
"instructions": 0,
"init_reward": [],
"max_reward": [],
"attempts": 0,
"time": 0,
"actions": 0,
}
for results_dir in sorted(outdir.iterdir()):
benchmark = results_dir.name
progress_path = results_dir / "random_search_progress.csv"
meta_path = results_dir / "random_search.json"
if (
not results_dir.is_dir()
or not progress_path.is_file()
or not meta_path.is_file()
):
continue
with open(meta_path, "rb") as f:
meta = json.load(f)
with open(str(progress_path)) as f:
final_line = f.readlines()[-1]
best = RandomSearchProgressLogEntry.from_csv(final_line)
totals["instructions"] += meta["num_instructions"]
totals["init_reward"].append(meta["init_reward"])
totals["max_reward"].append(best.reward)
totals["attempts"] += best.total_episode_count
totals["time"] += best.runtime_seconds
totals["actions"] += best.num_passes
rows.append(
(
benchmark,
humanize.intcomma(meta["num_instructions"]),
f"{meta['init_reward']:.4f}",
f"{best.reward:.4f}",
(
f"{humanize.intcomma(best.total_episode_count)} attempts "
f"in {humanize.naturaldelta(best.runtime_seconds)}"
),
humanize.intcomma(best.num_passes),
)
)
row_count = len(totals["init_reward"])
rows.append(
(
"Geomean",
"",
f"{geometric_mean(totals['init_reward']):.4f}",
f"{geometric_mean(totals['max_reward']):.4f}",
"",
"",
)
)
rows.append(
(
"Average",
humanize.intcomma(int(totals["instructions"] / row_count)),
f"{np.array(totals['init_reward']).mean():.4f}",
f"{np.array(totals['max_reward']).mean():.4f}",
(
f"{humanize.intcomma(int(totals['attempts'] / row_count))} attempts "
f"in {humanize.naturaldelta(totals['time'] / row_count)}"
),
humanize.intcomma(int(totals["actions"] / row_count)),
)
)
print(
tabulate(
rows,
headers=(
"Benchmark",
"#. instructions",
"Init Reward",
"Max Reward",
"Found after",
"#. actions",
),
)
)
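# Hedged aside: geometric_mean (from compiler_gym.util.statistics) is the
# standard geometric mean, e.g. geometric_mean([1.0, 4.0]) == 2.0, which is the
# conventional way to aggregate per-benchmark reward ratios.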
def main(argv):
"""Main entry point."""
argv = FLAGS(argv)
if len(argv) != 1:
raise app.UsageError(f"Unknown command line arguments: {argv[1:]}")
output_dir = Path(FLAGS.output_dir).expanduser().resolve().absolute()
assert output_dir.is_dir(), f"Directory not found: {output_dir}"
eval_logs(output_dir)
if __name__ == "__main__":
app.run(main)
|
CompilerGym-development
|
compiler_gym/bin/random_eval.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from concurrent.futures import as_completed
from pathlib import Path
from typing import Callable, Iterable, List, NamedTuple, Optional, Union
import compiler_gym.errors
from compiler_gym.datasets.uri import BenchmarkUri
from compiler_gym.errors import ValidationError
from compiler_gym.service.proto import Benchmark as BenchmarkProto
from compiler_gym.service.proto import File
from compiler_gym.util import thread_pool
from compiler_gym.util.decorators import frozen_class, memoized_property
# A validation callback is a function that takes a single CompilerEnv instance
# as its argument and returns an iterable sequence of zero or more
# ValidationError tuples.
ValidationCallback = Callable[["CompilerEnv"], Iterable[ValidationError]] # noqa: F821
class BenchmarkSource(NamedTuple):
"""A source file that is used to generate a benchmark. A benchmark may
comprise many source files.
.. warning::
The :class:`BenchmarkSource <compiler_gym.datasets.BenchmarkSource>`
class is new and is likely to change in the future.
"""
filename: str
"""The name of the file."""
contents: bytes
"""The contents of the file as a byte array."""
def __repr__(self) -> str:
return str(self.filename)
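# A minimal illustrative construction (the filename and contents are made up):
#
#   src = BenchmarkSource(filename="a.c", contents=b"int main() { return 0; }")
#   assert str(src) == "a.c"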
@frozen_class
class Benchmark:
"""A benchmark represents a particular program that is being compiled.
A benchmark is a program that can be used by a :class:`CompilerEnv
<compiler_gym.envs.CompilerEnv>` as a program to optimize. A benchmark
comprises the data that is fed into the compiler, identified by a URI.
Benchmarks are not normally instantiated directly. Instead, benchmarks are
instantiated using :meth:`env.datasets.benchmark(uri)
<compiler_gym.datasets.Datasets.benchmark>`:
>>> env.datasets.benchmark("benchmark://npb-v0/20")
benchmark://npb-v0/20
The available benchmark URIs can be queried using
:meth:`env.datasets.benchmark_uris()
<compiler_gym.datasets.Datasets.benchmark_uris>`.
>>> next(env.datasets.benchmark_uris())
'benchmark://cbench-v1/adpcm'
Compiler environments may provide additional helper functions for generating
benchmarks, such as :meth:`env.make_benchmark()
<compiler_gym.envs.LlvmEnv.make_benchmark>` for LLVM.
A Benchmark instance wraps an instance of the :code:`Benchmark` protocol
buffer from the `RPC interface
<https://github.com/facebookresearch/CompilerGym/blob/development/compiler_gym/service/proto/compiler_gym_service.proto>`_
with additional functionality. The data underlying benchmarks should be
considered immutable. New attributes cannot be assigned to Benchmark
instances.
The benchmark for an environment can be set during :meth:`env.reset()
<compiler_gym.envs.CompilerEnv.reset>`. The currently active benchmark can
be queried using :attr:`env.benchmark
<compiler_gym.envs.CompilerEnv.benchmark>`:
>>> env = gym.make("llvm-v0")
>>> env.reset(benchmark="benchmark://cbench-v1/crc32")
>>> env.benchmark
benchmark://cbench-v1/crc32
"""
def __init__(
self,
proto: BenchmarkProto,
validation_callbacks: Optional[List[ValidationCallback]] = None,
sources: Optional[List[BenchmarkSource]] = None,
):
self._proto = proto
self._validation_callbacks = validation_callbacks or []
self._sources = list(sources or [])
def __repr__(self) -> str:
return str(self.uri)
def __hash__(self) -> int:
return hash(self.uri)
@property
def uri(self) -> BenchmarkUri:
"""The URI of the benchmark.
        Benchmark URIs should be unique, that is, two URIs with the same value
        should resolve to the same benchmark. However, a URI does not uniquely
        describe a benchmark: multiple identical benchmarks could have
        different URIs.
        :return: A URI string.
        :type: string
"""
return BenchmarkUri.from_string(self._proto.uri)
@property
def proto(self) -> BenchmarkProto:
"""The protocol buffer representing the benchmark.
:return: A Benchmark message.
:type: :code:`Benchmark`
"""
return self._proto
@property
def sources(self) -> Iterable[BenchmarkSource]:
"""The original source code used to produce this benchmark, as a list of
:class:`BenchmarkSource <compiler_gym.datasets.BenchmarkSource>`
instances.
:return: A sequence of source files.
:type: :code:`Iterable[BenchmarkSource]`
.. warning::
The :meth:`Benchmark.sources
<compiler_gym.datasets.Benchmark.sources>` property is new and is
likely to change in the future.
"""
return (BenchmarkSource(*x) for x in self._sources)
def is_validatable(self) -> bool:
"""Whether the benchmark has any validation callbacks registered.
:return: :code:`True` if the benchmark has at least one validation
callback.
"""
return self._validation_callbacks != []
def validate(self, env: "CompilerEnv") -> List[ValidationError]: # noqa: F821
"""Run the validation callbacks and return any errors.
If no errors are returned, validation has succeeded:
>>> benchmark.validate(env)
[]
If an error occurs, a :class:`ValidationError
<compiler_gym.ValidationError>` tuple will describe the type of the
error, and optionally contain other data:
>>> benchmark.validate(env)
[ValidationError(type="RuntimeError")]
Multiple :class:`ValidationError <compiler_gym.ValidationError>` errors
may be returned to indicate multiple errors.
This is a synchronous version of :meth:`ivalidate()
<compiler_gym.datasets.Benchmark.ivalidate>` that blocks until all
results are ready:
>>> benchmark.validate(env) == list(benchmark.ivalidate(env))
True
:param env: The :class:`CompilerEnv <compiler_gym.envs.CompilerEnv>`
instance that is being validated.
:return: A list of zero or more :class:`ValidationError
<compiler_gym.ValidationError>` tuples that occurred during
validation.
"""
return list(self.ivalidate(env))
def ivalidate(self, env: "CompilerEnv") -> Iterable[ValidationError]: # noqa: F821
"""Run the validation callbacks and return a generator of errors.
This is an asynchronous version of :meth:`validate()
<compiler_gym.datasets.Benchmark.validate>` that returns immediately.
        :param env: A :class:`CompilerEnv <compiler_gym.envs.CompilerEnv>`
            instance to validate.
:return: A generator of :class:`ValidationError
<compiler_gym.ValidationError>` tuples that occur during validation.
"""
executor = thread_pool.get_thread_pool_executor()
futures = (
executor.submit(validator, env) for validator in self.validation_callbacks()
)
for future in as_completed(futures):
result: Iterable[ValidationError] = future.result()
if result:
yield from result
def validation_callbacks(
self,
) -> List[ValidationCallback]:
"""Return the list of registered validation callbacks.
:return: A list of callables. See :meth:`add_validation_callback()
<compiler_gym.datasets.Benchmark.add_validation_callback>`.
"""
return self._validation_callbacks
def add_source(self, source: BenchmarkSource) -> None:
"""Register a new source file for this benchmark.
:param source: The :class:`BenchmarkSource
<compiler_gym.datasets.BenchmarkSource>` to register.
"""
self._sources.append(source)
def add_validation_callback(
self,
validation_callback: ValidationCallback,
) -> None:
"""Register a new validation callback that will be executed on
:meth:`validate() <compiler_gym.datasets.Benchmark.validate>`.
:param validation_callback: A callback that accepts a single
:class:`CompilerEnv <compiler_gym.envs.CompilerEnv>` argument and
returns an iterable sequence of zero or more :class:`ValidationError
<compiler_gym.ValidationError>` tuples. Validation callbacks must be
thread safe and must not modify the environment.
"""
self._validation_callbacks.append(validation_callback)
def write_sources_to_directory(self, directory: Path) -> int:
"""Write the source files for this benchmark to the given directory.
This writes each of the :attr:`benchmark.sources
<compiler_gym.datasets.Benchmark.sources>` files to disk.
If the benchmark has no sources, no files are written.
:param directory: The directory to write results to. If it does not
exist, it is created.
:return: The number of files written.
"""
directory = Path(directory)
directory.mkdir(exist_ok=True, parents=True)
uniq_paths = set()
for filename, contents in self.sources:
path = directory / filename
uniq_paths.add(path)
path.parent.mkdir(exist_ok=True, parents=True)
with open(path, "wb") as f:
f.write(contents)
return len(uniq_paths)
@classmethod
def from_file(cls, uri: Union[str, BenchmarkUri], path: Path):
"""Construct a benchmark from a file.
:param uri: The URI of the benchmark.
:param path: A filesystem path.
:raise FileNotFoundError: If the path does not exist.
:return: A :class:`Benchmark <compiler_gym.datasets.Benchmark>`
instance.
"""
path = Path(path)
if not path.is_file():
raise FileNotFoundError(path)
# Read the file data into memory and embed it inside the File protocol
# buffer. An alternative would be to simply embed the file path in the
# File.uri field, but this won't work for distributed services which
# don't share a filesystem.
with open(path, "rb") as f:
contents = f.read()
return cls(proto=BenchmarkProto(uri=str(uri), program=File(contents=contents)))
@classmethod
def from_file_contents(cls, uri: Union[str, BenchmarkUri], data: bytes):
"""Construct a benchmark from raw data.
:param uri: The URI of the benchmark.
:param data: An array of bytes that will be passed to the compiler
service.
"""
return cls(proto=BenchmarkProto(uri=str(uri), program=File(contents=data)))
def __eq__(self, other: Union[str, "Benchmark"]):
if isinstance(other, Benchmark):
return self.uri == other.uri
return self.uri == other
def __lt__(self, other: Union[str, "Benchmark"]):
if isinstance(other, Benchmark):
return self.uri < other.uri
return self.uri < other
def __le__(self, other: Union[str, "Benchmark"]):
return self < other or self == other
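# A hedged usage sketch of the validation API. The URI, program bytes, and the
# callback below are illustrative only:
#
#   bench = Benchmark.from_file_contents(
#       "benchmark://example-v0/foo", b"<program bytes>"
#   )
#   def check_nonempty(env):
#       # Yield zero or more errors; yielding nothing means validation passed.
#       if not env.benchmark.proto.program.contents:
#           yield ValidationError(type="EmptyProgram")
#   bench.add_validation_callback(check_nonempty)
#   assert bench.is_validatable()
#   errors = bench.validate(env)  # [] on success.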
# Deprecated since v0.2.4.
# This alias is kept for backwards compatibility and will be removed in a future
# release. Please use errors from `compiler_gym.errors`.
BenchmarkInitError = compiler_gym.errors.BenchmarkInitError
class BenchmarkWithSource(Benchmark):
"""A benchmark which has a single source file."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._src_name = None
self._src_path = None
@classmethod
def create(
cls, uri: str, input_path: Path, src_name: str, src_path: Path
) -> Benchmark:
"""Create a benchmark from paths."""
benchmark = cls.from_file(uri, input_path)
benchmark._src_name = src_name # pylint: disable=protected-access
benchmark._src_path = src_path # pylint: disable=protected-access
return benchmark
@memoized_property
def sources( # pylint: disable=invalid-overridden-method
self,
) -> Iterable[BenchmarkSource]:
with open(self._src_path, "rb") as f:
return [
BenchmarkSource(filename=self._src_name, contents=f.read()),
]
@property
def source(self) -> str:
"""Return the single source file contents as a string."""
return list(self.sources)[0].contents.decode("utf-8")
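# Illustrative use of BenchmarkWithSource (all paths are placeholders):
#
#   bm = BenchmarkWithSource.create(
#       uri="benchmark://example-v0/foo",
#       input_path=Path("/tmp/foo.bc"),
#       src_name="foo.c",
#       src_path=Path("/tmp/foo.c"),
#   )
#   print(bm.source)  # The contents of /tmp/foo.c, decoded as UTF-8.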
|
CompilerGym-development
|
compiler_gym/datasets/benchmark.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import bz2
import gzip
import io
import logging
import shutil
import tarfile
from threading import Lock
from typing import Iterable, List, Optional
from fasteners import InterProcessLock
from compiler_gym.datasets.files_dataset import FilesDataset
from compiler_gym.util.decorators import memoized_property
from compiler_gym.util.download import download
from compiler_gym.util.filesystem import atomic_file_write
logger = logging.getLogger(__name__)
# Module-level locks that ensure exclusive access to install routines across
# threads. Note that these locks are shared across all TarDataset instances. We
# don't use per-dataset locks as locks cannot be pickled.
_TAR_INSTALL_LOCK = Lock()
_TAR_MANIFEST_INSTALL_LOCK = Lock()
class TarDataset(FilesDataset):
"""A dataset comprising a files tree stored in a tar archive.
This extends the :class:`FilesDataset <compiler_gym.datasets.FilesDataset>`
class by adding support for compressed archives of files. The archive is
downloaded and unpacked on-demand.
"""
def __init__(
self,
tar_urls: List[str],
tar_sha256: Optional[str] = None,
tar_compression: str = "bz2",
strip_prefix: str = "",
**dataset_args,
):
"""Constructor.
:param tar_urls: A list of redundant URLS to download the tar archive from.
:param tar_sha256: The SHA256 checksum of the downloaded tar archive.
:param tar_compression: The tar archive compression type. One of
{"bz2", "gz"}.
:param strip_prefix: An optional path prefix to strip. Only files that
match this path prefix will be used as benchmarks.
:param dataset_args: See :meth:`FilesDataset.__init__()
<compiler_gym.datasets.FilesDataset.__init__>`.
"""
super().__init__(
dataset_root=None, # Set below once site_data_path is resolved.
**dataset_args,
)
self.dataset_root = self.site_data_path / "contents" / strip_prefix
self.tar_urls = tar_urls
self.tar_sha256 = tar_sha256
self.tar_compression = tar_compression
self.strip_prefix = strip_prefix
self._tar_extracted_marker = self.site_data_path / ".extracted"
self._tar_lockfile = self.site_data_path / ".install_lock"
@property
def installed(self) -> bool:
return self._tar_extracted_marker.is_file()
def install(self) -> None:
super().install()
if self.installed:
return
# Thread-level and process-level locks to prevent races.
with _TAR_INSTALL_LOCK, InterProcessLock(self._tar_lockfile):
# Repeat the check to see if we have already installed the
# dataset now that we have acquired the lock.
if self.installed:
return
# Remove any partially-completed prior extraction.
shutil.rmtree(self.site_data_path / "contents", ignore_errors=True)
logger.warning(
"Installing the %s dataset. This may take a few moments ...", self.name
)
tar_data = io.BytesIO(download(self.tar_urls, self.tar_sha256))
logger.info("Unpacking %s dataset to %s", self.name, self.site_data_path)
with tarfile.open(
fileobj=tar_data, mode=f"r:{self.tar_compression}"
) as arc:
arc.extractall(str(self.site_data_path / "contents"))
# We're done. The last thing we do is create the marker file to
# signal to any other install() invocations that the dataset is
# ready.
self._tar_extracted_marker.touch()
if self.strip_prefix and not self.dataset_root.is_dir():
raise FileNotFoundError(
f"Directory prefix '{self.strip_prefix}' not found in dataset '{self.name}'"
)
class TarDatasetWithManifest(TarDataset):
"""A tarball-based dataset that reads the benchmark URIs from a separate
manifest file.
A manifest file is a plain text file containing a list of benchmark names,
one per line, and is shipped separately from the tar file. The idea is to
allow the list of benchmark URIs to be enumerated in a more lightweight
manner than downloading and unpacking the entire dataset. It does this by
downloading and unpacking only the manifest to iterate over the URIs.
The manifest file is assumed to be correct and is not validated.
"""
def __init__(
self,
manifest_urls: List[str],
manifest_sha256: str,
manifest_compression: str = "bz2",
**dataset_args,
):
"""Constructor.
:param manifest_urls: A list of redundant URLS to download the
compressed text file containing a list of benchmark URI suffixes,
one per line.
:param manifest_sha256: The sha256 checksum of the compressed manifest
file.
:param manifest_compression: The manifest compression type. One of
{"bz2", "gz"}.
:param dataset_args: See :meth:`TarDataset.__init__()
<compiler_gym.datasets.TarDataset.__init__>`.
"""
super().__init__(**dataset_args)
self.manifest_urls = manifest_urls
self.manifest_sha256 = manifest_sha256
self.manifest_compression = manifest_compression
self._manifest_path = self.site_data_path / f"manifest-{manifest_sha256}.txt"
self._manifest_lockfile = self.site_data_path / ".manifest_lock"
def _read_manifest(self, manifest_data: str) -> List[str]:
"""Read the manifest data into a list of URIs. Does not validate the
manifest contents.
"""
lines = manifest_data.rstrip().split("\n")
return [f"{self.name}/{line}" for line in lines]
def _read_manifest_file(self) -> List[str]:
"""Read the benchmark URIs from an on-disk manifest file.
Does not check that the manifest file exists.
"""
with open(self._manifest_path, encoding="utf-8") as f:
uris = self._read_manifest(f.read())
logger.debug("Read %s manifest, %d entries", self.name, len(uris))
return uris
@memoized_property
def _benchmark_uris(self) -> List[str]:
"""Fetch or download the URI list."""
if self._manifest_path.is_file():
return self._read_manifest_file()
# Thread-level and process-level locks to prevent races.
with _TAR_MANIFEST_INSTALL_LOCK, InterProcessLock(self._manifest_lockfile):
# Now that we have acquired the lock, repeat the check, since
# another thread may have downloaded the manifest.
if self._manifest_path.is_file():
return self._read_manifest_file()
# Determine how to decompress the manifest data.
decompressor = {
"bz2": lambda compressed_data: bz2.BZ2File(compressed_data),
"gz": lambda compressed_data: gzip.GzipFile(compressed_data),
}.get(self.manifest_compression, None)
if not decompressor:
raise TypeError(
f"Unknown manifest compression: {self.manifest_compression}"
)
# Decompress the manifest data.
logger.debug("Downloading %s manifest", self.name)
manifest_data = io.BytesIO(
download(self.manifest_urls, self.manifest_sha256)
)
with decompressor(manifest_data) as f:
manifest_data = f.read()
# Although we have exclusive-execution locks, we still need to
# create the manifest atomically to prevent calls to _benchmark_uris
# racing to read an incompletely written manifest.
with atomic_file_write(self._manifest_path, fileobj=True) as f:
f.write(manifest_data)
uris = self._read_manifest(manifest_data.decode("utf-8"))
logger.debug("Downloaded %s manifest, %d entries", self.name, len(uris))
return uris
@memoized_property
def size(self) -> int:
return len(self._benchmark_uris)
def benchmark_uris(self) -> Iterable[str]:
yield from iter(self._benchmark_uris)
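# A hedged sketch of declaring a tar-based dataset. Every name, URL, checksum,
# and path below is a placeholder, not a real artifact:
#
#   dataset = TarDataset(
#       name="benchmark://example-v0",
#       description="Example programs",
#       license="MIT",
#       site_data_base="/tmp/example_site_data",
#       tar_urls=["https://example.com/example-v0.tar.bz2"],
#       tar_sha256="<sha256 of the archive>",
#       tar_compression="bz2",
#       strip_prefix="example-v0/src",
#   )
#   dataset.install()  # Downloads and unpacks the archive on first use.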
|
CompilerGym-development
|
compiler_gym/datasets/tar_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from collections import deque
from pathlib import Path
from typing import Dict, Iterable, List, Optional, Set, TypeVar
import numpy as np
from compiler_gym.datasets.benchmark import Benchmark
from compiler_gym.datasets.dataset import Dataset
from compiler_gym.datasets.uri import BenchmarkUri
from compiler_gym.service.proto import Benchmark as BenchmarkProto
T = TypeVar("T")
def round_robin_iterables(iters: Iterable[Iterable[T]]) -> Iterable[T]:
"""Yield from the given iterators in round robin order."""
# Use a queue of iterators to iterate over. Repeatedly pop an iterator from
# the queue, yield the next value from it, then put it at the back of the
# queue. The iterator is discarded once exhausted.
iters = deque(iters)
while len(iters) > 1:
it = iters.popleft()
try:
yield next(it)
iters.append(it)
except StopIteration:
pass
    # Once we have only a single iterator left, yield from it directly rather
    # than continuing with the round robin.
if len(iters) == 1:
yield from iters.popleft()
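# An illustrative trace of the round robin order (not part of the module API):
#
#   >>> list(round_robin_iterables([iter("ab"), iter("cde"), iter("f")]))
#   ['a', 'c', 'f', 'b', 'd', 'e']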
class Datasets:
"""A collection of datasets.
This class provides a dictionary-like interface for indexing and iterating
over multiple :class:`Dataset <compiler_gym.datasets.Dataset>` objects.
Select a dataset by URI using:
>>> env.datasets["benchmark://cbench-v1"]
Check whether a dataset exists using:
>>> "benchmark://cbench-v1" in env.datasets
True
Or iterate over the datasets using:
>>> for dataset in env.datasets:
... print(dataset.name)
benchmark://cbench-v1
benchmark://github-v0
benchmark://npb-v0
To select a benchmark from the datasets, use :meth:`benchmark()`:
>>> env.datasets.benchmark("benchmark://a-v0/a")
Use the :meth:`benchmarks()` method to iterate over every benchmark in the
datasets in a stable round robin order:
>>> for benchmark in env.datasets.benchmarks():
... print(benchmark)
benchmark://cbench-v1/1
benchmark://github-v0/1
benchmark://npb-v0/1
benchmark://cbench-v1/2
...
If you want to exclude a dataset, delete it:
>>> del env.datasets["benchmark://b-v0"]
"""
def __init__(
self,
datasets: Iterable[Dataset],
):
self._datasets: Dict[str, Dataset] = {d.name: d for d in datasets}
self._visible_datasets: Set[str] = set(
name for name, dataset in self._datasets.items() if not dataset.deprecated
)
def datasets(self, with_deprecated: bool = False) -> Iterable[Dataset]:
"""Enumerate the datasets.
Dataset order is consistent across runs.
:param with_deprecated: If :code:`True`, include datasets that have been
marked as deprecated.
:return: An iterable sequence of :meth:`Dataset
<compiler_gym.datasets.Dataset>` instances.
"""
datasets = self._datasets.values()
if not with_deprecated:
datasets = (d for d in datasets if not d.deprecated)
yield from sorted(datasets, key=lambda d: (d.sort_order, d.name))
def __iter__(self) -> Iterable[Dataset]:
"""Iterate over the datasets.
Dataset order is consistent across runs.
Equivalent to :meth:`datasets.datasets()
<compiler_gym.datasets.Dataset.datasets>`, but without the ability to
iterate over the deprecated datasets.
If the number of benchmarks in any of the datasets is infinite
(:code:`len(dataset) == math.inf`), the iterable returned by this method
will continue indefinitely.
:return: An iterable sequence of :meth:`Dataset
<compiler_gym.datasets.Dataset>` instances.
"""
return self.datasets()
def dataset(self, dataset: str) -> Dataset:
"""Get a dataset.
Return the corresponding :meth:`Dataset
<compiler_gym.datasets.Dataset>`. Name lookup will succeed whether or
not the dataset is deprecated.
:param dataset: A dataset name.
:return: A :meth:`Dataset <compiler_gym.datasets.Dataset>` instance.
:raises LookupError: If :code:`dataset` is not found.
"""
return self.dataset_from_parsed_uri(BenchmarkUri.from_string(dataset))
def dataset_from_parsed_uri(self, uri: BenchmarkUri) -> Dataset:
"""Get a dataset.
Return the corresponding :meth:`Dataset
<compiler_gym.datasets.Dataset>`. Name lookup will succeed whether or
not the dataset is deprecated.
:param uri: A parsed URI.
:return: A :meth:`Dataset <compiler_gym.datasets.Dataset>` instance.
:raises LookupError: If :code:`dataset` is not found.
"""
key = self._dataset_key_from_uri(uri)
if key not in self._datasets:
raise LookupError(f"Dataset not found: {key}")
return self._datasets[key]
@staticmethod
def _dataset_key_from_uri(uri: BenchmarkUri) -> str:
if not (uri.scheme and uri.dataset):
raise LookupError(f"Invalid benchmark URI: '{uri}'")
return f"{uri.scheme}://{uri.dataset}"
def __getitem__(self, dataset: str) -> Dataset:
"""Lookup a dataset.
:param dataset: A dataset name.
:return: A :meth:`Dataset <compiler_gym.datasets.Dataset>` instance.
:raises LookupError: If :code:`dataset` is not found.
"""
return self.dataset(dataset)
def __setitem__(self, key: str, dataset: Dataset):
"""Add a dataset to the collection.
:param key: The name of the dataset.
:param dataset: The dataset to add.
"""
key = self._dataset_key_from_uri(BenchmarkUri.from_string(key))
self._datasets[key] = dataset
if not dataset.deprecated:
self._visible_datasets.add(key)
def __delitem__(self, dataset: str):
"""Remove a dataset from the collection.
        This does not affect any underlying storage used by the dataset. See
        :meth:`uninstall() <compiler_gym.datasets.Datasets.uninstall>` to clean
        up.
        :param dataset: The name of a dataset.
        :raises KeyError: If the dataset is not in the collection.
"""
key = self._dataset_key_from_uri(BenchmarkUri.from_string(dataset))
if key in self._visible_datasets:
self._visible_datasets.remove(key)
del self._datasets[key]
def __contains__(self, dataset: str) -> bool:
"""Returns whether the dataset is contained."""
try:
self.dataset(dataset)
return True
except LookupError:
return False
def benchmarks(self, with_deprecated: bool = False) -> Iterable[Benchmark]:
"""Enumerate the (possibly infinite) benchmarks lazily.
Benchmarks order is consistent across runs. One benchmark from each
dataset is returned in round robin order until all datasets have been
fully enumerated. The order of :meth:`benchmarks()
<compiler_gym.datasets.Datasets.benchmarks>` and :meth:`benchmark_uris()
<compiler_gym.datasets.Datasets.benchmark_uris>` is the same.
If the number of benchmarks in any of the datasets is infinite
(:code:`len(dataset) == math.inf`), the iterable returned by this method
will continue indefinitely.
:param with_deprecated: If :code:`True`, include benchmarks from
datasets that have been marked deprecated.
:return: An iterable sequence of :class:`Benchmark
<compiler_gym.datasets.Benchmark>` instances.
"""
return round_robin_iterables(
(d.benchmarks() for d in self.datasets(with_deprecated=with_deprecated))
)
def benchmark_uris(self, with_deprecated: bool = False) -> Iterable[str]:
"""Enumerate the (possibly infinite) benchmark URIs.
Benchmark URI order is consistent across runs. URIs from datasets are
returned in round robin order. The order of :meth:`benchmarks()
<compiler_gym.datasets.Datasets.benchmarks>` and :meth:`benchmark_uris()
<compiler_gym.datasets.Datasets.benchmark_uris>` is the same.
If the number of benchmarks in any of the datasets is infinite
(:code:`len(dataset) == math.inf`), the iterable returned by this method
will continue indefinitely.
:param with_deprecated: If :code:`True`, include benchmarks from
datasets that have been marked deprecated.
:return: An iterable sequence of benchmark URI strings.
"""
return round_robin_iterables(
(d.benchmark_uris() for d in self.datasets(with_deprecated=with_deprecated))
)
def benchmark(self, uri: str) -> Benchmark:
"""Select a benchmark.
Returns the corresponding :class:`Benchmark
<compiler_gym.datasets.Benchmark>`, regardless of whether the containing
dataset is installed or deprecated.
:param uri: The URI of the benchmark to return.
:return: A :class:`Benchmark <compiler_gym.datasets.Benchmark>`
instance.
"""
return self.benchmark_from_parsed_uri(BenchmarkUri.from_string(uri))
def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark:
"""Select a benchmark.
Returns the corresponding :class:`Benchmark
<compiler_gym.datasets.Benchmark>`, regardless of whether the containing
dataset is installed or deprecated.
:param uri: The parsed URI of the benchmark to return.
:return: A :class:`Benchmark <compiler_gym.datasets.Benchmark>`
instance.
"""
if uri.scheme == "proto":
path = Path(os.path.normpath(f"{uri.dataset}/{uri.path}"))
if not path.is_file():
raise FileNotFoundError(str(path))
proto = BenchmarkProto()
with open(path, "rb") as f:
proto.ParseFromString(f.read())
return Benchmark(proto=proto)
if uri.scheme == "file":
path = Path(os.path.normpath(f"{uri.dataset}/{uri.path}"))
if not path.is_file():
raise FileNotFoundError(str(path))
return Benchmark.from_file(uri=uri, path=path)
dataset = self.dataset_from_parsed_uri(uri)
return dataset.benchmark_from_parsed_uri(uri)
def random_benchmark(
self,
random_state: Optional[np.random.Generator] = None,
weighted: bool = False,
weights: Optional[Dict[str, float]] = None,
) -> Benchmark:
"""Select a benchmark randomly.
First, a dataset is selected randomly using
:code:`random_state.choice(list(datasets))`. Then the
:meth:`random_benchmark()
<compiler_gym.datasets.Dataset.random_benchmark>` method of the chosen
dataset is called to select a benchmark.
By default datasets are selected uniformly randomly. This means that
datasets with a small number of benchmarks will be overrepresented
compared to datasets with many benchmarks. To correct for this bias pass
the argument :code:`weighted=True`, which weights the dataset choice by
the number of benchmarks in each dataset, equivalent to:
>>> random.choices(datasets, weights=[len(p) for p in datasets])
Weighting the choice of datasets by their size means that datasets with
infinite sizes (such as random program generators) will be excluded from
sampling as their size is :code:`0`. To override the weights of datasets
pass a :code:`weights` mapping:
>>> env.datasets.random_benchmark(weighted=True, weights={
"benchmark://dataset-v0": 10,
"benchmark://another-dataset-v0": 555,
})
:param random_state: A random number generator. If not provided, a
default :code:`np.random.default_rng()` is used.
:param weighted: If set, weight the choice of dataset by the number of
benchmarks in each dataset, or the value specified in the
:code:`weights` mapping.
:param weights: An optional mapping from dataset URI to the weight to
use when :code:`weighted=True`. This overrides the default value of
using the dataset size.
:return: A :class:`Benchmark <compiler_gym.datasets.Benchmark>`
instance.
"""
random_state = random_state or np.random.default_rng()
datasets: List[str] = list(self._visible_datasets)
# Assume weighted=True if weights dictionary is specified.
weighted = weighted or weights
if weighted:
weights: Dict[str, float] = weights or {}
w: List[float] = np.array(
[weights.get(d, self[d].size) for d in datasets], dtype=float
)
dataset = random_state.choice(datasets, p=w / w.sum())
else:
dataset = random_state.choice(datasets)
return self[dataset].random_benchmark(random_state=random_state)
@property
def size(self) -> int:
return len(self._visible_datasets)
def __len__(self) -> int:
"""The number of datasets in the collection."""
return self.size
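# A hedged usage sketch, assuming `env` is an already-constructed CompilerGym
# environment:
#
#   rng = np.random.default_rng(seed=0)
#   bench = env.datasets.random_benchmark(rng, weighted=True)
#   env.reset(benchmark=bench)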
|
CompilerGym-development
|
compiler_gym/datasets/datasets.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Manage datasets of benchmarks."""
from compiler_gym.datasets.benchmark import (
Benchmark,
BenchmarkInitError,
BenchmarkSource,
)
from compiler_gym.datasets.dataset import (
Dataset,
DatasetInitError,
activate,
deactivate,
delete,
require,
)
from compiler_gym.datasets.datasets import Datasets
from compiler_gym.datasets.files_dataset import FilesDataset
from compiler_gym.datasets.tar_dataset import TarDataset, TarDatasetWithManifest
from compiler_gym.datasets.uri import BenchmarkUri
__all__ = [
"activate",
"Benchmark",
"BenchmarkInitError",
"BenchmarkSource",
"BenchmarkUri",
"Dataset",
"DatasetInitError",
"Datasets",
"deactivate",
"delete",
"FilesDataset",
"require",
"TarDataset",
"TarDatasetWithManifest",
]
|
CompilerGym-development
|
compiler_gym/datasets/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import re
import shutil
import warnings
from pathlib import Path
from typing import Dict, Iterable, Optional, Union
import numpy as np
# The "deprecated" name is used as a constructor argument to Dataset, so rename
# this import to prevent shadowing.
from deprecated.sphinx import deprecated as mark_deprecated
import compiler_gym.errors
from compiler_gym.datasets.benchmark import Benchmark
from compiler_gym.datasets.uri import BenchmarkUri
logger = logging.getLogger(__name__)
_DATASET_VERSION_PATTERN = r"[a-zA-Z0-9-_]+-v(?P<version>[0-9]+)"
_DATASET_VERSION_RE = re.compile(_DATASET_VERSION_PATTERN)
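# For example (illustrative): _DATASET_VERSION_RE.match("cbench-v1") captures a
# "version" group of "1", while an unversioned name such as "github" does not
# match and defaults to version 0 below.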
class Dataset:
"""A dataset is a collection of benchmarks.
The Dataset class has methods for installing and managing groups of
benchmarks, for listing the available benchmark URIs, and for instantiating
:class:`Benchmark <compiler_gym.datasets.Benchmark>` objects.
The Dataset class is an abstract base for implementing datasets. At a
minimum, subclasses must implement the :meth:`benchmark()
<compiler_gym.datasets.Dataset.benchmark>` and :meth:`benchmark_uris()
<compiler_gym.datasets.Dataset.benchmark_uris>` methods, and :meth:`size
<compiler_gym.datasets.Dataset.size>`. Other methods such as
:meth:`install() <compiler_gym.datasets.Dataset.install>` may be used where
helpful.
"""
def __init__(
self,
name: str,
description: str,
license: str, # pylint: disable=redefined-builtin
site_data_base: Optional[Path] = None,
benchmark_class=Benchmark,
references: Optional[Dict[str, str]] = None,
deprecated: Optional[str] = None,
sort_order: int = 0,
validatable: str = "No",
):
"""Constructor.
:param name: The name of the dataset, in the format:
:code:`scheme://name`.
:param description: A short human-readable description of the dataset.
:param license: The name of the dataset's license.
:param site_data_base: An optional directory that can be used by the
dataset to house the "site data", i.e. persistent files on disk. The
site data directory is a subdirectory of this :code:`site_data_base`
path, which can be shared by multiple datasets. If not provided, the
:attr:`dataset.site_data_path
<compiler_gym.datasets.Dataset.site_data_path>` attribute will raise
an error. Use :attr:`dataset.has_site_data
<compiler_gym.datasets.Dataset.has_site_data>` to check if a site
data path was set.
:param benchmark_class: The class to use when instantiating benchmarks.
It must have the same constructor signature as :class:`Benchmark
<compiler_gym.datasets.Benchmark>`.
:param references: A dictionary of useful named URLs for this dataset
containing extra information, download links, papers, etc.
        :param deprecated: Mark the dataset as deprecated and issue a warning
            when :meth:`install() <compiler_gym.datasets.Dataset.install>` is
            called, including the given message. Deprecated datasets are
            excluded from the :meth:`datasets()
            <compiler_gym.datasets.Datasets.dataset>` iterator by default.
:param sort_order: An optional numeric value that should be used to
order this dataset relative to others. Lowest value sorts first.
:param validatable: Whether the dataset is validatable. A validatable
dataset is one where the behavior of the benchmarks can be checked
by compiling the programs to binaries and executing them. If the
benchmarks crash, or are found to have different behavior, then
validation fails. This type of validation is used to check that the
compiler has not broken the semantics of the program. This value
takes a string and is used for documentation purposes only.
Suggested values are "Yes", "No", or "Partial".
        :raises ValueError: If :code:`name` does not match the expected format.
"""
self._name = name
uri = BenchmarkUri.from_string(name)
self._description = description
self._license = license
self._scheme = uri.scheme
match = _DATASET_VERSION_RE.match(uri.dataset)
self._version = int(match.group("version") if match else 0)
self._references = references or {}
self._deprecation_message = deprecated
self._validatable = validatable
self.sort_order = sort_order
self.benchmark_class = benchmark_class
# Set up the site data name.
if site_data_base:
self._site_data_path = (
Path(site_data_base).resolve() / uri.scheme / uri.dataset
)
def __repr__(self):
return self.name
@property
def name(self) -> str:
"""The name of the dataset.
:type: str
"""
return self._name
@property
def description(self) -> str:
"""A short human-readable description of the dataset.
:type: str
"""
return self._description
@property
def license(self) -> str:
"""The name of the license of the dataset.
:type: str
"""
return self._license
@property
@mark_deprecated(
version="0.2.2", reason="The `protocol` attribute has been renamed `scheme`"
)
def protocol(self) -> str:
"""The URI scheme that is used to identify benchmarks in this dataset.
:type: str
"""
return self.scheme
@property
def scheme(self) -> str:
"""The URI scheme that is used to identify benchmarks in this dataset.
:type: str
"""
return self._scheme
@property
def version(self) -> int:
"""The version tag for this dataset. Defaults to zero.
:type: int
"""
return self._version
@property
def references(self) -> Dict[str, str]:
"""A dictionary of useful named URLs for this dataset containing extra
information, download links, papers, etc.
For example:
>>> dataset.references
{'Paper': 'https://arxiv.org/pdf/1407.3487.pdf',
'Homepage': 'https://ctuning.org/wiki/index.php/CTools:CBench'}
:type: Dict[str, str]
"""
return self._references
@property
def deprecated(self) -> bool:
"""Whether the dataset is included in the iterable sequence of datasets
of a containing :class:`Datasets <compiler_gym.datasets.Datasets>`
collection.
:type: bool
"""
return self._deprecation_message is not None
@property
def validatable(self) -> str:
"""Whether the dataset is validatable. A validatable dataset is one
where the behavior of the benchmarks can be checked by compiling the
programs to binaries and executing them. If the benchmarks crash, or are
found to have different behavior, then validation fails. This type of
validation is used to check that the compiler has not broken the
semantics of the program.
This property takes a string and is used for documentation purposes
only. Suggested values are "Yes", "No", or "Partial".
:type: str
"""
return self._validatable
@property
def has_site_data(self) -> bool:
"""Return whether the dataset has a site data directory.
:type: bool
"""
return hasattr(self, "_site_data_path")
@property
def site_data_path(self) -> Path:
"""The filesystem path used to store persistent dataset files.
This directory may not exist.
:type: Path
:raises ValueError: If no site data path was specified at constructor
time.
"""
if not self.has_site_data:
raise ValueError(f"Dataset has no site data path: {self.name}")
return self._site_data_path
@property
def site_data_size_in_bytes(self) -> int:
"""The total size of the on-disk data used by this dataset.
:type: int
"""
if not self.has_site_data:
return 0
if not self.site_data_path.is_dir():
return 0
total_size = 0
for dirname, _, filenames in os.walk(self.site_data_path):
total_size += sum(
os.path.getsize(os.path.join(dirname, f)) for f in filenames
)
return total_size
@property
def size(self) -> int:
"""The number of benchmarks in the dataset.
If the number of benchmarks is unknown or unbounded, for example because
the dataset represents a program generator that can produce an infinite
number of programs, the value is 0.
:type: int
"""
return 0
def __len__(self) -> int:
"""The number of benchmarks in the dataset.
This is the same as :meth:`Dataset.size
<compiler_gym.datasets.Dataset.size>`:
>>> len(dataset) == dataset.size
True
If the number of benchmarks is unknown or unbounded, for example because
the dataset represents a program generator that can produce an infinite
number of programs, the value is 0.
:return: An integer.
"""
return self.size
def __eq__(self, other: Union["Dataset", str]) -> bool:
if isinstance(other, Dataset):
return self.name == other.name
return self.name == other
def __lt__(self, other: Union["Dataset", str]) -> bool:
if isinstance(other, Dataset):
return self.name < other.name
return self.name < other
def __le__(self, other: Union["Dataset", str]) -> bool:
return self < other or self == other
@property
def installed(self) -> bool:
"""Whether the dataset is installed locally. Installation occurs
automatically on first use, or by calling :meth:`install()
<compiler_gym.datasets.Dataset.install>`.
:type: bool
"""
return True
def install(self) -> None:
"""Install this dataset locally.
Implementing this method is optional. If implementing this method, you
must call :code:`super().install()` first.
This method should not perform redundant work. This method should first
detect whether any work needs to be done so that repeated calls to
:code:`install()` will complete quickly.
"""
if self.deprecated:
warnings.warn(
f"Dataset '{self.name}' is marked as deprecated. {self._deprecation_message}",
category=DeprecationWarning,
stacklevel=2,
)
def uninstall(self) -> None:
"""Remove any local data for this benchmark.
This method undoes the work of :meth:`install()
<compiler_gym.datasets.Dataset.install>`. The dataset can still be used
after calling this method.
"""
        if self.has_site_data and self.site_data_path.is_dir():
shutil.rmtree(self.site_data_path)
def benchmarks(self) -> Iterable[Benchmark]:
"""Enumerate the (possibly infinite) benchmarks lazily.
Iteration order is consistent across runs. The order of
:meth:`benchmarks() <compiler_gym.datasets.Dataset.benchmarks>` and
:meth:`benchmark_uris() <compiler_gym.datasets.Dataset.benchmark_uris>`
is the same.
If the number of benchmarks in the dataset is infinite
(:code:`len(dataset) == math.inf`), the iterable returned by this method
will continue indefinitely.
:return: An iterable sequence of :class:`Benchmark
<compiler_gym.datasets.Benchmark>` instances.
"""
# Default implementation. Subclasses may wish to provide an alternative
# implementation that is optimized to specific use cases.
yield from (self.benchmark(uri) for uri in self.benchmark_uris())
def __iter__(self) -> Iterable[Benchmark]:
"""Enumerate the (possibly infinite) benchmarks lazily.
This is the same as :meth:`Dataset.benchmarks()
<compiler_gym.datasets.Dataset.benchmarks>`:
>>> from itertools import islice
        >>> list(islice(dataset, 100)) == list(islice(dataset.benchmarks(), 100))
True
:return: An iterable sequence of :meth:`Benchmark
<compiler_gym.datasets.Benchmark>` instances.
"""
yield from self.benchmarks()
def benchmark_uris(self) -> Iterable[str]:
"""Enumerate the (possibly infinite) benchmark URIs.
Iteration order is consistent across runs. The order of
:meth:`benchmarks() <compiler_gym.datasets.Dataset.benchmarks>` and
:meth:`benchmark_uris() <compiler_gym.datasets.Dataset.benchmark_uris>`
is the same.
If the number of benchmarks in the dataset is infinite
(:code:`len(dataset) == math.inf`), the iterable returned by this method
will continue indefinitely.
:return: An iterable sequence of benchmark URI strings.
"""
raise NotImplementedError("abstract class")
def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark:
"""Select a benchmark.
Subclasses must implement this method. Implementors may assume that the
URI is well formed and that the :code:`scheme` and :code:`dataset`
components are correct.
:param uri: The parsed URI of the benchmark to return.
:return: A :class:`Benchmark <compiler_gym.datasets.Benchmark>`
instance.
:raise LookupError: If :code:`uri` is not found.
:raise ValueError: If the URI is invalid.
"""
raise NotImplementedError("abstract class")
def benchmark(self, uri: str) -> Benchmark:
"""Select a benchmark.
:param uri: The URI of the benchmark to return.
:return: A :class:`Benchmark <compiler_gym.datasets.Benchmark>`
instance.
:raise LookupError: If :code:`uri` is not found.
:raise ValueError: If the URI is invalid.
"""
return self.benchmark_from_parsed_uri(BenchmarkUri.from_string(uri))
def random_benchmark(
self, random_state: Optional[np.random.Generator] = None
) -> Benchmark:
"""Select a benchmark randomly.
:param random_state: A random number generator. If not provided, a
default :code:`np.random.default_rng()` is used.
:return: A :class:`Benchmark <compiler_gym.datasets.Benchmark>`
instance.
"""
random_state = random_state or np.random.default_rng()
return self._random_benchmark(random_state)
def _random_benchmark(self, random_state: np.random.Generator) -> Benchmark:
"""Private implementation of the random benchmark getter.
Subclasses must implement this method so that it selects a benchmark
from the available benchmarks with uniform probability, using only
:code:`random_state` as a source of randomness.
"""
raise NotImplementedError("abstract class")
def __getitem__(self, uri: str) -> Benchmark:
"""Select a benchmark by URI.
This is the same as :meth:`Dataset.benchmark(uri)
<compiler_gym.datasets.Dataset.benchmark>`:
>>> dataset["benchmark://cbench-v1/crc32"] == dataset.benchmark("benchmark://cbench-v1/crc32")
True
:return: A :class:`Benchmark <compiler_gym.datasets.Benchmark>`
instance.
:raise LookupError: If :code:`uri` does not exist.
"""
return self.benchmark(uri)
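# A minimal sketch of a concrete Dataset subclass. Everything below is
# illustrative, not an API that ships with the library:
#
#   class InMemoryDataset(Dataset):
#       def __init__(self):
#           super().__init__(
#               name="benchmark://in-memory-v0",
#               description="A tiny demo dataset",
#               license="MIT",
#           )
#           self._uris = [f"benchmark://in-memory-v0/{i}" for i in range(3)]
#
#       @property
#       def size(self) -> int:
#           return len(self._uris)
#
#       def benchmark_uris(self) -> Iterable[str]:
#           yield from self._uris
#
#       def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark:
#           return Benchmark.from_file_contents(uri, b"<program data>")
#
#       def _random_benchmark(self, random_state) -> Benchmark:
#           return self.benchmark(random_state.choice(self._uris))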
# Deprecated since v0.2.4.
# This alias is kept for backwards compatibility and will be removed in a future
# release. Please use errors from `compiler_gym.errors`.
DatasetInitError = compiler_gym.errors.DatasetInitError
@mark_deprecated(
version="0.1.4",
reason=(
"Datasets are now automatically activated. "
"`More information <https://github.com/facebookresearch/CompilerGym/issues/45>`_."
),
)
def activate(env, dataset: Union[str, Dataset]) -> bool:
"""Deprecated function for managing datasets.
:param dataset: The name of the dataset to download, or a :class:`Dataset
<compiler_gym.datasets.Dataset>` instance.
:return: :code:`True` if the dataset was activated, else :code:`False` if
already active.
:raises ValueError: If there is no dataset with that name.
"""
return False
@mark_deprecated(
version="0.1.4",
reason=(
"Please use :meth:`del env.datasets[dataset] <compiler_gym.datasets.Datasets.__delitem__>`. "
"`More information <https://github.com/facebookresearch/CompilerGym/issues/45>`_."
),
)
def delete(env, dataset: Union[str, Dataset]) -> bool:
"""Deprecated function for managing datasets.
Please use :meth:`del env.datasets[dataset]
<compiler_gym.datasets.Datasets.__delitem__>`.
:param dataset: The name of the dataset to download, or a :class:`Dataset
<compiler_gym.datasets.Dataset>` instance.
:return: :code:`True` if the dataset was deleted, else :code:`False` if
already deleted.
"""
del env.datasets[dataset]
return False
@mark_deprecated(
version="0.1.4",
reason=(
"Please use :meth:`env.datasets.deactivate() <compiler_gym.datasets.Datasets.deactivate>`. "
"`More information <https://github.com/facebookresearch/CompilerGym/issues/45>`_."
),
)
def deactivate(env, dataset: Union[str, Dataset]) -> bool:
"""Deprecated function for managing datasets.
Please use :meth:`del env.datasets[dataset]
<compiler_gym.datasets.Datasets.__delitem__>`.
:param dataset: The name of the dataset to download, or a :class:`Dataset
<compiler_gym.datasets.Dataset>` instance.
:return: :code:`True` if the dataset was deactivated, else :code:`False` if
already inactive.
"""
del env.datasets[dataset]
return False
@mark_deprecated(
version="0.1.7",
reason=(
"Datasets are now installed automatically, there is no need to call :code:`require()`. "
"`More information <https://github.com/facebookresearch/CompilerGym/issues/45>`_."
),
)
def require(env, dataset: Union[str, Dataset]) -> bool:
"""Deprecated function for managing datasets.
Datasets are now installed automatically. See :class:`env.datasets
<compiler_gym.datasets.Datasets>`.
:param env: The environment that this dataset is required for.
:param dataset: The name of the dataset to download, or a :class:`Dataset
<compiler_gym.datasets.Dataset>` instance.
:return: :code:`True` if the dataset was downloaded, or :code:`False` if the
dataset was already available.
"""
return False
|
CompilerGym-development
|
compiler_gym/datasets/dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This module contains utility code for working with URIs."""
from typing import Dict, List, Union
from urllib.parse import ParseResult, parse_qs, urlencode, urlparse, urlunparse
from pydantic import BaseModel
class BenchmarkUri(BaseModel):
"""A URI used to identify a benchmark, and optionally a set of parameters
for the benchmark.
A URI has the following format:
.. code-block::
scheme://dataset/path?params#fragment
where:
* :code:`scheme` (optional, default :code:`benchmark`): An arbitrary string
used to group datasets, for example :code:`generator` if the dataset is a
benchmark generator.
* :code:`dataset`: The name of a dataset, optionally with a version tag, for
example :code:`linux-v0`.
* :code:`path` (optional, default empty string): The path of a benchmark
within a dataset.
* :code:`params` (optional, default empty dictionary): A set of query
      parameters for the benchmark. This is parsed as a dictionary mapping
      string keys to lists of string values. For example,
      :code:`dataset=1&debug=true` will be parsed as
      :code:`{"dataset": ["1"], "debug": ["true"]}`.
* :code:`fragment` (optional, default empty string): An optional fragment
within the benchmark.
The :code:`scheme` and :code:`dataset` components are used to resolve a
:class:`Dataset <compiler_gym.datasets.Dataset>` class that can serve the
benchmark. The :meth:`Dataset.benchmark_from_parsed_uri()` method is then
used to interpret the remainder of the URI components.
A benchmark URI may resolve to zero or more benchmarks, for example:
* :code:`benchmark://csmith-v0` resolves to any benchmark from the
:code:`benchmark://csmith-v0` dataset.
* :code:`cbench-v0/qsort` resolves to the path :code:`/qsort`
within the dataset :code:`benchmark://cbench-v0` using the default scheme.
* :code:`benchmark://cbench-v0/qsort?debug=true` also resolves to the path
:code:`/qsort` within the dataset :code:`benchmark://cbench-v0`, but with
an additional parameter :code:`debug=true`.
"""
scheme: str
"""The benchmark scheme. Defaults to :code:`benchmark`."""
dataset: str
"""The name of the dataset."""
path: str
"""The path of the benchmark. Empty string if not set."""
params: Dict[str, List[str]] = {}
"""A dictionary of query parameters. Empty dictionary if not set."""
fragment: str = ""
"""The URL fragment. Empty string if not set."""
@staticmethod
def canonicalize(uri: str):
return str(BenchmarkUri.from_string(uri))
@classmethod
def from_string(cls, uri: str) -> "BenchmarkUri":
components = urlparse(uri)
# Add the default "benchmark://" scheme if required.
if not components.scheme and not components.netloc:
components = urlparse(f"benchmark://{uri}")
return cls(
scheme=components.scheme,
dataset=components.netloc,
path=components.path,
params=parse_qs(components.query),
fragment=components.fragment,
)
def startswith(self, *args):
return str(self).startswith(*args)
def endswith(self, *args):
return str(self).endswith(*args)
def __repr__(self):
return urlunparse(
ParseResult(
scheme=self.scheme,
netloc=self.dataset,
path=self.path,
query=urlencode(self.params, doseq=True),
fragment=self.fragment,
params="", # Field not used.
)
)
def __str__(self) -> str:
return repr(self)
def __hash__(self) -> int:
return hash(str(self))
def __eq__(self, other: Union["BenchmarkUri", str]) -> bool:
return str(self) == str(other)
def __lt__(self, other: Union["BenchmarkUri", str]) -> bool:
return str(self) < str(other)
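# --- Editor's note: illustrative sketch, not part of the original module. ---
# Demonstrates the default-scheme handling and query-parameter parsing
# documented above; the URI below is hypothetical.
def _uri_example():
    uri = BenchmarkUri.from_string("cbench-v1/qsort?debug=true")
    assert uri.scheme == "benchmark"  # default scheme was added
    assert uri.dataset == "cbench-v1"
    assert uri.path == "/qsort"
    assert uri.params == {"debug": ["true"]}
    assert str(uri) == "benchmark://cbench-v1/qsort?debug=true"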
|
CompilerGym-development
|
compiler_gym/datasets/uri.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from pathlib import Path
from typing import Iterable, List
import numpy as np
from compiler_gym.datasets.dataset import Benchmark, Dataset
from compiler_gym.datasets.uri import BenchmarkUri
from compiler_gym.util.decorators import memoized_property
class FilesDataset(Dataset):
"""A dataset comprising a directory tree of files.
A FilesDataset is a root directory that contains (a possibly nested tree of)
files, where each file represents a benchmark. The directory contents can be
filtered by specifying a filename suffix that files must match.
    The URI of each benchmark is the relative path of its file, with the
    required filename suffix stripped, if one is specified. For example, given
    the following file tree:
.. code-block::
/tmp/dataset/a.txt
/tmp/dataset/LICENSE
/tmp/dataset/subdir/subdir/b.txt
/tmp/dataset/subdir/subdir/c.txt
a FilesDataset :code:`benchmark://ds-v0` rooted at :code:`/tmp/dataset` with
filename suffix :code:`.txt` will contain the following URIs:
>>> list(dataset.benchmark_uris())
[
"benchmark://ds-v0/a",
"benchmark://ds-v0/subdir/subdir/b",
"benchmark://ds-v0/subdir/subdir/c",
]
"""
def __init__(
self,
dataset_root: Path,
benchmark_file_suffix: str = "",
memoize_uris: bool = True,
**dataset_args,
):
"""Constructor.
:param dataset_root: The root directory to look for benchmark files.
:param benchmark_file_suffix: A file extension that must be matched for
a file to be used as a benchmark.
:param memoize_uris: Whether to memoize the list of URIs contained in
the dataset. Memoizing the URIs enables faster repeated iteration
over :meth:`dataset.benchmark_uris()
<compiler_gym.datasets.Dataset.benchmark_uris>` at the expense of
increased memory overhead as the file list must be kept in memory.
:param dataset_args: See :meth:`Dataset.__init__()
<compiler_gym.datasets.Dataset.__init__>`.
"""
super().__init__(**dataset_args)
self.dataset_root = dataset_root
self.benchmark_file_suffix = benchmark_file_suffix
self.memoize_uris = memoize_uris
self._memoized_uris = None
@memoized_property
    def size(self) -> int:  # pylint: disable=invalid-overridden-method
self.install()
return sum(
sum(1 for f in files if f.endswith(self.benchmark_file_suffix))
for (_, _, files) in os.walk(self.dataset_root)
)
@property
def _benchmark_uris_iter(self) -> Iterable[str]:
"""Return an iterator over benchmark URIs that is consistent across runs."""
self.install()
for root, dirs, files in os.walk(self.dataset_root):
# Sort the subdirectories so that os.walk() order is stable between
# runs.
dirs.sort()
reldir = root[len(str(self.dataset_root)) + 1 :]
for filename in sorted(files):
# If we have an expected file suffix then ignore files that do
# not match, and strip the suffix from files that do match.
if self.benchmark_file_suffix:
if not filename.endswith(self.benchmark_file_suffix):
continue
filename = filename[: -len(self.benchmark_file_suffix)]
# Use os.path.join() rather than simple '/' concatenation as
# reldir may be empty.
yield os.path.join(self.name, reldir, filename)
@property
def _benchmark_uris(self) -> List[str]:
return list(self._benchmark_uris_iter)
def benchmark_uris(self) -> Iterable[str]:
if self._memoized_uris:
yield from self._memoized_uris
elif self.memoize_uris:
self._memoized_uris = self._benchmark_uris
yield from self._memoized_uris
else:
yield from self._benchmark_uris_iter
def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark:
self.install()
path = Path(
# Use normpath() rather than joinpath() because uri.path may start
# with a leading '/'.
os.path.normpath(
f"{self.dataset_root}/{uri.path}{self.benchmark_file_suffix}"
)
)
if not path.is_file():
raise LookupError(f"Benchmark not found: {uri} (file not found: {path})")
return self.benchmark_class.from_file(uri, path)
def _random_benchmark(self, random_state: np.random.Generator) -> Benchmark:
return self.benchmark(random_state.choice(list(self.benchmark_uris())))
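# --- Editor's note: illustrative sketch, not part of the original module. ---
# Builds a throwaway file tree and shows how URIs are derived by stripping
# the filename suffix. The Dataset keyword arguments (description, license,
# site_data_base) are assumptions about the base class constructor.
def _files_dataset_example():
    import tempfile

    root = Path(tempfile.mkdtemp())
    (root / "subdir").mkdir()
    (root / "a.txt").write_text("A")
    (root / "subdir" / "b.txt").write_text("B")
    ds = FilesDataset(
        name="benchmark://ds-v0",
        description="example",
        license="MIT",
        site_data_base=root / "site_data",
        dataset_root=root,
        benchmark_file_suffix=".txt",
    )
    assert list(ds.benchmark_uris()) == [
        "benchmark://ds-v0/a",
        "benchmark://ds-v0/subdir/b",
    ]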
|
CompilerGym-development
|
compiler_gym/datasets/files_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import Counter
from collections.abc import Collection
from typing import Optional, Tuple
import numpy as np
from gym.spaces import Space
class SpaceSequence(Space):
"""Variable-length sequence of subspaces that have the same definition."""
def __init__(
self, name: str, space: Space, size_range: Tuple[int, Optional[int]] = (0, None)
):
"""Constructor.
:param name: The name of the space.
:param space: Shared definition of the spaces in the sequence.
:param size_range: Range of the sequence length.
"""
self.name = name
self.space = space
self.size_range = size_range
def contains(self, x):
if not isinstance(x, Collection):
return False
lower_bound = self.size_range[0]
upper_bound = float("inf") if self.size_range[1] is None else self.size_range[1]
if not (lower_bound <= len(x) <= upper_bound):
return False
for element in x:
if not self.space.contains(element):
return False
return True
def __eq__(self, other) -> bool:
return (
isinstance(self, other.__class__)
and self.name == other.name
and Counter(self.size_range) == Counter(other.size_range)
and self.space == other.space
)
    def sample(self):
        # NOTE: when size_range[1] is None, np.random.randint(low, high=None)
        # draws from [0, low), not from an unbounded range above the lower
        # bound.
return [
self.space.sample()
for _ in range(
np.random.randint(
low=self.size_range[0],
high=None if self.size_range[1] is None else self.size_range[1] + 1,
)
)
]
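# --- Editor's note: illustrative sketch, not part of the original module. ---
# Uses gym's plain Discrete as the shared element space; any gym.Space with
# contains() would work the same way.
def _space_sequence_example():
    from gym.spaces import Discrete as _GymDiscrete

    seq = SpaceSequence(name="example", space=_GymDiscrete(4), size_range=(1, 3))
    assert seq.contains([0, 3])            # valid length, elements in space
    assert not seq.contains([])            # shorter than the lower bound
    assert not seq.contains([0, 1, 2, 3])  # longer than the upper bound
    assert not seq.contains(5)             # not a collection at all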
|
CompilerGym-development
|
compiler_gym/spaces/space_sequence.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from gym.spaces import Space
from gym.spaces import Tuple as GymTuple
class Tuple(GymTuple):
"""A tuple (i.e., product) of simpler spaces.
Wraps the underlying :code:`gym.spaces.Tuple` space with a name attribute.
"""
def __init__(self, spaces: List[Space], name: str):
"""Constructor.
:param spaces: The composite spaces.
:param name: The name of the space.
"""
super().__init__(spaces)
self.name = name
def __eq__(self, other) -> bool:
return (
isinstance(self, other.__class__)
and self.name == other.name
and super().__eq__(other)
)
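# --- Editor's note: illustrative sketch, not part of the original module. ---
# Composes two plain gym spaces under a named Tuple.
def _tuple_example():
    from gym.spaces import Discrete as _GymDiscrete

    t = Tuple([_GymDiscrete(2), _GymDiscrete(3)], name="example")
    assert t.name == "example"
    assert t.contains((1, 2))  # one value per subspace, each in range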
|
CompilerGym-development
|
compiler_gym/spaces/tuple.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Iterable, Optional
import numpy as np
from gym.spaces import Box as GymBox
class Box(GymBox):
"""A (possibly unbounded) box in R^n. Specifically, a Box represents the
Cartesian product of n closed intervals. Each interval has the form of one
of [a, b], (-oo, b], [a, oo), or (-oo, oo).
Wraps the underlying :code:`gym.spaces.Box` with a name attribute.
"""
def __init__(
self,
low: float,
high: float,
name: str,
shape: Optional[Iterable[int]] = None,
dtype=np.float32,
):
"""Constructor.
:param low: The lower bound, inclusive.
:param high: The upper bound, inclusive.
:param name: The name of the space.
:param shape: The shape of the space.
:param dtype: The dtype of the space.
"""
super().__init__(low=low, high=high, shape=shape, dtype=dtype)
self.name = name
def __eq__(self, other) -> bool:
return (
isinstance(self, other.__class__)
and self.name == other.name
and super().__eq__(other)
)
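# --- Editor's note: illustrative sketch, not part of the original module. ---
# A named 2-dimensional box over [0, 1]; sampled points always lie inside.
def _box_example():
    space = Box(low=0.0, high=1.0, name="example", shape=(2,), dtype=np.float32)
    point = space.sample()
    assert space.contains(point)
    assert space.name == "example"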
|
CompilerGym-development
|
compiler_gym/spaces/box.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, Tuple
import numpy as np
from gym.spaces import Space
from compiler_gym.spaces.common import issubdtype
from compiler_gym.spaces.scalar import Scalar
class Sequence(Space):
"""A sequence of values. Each element of the sequence is of `dtype`. The
length of the sequence is bounded by `size_range`.
Example:
::
        >>> space = Sequence(name="text", size_range=(0, None), dtype=str)
>>> space.contains("Hello, world!")
True
::
        >>> space = Sequence(name="data", size_range=(256, 256), dtype=bytes)
>>> space.contains("Hello, world!")
False
:ivar size_range: A tuple indicating the `(lower, upper)` bounds for
sequence lengths. An upper bound of `None` means no upper bound. All
sequences must have a lower bound of length >= 0.
:ivar dtype: The data type for each element in a sequence.
:ivar opaque_data_format: An optional string describing an opaque data
format, e.g. a data structure that is serialized to a string/binary
array for transmission to the client. It is up to the client and service
to agree on how to decode observations using this value. For example,
an opaque_data_format of `string_json` could be used to indicate that
the observation is a string-serialized JSON value.
"""
def __init__(
self,
name: str,
size_range: Tuple[int, Optional[int]] = (0, None),
dtype=bytes,
opaque_data_format: Optional[str] = None,
scalar_range: Optional[Scalar] = None,
):
"""Constructor.
:param name: The name of the space.
:param size_range: A tuple indicating the `(lower, upper)` bounds for
sequence lengths. An upper bound of `None` means no upper bound. All
sequences must have a lower bound of length >= 0.
:param dtype: The data type for each element in a sequence.
:param opaque_data_format: An optional string describing an opaque data
format, e.g. a data structure that is serialized to a string/binary
array for transmission to the client. It is up to the client and
service to agree on how to decode observations using this value. For
example, an opaque_data_format of `string_json` could be used to
indicate that the observation is a string-serialized JSON value.
:param scalar_range: If specified, this denotes the legal range of each
element in the sequence. This is enforced by :meth:`contains()
<compiler_gym.spaces.Sequence.contains>` checks.
"""
self.name = name
self.size_range = size_range
self.dtype = dtype
self.opaque_data_format = opaque_data_format
self.scalar_range = scalar_range
def __repr__(self) -> str:
upper_bound = "inf" if self.size_range[1] is None else self.size_range[1]
d = f" -> {self.opaque_data_format}" if self.opaque_data_format else ""
return (
f"{self.dtype.__name__}_list<>[{int(self.size_range[0])},{upper_bound}]){d}"
)
def contains(self, x):
lower_bound = self.size_range[0]
upper_bound = float("inf") if self.size_range[1] is None else self.size_range[1]
if not (lower_bound <= len(x) <= upper_bound):
return False
# TODO(cummins): The dtype API is inconsistent. When dtype=str or
# dtype=bytes, we expect this to be the type of the entire sequence. But
# for dtype=int, we expect this to be the type of each element. We
# should distinguish these differences better.
if self.dtype in {str, bytes}:
if not isinstance(x, self.dtype):
return False
elif hasattr(x, "dtype"):
if not issubdtype(x.dtype, self.dtype):
return False
# Run the bounds check on every scalar element, if there is a scalar
# range specified.
elif self.scalar_range:
return all(self.scalar_range.contains(s) for s in x)
else:
for element in x:
if not issubdtype(type(element), self.dtype):
return False
return True
def sample(self):
"""
.. warning::
The `Sequence` space cannot be sampled from.
:raises NotImplementedError: Not supported.
"""
raise NotImplementedError
def __eq__(self, other):
if not isinstance(other, Sequence):
return False
return (
self.name == other.name
and self.size_range == other.size_range
and np.dtype(self.dtype) == np.dtype(other.dtype)
and self.opaque_data_format == other.opaque_data_format
and self.scalar_range == other.scalar_range
)
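# --- Editor's note: illustrative sketch, not part of the original module. ---
# Shows the dtype handling described in contains(): str/bytes are checked
# against the whole sequence, while numeric dtypes are checked per element
# (here with an additional scalar_range bounds check).
def _sequence_example():
    text = Sequence(name="text", size_range=(0, None), dtype=str)
    assert text.contains("Hello, world!")
    assert not text.contains(b"raw bytes")  # wrong container type

    ints = Sequence(
        name="ints",
        size_range=(1, 3),
        dtype=int,
        scalar_range=Scalar(name="element", min=0, max=10, dtype=int),
    )
    assert ints.contains([1, 2, 3])
    assert not ints.contains([1, 2, 99])  # element outside scalar_range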
|
CompilerGym-development
|
compiler_gym/spaces/sequence.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
from typing import Optional
import numpy as np
from gym.spaces import Space
from compiler_gym.spaces.common import issubdtype
class Scalar(Space):
"""A scalar value."""
def __init__(
self,
name: str,
min: Optional[float] = None,
max: Optional[float] = None,
dtype=np.float64,
):
"""Constructor.
:param name: The name of the space.
:param min: The lower bound for a value in this space. If None, there is
no lower bound.
:param max: The upper bound for a value in this space. If None, there is
no upper bound.
:param dtype: The type of this scalar.
"""
self.name = name
self.min = min
self.max = max
self.dtype = dtype
def sample(self):
min = 0 if self.min is None else self.min
max = 1 if self.max is None else self.max
return self.dtype(random.uniform(min, max))
def contains(self, x):
if not issubdtype(type(x), self.dtype):
return False
min = -float("inf") if self.min is None else self.min
max = float("inf") if self.max is None else self.max
return min <= x <= max
def __repr__(self):
if self.min is None and self.max is None:
return self.dtype.__name__
lower_bound = "-inf" if self.min is None else self.min
upper_bound = "inf" if self.max is None else self.max
return f"{self.dtype.__name__}<{lower_bound},{upper_bound}>"
def __eq__(self, rhs):
"""Equality test."""
if not isinstance(rhs, Scalar):
return False
return (
self.name == rhs.name
and self.min == rhs.min
and self.max == rhs.max
and np.dtype(self.dtype) == np.dtype(rhs.dtype)
)
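# --- Editor's note: illustrative sketch, not part of the original module. ---
# An integer scalar bounded to [0, 100].
def _scalar_example():
    s = Scalar(name="example", min=0, max=100, dtype=int)
    assert s.contains(42)
    assert not s.contains(101)  # out of bounds
    assert not s.contains(0.5)  # wrong dtype
    assert repr(s) == "int<0,100>"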
|
CompilerGym-development
|
compiler_gym/spaces/scalar.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from gym.spaces import Discrete as GymDiscrete
class Discrete(GymDiscrete):
"""A discrete space in :math:`{ 0, 1, \\dots, n-1 }`.
Wraps the underlying :code:`gym.spaces.Discrete` space with a name attribute.
"""
def __init__(self, n: int, name: str):
"""Constructor.
:param n: The upper bound.
:param name: The name of the space.
"""
super().__init__(n)
self.name = name
def __eq__(self, other) -> bool:
return (
isinstance(self, other.__class__)
and self.name == other.name
and super().__eq__(other)
)
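# --- Editor's note: illustrative sketch, not part of the original module. ---
# A named five-element discrete space.
def _discrete_example():
    d = Discrete(5, name="example")
    assert d.n == 5
    assert d.contains(d.sample())
    assert d.name == "example"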
|
CompilerGym-development
|
compiler_gym/spaces/discrete.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from compiler_gym.spaces.action_space import ActionSpace
from compiler_gym.spaces.box import Box
from compiler_gym.spaces.commandline import Commandline, CommandlineFlag
from compiler_gym.spaces.dict import Dict
from compiler_gym.spaces.discrete import Discrete
from compiler_gym.spaces.named_discrete import NamedDiscrete
from compiler_gym.spaces.permutation import Permutation
from compiler_gym.spaces.reward import DefaultRewardFromObservation, Reward
from compiler_gym.spaces.runtime_reward import RuntimeReward
from compiler_gym.spaces.scalar import Scalar
from compiler_gym.spaces.sequence import Sequence
from compiler_gym.spaces.space_sequence import SpaceSequence
from compiler_gym.spaces.tuple import Tuple
__all__ = [
"ActionSpace",
"Box",
"Commandline",
"CommandlineFlag",
"DefaultRewardFromObservation",
"Dict",
"Discrete",
"NamedDiscrete",
"Permutation",
"Reward",
"RuntimeReward",
"Scalar",
"Sequence",
"SpaceSequence",
"Tuple",
]
|
CompilerGym-development
|
compiler_gym/spaces/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Iterable, List, NamedTuple
from compiler_gym.spaces.named_discrete import NamedDiscrete
class CommandlineFlag(NamedTuple):
"""A single flag in a Commandline space."""
name: str
"""The name of the flag, e.g. :code:`LoopUnroll`."""
flag: str
"""The flag string, e.g. :code:`--unroll`."""
description: str
"""A human-readable description of the flag."""
class Commandline(NamedDiscrete):
"""A :class:`NamedDiscrete <compiler_gym.spaces.NamedDiscrete>` space where
each element represents a commandline flag.
Example usage:
        >>> space = Commandline([
                CommandlineFlag("a", "-a", "A flag"),
                CommandlineFlag("b", "-b", "Another flag"),
            ], name="example")
>>> space.n
2
>>> space["a"]
0
>>> space.names[0]
a
>>> space.flags[0]
-a
>>> space.descriptions[0]
A flag
>>> space.sample()
1
        >>> space.to_string([0, 1])
-a -b
:ivar flags: A list of flag strings.
:ivar descriptions: A list of flag descriptions.
"""
def __init__(self, items: Iterable[CommandlineFlag], name: str):
"""Constructor.
:param items: The commandline flags that comprise the space.
:param name: The name of the space.
"""
items = list(items)
self.flags = [f.flag for f in items]
self.descriptions = [f.description for f in items]
        super().__init__([f.name for f in items], name=name)
def __repr__(self) -> str:
return f"Commandline([{' '.join(self.flags)}])"
def to_string(self, values: List[int]) -> str:
"""Produce a commandline invocation from a sequence of values.
:param values: A numeric value from the space, or sequence of values.
:return: A string commandline invocation.
"""
return " ".join([self.flags[v] for v in values])
def from_string(self, commandline: str) -> List[int]:
"""Produce a sequence of actions from a commandline.
:param commandline: A string commandline invocation, as produced by
:func:`to_string()
<compiler_gym.spaces.commandline.Commandline.to_string>`.
:return: A list of action values.
:raises LookupError: If any of the flags in the commandline are not
recognized.
"""
flags = commandline.split()
values = []
for flag in flags:
try:
values.append(self.flags.index(flag))
            except ValueError:
raise LookupError(f"Unknown flag: `{flag}`")
return values
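# --- Editor's note: illustrative sketch, not part of the original module. ---
# Round-trips a commandline through to_string() / from_string().
def _commandline_example():
    space = Commandline(
        [
            CommandlineFlag("a", "-a", "A flag"),
            CommandlineFlag("b", "-b", "Another flag"),
        ],
        name="example",
    )
    assert space.to_string([0, 1]) == "-a -b"
    assert space.from_string("-a -b") == [0, 1]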
|
CompilerGym-development
|
compiler_gym/spaces/commandline.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from inspect import isclass
from numbers import Integral, Real
import numpy as np
def issubdtype(subtype, supertype):
if isclass(subtype) and isclass(supertype) and issubclass(subtype, supertype):
return True
subdtype = np.dtype(subtype)
superdtype = np.dtype(supertype)
if np.dtype(subdtype) == np.dtype(superdtype):
return True
common_dtype = np.find_common_type([], [subdtype, superdtype])
if not np.issubdtype(common_dtype, superdtype):
return False
    # Reject lossy integer-to-float conversions: an integer subtype whose
    # maximum value exceeds the common floating dtype's mantissa cannot be
    # represented exactly. (np.finfo() assumes the common dtype is
    # floating-point when this branch is reached.)
    if (
issubclass(common_dtype.type, Real)
and issubclass(subdtype.type, Integral)
and 2 ** np.finfo(common_dtype).nmant < np.iinfo(subdtype).max
):
return False
return True
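# --- Editor's note: illustrative sketch, not part of the original module. ---
# int32 values fit losslessly in a float64 mantissa, int64 values do not,
# so the precision guard above rejects the latter.
def _issubdtype_example():
    assert issubdtype(np.int32, np.integer)  # plain subclass relationship
    assert issubdtype(np.int32, float)       # exactly representable in float64
    assert not issubdtype(np.int64, float)   # 2**52 mantissa < int64 max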
|
CompilerGym-development
|
compiler_gym/spaces/common.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from numbers import Integral
import numpy as np
from compiler_gym.spaces.scalar import Scalar
from compiler_gym.spaces.sequence import Sequence
class Permutation(Sequence):
"""The space of permutations of all numbers in the range `scalar_range`."""
def __init__(self, name: str, scalar_range: Scalar):
"""Constructor.
:param name: The name of the permutation space.
:param scalar_range: Range of numbers in the permutation.
For example the scalar range [1, 3] would define permutations like
[1, 2, 3] or [2, 1, 3], etc.
:raises TypeError: If `scalar_range.dtype` is not an integral type.
"""
if not issubclass(np.dtype(scalar_range.dtype).type, Integral):
raise TypeError("Permutation space can have integral scalar range only.")
sz = scalar_range.max - scalar_range.min + 1
super().__init__(
name=name,
size_range=(sz, sz),
dtype=scalar_range.dtype,
scalar_range=scalar_range,
)
    def sample(self):
        # size_range is (n, n) for a permutation of n elements: draw n
        # distinct values from [0, n) and shift them into [min, max].
return (
np.random.choice(self.size_range[0], size=self.size_range[1], replace=False)
+ self.scalar_range.min
)
def __eq__(self, other) -> bool:
return isinstance(self, other.__class__) and super().__eq__(other)
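# --- Editor's note: illustrative sketch, not part of the original module. ---
# Permutations of the integers 1..3.
def _permutation_example():
    perm = Permutation(
        name="example",
        scalar_range=Scalar(name="range", min=1, max=3, dtype=int),
    )
    sample = perm.sample()
    assert sorted(sample) == [1, 2, 3]  # a shuffle of the full range
    assert perm.contains(sample)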
|
CompilerGym-development
|
compiler_gym/spaces/permutation.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import Counter
from collections.abc import Iterable as IterableType
from typing import Iterable, List, Union
from compiler_gym.spaces.discrete import Discrete
from compiler_gym.util.gym_type_hints import ActionType
class NamedDiscrete(Discrete):
"""An extension of the :code:`Discrete` space in which each point in the
space has a name. Additionally, the space itself may have a name.
:ivar name: The name of the space.
:vartype name: str
:ivar names: A list of names for each element in the space.
:vartype names: List[str]
Example usage:
>>> space = NamedDiscrete(["a", "b", "c"])
>>> space.n
3
>>> space["a"]
0
>>> space.names[0]
a
>>> space.sample()
1
"""
def __init__(self, items: Iterable[str], name: str):
"""Constructor.
:param items: A list of names for items in the space.
:param name: The name of the space.
"""
items = list(items)
if not items:
raise ValueError("No values for discrete space")
self.names = [str(x) for x in items]
super().__init__(n=len(self.names), name=name)
def __getitem__(self, name: str) -> int:
"""Lookup the numeric value of a point in the space.
:param name: A name.
:return: The numeric value.
:raises ValueError: If the name is not in the space.
"""
return self.names.index(name)
def __repr__(self) -> str:
return f"NamedDiscrete([{', '.join(self.names)}])"
def to_string(self, values: Union[int, Iterable[ActionType]]) -> str:
"""Convert an action, or sequence of actions, to string.
:param values: A numeric value, or list of numeric values.
:return: A string representing the values.
"""
if isinstance(values, IterableType):
return " ".join([self.names[v] for v in values])
else:
return self.names[values]
def from_string(self, string: str) -> Union[ActionType, List[ActionType]]:
"""Convert a name, or list of names, to numeric values.
        :param string: A string of space-separated names, as produced by
            :meth:`to_string() <compiler_gym.spaces.NamedDiscrete.to_string>`.
        :return: A list of numeric values.
"""
return [self.names.index(v) for v in string.split(" ")]
def __eq__(self, other) -> bool:
return (
isinstance(self, other.__class__)
and self.name == other.name
and Counter(self.names) == Counter(other.names)
and super().__eq__(other)
)
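# --- Editor's note: illustrative sketch, not part of the original module. ---
# Name lookup and string round-tripping.
def _named_discrete_example():
    space = NamedDiscrete(["a", "b", "c"], name="example")
    assert space["b"] == 1
    assert space.to_string([0, 2]) == "a c"
    assert space.from_string("a c") == [0, 2]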
|
CompilerGym-development
|
compiler_gym/spaces/named_discrete.py
|