python_code
stringlengths
0
4.04M
repo_name
stringlengths
8
58
file_path
stringlengths
5
147
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict as DictType from typing import List, Union from gym.spaces import Dict as GymDict from gym.spaces import Space class Dict(GymDict): """A dictionary of simpler spaces. Wraps the underlying :code:`gym.spaces.Dict` space with a name attribute. """ def __init__(self, spaces: Union[DictType[str, Space], List[Space]], name: str): """Constructor. :param spaces: The composite spaces. :param name: The name of the space. """ super().__init__(spaces) self.name = name def __eq__(self, other) -> bool: return ( isinstance(self, other.__class__) and self.name == other.name and super().__eq__(other) )
CompilerGym-development
compiler_gym/spaces/dict.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import List, Optional, Tuple, Union import numpy as np import compiler_gym from compiler_gym.spaces.scalar import Scalar from compiler_gym.util.gym_type_hints import ActionType, ObservationType, RewardType class Reward(Scalar): """An extension of the :class:`Scalar <compiler_gym.spaces.Scalar>` space that is used for computing a reward signal. A :code:`Reward` is a scalar value used to determine the reward for a particular action. An instance of :code:`Reward` is used to represent the reward function for a particular episode. For every :meth:`env.step() <compiler_gym.envs.CompilerEnv.step>` of the environment, the :meth:`reward.update() <compiler_gym.spaces.Reward.update>` method is called to produce a new incremental reward. Environments provide implementations of :code:`Reward` that compute reward signals based on observation values computed by the backend service. """ def __init__( self, name: str, observation_spaces: Optional[List[str]] = None, default_value: RewardType = 0, min: Optional[RewardType] = None, max: Optional[RewardType] = None, default_negates_returns: bool = False, success_threshold: Optional[RewardType] = None, deterministic: bool = False, platform_dependent: bool = True, ): """Constructor. :param name: The name of the reward space. This is a unique name used to represent the reward. :param observation_spaces: A list of observation space IDs (:class:`space.id <compiler_gym.views.ObservationSpaceSpec>` values) that are used to compute the reward. May be an empty list if no observations are requested. Requested observations will be provided to the :code:`observations` argument of :meth:`reward.update() <compiler_gym.spaces.Reward.update>`. :param default_value: A default reward. This value will be returned by :meth:`env.step() <compiler_gym.envs.CompilerEnv.step>` if the service terminates. :param min: The lower bound of the reward. :param max: The upper bound of the reward. :param default_negates_returns: If true, the default value will be offset by the sum of all rewards for the current episode. For example, given a default reward value of *-10.0* and an episode with prior rewards *[0.1, 0.3, -0.15]*, the default value is: *-10.0 - sum(0.1, 0.3, -0.15)*. :param success_threshold: The cumulative reward threshold before an episode is considered successful. For example, episodes where reward is scaled to an existing heuristic can be considered “successful” when the reward exceeds the existing heuristic. :param deterministic: Whether the reward space is deterministic. :param platform_dependent: Whether the reward values depend on the execution environment of the service. """ super().__init__( name=name, min=-np.inf if min is None else min, max=np.inf if max is None else max, dtype=np.float64, ) self.name = name or id if not self.name: raise TypeError("No name given") self.observation_spaces = observation_spaces or [] self.default_value: RewardType = default_value self.default_negates_returns: bool = default_negates_returns self.success_threshold = success_threshold self.deterministic = deterministic self.platform_dependent = platform_dependent def reset( self, benchmark: str, observation_view: "compiler_gym.views.ObservationView" ) -> None: """Reset the rewards space. This is called on :meth:`env.reset() <compiler_gym.envs.CompilerEnv.reset>`. 
:param benchmark: The URI of the benchmark that is used for this episode. :param observation: An observation view for reward initialization """ pass def update( self, actions: List[ActionType], observations: List[ObservationType], observation_view: "compiler_gym.views.ObservationView", # noqa: F821 ) -> RewardType: """Calculate a reward for the given action. :param action: The action performed. :param observations: A list of observation values as requested by the :code:`observation_spaces` constructor argument. :param observation_view: The :class:`ObservationView <compiler_gym.views.ObservationView>` instance. """ raise NotImplementedError("abstract class") def reward_on_error(self, episode_reward: RewardType) -> RewardType: """Return the reward value for an error condition. This method should be used to produce the reward value that should be used if the compiler service cannot be reached, e.g. because it has crashed or the connection has dropped. :param episode_reward: The current cumulative reward of an episode. :return: A reward. """ if self.default_negates_returns: return self.default_value - episode_reward else: return self.default_value @property def range(self) -> Tuple[RewardType, RewardType]: """The lower and upper bounds of the reward.""" return (self.min, self.max) def __repr__(self): return self.name def __eq__(self, other: Union["Reward", str]) -> bool: if isinstance(other, str): return self.name == other elif isinstance(other, Reward): return self.name == other.name else: return False class DefaultRewardFromObservation(Reward): def __init__(self, observation_name: str, **kwargs): super().__init__( observation_spaces=[observation_name], name=observation_name, **kwargs ) self.previous_value: Optional[ObservationType] = None def reset(self, benchmark: str, observation_view) -> None: """Called on env.reset(). Reset incremental progress.""" del benchmark # unused self.previous_value = None def update( self, action: int, observations: List[ObservationType], observation_view: "compiler_gym.views.ObservationView", # noqa: F821 ) -> RewardType: """Called on env.step(). Compute and return new reward.""" del action # unused del observation_view # unused value: RewardType = observations[0] if self.previous_value is None: self.previous_value = 0 reward = RewardType(value - self.previous_value) self.previous_value = value return reward
CompilerGym-development
compiler_gym/spaces/reward.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import List, Optional from gym.spaces import Space from compiler_gym.util.gym_type_hints import ActionType class ActionSpace(Space): """A wrapper around a :code:`gym.spaces.Space` with additional functionality for action spaces. """ def __init__(self, space: Space): """Constructor. :param space: The space that this action space wraps. """ self.wrapped = space def __getattr__(self, name: str): return getattr(self.wrapped, name) def __getitem__(self, name: str): return self.wrapped[name] def sample(self) -> ActionType: return self.wrapped.sample() def seed(self, seed: Optional[int] = None) -> ActionType: return self.wrapped.seed(seed) def contains(self, x: ActionType) -> bool: """Return boolean specifying if x is a valid member of this space.""" raise self.wrapped.contains(x) def __contains__(self, x: ActionType) -> bool: """Return boolean specifying if x is a valid member of this space.""" return self.wrapped.contains(x) def __eq__(self, rhs) -> bool: if isinstance(rhs, ActionSpace): return self.wrapped == rhs.wrapped else: return self.wrapped == rhs def __ne__(self, rhs) -> bool: if isinstance(rhs, ActionSpace): return self.wrapped != rhs.wrapped else: return self.wrapped != rhs def to_string(self, actions: List[ActionType]) -> str: """Render the provided list of actions to a string. This method is used to produce a human-readable string to represent a sequence of actions. Subclasses may override the default implementation to provide custom rendering. This is the complement of :meth:`from_string() <compiler_gym.spaces.ActionSpace.from_string>`. The two methods are bidirectional: >>> actions = env.actions >>> s = env.action_space.to_string(actions) >>> actions == env.action_space.from_string(s) True :param actions: A list of actions drawn from this space. :return: A string representation that can be decoded using :meth:`from_string() <compiler_gym.spaces.ActionSpace.from_string>`. """ if hasattr(self.wrapped, "to_string"): return self.wrapped.to_string(actions) return ",".join(str(x) for x in actions) def from_string(self, string: str) -> List[ActionType]: """Return a list of actions from the given string. This is the complement of :meth:`to_string() <compiler_gym.spaces.ActionSpace.to_string>`. The two methods are bidirectional: >>> actions = env.actions >>> s = env.action_space.to_string(actions) >>> actions == env.action_space.from_string(s) True :param string: A string. :return: A list of actions. """ if hasattr(self.wrapped, "from_string"): return self.wrapped.from_string(string) return [self.dtype.type(x) for x in string.split(",")] def __repr__(self) -> str: return f"{type(self).__name__}({self.wrapped})"
CompilerGym-development
compiler_gym/spaces/action_space.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Callable, Iterable, List, Optional from compiler_gym.errors import BenchmarkInitError, ServiceError from compiler_gym.spaces.reward import Reward from compiler_gym.util.gym_type_hints import ActionType, ObservationType class RuntimeReward(Reward): def __init__( self, runtime_count: int, warmup_count: int, estimator: Callable[[Iterable[float]], float], default_value: int = 0, ): super().__init__( name="runtime", observation_spaces=["Runtime"], default_value=default_value, min=None, max=None, default_negates_returns=True, deterministic=False, platform_dependent=True, ) self.runtime_count = runtime_count self.warmup_count = warmup_count self.starting_runtime: Optional[float] = None self.previous_runtime: Optional[float] = None self.current_benchmark: Optional[str] = None self.estimator = estimator def reset(self, benchmark, observation_view) -> None: # If we are changing the benchmark then check that it is runnable. if benchmark != self.current_benchmark: if not observation_view["IsRunnable"]: raise BenchmarkInitError(f"Benchmark is not runnable: {benchmark}") self.current_benchmark = benchmark self.starting_runtime = None # Compute initial runtime if required, else use previously computed # value. if self.starting_runtime is None: self.starting_runtime = self.estimator(observation_view["Runtime"]) self.previous_runtime = self.starting_runtime def update( self, actions: List[ActionType], observations: List[ObservationType], observation_view, ) -> float: del actions # unused del observation_view # unused runtimes = observations[0] if len(runtimes) != self.runtime_count: raise ServiceError( f"Expected {self.runtime_count} runtimes but received {len(runtimes)}" ) runtime = self.estimator(runtimes) reward = self.previous_runtime - runtime self.previous_runtime = runtime return reward
CompilerGym-development
compiler_gym/spaces/runtime_reward.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """This package contains modules that can be used for preparing leaderboard submissions. We provide `leaderboards <https://github.com/facebookresearch/CompilerGym#leaderboards>`_ to track the performance of user-submitted algorithms on compiler optimization tasks. The goal of the leaderboards is to provide a venue for researchers to promote their work, and to provide a common framework for evaluating and comparing different approaches. We accept submissions to the leaderboards through pull requests, see `here <https://facebookresearch.github.io/CompilerGym/contributing.html#leaderboard-submissions>`_ for instructions. """
CompilerGym-development
compiler_gym/leaderboard/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """LLVM is a popular open source compiler used widely in industry and research. The :code:`llvm-ic-v0` environment exposes LLVM's optimizing passes as a set of actions that can be applied to a particular program. The goal of the agent is to select the sequence of optimizations that lead to the greatest reduction in instruction count in the program being compiled. Reward is the reduction in instruction count achieved scaled to the reduction achieved by LLVM's builtin :code:`-Oz` pipeline. +--------------------+------------------------------------------------------+ | Property | Value | +====================+======================================================+ | Environment | :class:`LlvmEnv <compiler_gym.envs.LlvmEnv>`. | +--------------------+------------------------------------------------------+ | Observation Space | Any. | +--------------------+------------------------------------------------------+ | Reward Space | Instruction count reduction relative to :code:`-Oz`. | +--------------------+------------------------------------------------------+ | Test Dataset | The 23 cBench benchmarks. | +--------------------+------------------------------------------------------+ Users who wish to create a submission for this leaderboard may use :func:`eval_llvm_instcount_policy() <compiler_gym.leaderboard.llvm_instcount.eval_llvm_instcount_policy>` to automatically evaluate their agent on the test set. """ import logging import os from itertools import islice from pathlib import Path from threading import Thread from time import sleep from typing import Callable, List import gym import humanize from absl import app, flags import compiler_gym.envs # noqa Register environments. from compiler_gym.bin.validate import main as validate from compiler_gym.compiler_env_state import ( CompilerEnvState, CompilerEnvStateReader, CompilerEnvStateWriter, ) from compiler_gym.envs import LlvmEnv from compiler_gym.util.statistics import arithmetic_mean, geometric_mean from compiler_gym.util.timer import Timer, humanize_duration_hms flags.DEFINE_string( "leaderboard_results", "llvm_instcount-results.csv", "The path of the file to write results to.", ) flags.DEFINE_string( "leaderboard_logfile", "llvm_instcount-results.log", "The path of a file to stream CompilerGym logs to.", ) flags.DEFINE_integer( "max_benchmarks", 0, "If > 0, use only the the first --max_benchmarks benchmarks from the " "dataset, as determined by alphabetical sort. If not set, all benchmarks " "from the dataset are used.", ) flags.DEFINE_integer( "n", 10, "The number of repetitions of the search to run for each benchmark." ) flags.DEFINE_string("test_dataset", "cbench-v1", "The dataset to use for the search.") flags.DEFINE_boolean("validate", True, "Run validation on the results.") flags.DEFINE_boolean( "resume", False, "If true, read the --leaderboard_results file first and run only the " "evaluations not already in the results file.", ) FLAGS = flags.FLAGS # A policy is a function that accepts as input an LLVM environment, and # interacts with that environment with the goal of maximising cumulative reward. 
Policy = Callable[[LlvmEnv], None] class _EvalPolicyWorker(Thread): """Worker thread to evaluate a policy.""" def __init__( self, env: LlvmEnv, benchmarks: List[str], policy: Policy, init_states: List[CompilerEnvState], ): super().__init__() self.env = env self.benchmarks = benchmarks self.policy = policy self.states: List[CompilerEnvState] = init_states self.alive = True def run(self): # Determine if we need to print a header. header = ( not Path(FLAGS.leaderboard_results).is_file() or os.stat(FLAGS.leaderboard_results).st_size == 0 ) with CompilerEnvStateWriter( open(FLAGS.leaderboard_results, "a"), header=header ) as writer: for benchmark in self.benchmarks: self.env.reset(benchmark=benchmark) with Timer() as timer: self.policy(self.env) # Sanity check that the policy didn't change the expected # experimental setup. assert self.env.in_episode, "Environment is no longer in an episode" assert self.env.benchmark and ( self.env.benchmark == benchmark ), "Policy changed environment benchmark" assert self.env.reward_space, "Policy unset environment reward space" assert ( self.env.reward_space.name == "IrInstructionCountOz" ), "Policy changed environment reward space" # Override walltime in the generated state. state = self.env.state.copy() state.walltime = timer.time writer.write_state(state, flush=True) self.states.append(state) if not self.alive: return def eval_llvm_instcount_policy(policy: Policy) -> None: """Evaluate an LLVM codesize policy and generate results for a leaderboard submission. To use it, you define your policy as a function that takes an :class:`LlvmEnv <compiler_gym.envs.LlvmEnv>` instance as input and modifies it in place. For example, for a trivial random policy: >>> from compiler_gym.envs import LlvmEnv >>> def my_policy(env: LlvmEnv) -> None: .... # Defines a policy that takes 10 random steps. ... for _ in range(10): ... _, _, done, _ = env.step(env.action_space.sample()) ... if done: break If your policy is stateful, you can use a class and override the :code:`__call__()` method: >>> class MyPolicy: ... def __init__(self): ... self.my_stateful_vars = {} # or similar ... def __call__(self, env: LlvmEnv) -> None: ... pass # ... do fun stuff! >>> my_policy = MyPolicy() The role of your policy is to perform a sequence of actions on the supplied environment so as to maximize cumulative reward. By default, no observation space is set on the environment, so :meth:`env.step() <compiler_gym.envs.CompilerEnv.step>` will return :code:`None` for the observation. You may set a new observation space: >>> env.observation_space = "InstCount" # Set a new space for env.step() >>> env.observation["InstCount"] # Calculate a one-off observation. However, the policy may not change the reward space of the environment, or the benchmark. Once you have defined your policy, call the :func:`eval_llvm_instcount_policy() <compiler_gym.leaderboard.llvm_instcount.eval_llvm_instcount_policy>` helper function, passing it your policy as its only argument: >>> eval_llvm_instcount_policy(my_policy) The :func:`eval_llvm_instcount_policy() <compiler_gym.leaderboard.llvm_instcount.eval_llvm_instcount_policy>` function calls the policy function for each benchmark in the dataset, one at a time, from a single thread. Stateful policies can assume thread safe access to member variables. Put together as a complete example, a leaderboard submission script may look like: .. 
code-block:: python # my_policy.py from compiler_gym.leaderboard.llvm_instcount import eval_llvm_instcount_policy from compiler_gym.envs import LlvmEnv def my_policy(env: LlvmEnv) -> None: env.observation_space = "InstCount" # we're going to use instcount space pass # ... do fun stuff! if __name__ == "__main__": eval_llvm_instcount_policy(my_policy) The :func:`eval_llvm_instcount_policy() <compiler_gym.leaderboard.llvm_instcount.eval_llvm_instcount_policy>` helper defines a number of commandline flags that can be overriden to control the behavior of the evaluation. For example the flag :code:`--n` determines the number of times the policy is run on each benchmark (default is 10), and :code:`--leaderboard_results` determines the path of the generated results file: .. code-block:: $ python my_policy.py --n=5 --leaderboard_results=my_policy_results.csv You can use :code:`--helpfull` flag to list all of the flags that are defined: .. code-block:: $ python my_policy.py --helpfull Once you are happy with your approach, see the `contributing guide <https://github.com/facebookresearch/CompilerGym/blob/development/CONTRIBUTING.md#leaderboard-submissions>`_ for instructions on preparing a submission to the leaderboard. """ def main(argv): assert len(argv) == 1, f"Unknown args: {argv[:1]}" assert FLAGS.n > 0, "n must be > 0" with gym.make("llvm-ic-v0") as env: # Stream verbose CompilerGym logs to file. logger = logging.getLogger("compiler_gym") logger.setLevel(logging.DEBUG) log_handler = logging.FileHandler(FLAGS.leaderboard_logfile) logger.addHandler(log_handler) logger.propagate = False print(f"Writing results to {FLAGS.leaderboard_results}") print(f"Writing logs to {FLAGS.leaderboard_logfile}") # Build the list of benchmarks to evaluate. benchmarks = env.datasets[FLAGS.test_dataset].benchmark_uris() if FLAGS.max_benchmarks: benchmarks = islice(benchmarks, FLAGS.max_benchmarks) benchmarks = list(benchmarks) # Repeat the searches for the requested number of iterations. benchmarks *= FLAGS.n total_count = len(benchmarks) # If we are resuming from a previous job, read the states that have # already been proccessed and remove those benchmarks from the list # of benchmarks to evaluate. init_states = [] if FLAGS.resume and Path(FLAGS.leaderboard_results).is_file(): with CompilerEnvStateReader(open(FLAGS.leaderboard_results)) as reader: for state in reader: init_states.append(state) if state.benchmark in benchmarks: benchmarks.remove(state.benchmark) # Run the benchmark loop in background so that we can asynchronously # log progress. worker = _EvalPolicyWorker(env, benchmarks, policy, init_states) worker.start() timer = Timer().reset() try: print( f"=== Evaluating policy on " f"{humanize.intcomma(total_count)} " f"{FLAGS.test_dataset} benchmarks ===" "\n\n" # Blank lines will be filled below ) while worker.is_alive(): done_count = len(worker.states) remaining_count = total_count - done_count time = timer.time gmean_reward = geometric_mean([s.reward for s in worker.states]) mean_walltime = ( arithmetic_mean([s.walltime for s in worker.states]) or time ) print( "\r\033[2A" "\033[K" f"Runtime: {humanize_duration_hms(time)}. " f"Estimated completion: {humanize_duration_hms(mean_walltime * remaining_count)}. " f"Completed: {humanize.intcomma(done_count)} / {humanize.intcomma(total_count)} " f"({done_count / total_count:.1%})." "\n\033[K" f"Current mean walltime: {mean_walltime:.3f}s / benchmark." 
"\n\033[K" f"Current geomean reward: {gmean_reward:.4f}.", flush=True, end="", ) sleep(1) except KeyboardInterrupt: print("\nkeyboard interrupt", flush=True) worker.alive = False # User interrupt, don't validate. FLAGS.validate = False if FLAGS.validate: FLAGS.env = "llvm-ic-v0" validate(["argv0", FLAGS.leaderboard_results]) app.run(main)
CompilerGym-development
compiler_gym/leaderboard/llvm_instcount.py
# Protoxygen, from https://github.com/lisroach/Protoxygen ## # Doxygen filter for Google Protocol Buffers .proto files. # This script converts .proto files into C++ style ones # and prints the output to standard output. # # version 0.6-beta # # How to enable this filter in Doxygen: # 1. Generate Doxygen configuration file with command 'doxygen -g <filename>' # e.g. doxygen -g doxyfile # 2. In the Doxygen configuration file, find JAVADOC_AUTOBRIEF and set it enabled # JAVADOC_AUTOBRIEF = YES # 3. In the Doxygen configuration file, find FILE_PATTERNS and add *.proto # FILE_PATTERNS = *.proto # 4. In the Doxygen configuration file, find EXTENSION_MAPPING and add proto=C # EXTENSION_MAPPING = proto=C # 5. In the Doxygen configuration file, find INPUT_FILTER and add this script # INPUT_FILTER = "python proto2cpp.py" # 6. Run Doxygen with the modified configuration # doxygen doxyfile # # # Copyright (C) 2012-2015 Timo Marjoniemi # All rights reserved. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # ## import fnmatch import inspect import os import re import sys # Class for converting Google Protocol Buffers .proto files into C++ style output to enable Doxygen usage. ## # The C++ style output is printed into standard output.<br /> # There are three different logging levels for the class: # <ul><li>#logNone: do not log anything</li> # <li>#logErrors: log errors only</li> # <li>#logAll: log everything</li></ul> # Logging level is determined by \c #logLevel.<br /> # Error logs are written to file determined by \c #errorLogFile.<br /> # Debug logs are written to file determined by \c #logFile. # class proto2cpp: # Logging level: do not log anything. logNone = 0 # Logging level: log errors only. logErrors = 1 # Logging level: log everything. logAll = 2 # Conmessageor # def __init__(self): # Debug log file name. self.logFile = "proto2cpp.log" # Error log file name. self.errorLogFile = "proto2cpp.error.log" # Logging level. self.logLevel = self.logNone # Handles a file. ## # If @p fileName has .proto suffix, it is processed through parseFile(). # Otherwise it is printed to stdout as is except for file \c proto2cpp.py without # path since it's the script given to python for processing. ## # @param fileName Name of the file to be handled. # def handleFile(self, fileName): if fnmatch.fnmatch(filename, "*.proto"): self.log("\nXXXXXXXXXX\nXX " + filename + "\nXXXXXXXXXX\n\n") # Open the file. Use try to detect whether or not we have an actual # file. 
try: with open(filename, "r") as inputFile: self.parseFile(inputFile) pass except IOError: self.logError( "the file " + filename + " could not be opened for reading" ) elif not fnmatch.fnmatch( filename, os.path.basename(inspect.getfile(inspect.currentframe())) ): self.log("\nXXXXXXXXXX\nXX " + filename + "\nXXXXXXXXXX\n\n") try: with open(filename, "r") as theFile: output = "" for theLine in theFile: output += theLine print(output) self.log(output) pass except IOError: self.logError( "the file " + filename + " could not be opened for reading" ) else: self.log("\nXXXXXXXXXX\nXX " + filename + " --skipped--\nXXXXXXXXXX\n\n") # Parser function. ## # The function takes a .proto file object as input # parameter and modifies the contents into C++ style. # The modified data is printed into standard output. ## # @param inputFile Input file object # def parseFile(self, inputFile): # Go through the input file line by line. isEnum = False # This variable is here as a workaround for not getting extra line breaks (each line # ends with a line separator and print() method will add another one). # We will be adding lines into this var and then print the var out at # the end. theOutput = "" for line in inputFile: # Search for comment ("//") and add one more slash character ("/") to the comment # block to make Doxygen detect it. matchComment = re.search("//", line) # Search for semicolon and if one is found before comment, add a third slash character # ("/") and a smaller than ("<") chracter to the comment to make Doxygen detect it. matchSemicolon = re.search(";", line) if matchSemicolon is not None and ( matchComment is not None and matchSemicolon.start() < matchComment.start() ): line = ( line[: matchComment.start()] + "///<" + line[matchComment.end() :] ) elif matchSemicolon is not None and ( matchComment is not None and matchSemicolon.start() > matchComment.start() ): line = line.replace("//", "") elif matchComment is not None: line = line[: matchComment.start()] + "///" + line[matchComment.end() :] # Search for "enum" and if one is found before comment, # start changing all semicolons (";") to commas (","). matchEnum = re.search("enum", line) if matchEnum is not None and ( matchComment is None or matchEnum.start() < matchComment.start() ): isEnum = True # Search again for semicolon if we have detected an enum, and # replace semicolon with comma. if isEnum is True and re.search(";", line) is not None: matchSemicolon = re.search(";", line) line = ( line[: matchSemicolon.start()] + "," + line[matchSemicolon.end() :] ) # Search for a closing brace. matchClosingBrace = re.search("}", line) if isEnum is True and matchClosingBrace is not None: line = ( line[: matchClosingBrace.start()] + "};" + line[matchClosingBrace.end() :] ) isEnum = False elif isEnum is False and re.search("}", line) is not None: # Message (to be struct) ends => add semicolon so that it'll # be a proper C(++) message and Doxygen will handle it # correctly. line = ( line[: matchClosingBrace.start()] + "};" + line[matchClosingBrace.end() :] ) # Search for 'import' and replace it with '#include' unless # 'import' is behind a comment. 
matchMsg = re.search("message", line) if matchMsg is not None and ( matchComment is None or matchMsg.start() < matchComment.start() ): line = "struct" + line[: matchMsg.start()] + line[matchMsg.end() :] matchSrv = re.search("^service", line) if matchSrv is not None and ( matchComment is None or matchSrv.start() < matchComment.start() ): line = "namespace" + line[: matchSrv.start()] + line[matchSrv.end() :] matchImp = re.search("import", line) if matchImp is not None and ( matchComment is None or matchImp.start() < matchComment.start() ): line = "#include" + line[: matchImp.start()] + line[matchImp.end() :] else: theOutput += line # Search for 'stuct' and replace it with 'message' unless 'message' # is behind a comment. # Now that we've got all lines in the string let's split the lines and print out # one by one. # This is a workaround to get rid of extra empty line at the end which # print() method adds. lines = theOutput.splitlines() for line in lines: if len(line) > 0: print(line) # Our logger does not add extra line breaks so explicitly # adding one to make the log more readable. self.log(line + "\n") else: self.log("\n --- skipped empty line") # Writes @p string to log file. ## # logLevel must be #logAll or otherwise the logging is skipped. ## # @param string String to be written to log file. # def log(self, string): if self.logLevel >= self.logAll: with open(self.logFile, "a") as theFile: theFile.write(string) # Writes @p string to error log file. ## # logLevel must be #logError or #logAll or otherwise the logging is skipped. ## # @param string String to be written to error log file. # def logError(self, string): if self.logLevel >= self.logError: with open(self.errorLogFile, "a") as theFile: theFile.write(string) converter = proto2cpp() # Doxygen will give us the file names for filename in sys.argv[1:]: converter.handleFile(filename) # end of file
CompilerGym-development
compiler_gym/third_party/proto2cpp.py
"""This module defines an API for processing LLVM-IR with inst2vec.""" import pickle from typing import List import numpy as np from compiler_gym.third_party.inst2vec import inst2vec_preprocess from compiler_gym.util.runfiles_path import runfiles_path _PICKLED_VOCABULARY = runfiles_path( "compiler_gym/third_party/inst2vec/dictionary.pickle" ) _PICKLED_EMBEDDINGS = runfiles_path( "compiler_gym/third_party/inst2vec/embeddings.pickle" ) class Inst2vecEncoder: """An LLVM encoder for inst2vec.""" def __init__(self): # TODO(github.com/facebookresearch/CompilerGym/issues/122): Lazily # instantiate inst2vec encoder. with open(str(_PICKLED_VOCABULARY), "rb") as f: self.vocab = pickle.load(f) with open(str(_PICKLED_EMBEDDINGS), "rb") as f: self.embeddings = pickle.load(f) self.unknown_vocab_element = self.vocab["!UNK"] def preprocess(self, ir: str) -> List[str]: """Produce a list of pre-processed statements from an IR.""" lines = [[x] for x in ir.split("\n")] try: structs = inst2vec_preprocess.GetStructTypes(ir) for line in lines: for struct, definition in structs.items(): line[0] = line[0].replace(struct, definition) except ValueError: pass preprocessed_lines, _ = inst2vec_preprocess.preprocess(lines) preprocessed_texts = [ inst2vec_preprocess.PreprocessStatement(x[0]) if len(x) else "" for x in preprocessed_lines ] return [x for x in preprocessed_texts if x] def encode(self, preprocessed: List[str]) -> List[int]: """Produce embedding indices for a list of pre-processed statements.""" return [ self.vocab.get(statement, self.unknown_vocab_element) for statement in preprocessed ] def embed(self, encoded: List[int]) -> np.ndarray: """Produce a matrix of embeddings from a list of encoded statements.""" return np.vstack([self.embeddings[index] for index in encoded])
CompilerGym-development
compiler_gym/third_party/inst2vec/__init__.py
# NCC: Neural Code Comprehension # https://github.com/spcl/ncc # Copyright 2018 ETH Zurich # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, are permitted provided that the # following conditions are met: # 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following # disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote # products derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, # INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ============================================================================== # flake8: noqa """Helper variables and functions for regular expressions and statement tags""" import re ######################################################################################################################## # Regex manipulation: helper functions ######################################################################################################################## def any_of(possibilities, to_add=""): r""" Helper function for regex manipulation: Construct a regex representing "any of" the given possibilities :param possibilities: list of strings representing different word possibilities :param to_add: string to add at the beginning of each possibility (optional) :return: string corresponding to regex which represents any of the given possibilities r""" assert len(possibilities) > 0 s = r"(?:" if len(to_add) > 0: s += possibilities[0] + to_add + r" " else: s += possibilities[0] for i in range(len(possibilities) - 1): if len(to_add) > 0: s += r"|" + possibilities[i + 1] + to_add + r" " else: s += r"|" + possibilities[i + 1] return s + r")" ######################################################################################################################## # Regex manipulation: helper variables ######################################################################################################################## # Identifiers global_id = r'(?<!%")@[r"\w\d\.\-\_\$\\]+' local_id_no_perc = r'[r"\@\d\w\.\-\_\:]+' local_id = r"%" + local_id_no_perc local_or_global_id = r"(" + global_id + r"|" + local_id + r")" # Options and linkages linkage = any_of( [ r" private", r" external", r" internal", r" linkonce_odr", r" appending", r" external", r" internal", r" unnamed_addr", r" common", r" hidden", r" weak", r" linkonce", r" extern_weak", r" weak_odr", r" private", r" available_externally", r" local_unnamed_addr", r" thread_local", r" linker_private", ] ) # 
Immediate values immediate_value_ad_hoc = r"#[\d\w]+" immediate_value_true = r"true" immediate_value_false = r"false" immediate_value_bool = ( r"(?:" + immediate_value_true + r"|" + immediate_value_false + r")" ) immediate_value_int = r"(?<!\w)[-]?[0-9]+" immediate_value_float_sci = r"(?<!\w)[-]?[0-9]+\.[0-9]+(?:e\+?-?[0-9]+)?" immediate_value_float_hexa = r"(?<!\w)[-]?0[xX][hklmHKLM]?[A-Fa-f0-9]+" immediate_value_float = ( r"(?:" + immediate_value_float_sci + r"|" + immediate_value_float_hexa + r")" ) immediate_value_vector_bool = ( r"<i1 " + immediate_value_bool + r"(?:, i1 (?:" + immediate_value_bool + r"|undef))*>" ) immediate_value_vector_int = ( r"<i\d+ r" + immediate_value_int + r"(?:, i\d+ (?:" + immediate_value_int + r"|undef))*>" ) immediate_value_vector_float = ( r"<float " + immediate_value_float + r"(?:, float (?:" + immediate_value_float + r"|undef))*>" ) immediate_value_vector_double = ( r"<double " + immediate_value_float + r"(?:, double (?:" + immediate_value_float + r"|undef))*>" ) immediate_value_string = r'(?<!\w)c".+"' immediate_value_misc = r"(?:null|zeroinitializer)" immediate_value = any_of( [ immediate_value_true, immediate_value_false, immediate_value_int, immediate_value_float_sci, immediate_value_float_hexa, immediate_value_string, immediate_value_misc, ] ) immediate_value_undef = r"undef" immediate_value_or_undef = any_of( [ immediate_value_true, immediate_value_false, immediate_value_int, immediate_value_float_sci, immediate_value_float_hexa, immediate_value_string, immediate_value_misc, immediate_value_ad_hoc, immediate_value_undef, ] ) # Combos immediate_or_local_id = any_of( [ immediate_value_true, immediate_value_false, immediate_value_int, immediate_value_float_sci, immediate_value_float_hexa, immediate_value_vector_int, immediate_value_vector_float, immediate_value_vector_double, local_id, immediate_value_misc, ] ) immediate_or_local_id_or_undef = any_of( [ immediate_value_true, immediate_value_false, immediate_value_int, immediate_value_float_sci, immediate_value_float_hexa, immediate_value_vector_int, immediate_value_vector_float, immediate_value_vector_double, local_id, immediate_value_misc, immediate_value_undef, ] ) # Names of aggregate types # Lookahead so that names like '%struct.attribute_group**' won't be matched as just %struct.attribute struct_lookahead = r"(?=[\s,\*\]\}])" struct_name_add_on = r'(?:\([\w\d=]+\)")?' 
struct_name_without_lookahead = ( r'%[r"\@\d\w\.\-\_:]+(?:(?:<[r"\@\d\w\.\-\_:,<>\(\) \*]+>|\([r"\@\d\w\.\-\_:,<> \*]+\)|\w+)?::[r" \@\d\w\.\-\_:\)\(]*)*' + struct_name_add_on ) struct_name = struct_name_without_lookahead + struct_lookahead # Functions func_name = r"@[\"\w\d\._\$\\]+" func_call_pattern = r".* @[\w\d\._]+" func_call_pattern_or_bitcast = r"(.* @[\w\d\._]+|.*bitcast .* @[\w\d\._]+ to .*)" # new basic block start_basic_block = ( r"((?:<label>:)?(" + local_id_no_perc + r"):|; <label>:" + local_id_no_perc + r" )" ) # Types base_type = r"(?:i\d+|double|float|opaque)\**" first_class_types = [ r"i\d+", r"half", r"float", r"double", r"fp_128", r"x86_fp80", r"ppc_fp128", r"<%ID>", ] first_class_type = any_of(first_class_types) + r"\**" base_type_or_struct_name = any_of([base_type, struct_name_without_lookahead]) ptr_to_base_type = base_type + r"\*+" vector_type = r"<\d+ x " + base_type + r">" ptr_to_vector_type = vector_type + r"\*+" array_type = r"\[\d+ x " + base_type + r"\]" ptr_to_array_type = array_type + r"\*+" array_of_array_type = r"\[\d+ x " + r"\[\d+ x " + base_type + r"\]" + r"\]" struct = struct_name_without_lookahead ptr_to_struct = struct + r"\*+" function_type = ( base_type + r" \(" + any_of([base_type, vector_type, array_type, "..."], ",") + r"*" + any_of([base_type, vector_type, array_type, "..."]) + r"\)\**" ) any_type = any_of( [ base_type, ptr_to_base_type, vector_type, ptr_to_vector_type, array_type, ptr_to_array_type, ] ) any_type_or_struct = any_of( [ base_type, ptr_to_base_type, vector_type, ptr_to_vector_type, array_type, ptr_to_array_type, ptr_to_struct, ] ) structure_entry = any_of( [ base_type, vector_type, array_type, array_of_array_type, function_type, r"{ .* }\**", ] ) structure_entry_with_comma = any_of( [base_type, vector_type, array_type, array_of_array_type, function_type], "," ) literal_structure = ( r"(<?{ " + structure_entry_with_comma + r"*" + structure_entry + r" }>?|{})" ) # Tokens unknown_token = r"!UNK" # starts with '!' 
to guarantee it will appear first in the alphabetically sorted vocabulary ######################################################################################################################## # Tags for clustering statements (by statement semantics) and helper functions ######################################################################################################################## # List of families of operations llvm_IR_stmt_families = [ # [r"tag level 1", r"tag level 2", r"tag level 3", r"regex" ] [r"unknown token", "unknown token", "unknown token", "!UNK"], [r"integer arithmetic", "addition", "add integers", "<%ID> = add .*"], [r"integer arithmetic", "subtraction", "subtract integers", "<%ID> = sub .*"], [ r"integer arithmetic", r"multiplication", r"multiply integers", r"<%ID> = mul .*", ], [ r"integer arithmetic", r"division", r"unsigned integer division", r"<%ID> = udiv .*", ], [ r"integer arithmetic", r"division", r"signed integer division", r"<%ID> = sdiv .*", ], [ r"integer arithmetic", r"remainder", r"remainder of signed div", r"<%ID> = srem .*", ], [ r"integer arithmetic", r"remainder", r"remainder of unsigned div.", r"<%ID> = urem .*", ], [r"floating-point arithmetic", "addition", "add floats", "<%ID> = fadd .*"], [ r"floating-point arithmetic", r"subtraction", r"subtract floats", r"<%ID> = fsub .*", ], [ r"floating-point arithmetic", r"multiplication", r"multiply floats", r"<%ID> = fmul .*", ], [r"floating-point arithmetic", "division", "divide floats", "<%ID> = fdiv .*"], [r"bitwise arithmetic", "and", "and", "<%ID> = and .*"], [r"bitwise arithmetic", "or", "or", "<%ID> = or .*"], [r"bitwise arithmetic", "xor", "xor", "<%ID> = xor .*"], [r"bitwise arithmetic", "shift left", "shift left", "<%ID> = shl .*"], [r"bitwise arithmetic", "arithmetic shift right", "ashr", "<%ID> = ashr .*"], [ r"bitwise arithmetic", r"logical shift right", r"logical shift right", r"<%ID> = lshr .*", ], [ r"comparison operation", r"compare integers", r"compare integers", r"<%ID> = icmp .*", ], [ r"comparison operation", r"compare floats", r"compare floats", r"<%ID> = fcmp .*", ], [ r"conversion operation", r"bitcast", r"bitcast single val", r"<%ID> = bitcast (i\d+|float|double|x86_fp80|opaque) .* to .*", ], [ r"conversion operation", r"bitcast", r"bitcast single val*", r"<%ID> = bitcast (i\d+|float|double|x86_fp80|opaque)\* .* to .*", ], [ r"conversion operation", r"bitcast", r"bitcast single val**", r"<%ID> = bitcast (i\d+|float|double|x86_fp80|opaque)\*\* .* to .*", ], [ r"conversion operation", r"bitcast", r"bitcast single val***", r"<%ID> = bitcast (i\d+|float|double|x86_fp80|opaque)\*\*\* .* to .*", ], [ r"conversion operation", r"bitcast", r"bitcast single val****", r"<%ID> = bitcast (i\d+|float|double|x86_fp80|opaque)\*\*\*\* .* to .*", ], [ r"conversion operation", r"bitcast", r"bitcast array", r"<%ID> = bitcast \[\d.* to .*", ], [ r"conversion operation", r"bitcast", r"bitcast vector", r"<%ID> = bitcast <\d.* to .*", ], [ r"conversion operation", r"bitcast", r"bitcast structure", r'<%ID> = bitcast (%"|<{|<%|{).* to .*', ], [r"conversion operation", "bitcast", "bitcast void", "<%ID> = bitcast void "], [ r"conversion operation", r"extension/truncation", r"extend float", r"<%ID> = fpext .*", ], [ r"conversion operation", r"extension/truncation", r"truncate floats", r"<%ID> = fptrunc .*", ], [ r"conversion operation", r"extension/truncation", r"sign extend ints", r"<%ID> = sext .*", ], [ r"conversion operation", r"extension/truncation", r"truncate int to ... 
", r"<%ID> = trunc .* to .*", ], [ r"conversion operation", r"extension/truncation", r"zero extend integers", r"<%ID> = zext .*", ], [ r"conversion operation", r"convert", r"convert signed integers to... ", r"<%ID> = sitofp .*", ], [ r"conversion operation", r"convert", r"convert unsigned integer to... ", r"<%ID> = uitofp .*", ], [ r"conversion operation", r"convert int to ptr", r"convert int to ptr", r"<%ID> = inttoptr .*", ], [ r"conversion operation", r"convert ptr to int", r"convert ptr to int", r"<%ID> = ptrtoint .*", ], [ r"conversion operation", r"convert floats", r"convert float to sint", r"<%ID> = fptosi .*", ], [ r"conversion operation", r"convert floats", r"convert float to uint", r"<%ID> = fptoui .*", ], [r"control flow", "phi", "phi", "<%ID> = phi .*"], [ r"control flow", r"switch", r"jump table line", r"i\d{1,2} <(INT|FLOAT)>, label <%ID>", ], [r"control flow", "select", "select", "<%ID> = select .*"], [r"control flow", "invoke", "invoke and ret type", "<%ID> = invoke .*"], [r"control flow", "invoke", "invoke void", "invoke (fastcc )?void .*"], [r"control flow", "branch", "branch conditional", "br i1 .*"], [r"control flow", "branch", "branch unconditional", "br label .*"], [r"control flow", "branch", "branch indirect", "indirectbr .*"], [r"control flow", "control flow", "switch", "switch .*"], [r"control flow", "return", "return", "ret .*"], [r"control flow", "resume", "resume", "resume .*"], [r"control flow", "unreachable", "unreachable", "unreachable.*"], [r"control flow", "exception handling", "catch block", "catch .*"], [r"control flow", "exception handling", "cleanup clause", "cleanup"], [ r"control flow", r"exception handling", r"landingpad for exceptions", r"<%ID> = landingpad .", ], [ r"function", r"function call", r"sqrt (llvm-intrinsic)", r"<%ID> = (tail |musttail |notail )?call (fast |)?(i\d+|float|double|x86_fp80|<%ID>|<\d x float>|<\d x double>) @(llvm|llvm\..*)\.sqrt.*", ], [ r"function", r"function call", r"fabs (llvm-intr.)", r"<%ID> = (tail |musttail |notail )?call (fast |)?(i\d+|float|double|x86_fp80|<%ID>|<\d x float>|<\d x double>|<\d x i\d+>) @(llvm|llvm\..*)\.fabs.*", ], [ r"function", r"function call", r"max (llvm-intr.)", r"<%ID> = (tail |musttail |notail )?call (fast |)?(i\d+|float|double|x86_fp80|<%ID>|<\d x float>|<\d x double>|<\d x i\d+>) @(llvm|llvm\..*)\.max.*", ], [ r"function", r"function call", r"min (llvm-intr.)", r"<%ID> = (tail |musttail |notail )?call (fast |)?(i\d+|float|double|x86_fp80|<%ID>|<\d x float>|<\d x double>|<\d x i\d+>) @(llvm|llvm\..*)\.min.*", ], [ r"function", r"function call", r"fma (llvm-intr.)", r"<%ID> = (tail |musttail |notail )?call (fast |)?(i\d+|float|double|x86_fp80|<%ID>|<\d x float>|<\d x double>|<\d x i\d+>) @(llvm|llvm\..*)\.fma.*", ], [ r"function", r"function call", r"phadd (llvm-intr.)", r"<%ID> = (tail |musttail |notail )?call (fast |)?(i\d+|float|double|x86_fp80|<%ID>|<\d x float>|<\d x double>|<\d x i\d+>) @(llvm|llvm\..*)\.phadd.*", ], [ r"function", r"function call", r"pabs (llvm-intr.)", r"<%ID> = (tail |musttail |notail )?call (fast |)?(i\d+|float|double|x86_fp80|<%ID>|<\d x float>|<\d x double>|<\d x i\d+>) @(llvm|llvm\..*)\.pabs.*", ], [ r"function", r"function call", r"pmulu (llvm-intr.)", r"<%ID> = (tail |musttail |notail )?call (fast |)?(i\d+|float|double|x86_fp80|<%ID>|<\d x float>|<\d x double>|<\d x i\d+>) @(llvm|llvm\..*)\.pmulu.*", ], [ r"function", r"function call", r"umul (llvm-intr.)", r"<%ID> = (tail |musttail |notail )?call {.*} @llvm\.umul.*", ], [ r"function", r"function call", 
r"prefetch (llvm-intr.)", r"(tail |musttail |notail )?call void @llvm\.prefetch.*", ], [ r"function", r"function call", r"trap (llvm-intr.)", r"(tail |musttail |notail )?call void @llvm\.trap.*", ], [r"function", "func decl / def", "function declaration", "declare .*"], [r"function", "func decl / def", "function definition", "define .*"], [ r"function", r"function call", r"function call void", r"(tail |musttail |notail )?call( \w+)? void [\w\)\(\}\{\.\,\*\d\[\]\s<>%]*(<[@%]ID>\(|.*bitcast )", ], [ r"function", r"function call", r"function call mem lifetime", r"(tail |musttail |notail )?call( \w+)? void ([\w)(\.\,\*\d ])*@llvm\.lifetime.*", ], [ r"function", r"function call", r"function call mem copy", r"(tail |musttail |notail )?call( \w+)? void ([\w)(\.\,\*\d ])*@llvm\.memcpy\..*", ], [ r"function", r"function call", r"function call mem set", r"(tail |musttail |notail )?call( \w+)? void ([\w)(\.\,\*\d ])*@llvm\.memset\..*", ], [ r"function", r"function call", r"function call single val", r"<%ID> = (tail |musttail |notail )?call[^{]* (i\d+|float|double|x86_fp80|<\d+ x (i\d+|float|double)>) (.*<[@%]ID>\(|(\(.*\) )?bitcast ).*", ], [ r"function", r"function call", r"function call single val*", r"<%ID> = (tail |musttail |notail )?call[^{]* (i\d+|float|double|x86_fp80)\* (.*<[@%]ID>\(|\(.*\) bitcast ).*", ], [ r"function", r"function call", r"function call single val**", r"<%ID> = (tail |musttail |notail )?call[^{]* (i\d+|float|double|x86_fp80)\*\* (.*<[@%]ID>\(|\(.*\) bitcast ).*", ], [ r"function", r"function call", r"function call array", r"<%ID> = (tail |musttail |notail )?call[^{]* \[.*\] (\(.*\) )?(<[@%]ID>\(|\(.*\) bitcast )", ], [ r"function", r"function call", r"function call array*", r"<%ID> = (tail |musttail |notail )?call[^{]* \[.*\]\* (\(.*\) )?(<[@%]ID>\(|\(.*\) bitcast )", ], [ r"function", r"function call", r"function call array**", r"<%ID> = (tail |musttail |notail )?call[^{]* \[.*\]\*\* (\(.*\) )?(<[@%]ID>\(|\(.*\) bitcast )", ], [ r"function", r"function call", r"function call structure", r"<%ID> = (tail |musttail |notail )?call[^{]* (\{ .* \}[\w\_]*|<?\{ .* \}>?|opaque|\{\}|<%ID>) (\(.*\)\*? )?(<[@%]ID>\(|\(.*\) bitcast )", ], [ r"function", r"function call", r"function call structure*", r"<%ID> = (tail |musttail |notail )?call[^{]* (\{ .* \}[\w\_]*|<?\{ .* \}>?|opaque|\{\}|<%ID>)\* (\(.*\)\*? )?(<[@%]ID>\(|\(.*\) bitcast )", ], [ r"function", r"function call", r"function call structure**", r"<%ID> = (tail |musttail |notail )?call[^{]* (\{ .* \}[\w\_]*|<?\{ .* \}>?|opaque|\{\}|<%ID>)\*\* (\(.*\)\*? )?(<[@%]ID>\(|\(.*\) bitcast )", ], [ r"function", r"function call", r"function call structure***", r"<%ID> = (tail |musttail |notail )?call[^{]* (\{ .* \}[\w\_]*|<?\{ .* \}>?|opaque|\{\}|<%ID>)\*\*\* (\(.*\)\*? )?(<[@%]ID>\(|\(.*\) bitcast )", ], [ r"function", r"function call", r"function call asm value", r"<%ID> = (tail |musttail |notail )?call.* asm .*", ], [ r"function", r"function call", r"function call asm void", r"(tail |musttail |notail )?call void asm .*", ], [ r"function", r"function call", r"function call function", r"<%ID> = (tail |musttail |notail )?call[^{]* void \([^\(\)]*\)\** <[@%]ID>\(", ], [ r"global variables", r"glob. var. 
definition", r"???", r"<@ID> = (?!.*constant)(?!.*alias).*", ], [r"global variables", "constant definition", "???", "<@ID> = .*constant .*"], [ r"memory access", r"load from memory", r"load structure", r'<%ID> = load (\w* )?(%"|<\{|\{ <|\{ \[|\{ |<%|opaque).*', ], [ r"memory access", r"load from memory", r"load single val", r"<%ID> = load (\w* )?(i\d+|float|double|x86_fp80)[, ].*", ], [ r"memory access", r"load from memory", r"load single val*", r"<%ID> = load (\w* )?(i\d+|float|double|x86_fp80)\*[, ].*", ], [ r"memory access", r"load from memory", r"load single val**", r"<%ID> = load (\w* )?(i\d+|float|double|x86_fp80)\*\*[, ].*", ], [ r"memory access", r"load from memory", r"load single val***", r"<%ID> = load (\w* )?(i\d+|float|double|x86_fp80)\*\*\*[, ].*", ], [ r"memory access", r"load from memory", r"load single val****", r"<%ID> = load (\w* )?(i\d+|float|double|x86_fp80)\*\*\*\*[, ].*", ], [ r"memory access", r"load from memory", r"load single val*****", r"<%ID> = load (\w* )?(i\d+|float|double|x86_fp80)\*\*\*\*\*[, ].*", ], [ r"memory access", r"load from memory", r"load single val******", r"<%ID> = load (\w* )?(i\d+|float|double|x86_fp80)\*\*\*\*\*\*[, ].*", ], [ r"memory access", r"load from memory", r"load single val*******", r"<%ID> = load (\w* )?(i\d+|float|double|x86_fp80)\*\*\*\*\*\*\*[, ].*", ], [ r"memory access", r"load from memory", r"load vector", r"<%ID> = load <\d+ x .*", ], ["memory access", "load from memory", "load array", r"<%ID> = load \[\d.*"], [ r"memory access", r"load from memory", r"load fction ptr", r"<%ID> = load void \(", ], [r"memory access", "store", "store", "store.*"], [r"memory addressing", "GEP", "GEP", r"<%ID> = getelementptr .*"], [ r"memory allocation", r"allocate on stack", r"allocate structure", r'<%ID> = alloca (%"|<{|<%|{ |opaque).*', ], [ r"memory allocation", r"allocate on stack", r"allocate vector", r"<%ID> = alloca <\d.*", ], [ r"memory allocation", r"allocate on stack", r"allocate array", r"<%ID> = alloca \[\d.*", ], [ r"memory allocation", r"allocate on stack", r"allocate single value", r"<%ID> = alloca (double|float|i\d{1,3})\*?.*", ], [ r"memory allocation", r"allocate on stack", r"allocate void", r"<%ID> = alloca void \(.*", ], [ r"memory atomics", r"atomic memory modify", r"atomicrw xchg", r"<%ID> = atomicrmw.* xchg .*", ], [ r"memory atomics", r"atomic memory modify", r"atomicrw add", r"<%ID> = atomicrmw.* add .*", ], [ r"memory atomics", r"atomic memory modify", r"atomicrw sub", r"<%ID> = atomicrmw.* sub .*", ], [ r"memory atomics", r"atomic memory modify", r"atomicrw or", r"<%ID> = atomicrmw.* or .*", ], [ r"memory atomics", r"atomic compare exchange", r"cmpxchg single val", r"<%ID> = cmpxchg (weak )?(i\d+|float|double|x86_fp80)\*", ], [ r"non-instruction", r"label", r"label declaration", r"; <label>:.*(\s+; preds = <LABEL>)?", ], [ r"non-instruction", r"label", r"label declaration", r"<LABEL>:( ; preds = <LABEL>)?", ], [ r"value aggregation", r"extract value", r"extract value", r"<%ID> = extractvalue .*", ], [ r"value aggregation", r"insert value", r"insert value", r"<%ID> = insertvalue .*", ], [ r"vector operation", r"insert element", r"insert element", r"<%ID> = insertelement .*", ], [ r"vector operation", r"extract element", r"extract element", r"<%ID> = extractelement .*", ], [ r"vector operation", r"shuffle vector", r"shuffle vector", r"<%ID> = shufflevector .*", ], ] # Helper functions for exploring llvm_IR_families def get_list_tag_level_1(): r""" Get the list of all level-1 tags in the data structure llvm_IR_families 
:return: list containing strings corresponding to all level 1 tags r""" list_tags = list() for fam in llvm_IR_stmt_families: list_tags.append(fam[0]) return list(set(list_tags)) def get_list_tag_level_2(tag_level_1="all"): r""" Get the list of all level-2 tags in the data structure llvm_IR_families corresponding to the string given as an input, or absolutely all of them if input == r'all' :param tag_level_1: string containing the level-1 tag to query, or 'all' :return: list of strings r""" # Make sure the input parameter is valid assert tag_level_1 in get_list_tag_level_1() or tag_level_1 == r"all", ( tag_level_1 + r" invalid" ) list_tags = list() if tag_level_1 == r"all": for fam in llvm_IR_stmt_families: list_tags.append(fam[1]) list_tags = sorted(set(list_tags)) else: for fam in llvm_IR_stmt_families: if fam[0] == tag_level_1: list_tags.append(fam[1]) return list(set(list_tags)) ######################################################################################################################## # Tags for clustering statements (by statement type) ######################################################################################################################## # Helper lists types_int = [r"i1", "i8", "i16", "i32", "i64"] types_flpt = [r"half", "float", "double", "fp128", "x86_fp80", "ppc_fp128"] fast_math_flag = [ r"", r"nnan ", r"ninf ", r"nsz ", r"arcp ", r"contract ", r"afn ", r"reassoc ", r"fast ", ] opt_load = [r"atomic ", "volatile "] opt_addsubmul = [r"nsw ", "nuw ", "nuw nsw "] opt_usdiv = [r"", "exact "] opt_icmp = [ r"eq ", r"ne ", r"ugt ", r"uge ", r"ult ", r"ule ", r"sgt ", r"sge ", r"slt ", r"sle ", ] opt_fcmp = [ r"false ", r"oeq ", r"ogt ", r"oge ", r"olt ", r"olt ", r"ole ", r"one ", r"ord ", r"ueq ", r"ugt ", r"uge ", r"ult ", r"ule ", r"une ", r"uno ", r"true ", ] opt_define = [ r"", r"linkonce_odr ", r"linkonce_odr ", r"zeroext ", r"dereferenceable\(\d+\) ", r"hidden ", r"internal ", r"nonnull ", r"weak_odr ", r"fastcc ", r"noalias ", r"signext ", r"spir_kernel ", ] opt_invoke = [ r"", r"dereferenceable\(\d+\) ", r"noalias ", r"fast ", r"zeroext ", r"signext ", r"fastcc ", ] opt_GEP = [r"", "inbounds "] # Helper functions def any_of(possibilities, to_add=""): r""" Construct a regex representing "any of" the given possibilities :param possibilities: list of strings representing different word possibilities :param to_add: string to add at the beginning of each possibility (optional) :return: string corresponding to regex which represents any of the given possibilities r""" assert len(possibilities) > 0 s = r"(" if len(to_add) > 0: s += possibilities[0] + to_add + r" " else: s += possibilities[0] for i in range(len(possibilities) - 1): if len(to_add) > 0: s += r"|" + possibilities[i + 1] + to_add + r" " else: s += r"|" + possibilities[i + 1] return s + r")" # Main tags llvm_IR_stmt_tags = [ # ['regex' r'tag' r'tag general' [ r"<@ID> = (?!.*constant)(?!.*alias).*", r"global definition", r"global variable definition", ], [r"<@ID> = .*constant .*", "global const. 
def.", "global variable definition"], [ r"<%ID> = add " + any_of(opt_addsubmul) + r"?i1 .*", r"i1 operation", r"int operation", ], [ r"<%ID> = add " + any_of(opt_addsubmul) + r"?<\d+ x i1> .*", r"<d x i1> operation", r"<d x int> operation", ], [ r"<%ID> = add " + any_of(opt_addsubmul) + r"?i2 .*", r"i2 operation", r"int operation", ], [ r"<%ID> = add " + any_of(opt_addsubmul) + r"?<\d+ x i2> .*", r"<d x i2> operation", r"<d x int> operation", ], [ r"<%ID> = add " + any_of(opt_addsubmul) + r"?i4 .*", r"i4 operation", r"int operation", ], [ r"<%ID> = add " + any_of(opt_addsubmul) + r"?<\d+ x i4> .*", r"<d x i4> operation", r"<d x int> operation", ], [ r"<%ID> = add " + any_of(opt_addsubmul) + r"?i8 .*", r"i8 operation", r"int operation", ], [ r"<%ID> = add " + any_of(opt_addsubmul) + r"?<\d+ x i8> .*", r"<d x i8> operation", r"<d x int> operation", ], [ r"<%ID> = add " + any_of(opt_addsubmul) + r"?i16 .*", r"i16 operation", r"int operation", ], [ r"<%ID> = add " + any_of(opt_addsubmul) + r"?<\d+ x i16> .*", r"<d x i16> operation", r"<d x int> operation", ], [ r"<%ID> = add " + any_of(opt_addsubmul) + r"?i32 .*", r"i32 operation", r"int operation", ], [ r"<%ID> = add " + any_of(opt_addsubmul) + r"?<\d+ x i32> .*", r"<d x i32> operation", r"<d x int> operation", ], [ r"<%ID> = add " + any_of(opt_addsubmul) + r"?i64 .*", r"i64 operation", r"int operation", ], [ r"<%ID> = add " + any_of(opt_addsubmul) + r"?<\d+ x i64> .*", r"<d x i64> operation", r"<d x int> operation", ], [ r"<%ID> = add " + any_of(opt_addsubmul) + r"?i128 .*", r"i128 operation", r"int operation", ], [ r"<%ID> = add " + any_of(opt_addsubmul) + r"?<\d+ x i128> .*", r"<d x i128> operation", r"<d x int> operation", ], [ r"<%ID> = sub " + any_of(opt_addsubmul) + r"?i1 .*", r"i1 operation", r"int operation", ], [ r"<%ID> = sub " + any_of(opt_addsubmul) + r"?<\d+ x i1> .*", r"<d x i1> operation", r"<d x int> operation", ], [ r"<%ID> = sub " + any_of(opt_addsubmul) + r"?i2 .*", r"i2 operation", r"int operation", ], [ r"<%ID> = sub " + any_of(opt_addsubmul) + r"?<\d+ x i2> .*", r"<d x i2> operation", r"<d x int> operation", ], [ r"<%ID> = sub " + any_of(opt_addsubmul) + r"?i4 .*", r"i4 operation", r"int operation", ], [ r"<%ID> = sub " + any_of(opt_addsubmul) + r"?<\d+ x i4> .*", r"<d x i4> operation", r"<d x int> operation", ], [ r"<%ID> = sub " + any_of(opt_addsubmul) + r"?i8 .*", r"i8 operation", r"int operation", ], [ r"<%ID> = sub " + any_of(opt_addsubmul) + r"?<\d+ x i8> .*", r"<d x i8> operation", r"<d x int> operation", ], [ r"<%ID> = sub " + any_of(opt_addsubmul) + r"?i16 .*", r"i16 operation", r"int operation", ], [ r"<%ID> = sub " + any_of(opt_addsubmul) + r"?<\d+ x i16> .*", r"<d x i16> operation", r"<d x int> operation", ], [ r"<%ID> = sub " + any_of(opt_addsubmul) + r"?i32 .*", r"i32 operation", r"int operation", ], [ r"<%ID> = sub " + any_of(opt_addsubmul) + r"?<\d+ x i32> .*", r"<d x i32> operation", r"<d x int> operation", ], [ r"<%ID> = sub " + any_of(opt_addsubmul) + r"?i64 .*", r"i64 operation", r"int operation", ], [ r"<%ID> = sub " + any_of(opt_addsubmul) + r"?<\d+ x i64> .*", r"<d x i64> operation", r"<d x int> operation", ], [ r"<%ID> = sub " + any_of(opt_addsubmul) + r"?i128 .*", r"i128 operation", r"int operation", ], [ r"<%ID> = sub " + any_of(opt_addsubmul) + r"?<\d+ x i128> .*", r"<d x i128> operation", r"<d x int> operation", ], [ r"<%ID> = mul " + any_of(opt_addsubmul) + r"?i1 .*", r"i1 operation", r"int operation", ], [ r"<%ID> = mul " + any_of(opt_addsubmul) + r"?<\d+ x i1> .*", r"<d x i1> operation", 
r"<d x int> operation", ], [ r"<%ID> = mul " + any_of(opt_addsubmul) + r"?i2 .*", r"i2 operation", r"int operation", ], [ r"<%ID> = mul " + any_of(opt_addsubmul) + r"?<\d+ x i2> .*", r"<d x i2> operation", r"<d x int> operation", ], [ r"<%ID> = mul " + any_of(opt_addsubmul) + r"?i4 .*", r"i4 operation", r"int operation", ], [ r"<%ID> = mul " + any_of(opt_addsubmul) + r"?<\d+ x i4> .*", r"<d x i4> operation", r"<d x int> operation", ], [ r"<%ID> = mul " + any_of(opt_addsubmul) + r"?i8 .*", r"i8 operation", r"int operation", ], [ r"<%ID> = mul " + any_of(opt_addsubmul) + r"?<\d+ x i8> .*", r"<d x i8> operation", r"<d x int> operation", ], [ r"<%ID> = mul " + any_of(opt_addsubmul) + r"?i16 .*", r"i16 operation", r"int operation", ], [ r"<%ID> = mul " + any_of(opt_addsubmul) + r"?<\d+ x i16> .*", r"<d x i16> operation", r"<d x int> operation", ], [ r"<%ID> = mul " + any_of(opt_addsubmul) + r"?i32 .*", r"i32 operation", r"int operation", ], [ r"<%ID> = mul " + any_of(opt_addsubmul) + r"?<\d+ x i32> .*", r"<d x i32> operation", r"<d x int> operation", ], [ r"<%ID> = mul " + any_of(opt_addsubmul) + r"?i64 .*", r"i64 operation", r"int operation", ], [ r"<%ID> = mul " + any_of(opt_addsubmul) + r"?<\d+ x i64> .*", r"<d x i64> operation", r"<d x int> operation", ], [ r"<%ID> = mul " + any_of(opt_addsubmul) + r"?i128 .*", r"i128 operation", r"int operation", ], [ r"<%ID> = mul " + any_of(opt_addsubmul) + r"?<\d+ x i128> .*", r"<d x i128> operation", r"<d x int> operation", ], [ r"<%ID> = udiv " + any_of(opt_usdiv) + r"?i1 .*", r"i1 operation", r"int operation", ], [ r"<%ID> = udiv " + any_of(opt_usdiv) + r"?<\d+ x i1> .*", r"<d x i1> operation", r"<d x int> operation", ], [ r"<%ID> = udiv " + any_of(opt_usdiv) + r"?i2 .*", r"i2 operation", r"int operation", ], [ r"<%ID> = udiv " + any_of(opt_usdiv) + r"?<\d+ x i2> .*", r"<d x i2> operation", r"<d x int> operation", ], [ r"<%ID> = udiv " + any_of(opt_usdiv) + r"?i4 .*", r"i4 operation", r"int operation", ], [ r"<%ID> = udiv " + any_of(opt_usdiv) + r"?<\d+ x i4> .*", r"<d x i4> operation", r"<d x int> operation", ], [ r"<%ID> = udiv " + any_of(opt_usdiv) + r"?i8 .*", r"i8 operation", r"int operation", ], [ r"<%ID> = udiv " + any_of(opt_usdiv) + r"?<\d+ x i8> .*", r"<d x i8> operation", r"<d x int> operation", ], [ r"<%ID> = udiv " + any_of(opt_usdiv) + r"?i16 .*", r"i16 operation", r"int operation", ], [ r"<%ID> = udiv " + any_of(opt_usdiv) + r"?<\d+ x i16> .*", r"<d x i16> operation", r"<d x int> operation", ], [ r"<%ID> = udiv " + any_of(opt_usdiv) + r"?i32 .*", r"i32 operation", r"int operation", ], [ r"<%ID> = udiv " + any_of(opt_usdiv) + r"?<\d+ x i32> .*", r"<d x i32> operation", r"<d x int> operation", ], [ r"<%ID> = udiv " + any_of(opt_usdiv) + r"?i64 .*", r"i64 operation", r"int operation", ], [ r"<%ID> = udiv " + any_of(opt_usdiv) + r"?<\d+ x i64> .*", r"<d x i64> operation", r"<d x int> operation", ], [ r"<%ID> = udiv " + any_of(opt_usdiv) + r"?i128 .*", r"i128 operation", r"int operation", ], [ r"<%ID> = udiv " + any_of(opt_usdiv) + r"?<\d+ x i128> .*", r"<d x i128> operation", r"<d x int> operation", ], [ r"<%ID> = sdiv " + any_of(opt_usdiv) + r"?i1 .*", r"i1 operation", r"int operation", ], [ r"<%ID> = sdiv " + any_of(opt_usdiv) + r"?<\d+ x i1> .*", r"<d x i1> operation", r"<d x int> operation", ], [ r"<%ID> = sdiv " + any_of(opt_usdiv) + r"?i2 .*", r"i2 operation", r"int operation", ], [ r"<%ID> = sdiv " + any_of(opt_usdiv) + r"?<\d+ x i2> .*", r"<d x i2> operation", r"<d x int> operation", ], [ r"<%ID> = sdiv " + any_of(opt_usdiv) + 
r"?i4 .*", r"i4 operation", r"int operation", ], [ r"<%ID> = sdiv " + any_of(opt_usdiv) + r"?<\d+ x i4> .*", r"<d x i4> operation", r"<d x int> operation", ], [ r"<%ID> = sdiv " + any_of(opt_usdiv) + r"?i8 .*", r"i8 operation", r"int operation", ], [ r"<%ID> = sdiv " + any_of(opt_usdiv) + r"?<\d+ x i8> .*", r"<d x i8> operation", r"<d x int> operation", ], [ r"<%ID> = sdiv " + any_of(opt_usdiv) + r"?i16 .*", r"i16 operation", r"int operation", ], [ r"<%ID> = sdiv " + any_of(opt_usdiv) + r"?<\d+ x i16> .*", r"<d x i16> operation", r"<d x int> operation", ], [ r"<%ID> = sdiv " + any_of(opt_usdiv) + r"?i32 .*", r"i32 operation", r"int operation", ], [ r"<%ID> = sdiv " + any_of(opt_usdiv) + r"?<\d+ x i32> .*", r"<d x i32> operation", r"<d x int> operation", ], [ r"<%ID> = sdiv " + any_of(opt_usdiv) + r"?i64 .*", r"i64 operation", r"int operation", ], [ r"<%ID> = sdiv " + any_of(opt_usdiv) + r"?<\d+ x i64> .*", r"<d x i64> operation", r"<d x int> operation", ], [ r"<%ID> = sdiv " + any_of(opt_usdiv) + r"?i128 .*", r"i128 operation", r"int operation", ], [ r"<%ID> = sdiv " + any_of(opt_usdiv) + r"?<\d+ x i128> .*", r"<d x i128> operation", r"<d x int> operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?<%ID> .*", r"struct operation", r"int operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?<%ID>\* .*", r"struct* operation", r"int operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?<%ID>\*\* .*", r"struct** operation", r"int operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?<%ID>\*\*\* .*", r"struct*** operation", r"int operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?i1 .*", r"i1 operation", r"int operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?<\d+ x i1> .*", r"<d x i1> operation", r"<d x int> operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?i2 .*", r"i2 operation", r"int operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?<\d+ x i2> .*", r"<d x i2> operation", r"<d x int> operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?i4 .*", r"i4 operation", r"int operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?<\d+ x i4> .*", r"<d x i4> operation", r"<d x int> operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?i8 .*", r"i8 operation", r"int operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?<\d+ x i8> .*", r"<d x i8> operation", r"<d x int> operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?i16 .*", r"i16 operation", r"int operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?<\d+ x i16> .*", r"<d x i16> operation", r"<d x int> operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?i24 .*", r"i24 operation", r"int operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?<\d+ x i24> .*", r"<d x i24> operation", r"<d x int> operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?i32 .*", r"i32 operation", r"int operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?<\d+ x i32> .*", r"<d x i32> operation", r"<d x int> operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?i40 .*", r"i40 operation", r"int operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?<\d+ x i40> .*", r"<d x i40> operation", r"<d x int> operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?i64 .*", r"i64 operation", r"int operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?<\d+ x i64> .*", r"<d x i64> operation", r"<d x int> operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?i128 .*", r"i128 operation", r"int operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?<\d+ x i128> .*", r"<d x i128> 
operation", r"<d x int> operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?i1\* .*", r"i1* operation", r"int* operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?i2\* .*", r"i2* operation", r"int* operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?i4\* .*", r"i4* operation", r"int* operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?i8\* .*", r"i8* operation", r"int* operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?i16\* .*", r"i16* operation", r"int* operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?i32\* .*", r"i32* operation", r"int* operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?i40\* .*", r"i40* operation", r"int* operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?i64\* .*", r"i64* operation", r"int* operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?i128\* .*", r"i128* operation", r"int* operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?x86_fp80\* .*", r"float* operation", r"floating point* operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?float\* .*", r"float* operation", r"floating point* operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?double\* .*", r"double* operation", r"floating point* operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?i1\*\* .*", r"i1** operation", r"int** operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?i2\*\* .*", r"i2** operation", r"int** operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?i4\*\* .*", r"i4** operation", r"int** operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?i8\*\* .*", r"i8** operation", r"int** operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?i16\*\* .*", r"i16** operation", r"int** operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?i32\*\* .*", r"i32** operation", r"int** operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?i40\*\* .*", r"i40** operation", r"int** operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?i64\*\* .*", r"i64** operation", r"int** operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?i128\*\* .*", r"i128** operation", r"int** operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?x86_fp80\*\* .*", r"float** operation", r"floating point** operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?float\*\* .*", r"float** operation", r"floating point** operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?double\*\* .*", r"double** operation", r"floating point** operation", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?<%ID>\* .*", r"struct/class op", r"struct/class op", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r'?(%"|opaque).*', r"struct/class op", r"struct/class op", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?<?{.*", r"struct/class op", r"struct/class op", ], [ r"<%ID> = icmp " + any_of(opt_icmp) + r"?void \(.*", r"function op", r"struct/class op", ], [r"<%ID> = srem i1 .*", "i1 operation", "int operation"], [r"<%ID> = srem <\d+ x i1> .*", "<d x i1> operation", "<d x int> operation"], [r"<%ID> = srem i2 .*", "i2 operation", "int operation"], [r"<%ID> = srem <\d+ x i2> .*", "<d x i2> operation", "<d x int> operation"], [r"<%ID> = srem i4 .*", "i4 operation", "int operation"], [r"<%ID> = srem <\d+ x i4> .*", "<d x i4> operation", "<d x int> operation"], [r"<%ID> = srem i8 .*", "i8 operation", "int operation"], [r"<%ID> = srem <\d+ x i8> .*", "<d x i8> operation", "<d x int> operation"], [r"<%ID> = srem i16 .*", "i16 operation", "int operation"], [ r"<%ID> = srem <\d+ x i16> .*", r"<d x i16> operation", r"<d x int> operation", ], [r"<%ID> = srem i32 
.*", "i32 operation", "int operation"], [ r"<%ID> = srem <\d+ x i32> .*", r"<d x i32> operation", r"<d x int> operation", ], [r"<%ID> = srem i64 .*", "i64 operation", "int operation"], [ r"<%ID> = srem <\d+ x i64> .*", r"<d x i64> operation", r"<d x int> operation", ], [r"<%ID> = srem i128 .*", "i128 operation", "int operation"], [ r"<%ID> = srem <\d+ x i128> .*", r"<d x i128> operation", r"<d x int> operation", ], [r"<%ID> = urem i1 .*", "i1 operation", "int operation"], [r"<%ID> = urem <\d+ x i1> .*", "<d x i1> operation", "<d x int> operation"], [r"<%ID> = urem i2 .*", "i2 operation", "int operation"], [r"<%ID> = urem <\d+ x i2> .*", "<d x i2> operation", "<d x int> operation"], [r"<%ID> = urem i4 .*", "i4 operation", "int operation"], [r"<%ID> = urem <\d+ x i4> .*", "<d x i4> operation", "<d x int> operation"], [r"<%ID> = urem i8 .*", "i8 operation", "int operation"], [r"<%ID> = urem <\d+ x i8> .*", "<d x i8> operation", "<d x int> operation"], [r"<%ID> = urem i16 .*", "i16 operation", "int operation"], [ r"<%ID> = urem <\d+ x i16> .*", r"<d x i16> operation", r"<d x int> operation", ], [r"<%ID> = urem i32 .*", "i32 operation", "int operation"], [ r"<%ID> = urem <\d+ x i32> .*", r"<d x i32> operation", r"<d x int> operation", ], [r"<%ID> = urem i64 .*", "i32 operation", "int operation"], [ r"<%ID> = urem <\d+ x i64> .*", r"<d x i64> operation", r"<d x int> operation", ], [r"<%ID> = urem i128 .*", "i128 operation", "int operation"], [ r"<%ID> = urem <\d+ x i128> .*", r"<d x i128> operation", r"<d x int> operation", ], [ r"<%ID> = fadd " + any_of(fast_math_flag) + r"?x86_fp80.*", r"float operation", r"floating point operation", ], [ r"<%ID> = fadd " + any_of(fast_math_flag) + r"?<\d+ x x86_fp80>.*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"<%ID> = fadd " + any_of(fast_math_flag) + r"?float.*", r"float operation", r"floating point operation", ], [ r"<%ID> = fadd " + any_of(fast_math_flag) + r"?<\d+ x float>.*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"<%ID> = fadd " + any_of(fast_math_flag) + r"?double.*", r"double operation", r"floating point operation", ], [ r"<%ID> = fadd " + any_of(fast_math_flag) + r"?<\d+ x double>.*", r"<d x double> operation", r"<d x floating point> operation", ], [ r"<%ID> = fsub " + any_of(fast_math_flag) + r"?x86_fp80.*", r"float operation", r"floating point operation", ], [ r"<%ID> = fsub " + any_of(fast_math_flag) + r"?<\d+ x x86_fp80>.*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"<%ID> = fsub " + any_of(fast_math_flag) + r"?float.*", r"float operation", r"floating point operation", ], [ r"<%ID> = fsub " + any_of(fast_math_flag) + r"?<\d+ x float>.*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"<%ID> = fsub " + any_of(fast_math_flag) + r"?double.*", r"double operation", r"floating point operation", ], [ r"<%ID> = fsub " + any_of(fast_math_flag) + r"?<\d+ x double>.*", r"<d x double> operation", r"<d x floating point> operation", ], [ r"<%ID> = fmul " + any_of(fast_math_flag) + r"?x86_fp80.*", r"float operation", r"floating point operation", ], [ r"<%ID> = fmul " + any_of(fast_math_flag) + r"?<\d+ x x86_fp80>.*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"<%ID> = fmul " + any_of(fast_math_flag) + r"?float.*", r"float operation", r"floating point operation", ], [ r"<%ID> = fmul " + any_of(fast_math_flag) + r"?<\d+ x float>.*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"<%ID> = fmul " + any_of(fast_math_flag) + 
r"?double.*", r"double operation", r"floating point operation", ], [ r"<%ID> = fmul " + any_of(fast_math_flag) + r"?<\d+ x double>.*", r"<d x double> operation", r"<d x floating point> operation", ], [ r"<%ID> = fdiv " + any_of(fast_math_flag) + r"?x86_fp80.*", r"float operation", r"floating point operation", ], [ r"<%ID> = fdiv " + any_of(fast_math_flag) + r"?<\d+ x x86_fp80>.*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"<%ID> = fdiv " + any_of(fast_math_flag) + r"?float.*", r"float operation", r"floating point operation", ], [ r"<%ID> = fdiv " + any_of(fast_math_flag) + r"?<\d+ x float>.*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"<%ID> = fdiv " + any_of(fast_math_flag) + r"?double.*", r"double operation", r"floating point operation", ], [ r"<%ID> = fdiv " + any_of(fast_math_flag) + r"?<\d+ x double>.*", r"<d x double> operation", r"<d x floating point> operation", ], [ r"<%ID> = frem " + any_of(fast_math_flag) + r"?x86_fp80.*", r"float operation", r"floating point operation", ], [ r"<%ID> = frem " + any_of(fast_math_flag) + r"?<\d+ x x86_fp80>.*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"<%ID> = frem " + any_of(fast_math_flag) + r"?float.*", r"float operation", r"floating point operation", ], [ r"<%ID> = frem " + any_of(fast_math_flag) + r"?<\d+ x float>.*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"<%ID> = frem " + any_of(fast_math_flag) + r"?double.*", r"double operation", r"floating point operation", ], [ r"<%ID> = frem " + any_of(fast_math_flag) + r"?<\d+ x double>.*", r"<d x double> operation", r"<d x floating point> operation", ], [ r"<%ID> = fcmp (fast |)?" + any_of(opt_fcmp) + r"?x86_fp80.*", r"float operation", r"floating point operation", ], [ r"<%ID> = fcmp (fast |)?" + any_of(opt_fcmp) + r"?<\d+ x x86_fp80>.*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"<%ID> = fcmp (fast |)?" + any_of(opt_fcmp) + r"?float.*", r"float operation", r"floating point operation", ], [ r"<%ID> = fcmp (fast |)?" + any_of(opt_fcmp) + r"?<\d+ x float>.*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"<%ID> = fcmp (fast |)?" + any_of(opt_fcmp) + r"?double.*", r"double operation", r"floating point operation", ], [ r"<%ID> = fcmp (fast |)?" 
+ any_of(opt_fcmp) + r"?<\d+ x double>.*", r"<d x double> operation", r"<d x floating point> operation", ], [r"<%ID> = atomicrmw add i1\* .*", "i1* operation", "int* operation"], [r"<%ID> = atomicrmw add i2\* .*", "i2* operation", "int* operation"], [r"<%ID> = atomicrmw add i4\* .*", "i4* operation", "int* operation"], [r"<%ID> = atomicrmw add i8\* .*", "i8* operation", "int* operation"], [r"<%ID> = atomicrmw add i16\* .*", "i16* operation", "int* operation"], [r"<%ID> = atomicrmw add i32\* .*", "i32* operation", "int* operation"], [r"<%ID> = atomicrmw add i64\* .*", "i64* operation", "int* operation"], [r"<%ID> = atomicrmw add i128\* .*", "i128* operation", "int* operation"], [r"<%ID> = atomicrmw sub i1\* .*", "i1* operation", "int* operation"], [r"<%ID> = atomicrmw sub i2\* .*", "i2* operation", "int* operation"], [r"<%ID> = atomicrmw sub i4\* .*", "i4* operation", "int* operation"], [r"<%ID> = atomicrmw sub i8\* .*", "i8* operation", "int* operation"], [r"<%ID> = atomicrmw sub i16\* .*", "i16* operation", "int* operation"], [r"<%ID> = atomicrmw sub i32\* .*", "i32* operation", "int* operation"], [r"<%ID> = atomicrmw sub i64\* .*", "i64* operation", "int* operation"], [r"<%ID> = atomicrmw sub i128\* .*", "i128* operation", "int* operation"], [r"<%ID> = atomicrmw or i1\* .*", "i1* operation", "int* operation"], [r"<%ID> = atomicrmw or i2\* .*", "i2* operation", "int* operation"], [r"<%ID> = atomicrmw or i4\* .*", "i4* operation", "int* operation"], [r"<%ID> = atomicrmw or i8\* .*", "i8* operation", "int* operation"], [r"<%ID> = atomicrmw or i16\* .*", "i16* operation", "int* operation"], [r"<%ID> = atomicrmw or i32\* .*", "i32* operation", "int* operation"], [r"<%ID> = atomicrmw or i64\* .*", "i64* operation", "int* operation"], [r"<%ID> = atomicrmw or i128\* .*", "i128* operation", "int* operation"], [r"<%ID> = atomicrmw xchg i1\* .*", "i1* operation", "int* operation"], [r"<%ID> = atomicrmw xchg i2\* .*", "i2* operation", "int* operation"], [r"<%ID> = atomicrmw xchg i4\* .*", "i4* operation", "int* operation"], [r"<%ID> = atomicrmw xchg i8\* .*", "i8* operation", "int* operation"], [r"<%ID> = atomicrmw xchg i16\* .*", "i16* operation", "int* operation"], [r"<%ID> = atomicrmw xchg i32\* .*", "i32* operation", "int* operation"], [r"<%ID> = atomicrmw xchg i64\* .*", "i64* operation", "int* operation"], [r"<%ID> = atomicrmw xchg i128\* .*", "i128* operation", "int* operation"], [r"<%ID> = alloca i1($|,).*", "i1 operation", "int operation"], [r"<%ID> = alloca i2($|,).*", "i2 operation", "int operation"], [r"<%ID> = alloca i4($|,).*", "i4 operation", "int operation"], [r"<%ID> = alloca i8($|,).*", "i8 operation", "int operation"], [r"<%ID> = alloca i16($|,).*", "i16 operation", "int operation"], [r"<%ID> = alloca i32($|,).*", "i32 operation", "int operation"], [r"<%ID> = alloca i64($|,).*", "i64 operation", "int operation"], [r"<%ID> = alloca i128($|,).*", "i128 operation", "int operation"], [r"<%ID> = alloca i1\*($|,).*", "i1* operation", "int* operation"], [r"<%ID> = alloca i2\*($|,).*", "i2* operation", "int* operation"], [r"<%ID> = alloca i4\*($|,).*", "i4* operation", "int* operation"], [r"<%ID> = alloca i8\*($|,).*", "i8* operation", "int* operation"], [r"<%ID> = alloca i16\*($|,).*", "i16* operation", "int* operation"], [r"<%ID> = alloca i32\*($|,).*", "i32* operation", "int* operation"], [r"<%ID> = alloca i64\*($|,).*", "i64* operation", "int* operation"], [r"<%ID> = alloca i128\*($|,).*", "i128* operation", "int* operation"], [ r"<%ID> = alloca x86_fp80($|,).*", r"float operation", 
r"floating point operation", ], [ r"<%ID> = alloca float($|,).*", r"float operation", r"floating point operation", ], [ r"<%ID> = alloca double($|,).*", r"double operation", r"floating point operation", ], [ r"<%ID> = alloca x86_fp80\*($|,).*", r"float* operation", r"floating point* operation", ], [ r"<%ID> = alloca float\*($|,).*", r"float* operation", r"floating point* operation", ], [ r"<%ID> = alloca double\*($|,).*", r"double* operation", r"floating point* operation", ], ['<%ID> = alloca %".*', "struct/class op", "struct/class op"], [r"<%ID> = alloca <%.*", "struct/class op", "struct/class op"], [r"<%ID> = alloca <?{.*", "struct/class op", "struct/class op"], [r"<%ID> = alloca opaque.*", "struct/class op", "struct/class op"], [ r"<%ID> = alloca <\d+ x i1>, .*", r"<d x i1> operation", r"<d x int> operation", ], [ r"<%ID> = alloca <\d+ x i2>, .*", r"<d x i2> operation", r"<d x int> operation", ], [ r"<%ID> = alloca <\d+ x i4>, .*", r"<d x i4> operation", r"<d x int> operation", ], [ r"<%ID> = alloca <\d+ x i8>, .*", r"<d x i8> operation", r"<d x int> operation", ], [ r"<%ID> = alloca <\d+ x i16>, .*", r"<d x i16> operation", r"<d x int> operation", ], [ r"<%ID> = alloca <\d+ x i32>, .*", r"<d x i32> operation", r"<d x int> operation", ], [ r"<%ID> = alloca <\d+ x i64>, .*", r"<d x i64> operation", r"<d x int> operation", ], [ r"<%ID> = alloca <\d+ x i128>, .*", r"<d x i128> operation", r"<d x int> operation", ], [ r"<%ID> = alloca <\d+ x x86_fp80>, .*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"<%ID> = alloca <\d+ x float>, .*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"<%ID> = alloca <\d+ x double>, .*", r"<d x double> operation", r"<d x floating point> operation", ], [ r"<%ID> = alloca <\d+ x \{ .* \}>, .*", r"<d x structure> operation", r"<d x structure> operation", ], [ r"<%ID> = alloca <\d+ x i1>\*, .*", r"<d x i1>* operation", r"<d x int>* operation", ], [ r"<%ID> = alloca <\d+ x i2>\*, .*", r"<d x i2>* operation", r"<d x int>* operation", ], [ r"<%ID> = alloca <\d+ x i4>\*, .*", r"<d x i4>* operation", r"<d x int>* operation", ], [ r"<%ID> = alloca <\d+ x i8>\*, .*", r"<d x i8>* operation", r"<d x int>* operation", ], [ r"<%ID> = alloca <\d+ x i16>\*, .*", r"<d x i16>* operation", r"<d x int>* operation", ], [ r"<%ID> = alloca <\d+ x i32>\*, .*", r"<d x i32>* operation", r"<d x int>* operation", ], [ r"<%ID> = alloca <\d+ x i64>\*, .*", r"<d x i64>* operation", r"<d x int>* operation", ], [ r"<%ID> = alloca <\d+ x i128>\*, .*", r"<d x i128>* operation", r"<d x int>* operation", ], [ r"<%ID> = alloca <\d+ x x86_fp80>\*, .*", r"<d x float>* operation", r"<d x floating point>* operation", ], [ r"<%ID> = alloca <\d+ x float>\*, .*", r"<d x float>* operation", r"<d x floating point>* operation", ], [ r"<%ID> = alloca <\d+ x double>\*, .*", r"<d x double>* operation", r"<d x floating point>* operation", ], [ r"<%ID> = alloca <\d+ x \{ .* \}>\*, .*", r"<d x structure>* operation", r"<d x structure>* operation", ], [ r"<%ID> = alloca \[\d+ x i1\], .*", r"[d x i1] operation", r"[d x int] operation", ], [ r"<%ID> = alloca \[\d+ x i2\], .*", r"[d x i2] operation", r"[d x int] operation", ], [ r"<%ID> = alloca \[\d+ x i4\], .*", r"[d x i4] operation", r"[d x int] operation", ], [ r"<%ID> = alloca \[\d+ x i8\], .*", r"[d x i8] operation", r"[d x int] operation", ], [ r"<%ID> = alloca \[\d+ x i16\], .*", r"[d x i16] operation", r"[d x int] operation", ], [ r"<%ID> = alloca \[\d+ x i32\], .*", r"[d x i32] operation", r"[d x int] operation", ], 
[ r"<%ID> = alloca \[\d+ x i64\], .*", r"[d x i64] operation", r"[d x int] operation", ], [ r"<%ID> = alloca \[\d+ x i128\], .*", r"[d x i128] operation", r"[d x int] operation", ], [ r"<%ID> = alloca \[\d+ x x86_fp80\], .*", r"[d x float] operation", r"[d x floating point] operation", ], [ r"<%ID> = alloca \[\d+ x float\], .*", r"[d x float] operation", r"[d x floating point] operation", ], [ r"<%ID> = alloca \[\d+ x double\], .*", r"[d x double] operation", r"[d x floating point] operation", ], [ r"<%ID> = alloca \[\d+ x \{ .* \}\], .*", r"[d x structure] operation", r"[d x structure] operation", ], [ r"<%ID> = alloca { { float, float } }, .*", r"{ float, float } operation", r"complex operation", ], [ r"<%ID> = alloca { { double, double } }, .*", r"{ double, double } operation", r"complex operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i1, .*", r"i1 operation", r"int operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i2, .*", r"i2 operation", r"int operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i4, .*", r"i4 operation", r"int operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i8, .*", r"i8 operation", r"int operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i16, .*", r"i16 operation", r"int operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i24, .*", r"i16 operation", r"int operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i32, .*", r"i32 operation", r"int operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i40, .*", r"i40 operation", r"int operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i64, .*", r"i64 operation", r"int operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i128, .*", r"i128 operation", r"int operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i256, .*", r"i256 operation", r"int operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i1\*, .*", r"i1* operation", r"int* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i2\*, .*", r"i2* operation", r"int* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i4\*, .*", r"i4* operation", r"int* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i8\*, .*", r"i8* operation", r"int* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i16\*, .*", r"i16* operation", r"int* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i24\*, .*", r"i16* operation", r"int* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i32\*, .*", r"i32* operation", r"int* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i40\*, .*", r"i40* operation", r"int* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i64\*, .*", r"i64* operation", r"int* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i128\*, .*", r"i128* operation", r"int* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i256\*, .*", r"i256* operation", r"int* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i1\*\*, .*", r"i1** operation", r"int** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i2\*\*, .*", r"i2** operation", r"int** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i4\*\*, .*", r"i4** operation", r"int** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i8\*\*, .*", r"i8** operation", r"int** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i16\*\*, .*", r"i16** operation", r"int** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i24\*\*, .*", r"i16** operation", r"int** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i32\*\*, .*", r"i32** operation", r"int** 
operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i40\*\*, .*", r"i40** operation", r"int** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i64\*\*, .*", r"i64** operation", r"int** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i128\*\*, .*", r"i128** operation", r"int** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i256\*\*, .*", r"i256** operation", r"int** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i1\*\*\*, .*", r"i1*** operation", r"int*** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i2\*\*\*, .*", r"i2*** operation", r"int*** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i4\*\*\*, .*", r"i4*** operation", r"int*** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i8\*\*\*, .*", r"i8*** operation", r"int*** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i16\*\*\*, .*", r"i16*** operation", r"int*** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i24\*\*\*, .*", r"i16*** operation", r"int*** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i32\*\*\*, .*", r"i32*** operation", r"int*** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i40\*\*\*, .*", r"i40*** operation", r"int*** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i64\*\*\*, .*", r"i64*** operation", r"int*** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i128\*\*\*, .*", r"i128*** operation", r"int*** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?i256\*\*\*, .*", r"i256*** operation", r"int*** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?x86_fp80, .*", r"float operation", r"floating point operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?float, .*", r"float operation", r"floating point operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?double, .*", r"double operation", r"floating point operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?x86_fp80\*, .*", r"float* operation", r"floating point* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?float\*, .*", r"float* operation", r"floating point* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?double\*, .*", r"double* operation", r"floating point* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?x86_fp80\*\*, .*", r"float** operation", r"floating point** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?float\*\*, .*", r"float** operation", r"floating point** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?double\*\*, .*", r"double** operation", r"floating point** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?x86_fp80\*\*\*, .*", r"float*** operation", r"floating point*** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?float\*\*\*, .*", r"float*** operation", r"floating point*** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?double\*\*\*, .*", r"double*** operation", r"floating point*** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r'?%".*', r"struct/class op", r"struct/class op", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<%.*", r"struct/class op", r"struct/class op", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<?{.*", r"struct/class op", r"struct/class op", ], [ r"<%ID> = load " + any_of(opt_load) + r"?opaque.*", r"struct/class op", r"struct/class op", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x i1>, .*", r"<d x i1> operation", r"<d x int> operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x i2>, .*", r"<d x i2> operation", r"<d x int> operation", ], [ r"<%ID> = load " + 
any_of(opt_load) + r"?<\d+ x i4>, .*", r"<d x i4> operation", r"<d x int> operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x i8>, .*", r"<d x i8> operation", r"<d x int> operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x i16>, .*", r"<d x i16> operation", r"<d x int> operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x i24>, .*", r"<d x i16> operation", r"<d x int> operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x i32>, .*", r"<d x i32> operation", r"<d x int> operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x i40>, .*", r"<d x i40> operation", r"<d x int> operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x i64>, .*", r"<d x i64> operation", r"<d x int> operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x i128>, .*", r"<d x i128> operation", r"<d x int> operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x x86_fp80>, .*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x float>, .*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x double>, .*", r"<d x double> operation", r"<d x floating point> operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x \{ .* \}>, .*", r"<d x structure> operation", r"<d x structure> operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x i1\*>, .*", r"<d x i1*> operation", r"<d x int*> operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x i2\*>, .*", r"<d x i2*> operation", r"<d x int*> operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x i4\*>, .*", r"<d x i4*> operation", r"<d x int*> operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x i8\*>, .*", r"<d x i8*> operation", r"<d x int*> operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x i16\*>, .*", r"<d x i16*> operation", r"<d x int*> operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x i24\*>, .*", r"<d x i16*> operation", r"<d x int*> operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x i32\*>, .*", r"<d x i32*> operation", r"<d x int*> operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x i40\*>, .*", r"<d x i40*> operation", r"<d x int*> operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x i64\*>, .*", r"<d x i64*> operation", r"<d x int*> operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x i128\*>, .*", r"<d x i128*> operation", r"<d x int*> operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x x86_fp80\*>, .*", r"<d x float*> operation", r"<d x floating point*> operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x float\*>, .*", r"<d x float*> operation", r"<d x floating point*> operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x double\*>, .*", r"<d x double*> operation", r"<d x floating point*> operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x i1>\*, .*", r"<d x i1>* operation", r"<d x int>* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x i2>\*, .*", r"<d x i2>* operation", r"<d x int>* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x i4>\*, .*", r"<d x i4>* operation", r"<d x int>* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x i8>\*, .*", r"<d x i8>* operation", r"<d x int>* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x i16>\*, .*", r"<d x i16>* operation", r"<d x int>* operation", ], [ r"<%ID> = load " + any_of(opt_load) + 
r"?<\d+ x i24>\*, .*", r"<d x i16>* operation", r"<d x int>* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x i32>\*, .*", r"<d x i32>* operation", r"<d x int>* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x i40>\*, .*", r"<d x i40>* operation", r"<d x int>* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x i64>\*, .*", r"<d x i64>* operation", r"<d x int>* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x i128>\*, .*", r"<d x i128>* operation", r"<d x int>* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x x86_fp80>\*, .*", r"<d x float>* operation", r"<d x floating point>* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x float>\*, .*", r"<d x float>* operation", r"<d x floating point>* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x double>\*, .*", r"<d x double>* operation", r"<d x floating point>* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x \{ .* \}>\*, .*", r"<d x structure>* operation", r"<d x structure>* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x x86_fp80>\*\*, .*", r"<d x float>** operation", r"<d x floating point>** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x float>\*\*, .*", r"<d x float>** operation", r"<d x floating point>** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x double>\*\*, .*", r"<d x double>** operation", r"<d x floating point>** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?<\d+ x \{ .* \}>\*\*, .*", r"<d x structure>** operation", r"<d x structure>** operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?\[\d+ x i1\], .*", r"[d x i1] operation", r"[d x int] operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?\[\d+ x i2\], .*", r"[d x i2] operation", r"[d x int] operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?\[\d+ x i4\], .*", r"[d x i4] operation", r"[d x int] operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?\[\d+ x i8\], .*", r"[d x i8] operation", r"[d x int] operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?\[\d+ x i16\], .*", r"[d x i16] operation", r"[d x int] operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?\[\d+ x i24\], .*", r"[d x i16] operation", r"[d x int] operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?\[\d+ x i32\], .*", r"[d x i32] operation", r"[d x int] operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?\[\d+ x i40\], .*", r"[d x i40] operation", r"[d x int] operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?\[\d+ x i64\], .*", r"[d x i64] operation", r"[d x int] operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?\[\d+ x i128\], .*", r"[d x i128] operation", r"[d x int] operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?\[\d+ x x86_fp80\], .*", r"[d x float] operation", r"[d x floating point] operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?\[\d+ x float\], .*", r"[d x float] operation", r"[d x floating point] operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?\[\d+ x double\], .*", r"[d x double] operation", r"[d x floating point] operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?\[\d+ x \{ .* \}\], .*", r"[d x structure] operation", r"[d x structure] operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?\[\d+ x i1\]\*, .*", r"[d x i1]* operation", r"[d x int]* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?\[\d+ x i2\]\*, .*", r"[d x i2]* operation", r"[d x int]* operation", ], [ r"<%ID> = load " + any_of(opt_load) + 
r"?\[\d+ x i4\]\*, .*", r"[d x i4]* operation", r"[d x int]* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?\[\d+ x i8\]\*, .*", r"[d x i8]* operation", r"[d x int]* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?\[\d+ x i16\]\*, .*", r"[d x i16]* operation", r"[d x int]* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?\[\d+ x i32\]\*, .*", r"[d x i32]* operation", r"[d x int]* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?\[\d+ x i40\]\*, .*", r"[d x i40]* operation", r"[d x int]* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?\[\d+ x i64\]\*, .*", r"[d x i64]* operation", r"[d x int]* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?\[\d+ x i128\]\*, .*", r"[d x i128]* operation", r"[d x int]* operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?\[\d+ x x86_fp80\]\*, .*", r"[d x float]* operation", r"[d x floating point] operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?\[\d+ x float\]\*, .*", r"[d x float]* operation", r"[d x floating point] operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?\[\d+ x double\]\*, .*", r"[d x double]* operation", r"[d x floating point] operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?\[\d+ x \{ .* \}\]\*, .*", r"[d x structure]* operation", r"[d x floating point] operation", ], [ r"<%ID> = load " + any_of(opt_load) + r"?.*\(.*\)\*+, .*", r"function operation", r"function operation", ], [r"store " + any_of(opt_load) + r"?i1 .*", "i1 operation", "int operation"], [r"store " + any_of(opt_load) + r"?i2 .*", "i2 operation", "int operation"], [r"store " + any_of(opt_load) + r"?i4 .*", "i4 operation", "int operation"], [r"store " + any_of(opt_load) + r"?i8 .*", "i8 operation", "int operation"], [r"store " + any_of(opt_load) + r"?i16 .*", "i16 operation", "int operation"], [r"store " + any_of(opt_load) + r"?i24 .*", "i16 operation", "int operation"], [r"store " + any_of(opt_load) + r"?i32 .*", "i32 operation", "int operation"], [r"store " + any_of(opt_load) + r"?i40 .*", "i32 operation", "int operation"], [r"store " + any_of(opt_load) + r"?i64 .*", "i64 operation", "int operation"], [r"store " + any_of(opt_load) + r"?i128 .*", "i128 operation", "int operation"], [ r"store " + any_of(opt_load) + r"?i1\* .*", r"i1* operation", r"int* operation", ], [ r"store " + any_of(opt_load) + r"?i2\* .*", r"i2* operation", r"int* operation", ], [ r"store " + any_of(opt_load) + r"?i4\* .*", r"i4* operation", r"int* operation", ], [ r"store " + any_of(opt_load) + r"?i8\* .*", r"i8* operation", r"int* operation", ], [ r"store " + any_of(opt_load) + r"?i16\* .*", r"i16* operation", r"int* operation", ], [ r"store " + any_of(opt_load) + r"?i32\* .*", r"i32* operation", r"int* operation", ], [ r"store " + any_of(opt_load) + r"?i64\* .*", r"i64* operation", r"int* operation", ], [ r"store " + any_of(opt_load) + r"?i128\* .*", r"i128* operation", r"int* operation", ], [ r"store " + any_of(opt_load) + r"?i1\*\* .*", r"i1** operation", r"int** operation", ], [ r"store " + any_of(opt_load) + r"?i2\*\* .*", r"i2** operation", r"int** operation", ], [ r"store " + any_of(opt_load) + r"?i4\*\* .*", r"i4** operation", r"int** operation", ], [ r"store " + any_of(opt_load) + r"?i8\*\* .*", r"i8** operation", r"int** operation", ], [ r"store " + any_of(opt_load) + r"?i16\*\* .*", r"i16** operation", r"int** operation", ], [ r"store " + any_of(opt_load) + r"?i32\*\* .*", r"i32** operation", r"int** operation", ], [ r"store " + any_of(opt_load) + r"?i64\*\* .*", r"i64** operation", r"int** operation", ], [ 
r"store " + any_of(opt_load) + r"?i128\*\* .*", r"i128** operation", r"int** operation", ], [ r"store " + any_of(opt_load) + r"?x86_fp80 .*", r"float operation", r"floating point operation", ], [ r"store " + any_of(opt_load) + r"?float .*", r"float operation", r"floating point operation", ], [ r"store " + any_of(opt_load) + r"?double .*", r"double operation", r"floating point operation", ], [ r"store " + any_of(opt_load) + r"?x86_fp80\* .*", r"float* operation", r"floating point* operation", ], [ r"store " + any_of(opt_load) + r"?float\* .*", r"float* operation", r"floating point* operation", ], [ r"store " + any_of(opt_load) + r"?double\* .*", r"double* operation", r"floating point* operation", ], [ r"store " + any_of(opt_load) + r"?x86_fp80\*\* .*", r"float** operation", r"floating point** operation", ], [ r"store " + any_of(opt_load) + r"?float\*\* .*", r"float** operation", r"floating point** operation", ], [ r"store " + any_of(opt_load) + r"?double\*\* .*", r"double** operation", r"floating point** operation", ], [r"store " + any_of(opt_load) + r"?void \(.*", "function op", "function op"], [r"store " + any_of(opt_load) + r'?%".*', "struct/class op", "struct/class op"], [r"store " + any_of(opt_load) + r"?<%.*", "struct/class op", "struct/class op"], [ r"store " + any_of(opt_load) + r"?<?{.*", r"struct/class op", r"struct/class op", ], [ r"store " + any_of(opt_load) + r"?opaque.*", r"struct/class op", r"struct/class op", ], [ r"store " + any_of(opt_load) + r"?<\d+ x i1> .*", r"<d x i1> operation", r"<d x int> operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x i2> .*", r"<d x i2> operation", r"<d x int> operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x i4> .*", r"<d x i4> operation", r"<d x int> operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x i8> .*", r"<d x i8> operation", r"<d x int> operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x i16> .*", r"<d x i16> operation", r"<d x int> operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x i32> .*", r"<d x i32> operation", r"<d x int> operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x i64> .*", r"<d x i64> operation", r"<d x int> operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x i128> .*", r"<d x i128> operation", r"<d x int> operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x x86_fp80> .*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x float> .*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x double> .*", r"<d x double> operation", r"<d x floating point> operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x \{ .* \}> .*", r"<d x \{ .* \}> operation", r"<d x \{ .* \}> operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x i1\*> .*", r"<d x i1*> operation", r"<d x int*> operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x i2\*> .*", r"<d x i2*> operation", r"<d x int*> operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x i4\*> .*", r"<d x i4*> operation", r"<d x int*> operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x i8\*> .*", r"<d x i8*> operation", r"<d x int*> operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x i16\*> .*", r"<d x i16*> operation", r"<d x int*> operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x i32\*> .*", r"<d x i32*> operation", r"<d x int*> operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x i64\*> .*", r"<d x i64*> operation", r"<d x int*> operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x 
i128\*> .*", r"<d x i128*> operation", r"<d x int*> operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x x86_fp80\*> .*", r"<d x float*> operation", r"<d x floating point*> operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x float\*> .*", r"<d x float*> operation", r"<d x floating point*> operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x double\*> .*", r"<d x double*> operation", r"<d x floating point*> operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x \{ .* \}\*> .*", r"<d x \{ .* \}*> operation", r"<d x \{ .* \}*> operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x i1>\* .*", r"<d x i1>* operation", r"<d x int>* operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x i2>\* .*", r"<d x i2>* operation", r"<d x int>* operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x i4>\* .*", r"<d x i4>* operation", r"<d x int>* operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x i8>\* .*", r"<d x i8>* operation", r"<d x int>* operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x i16>\* .*", r"<d x i16>* operation", r"<d x int>* operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x i32>\* .*", r"<d x i32>* operation", r"<d x int>* operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x i64>\* .*", r"<d x i64>* operation", r"<d x int>* operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x i128>\* .*", r"<d x i128>* operation", r"<d x int>* operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x x86_fp80>\* .*", r"<d x float>* operation", r"<d x floating point>* operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x float>\* .*", r"<d x float>* operation", r"<d x floating point>* operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x double>\* .*", r"<d x double>* operation", r"<d x floating point>* operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x \{ .* \}\*?>\* .*", r"<d x struct>* operation", r"<d x \{ .* \}>* operation", ], [ r"store " + any_of(opt_load) + r"?<\d+ x void \(.*", r"<d x function>* operation", r"<d x function operation", ], [ r"store " + any_of(opt_load) + r"?\[\d+ x i1\] .*", r"[d x i1] operation", r"[d x int] operation", ], [ r"store " + any_of(opt_load) + r"?\[\d+ x i2\] .*", r"[d x i2] operation", r"[d x int] operation", ], [ r"store " + any_of(opt_load) + r"?\[\d+ x i4\] .*", r"[d x i4] operation", r"[d x int] operation", ], [ r"store " + any_of(opt_load) + r"?\[\d+ x i8\] .*", r"[d x i8] operation", r"[d x int] operation", ], [ r"store " + any_of(opt_load) + r"?\[\d+ x i16\] .*", r"[d x i16] operation", r"[d x int] operation", ], [ r"store " + any_of(opt_load) + r"?\[\d+ x i32\] .*", r"[d x i32] operation", r"[d x int] operation", ], [ r"store " + any_of(opt_load) + r"?\[\d+ x i64\] .*", r"[d x i64] operation", r"[d x int] operation", ], [ r"store " + any_of(opt_load) + r"?\[\d+ x i128\] .*", r"[d x i128] operation", r"[d x int] operation", ], [ r"store " + any_of(opt_load) + r"?\[\d+ x x86_fp80\] .*", r"[d x float] operation", r"[d x floating point] operation", ], [ r"store " + any_of(opt_load) + r"?\[\d+ x float\] .*", r"[d x float] operation", r"[d x floating point] operation", ], [ r"store " + any_of(opt_load) + r"?\[\d+ x double\] .*", r"[d x double] operation", r"[d x floating point] operation", ], [ r"store " + any_of(opt_load) + r"?\[\d+ x \{ .* \}\] .*", r"[d x structure] operation", r"[d x structure] operation", ], [r"declare (noalias |nonnull )*void .*", "void operation", "void operation"], [r"declare (noalias |nonnull )*i1 .*", "i1 operation", "int operation"], [r"declare (noalias |nonnull )*i2 
.*", "i2 operation", "int operation"], [r"declare (noalias |nonnull )*i4 .*", "i4 operation", "int operation"], [r"declare (noalias |nonnull )*i8 .*", "i8 operation", "int operation"], [r"declare (noalias |nonnull )*i16 .*", "i16 operation", "int operation"], [r"declare (noalias |nonnull )*i32 .*", "i32 operation", "int operation"], [r"declare (noalias |nonnull )*i64 .*", "i64 operation", "int operation"], [r"declare (noalias |nonnull )*i8\* .*", "i8* operation", "int* operation"], [r"declare (noalias |nonnull )*i16\* .*", "i16* operation", "int* operation"], [r"declare (noalias |nonnull )*i32\* .*", "i32* operation", "int* operation"], [r"declare (noalias |nonnull )*i64\* .*", "i64* operation", "int* operation"], [ r"declare (noalias |nonnull )*x86_fp80 .*", r"float operation", r"floating point operation", ], [ r"declare (noalias |nonnull )*float .*", r"float operation", r"floating point operation", ], [ r"declare (noalias |nonnull )*double .*", r"double operation", r"floating point operation", ], [ r"declare (noalias |nonnull )*x86_fp80\* .*", r"float* operation", r"floating point* operation", ], [ r"declare (noalias |nonnull )*float\* .*", r"float* operation", r"floating point* operation", ], [ r"declare (noalias |nonnull )*double\* .*", r"double* operation", r"floating point* operation", ], ['declare (noalias |nonnull )*%".*', "struct/class op", "struct/class op"], [r"declare (noalias |nonnull )*<%.*", "struct/class op", "struct/class op"], [r"declare (noalias |nonnull )*<?{.*", "struct/class op", "struct/class op"], [ r"declare (noalias |nonnull )*opaque.*", r"struct/class op", r"struct/class op", ], [ r"declare (noalias |nonnull )*<\d+ x i1> .*", r"<d x i1> operation", r"<d x int> operation", ], [ r"declare (noalias |nonnull )*<\d+ x i2> .*", r"<d x i2> operation", r"<d x int> operation", ], [ r"declare (noalias |nonnull )*<\d+ x i4> .*", r"<d x i4> operation", r"<d x int> operation", ], [ r"declare (noalias |nonnull )*<\d+ x i8> .*", r"<d x i8> operation", r"<d x int> operation", ], [ r"declare (noalias |nonnull )*<\d+ x i16> .*", r"<d x i16> operation", r"<d x int> operation", ], [ r"declare (noalias |nonnull )*<\d+ x i32> .*", r"<d x i32> operation", r"<d x int> operation", ], [ r"declare (noalias |nonnull )*<\d+ x i64> .*", r"<d x i64> operation", r"<d x int> operation", ], [ r"declare (noalias |nonnull )*<\d+ x i128> .*", r"<d x i128> operation", r"<d x int> operation", ], [ r"declare (noalias |nonnull )*<\d+ x x86_fp80> .*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"declare (noalias |nonnull )*<\d+ x float> .*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"declare (noalias |nonnull )*<\d+ x double> .*", r"<d x double> operation", r"<d x floating point> operation", ], [ r"declare (noalias |nonnull )*<\d+ x i1>\* .*", r"<d x i1>* operation", r"<d x int>* operation", ], [ r"declare (noalias |nonnull )*<\d+ x i2>\* .*", r"<d x i2>* operation", r"<d x int>* operation", ], [ r"declare (noalias |nonnull )*<\d+ x i4>\* .*", r"<d x i4>* operation", r"<d x int>* operation", ], [ r"declare (noalias |nonnull )*<\d+ x i8>\* .*", r"<d x i8>* operation", r"<d x int>* operation", ], [ r"declare (noalias |nonnull )*<\d+ x i16>\* .*", r"<d x i16>* operation", r"<d x int>* operation", ], [ r"declare (noalias |nonnull )*<\d+ x i32>\* .*", r"<d x i32>* operation", r"<d x int>* operation", ], [ r"declare (noalias |nonnull )*<\d+ x i64>\* .*", r"<d x i64>* operation", r"<d x int>* operation", ], [ r"declare (noalias |nonnull )*<\d+ x i128>\* .*", r"<d 
x i128>* operation", r"<d x int>* operation", ], [ r"declare (noalias |nonnull )*<\d+ x x86_fp80>\* .*", r"<d x float>* operation", r"<d x floating point>* operation", ], [ r"declare (noalias |nonnull )*<\d+ x float>\* .*", r"<d x float>* operation", r"<d x floating point>* operation", ], [ r"declare (noalias |nonnull )*<\d+ x double>\* .*", r"<d x double>* operation", r"<d x floating point>* operation", ], [ r"declare (noalias |nonnull )*\[\d+ x i1\] .*", r"[d x i1] operation", r"[d x int] operation", ], [ r"declare (noalias |nonnull )*\[\d+ x i2\] .*", r"[d x i2] operation", r"[d x int] operation", ], [ r"declare (noalias |nonnull )*\[\d+ x i4\] .*", r"[d x i4] operation", r"[d x int] operation", ], [ r"declare (noalias |nonnull )*\[\d+ x i8\] .*", r"[d x i8] operation", r"[d x int] operation", ], [ r"declare (noalias |nonnull )*\[\d+ x i16\] .*", r"[d x i16] operation", r"[d x int] operation", ], [ r"declare (noalias |nonnull )*\[\d+ x i32\] .*", r"[d x i32] operation", r"[d x int] operation", ], [ r"declare (noalias |nonnull )*\[\d+ x i64\] .*", r"[d x i64] operation", r"[d x int] operation", ], [ r"declare (noalias |nonnull )*\[\d+ x i128\] .*", r"[d x i128] operation", r"[d x int] operation", ], [ r"declare (noalias |nonnull )*\[\d+ x x86_fp80\] .*", r"[d x float] operation", r"[d x floating point] operation", ], [ r"declare (noalias |nonnull )*\[\d+ x float\] .*", r"[d x float] operation", r"[d x floating point] operation", ], [ r"declare (noalias |nonnull )*\[\d+ x double\] .*", r"[d x double] operation", r"[d x floating point] operation", ], [ r"define " + any_of(opt_define) + r"+void .*", r"void operation", r"void operation", ], [r"define " + any_of(opt_define) + r"+i1 .*", "i1 operation", "int operation"], [r"define " + any_of(opt_define) + r"+i2 .*", "i2 operation", "int operation"], [r"define " + any_of(opt_define) + r"+i4 .*", "i4 operation", "int operation"], [r"define " + any_of(opt_define) + r"+i8 .*", "i8 operation", "int operation"], [ r"define " + any_of(opt_define) + r"+i16 .*", r"i16 operation", r"int operation", ], [ r"define " + any_of(opt_define) + r"+i32 .*", r"i32 operation", r"int operation", ], [ r"define " + any_of(opt_define) + r"+i64 .*", r"i64 operation", r"int operation", ], [ r"define " + any_of(opt_define) + r"+i128 .*", r"i128 operation", r"int operation", ], [ r"define " + any_of(opt_define) + r"+i1\* .*", r"i1* operation", r"int* operation", ], [ r"define " + any_of(opt_define) + r"+i2\* .*", r"i2* operation", r"int* operation", ], [ r"define " + any_of(opt_define) + r"+i4\* .*", r"i4* operation", r"int* operation", ], [ r"define " + any_of(opt_define) + r"+i8\* .*", r"i8* operation", r"int* operation", ], [ r"define " + any_of(opt_define) + r"+i16\* .*", r"i16* operation", r"int* operation", ], [ r"define " + any_of(opt_define) + r"+i32\* .*", r"i32* operation", r"int* operation", ], [ r"define " + any_of(opt_define) + r"+i64\* .*", r"i64* operation", r"int* operation", ], [ r"define " + any_of(opt_define) + r"+i128\* .*", r"i128* operation", r"int* operation", ], [ r"define " + any_of(opt_define) + r"+x86_fp80 .*", r"float operation", r"floating point operation", ], [ r"define " + any_of(opt_define) + r"+float .*", r"float operation", r"floating point operation", ], [ r"define " + any_of(opt_define) + r"+double .*", r"double operation", r"floating point operation", ], [ r"define " + any_of(opt_define) + r"+x86_fp80\* .*", r"float* operation", r"floating point* operation", ], [ r"define " + any_of(opt_define) + r"+float\* .*", r"float* operation", 
r"floating point* operation", ], [ r"define " + any_of(opt_define) + r"+double\* .*", r"double* operation", r"floating point* operation", ], [ r"define " + any_of(opt_define) + r'+%".*', r"struct/class op", r"struct/class op", ], [ r"define " + any_of(opt_define) + r"+<%.*", r"struct/class op", r"struct/class op", ], [ r"define " + any_of(opt_define) + r"+<?{.*", r"struct/class op", r"struct/class op", ], [ r"define " + any_of(opt_define) + r"+opaque.*", r"struct/class op", r"struct/class op", ], [ r"define " + any_of(opt_define) + r"+<\d+ x i1> .*", r"<d x i1> operation", r"<d x int> operation", ], [ r"define " + any_of(opt_define) + r"+<\d+ x i2> .*", r"<d x i2> operation", r"<d x int> operation", ], [ r"define " + any_of(opt_define) + r"+<\d+ x i4> .*", r"<d x i4> operation", r"<d x int> operation", ], [ r"define " + any_of(opt_define) + r"+<\d+ x i8> .*", r"<d x i8> operation", r"<d x int> operation", ], [ r"define " + any_of(opt_define) + r"+<\d+ x i16> .*", r"<d x i16> operation", r"<d x int> operation", ], [ r"define " + any_of(opt_define) + r"+<\d+ x i32> .*", r"<d x i32> operation", r"<d x int> operation", ], [ r"define " + any_of(opt_define) + r"+<\d+ x i64> .*", r"<d x i64> operation", r"<d x int> operation", ], [ r"define " + any_of(opt_define) + r"+<\d+ x i128> .*", r"<d x i128> operation", r"<d x int> operation", ], [ r"define " + any_of(opt_define) + r"+<\d+ x x86_fp80> .*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"define " + any_of(opt_define) + r"+<\d+ x float> .*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"define " + any_of(opt_define) + r"+<\d+ x double> .*", r"<d x double> operation", r"<d x floating point> operation", ], [ r"define " + any_of(opt_define) + r"+<\d+ x i1>\* .*", r"<d x i1>* operation", r"<d x int>* operation", ], [ r"define " + any_of(opt_define) + r"+<\d+ x i2>\* .*", r"<d x i2>* operation", r"<d x int>* operation", ], [ r"define " + any_of(opt_define) + r"+<\d+ x i4>\* .*", r"<d x i4>* operation", r"<d x int>* operation", ], [ r"define " + any_of(opt_define) + r"+<\d+ x i8>\* .*", r"<d x i8>* operation", r"<d x int>* operation", ], [ r"define " + any_of(opt_define) + r"+<\d+ x i16>\* .*", r"<d x i16>* operation", r"<d x int>* operation", ], [ r"define " + any_of(opt_define) + r"+<\d+ x i32>\* .*", r"<d x i32>* operation", r"<d x int>* operation", ], [ r"define " + any_of(opt_define) + r"+<\d+ x i64>\* .*", r"<d x i64>* operation", r"<d x int>* operation", ], [ r"define " + any_of(opt_define) + r"+<\d+ x i128>\* .*", r"<d x i128>* operation", r"<d x int>* operation", ], [ r"define " + any_of(opt_define) + r"+<\d+ x x86_fp80>\* .*", r"<d x float>* operation", r"<d x floating point>* operation", ], [ r"define " + any_of(opt_define) + r"+<\d+ x float>\* .*", r"<d x float>* operation", r"<d x floating point>* operation", ], [ r"define " + any_of(opt_define) + r"+<\d+ x double>\* .*", r"<d x double>* operation", r"<d x floating point>* operation", ], [ r"define " + any_of(opt_define) + r"+\[\d+ x i1\] .*", r"[d x i1] operation", r"[d x int] operation", ], [ r"define " + any_of(opt_define) + r"+\[\d+ x i2\] .*", r"[d x i2] operation", r"[d x int] operation", ], [ r"define " + any_of(opt_define) + r"+\[\d+ x i4\] .*", r"[d x i4] operation", r"[d x int] operation", ], [ r"define " + any_of(opt_define) + r"+\[\d+ x i8\] .*", r"[d x i8] operation", r"[d x int] operation", ], [ r"define " + any_of(opt_define) + r"+\[\d+ x i16\] .*", r"[d x i16] operation", r"[d x int] operation", ], [ r"define " + 
any_of(opt_define) + r"+\[\d+ x i32\] .*", r"[d x i32] operation", r"[d x int] operation", ], [ r"define " + any_of(opt_define) + r"+\[\d+ x i64\] .*", r"[d x i64] operation", r"[d x int] operation", ], [ r"define " + any_of(opt_define) + r"+\[\d+ x i128\] .*", r"[d x i128] operation", r"[d x int] operation", ], [ r"define " + any_of(opt_define) + r"+\[\d+ x x86_fp80\] .*", r"[d x float] operation", r"[d x floating point] operation", ], [ r"define " + any_of(opt_define) + r"+\[\d+ x float\] .*", r"[d x float] operation", r"[d x floating point] operation", ], [ r"define " + any_of(opt_define) + r"+\[\d+ x double\] .*", r"[d x double] operation", r"[d x floating point] operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*i1 .*", r"i1 operation", r"int operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*i2 .*", r"i2 operation", r"int operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*i4 .*", r"i4 operation", r"int operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*i8 .*", r"i8 operation", r"int operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*i16 .*", r"i16 operation", r"int operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*i32 .*", r"i32 operation", r"int operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*i64 .*", r"i64 operation", r"int operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*i128 .*", r"i128 operation", r"int operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*i1\* .*", r"i1* operation", r"int* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*i2\* .*", r"i2* operation", r"int* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*i4\* .*", r"i4* operation", r"int* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*i8\* .*", r"i8* operation", r"int* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*i16\* .*", r"i16* operation", r"int* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*i32\* .*", r"i32* operation", r"int* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*i64\* .*", r"i64* operation", r"int* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*i128\* .*", r"i128* operation", r"int* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*i1\*\* .*", r"i1** operation", r"int* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*i2\*\* .*", r"i2** operation", r"int* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*i4\*\* .*", r"i4** operation", r"int* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*i8\*\* .*", r"i8** operation", r"int* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*i16\*\* .*", r"i16** operation", r"int* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*i32\*\* .*", r"i32** operation", r"int* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*i64\*\* .*", r"i64** operation", r"int* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) 
+ r"*i128\*\* .*", r"i128** operation", r"int* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*x86_fp80 .*", r"float operation", r"floating point operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*float .*", r"float operation", r"floating point operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*double .*", r"double operation", r"floating point operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*x86_fp80\* .*", r"float* operation", r"floating point* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*float\* .*", r"float* operation", r"floating point* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*double\* .*", r"double* operation", r"floating point* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*x86_fp80\*\* .*", r"float** operation", r"floating point* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*float\*\* .*", r"float** operation", r"floating point* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*double\*\* .*", r"double** operation", r"floating point* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r'*%".*', r"struct/class op", r"struct/class op", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*<%.*", r"struct/class op", r"struct/class op", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*<?{.*", r"struct/class op", r"struct/class op", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*opaque.*", r"struct/class op", r"struct/class op", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*<\d+ x i1> .*", r"<d x i1> operation", r"<d x int> operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*<\d+ x i2> .*", r"<d x i2> operation", r"<d x int> operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*<\d+ x i4> .*", r"<d x i4> operation", r"<d x int> operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*<\d+ x i8> .*", r"<d x i8> operation", r"<d x int> operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*<\d+ x i16> .*", r"<d x i16> operation", r"<d x int> operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*<\d+ x i32> .*", r"<d x i32> operation", r"<d x int> operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*<\d+ x i64> .*", r"<d x i64> operation", r"<d x int> operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*<\d+ x i128> .*", r"<d x i128> operation", r"<d x int> operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*<\d+ x x86_fp80> .*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*<\d+ x float> .*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*<\d+ x double> .*", r"<d x double> operation", r"<d x floating point> operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*<\d+ x i1>\* .*", r"<d x i1>* operation", r"<d x int>* operation", ], [ r"<%ID> = (tail |musttail |notail 
)?call " + any_of(opt_invoke) + r"*<\d+ x i2>\* .*", r"<d x i2>* operation", r"<d x int>* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*<\d+ x i4>\* .*", r"<d x i4>* operation", r"<d x int>* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*<\d+ x i8>\* .*", r"<d x i8>* operation", r"<d x int>* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*<\d+ x i16>\* .*", r"<d x i16>* operation", r"<d x int>* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*<\d+ x i32>\* .*", r"<d x i32>* operation", r"<d x int>* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*<\d+ x i64>\* .*", r"<d x i64>* operation", r"<d x int>* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*<\d+ x i128>\* .*", r"<d x i128>* operation", r"<d x int>* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*<\d+ x x86_fp80>\* .*", r"<d x float>* operation", r"<d x floating point>* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*<\d+ x float>\* .*", r"<d x float>* operation", r"<d x floating point>* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*<\d+ x double>\* .*", r"<d x double>* operation", r"<d x floating point>* operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*\[\d+ x i1\] .*", r"[d x i1] operation", r"[d x int] operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*\[\d+ x i2\] .*", r"[d x i2] operation", r"[d x int] operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*\[\d+ x i4\] .*", r"[d x i4] operation", r"[d x int] operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*\[\d+ x i8\] .*", r"[d x i8] operation", r"[d x int] operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*\[\d+ x i16\] .*", r"[d x i16] operation", r"[d x int] operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*\[\d+ x i32\] .*", r"[d x i32] operation", r"[d x int] operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*\[\d+ x i64\] .*", r"[d x i64] operation", r"[d x int] operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*\[\d+ x i128\] .*", r"[d x i128] operation", r"[d x int] operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*\[\d+ x x86_fp80\] .*", r"[d x float] operation", r"[d x floating point] operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*\[\d+ x float\] .*", r"[d x float] operation", r"[d x floating point] operation", ], [ r"<%ID> = (tail |musttail |notail )?call " + any_of(opt_invoke) + r"*\[\d+ x double\] .*", r"[d x double] operation", r"[d x floating point] operation", ], [r"ret i1 .*", "i1 operation", "int operation"], [r"ret i2 .*", "i2 operation", "int operation"], [r"ret i4 .*", "i4 operation", "int operation"], [r"ret i8 .*", "i8 operation", "int operation"], [r"ret i16 .*", "i16 operation", "int operation"], [r"ret i32 .*", "i32 operation", "int operation"], [r"ret i64 .*", "i64 operation", "int operation"], [r"ret i128 .*", "i128 operation", "int operation"], [r"ret i1\* .*", "i1* operation", "int* operation"], [r"ret i2\* .*", "i2* operation", "int* operation"], [r"ret i4\* 
.*", "i4* operation", "int* operation"], [r"ret i8\* .*", "i8* operation", "int* operation"], [r"ret i16\* .*", "i16* operation", "int* operation"], [r"ret i32\* .*", "i32* operation", "int* operation"], [r"ret i64\* .*", "i64* operation", "int* operation"], [r"ret i128\* .*", "i128* operation", "int* operation"], [r"ret x86_fp80 .*", "x86_fp80 operation", "floating point operation"], [r"ret float .*", "float operation", "floating point operation"], [r"ret double .*", "double operation", "floating point operation"], [r"ret x86_fp80\* .*", "x86_fp80* operation", "floating point* operation"], [r"ret float\* .*", "float* operation", "floating point* operation"], [r"ret double\* .*", "double* operation", "floating point* operation"], ['ret %".*', "struct/class op", "struct/class op"], [r"ret <%.*", "struct/class op", "struct/class op"], [r"ret <?{.*", "struct/class op", "struct/class op"], [r"ret opaque.*", "struct/class op", "struct/class op"], [r"ret <\d+ x i1> .*", "<d x i1> operation", "<d x int> operation"], [r"ret <\d+ x i2> .*", "<d x i2> operation", "<d x int> operation"], [r"ret <\d+ x i4> .*", "<d x i4> operation", "<d x int> operation"], [r"ret <\d+ x i8> .*", "<d x i8> operation", "<d x int> operation"], [r"ret <\d+ x i16> .*", "<d x i16> operation", "<d x int> operation"], [r"ret <\d+ x i32> .*", "<d x i32> operation", "<d x int> operation"], [r"ret <\d+ x i64> .*", "<d x i64> operation", "<d x int> operation"], [r"ret <\d+ x i128> .*", "<d x i128> operation", "<d x int> operation"], [ r"ret <\d+ x x86_fp80> .*", r"<d x x86_fp80> operation", r"<d x floating point> operation", ], [ r"ret <\d+ x float> .*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"ret <\d+ x double> .*", r"<d x double> operation", r"<d x floating point> operation", ], [r"ret <\d+ x i1>\* .*", "<d x i1>* operation", "<d x int>* operation"], [r"ret <\d+ x i2>\* .*", "<d x i2>* operation", "<d x int>* operation"], [r"ret <\d+ x i4>\* .*", "<d x i4>* operation", "<d x int>* operation"], [r"ret <\d+ x i8>\* .*", "<d x i8>* operation", "<d x int>* operation"], [r"ret <\d+ x i16>\* .*", "<d x i16>* operation", "<d x int>* operation"], [r"ret <\d+ x i32>\* .*", "<d x i32>* operation", "<d x int>* operation"], [r"ret <\d+ x i64>\* .*", "<d x i64>* operation", "<d x int>* operation"], [r"ret <\d+ x i128>\* .*", "<d x i128>* operation", "<d x int>* operation"], [ r"ret <\d+ x x86_fp80>\* .*", r"<d x x86_fp80>* operation", r"<d x floating point>* operation", ], [ r"ret <\d+ x float>\* .*", r"<d x float>* operation", r"<d x floating point>* operation", ], [ r"ret <\d+ x double>\* .*", r"<d x double>* operation", r"<d x floating point>* operation", ], [r"ret \[\d+ x i1\] .*", "[d x i1] operation", "[d x int] operation"], [r"ret \[\d+ x i2\] .*", "[d x i2] operation", "[d x int] operation"], [r"ret \[\d+ x i4\] .*", "[d x i4] operation", "[d x int] operation"], [r"ret \[\d+ x i8\] .*", "[d x i8] operation", "[d x int] operation"], [r"ret \[\d+ x i16\] .*", "[d x i16] operation", "[d x int] operation"], [r"ret \[\d+ x i32\] .*", "[d x i32] operation", "[d x int] operation"], [r"ret \[\d+ x i64\] .*", "[d x i64] operation", "[d x int] operation"], [r"ret \[\d+ x i128\] .*", "[d x i128] operation", "[d x int] operation"], [ r"ret \[\d+ x x86_fp80\] .*", r"[d x x86_fp80] operation", r"[d x floating point] operation", ], [ r"ret \[\d+ x float\] .*", r"[d x float] operation", r"[d x floating point] operation", ], [ r"ret \[\d+ x double\] .*", r"[d x double] operation", r"[d x floating point] operation", ], 
[r"<%ID> = and i1 .*", "i1 operation", "int operation"], [r"<%ID> = and <\d+ x i1> .*", "<d x i1> operation", "<d x int> operation"], [r"<%ID> = and i2 .*", "i2 operation", "int operation"], [r"<%ID> = and <\d+ x i2> .*", "<d x i2> operation", "<d x int> operation"], [r"<%ID> = and i4 .*", "i4 operation", "int operation"], [r"<%ID> = and <\d+ x i4> .*", "<d x i4> operation", "<d x int> operation"], [r"<%ID> = and i8 .*", "i8 operation", "int operation"], [r"<%ID> = and <\d+ x i8> .*", "<d x i8> operation", "<d x int> operation"], [r"<%ID> = and i16 .*", "i16 operation", "int operation"], [r"<%ID> = and <\d+ x i16> .*", "<d x i16> operation", "<d x int> operation"], [r"<%ID> = and i24 .*", "i24 operation", "int operation"], [r"<%ID> = and <\d+ x i24> .*", "<d x i24> operation", "<d x int> operation"], [r"<%ID> = and i32 .*", "i32 operation", "int operation"], [r"<%ID> = and <\d+ x i32> .*", "<d x i32> operation", "<d x int> operation"], [r"<%ID> = and i40 .*", "i40 operation", "int operation"], [r"<%ID> = and <\d+ x i40> .*", "<d x i40> operation", "<d x int> operation"], [r"<%ID> = and i64 .*", "i64 operation", "int operation"], [r"<%ID> = and <\d+ x i64> .*", "<d x i64> operation", "<d x int> operation"], [r"<%ID> = and i128 .*", "i128 operation", "int operation"], [ r"<%ID> = and <\d+ x i128> .*", r"<d x i128> operation", r"<d x int> operation", ], [r"<%ID> = or i1 .*", "i1 operation", "int operation"], [r"<%ID> = or <\d+ x i1> .*", "<d x i1> operation", "<d x int> operation"], [r"<%ID> = or i2 .*", "i2 operation", "int operation"], [r"<%ID> = or <\d+ x i2> .*", "<d x i2> operation", "<d x int> operation"], [r"<%ID> = or i4 .*", "i4 operation", "int operation"], [r"<%ID> = or <\d+ x i4> .*", "<d x i4> operation", "<d x int> operation"], [r"<%ID> = or i8 .*", "i8 operation", "int operation"], [r"<%ID> = or <\d+ x i8> .*", "<d x i8> operation", "<d x int> operation"], [r"<%ID> = or i16 .*", "i16 operation", "int operation"], [r"<%ID> = or <\d+ x i16> .*", "<d x i16> operation", "<d x int> operation"], [r"<%ID> = or i24 .*", "i24 operation", "int operation"], [r"<%ID> = or <\d+ x i24> .*", "<d x i24> operation", "<d x int> operation"], [r"<%ID> = or i32 .*", "i32 operation", "int operation"], [r"<%ID> = or <\d+ x i32> .*", "<d x i32> operation", "<d x int> operation"], [r"<%ID> = or i40 .*", "i40 operation", "int operation"], [r"<%ID> = or <\d+ x i40> .*", "<d x i40> operation", "<d x int> operation"], [r"<%ID> = or i64 .*", "i64 operation", "int operation"], [r"<%ID> = or <\d+ x i64> .*", "<d x i64> operation", "<d x int> operation"], [r"<%ID> = or i128 .*", "i128 operation", "int operation"], [r"<%ID> = or <\d+ x i128> .*", "<d x i128> operation", "<d x int> operation"], [r"<%ID> = xor i1 .*", "i1 operation", "int operation"], [r"<%ID> = xor <\d+ x i1>.*", "<d x i1> operation", "<d x int> operation"], [r"<%ID> = xor i4 .*", "i4 operation", "int operation"], [r"<%ID> = xor <\d+ x i2>.*", "<d x i2> operation", "<d x int> operation"], [r"<%ID> = xor i2 .*", "i2 operation", "int operation"], [r"<%ID> = xor <\d+ x i4>.*", "<d x i4> operation", "<d x int> operation"], [r"<%ID> = xor i8 .*", "i8 operation", "int operation"], [r"<%ID> = xor <\d+ x i8>.*", "<d x i8> operation", "<d x int> operation"], [r"<%ID> = xor i16 .*", "i16 operation", "int operation"], [r"<%ID> = xor <\d+ x i16>.*", "<d x i16> operation", "<d x int> operation"], [r"<%ID> = xor i24 .*", "i16 operation", "int operation"], [r"<%ID> = xor <\d+ x i24>.*", "<d x i16> operation", "<d x int> operation"], [r"<%ID> = xor i32 .*", 
"i32 operation", "int operation"], [r"<%ID> = xor <\d+ x i32>.*", "<d x i32> operation", "<d x int> operation"], [r"<%ID> = xor i40 .*", "i40 operation", "int operation"], [r"<%ID> = xor <\d+ x i40>.*", "<d x i40> operation", "<d x int> operation"], [r"<%ID> = xor i64 .*", "i64 operation", "int operation"], [r"<%ID> = xor <\d+ x i64>.*", "<d x i64> operation", "<d x int> operation"], [r"<%ID> = xor i128 .*", "i128 operation", "int operation"], [r"<%ID> = xor <\d+ x i128>.*", "<d x i128> operation", "<d x int> operation"], [ r"<%ID> = shl " + any_of(opt_addsubmul) + r"?i1 .*", r"i1 operation", r"int operation", ], [ r"<%ID> = shl " + any_of(opt_addsubmul) + r"?<\d+ x i1> .*", r"<d x i1> operation", r"<d x int> operation", ], [ r"<%ID> = shl " + any_of(opt_addsubmul) + r"?i2 .*", r"i2 operation", r"int operation", ], [ r"<%ID> = shl " + any_of(opt_addsubmul) + r"?<\d+ x i2> .*", r"<d x i2> operation", r"<d x int> operation", ], [ r"<%ID> = shl " + any_of(opt_addsubmul) + r"?i4 .*", r"i8 operation", r"int operation", ], [ r"<%ID> = shl " + any_of(opt_addsubmul) + r"?<\d+ x i4> .*", r"<d x i4> operation", r"<d x int> operation", ], [ r"<%ID> = shl " + any_of(opt_addsubmul) + r"?i8 .*", r"i8 operation", r"int operation", ], [ r"<%ID> = shl " + any_of(opt_addsubmul) + r"?<\d+ x i8> .*", r"<d x i8> operation", r"<d x int> operation", ], [ r"<%ID> = shl " + any_of(opt_addsubmul) + r"?i16 .*", r"i16 operation", r"int operation", ], [ r"<%ID> = shl " + any_of(opt_addsubmul) + r"?<\d+ x i16> .*", r"<d x i16> operation", r"<d x int> operation", ], [ r"<%ID> = shl " + any_of(opt_addsubmul) + r"?i32 .*", r"i32 operation", r"int operation", ], [ r"<%ID> = shl " + any_of(opt_addsubmul) + r"?<\d+ x i32> .*", r"<d x i32> operation", r"<d x int> operation", ], [ r"<%ID> = shl " + any_of(opt_addsubmul) + r"?i40 .*", r"i40 operation", r"int operation", ], [ r"<%ID> = shl " + any_of(opt_addsubmul) + r"?<\d+ x i40> .*", r"<d x i40> operation", r"<d x int> operation", ], [ r"<%ID> = shl " + any_of(opt_addsubmul) + r"?i64 .*", r"i64 operation", r"int operation", ], [ r"<%ID> = shl " + any_of(opt_addsubmul) + r"?<\d+ x i64> .*", r"<d x i64> operation", r"<d x int> operation", ], [ r"<%ID> = shl " + any_of(opt_addsubmul) + r"?i128 .*", r"i128 operation", r"int operation", ], [ r"<%ID> = shl " + any_of(opt_addsubmul) + r"?<\d+ x i128> .*", r"<d x i128> operation", r"<d x int> operation", ], [ r"<%ID> = shl " + any_of(opt_addsubmul) + r"?i256 .*", r"i256 operation", r"int operation", ], [ r"<%ID> = shl " + any_of(opt_addsubmul) + r"?<\d+ x i256> .*", r"<d x i256> operation", r"<d x int> operation", ], [ r"<%ID> = ashr " + any_of(opt_usdiv) + r"?i1 .*", r"i1 operation", r"int operation", ], [ r"<%ID> = ashr " + any_of(opt_usdiv) + r"?<\d+ x i1> .*", r"<d x i1> operation", r"<d x int> operation", ], [ r"<%ID> = ashr " + any_of(opt_usdiv) + r"?i2 .*", r"i2 operation", r"int operation", ], [ r"<%ID> = ashr " + any_of(opt_usdiv) + r"?<\d+ x i2> .*", r"<d x i2> operation", r"<d x int> operation", ], [ r"<%ID> = ashr " + any_of(opt_usdiv) + r"?i4 .*", r"i4 operation", r"int operation", ], [ r"<%ID> = ashr " + any_of(opt_usdiv) + r"?<\d+ x i4> .*", r"<d x i4> operation", r"<d x int> operation", ], [ r"<%ID> = ashr " + any_of(opt_usdiv) + r"?i8 .*", r"i8 operation", r"int operation", ], [ r"<%ID> = ashr " + any_of(opt_usdiv) + r"?<\d+ x i8> .*", r"<d x i8> operation", r"<d x int> operation", ], [ r"<%ID> = ashr " + any_of(opt_usdiv) + r"?i16 .*", r"i16 operation", r"int operation", ], [ r"<%ID> = ashr " + any_of(opt_usdiv) + 
r"?<\d+ x i16> .*", r"<d x i16> operation", r"<d x int> operation", ], [ r"<%ID> = ashr " + any_of(opt_usdiv) + r"?i32 .*", r"i32 operation", r"int operation", ], [ r"<%ID> = ashr " + any_of(opt_usdiv) + r"?<\d+ x i32> .*", r"<d x i32> operation", r"<d x int> operation", ], [ r"<%ID> = ashr " + any_of(opt_usdiv) + r"?i40 .*", r"i40 operation", r"int operation", ], [ r"<%ID> = ashr " + any_of(opt_usdiv) + r"?<\d+ x i40> .*", r"<d x i40> operation", r"<d x int> operation", ], [ r"<%ID> = ashr " + any_of(opt_usdiv) + r"?i64 .*", r"i64 operation", r"int operation", ], [ r"<%ID> = ashr " + any_of(opt_usdiv) + r"?<\d+ x i64> .*", r"<d x i64> operation", r"<d x int> operation", ], [ r"<%ID> = ashr " + any_of(opt_usdiv) + r"?i128 .*", r"i128 operation", r"int operation", ], [ r"<%ID> = ashr " + any_of(opt_usdiv) + r"?<\d+ x i128> .*", r"<d x i128> operation", r"<d x int> operation", ], [ r"<%ID> = ashr " + any_of(opt_usdiv) + r"?i256 .*", r"i256 operation", r"int operation", ], [ r"<%ID> = ashr " + any_of(opt_usdiv) + r"?<\d+ x i256> .*", r"<d x i256> operation", r"<d x int> operation", ], [ r"<%ID> = lshr " + any_of(opt_usdiv) + r"?i1 .*", r"i1 operation", r"int operation", ], [ r"<%ID> = lshr " + any_of(opt_usdiv) + r"?<\d+ x i1> .*", r"<d x i1> operation", r"<d x int> operation", ], [ r"<%ID> = lshr " + any_of(opt_usdiv) + r"?i2 .*", r"i2 operation", r"int operation", ], [ r"<%ID> = lshr " + any_of(opt_usdiv) + r"?<\d+ x i2> .*", r"<d x i2> operation", r"<d x int> operation", ], [ r"<%ID> = lshr " + any_of(opt_usdiv) + r"?i4 .*", r"i4 operation", r"int operation", ], [ r"<%ID> = lshr " + any_of(opt_usdiv) + r"?<\d+ x i4> .*", r"<d x i4> operation", r"<d x int> operation", ], [ r"<%ID> = lshr " + any_of(opt_usdiv) + r"?i8 .*", r"i8 operation", r"int operation", ], [ r"<%ID> = lshr " + any_of(opt_usdiv) + r"?<\d+ x i8> .*", r"<d x i8> operation", r"<d x int> operation", ], [ r"<%ID> = lshr " + any_of(opt_usdiv) + r"?i16 .*", r"i16 operation", r"int operation", ], [ r"<%ID> = lshr " + any_of(opt_usdiv) + r"?<\d+ x i16> .*", r"<d x i16> operation", r"<d x int> operation", ], [ r"<%ID> = lshr " + any_of(opt_usdiv) + r"?i24 .*", r"i24 operation", r"int operation", ], [ r"<%ID> = lshr " + any_of(opt_usdiv) + r"?<\d+ x i24> .*", r"<d x i24> operation", r"<d x int> operation", ], [ r"<%ID> = lshr " + any_of(opt_usdiv) + r"?i32 .*", r"i32 operation", r"int operation", ], [ r"<%ID> = lshr " + any_of(opt_usdiv) + r"?<\d+ x i32> .*", r"<d x i32> operation", r"<d x int> operation", ], [ r"<%ID> = lshr " + any_of(opt_usdiv) + r"?i40 .*", r"i40 operation", r"int operation", ], [ r"<%ID> = lshr " + any_of(opt_usdiv) + r"?<\d+ x i40> .*", r"<d x i40> operation", r"<d x int> operation", ], [ r"<%ID> = lshr " + any_of(opt_usdiv) + r"?i64 .*", r"i64 operation", r"int operation", ], [ r"<%ID> = lshr " + any_of(opt_usdiv) + r"?<\d+ x i64> .*", r"<d x i64> operation", r"<d x int> operation", ], [ r"<%ID> = lshr " + any_of(opt_usdiv) + r"?i128 .*", r"i128 operation", r"int operation", ], [ r"<%ID> = lshr " + any_of(opt_usdiv) + r"?<\d+ x i128> .*", r"<d x i128> operation", r"<d x int> operation", ], [ r"<%ID> = lshr " + any_of(opt_usdiv) + r"?i256 .*", r"i256 operation", r"int operation", ], [ r"<%ID> = lshr " + any_of(opt_usdiv) + r"?<\d+ x i256> .*", r"<d x i256> operation", r"<d x int> operation", ], [r"<%ID> = phi i1 .*", "i1 operation", "int operation"], [r"<%ID> = phi <\d+ x i1> .*", "<d x i1> operation", "<d x int> operation"], [ r"<%ID> = phi <\d+ x i1\*> .*", r"<d x i1*> operation", r"<d x int*> operation", ], 
[ r"<%ID> = phi <\d+ x i1>\* .*", r"<d x i1>* operation", r"<d x int>* operation", ], [r"<%ID> = phi \[\d+ x i1\] .*", "[d x i1] operation", "[d x int] operation"], [ r"<%ID> = phi \[\d+ x i1\]\* .*", r"[d x i1]* operation", r"[d x int]* operation", ], [ r"<%ID> = phi \[\d+ x i1\]\*\* .*", r"[d x i1]** operation", r"[d x int]** operation", ], [ r"<%ID> = phi \[\d+ x i1\]\*\*\* .*", r"[d x i1]*** operation", r"[d x int]*** operation", ], [r"<%ID> = phi i2 .*", "i2 operation", "int operation"], [r"<%ID> = phi <\d+ x i2> .*", "<d x i2> operation", "<d x int> operation"], [ r"<%ID> = phi <\d+ x i2\*> .*", r"<d x i2*> operation", r"<d x int*> operation", ], [ r"<%ID> = phi <\d+ x i2>\* .*", r"<d x i2>* operation", r"<d x int>* operation", ], [r"<%ID> = phi \[\d+ x i2\] .*", "[d x i2] operation", "[d x int] operation"], [ r"<%ID> = phi \[\d+ x i2\]\* .*", r"[d x i2]* operation", r"[d x int]* operation", ], [ r"<%ID> = phi \[\d+ x i2\]\*\* .*", r"[d x i2]** operation", r"[d x int]** operation", ], [ r"<%ID> = phi \[\d+ x i2\]\*\*\* .*", r"[d x i2]*** operation", r"[d x int]*** operation", ], [r"<%ID> = phi i4 .*", "i4 operation", "int operation"], [r"<%ID> = phi <\d+ x i4> .*", "<d x i4> operation", "<d x int> operation"], [ r"<%ID> = phi <\d+ x i4\*> .*", r"<d x i4*> operation", r"<d x int*> operation", ], [ r"<%ID> = phi <\d+ x i4>\* .*", r"<d x i4>* operation", r"<d x int>* operation", ], [r"<%ID> = phi \[\d+ x i4\] .*", "[d x i4] operation", "[d x int] operation"], [ r"<%ID> = phi \[\d+ x i4\]\* .*", r"[d x i4]* operation", r"[d x int]* operation", ], [ r"<%ID> = phi \[\d+ x i4\]\*\* .*", r"[d x i4]** operation", r"[d x int]** operation", ], [ r"<%ID> = phi \[\d+ x i4\]\*\*\* .*", r"[d x i4]*** operation", r"[d x int]*** operation", ], [r"<%ID> = phi i8 .*", "i8 operation", "int operation"], [r"<%ID> = phi <\d+ x i8> .*", "<d x i8> operation", "<d x int> operation"], [ r"<%ID> = phi <\d+ x i8\*> .*", r"<d x i8*> operation", r"<d x int*> operation", ], [ r"<%ID> = phi <\d+ x i8>\* .*", r"<d x i8>* operation", r"<d x int>* operation", ], [r"<%ID> = phi \[\d+ x i8\] .*", "[d x i4] operation", "[d x int] operation"], [ r"<%ID> = phi \[\d+ x i8\]\* .*", r"[d x i4]* operation", r"[d x int]* operation", ], [ r"<%ID> = phi \[\d+ x i8\]\*\* .*", r"[d x i4]** operation", r"[d x int]** operation", ], [ r"<%ID> = phi \[\d+ x i8\]\*\*\* .*", r"[d x i4]*** operation", r"[d x int]*** operation", ], [r"<%ID> = phi i16 .*", "i16 operation", "int operation"], [r"<%ID> = phi <\d+ x i16> .*", "<d x i16> operation", "<d x int> operation"], [ r"<%ID> = phi <\d+ x i16\*> .*", r"<d x i16*> operation", r"<d x int*> operation", ], [ r"<%ID> = phi <\d+ x i16>\* .*", r"<d x i16>* operation", r"<d x int>* operation", ], [ r"<%ID> = phi \[\d+ x i16\] .*", r"[d x i16] operation", r"[d x int] operation", ], [ r"<%ID> = phi \[\d+ x i16\]\* .*", r"[d x i16]* operation", r"[d x int]* operation", ], [ r"<%ID> = phi \[\d+ x i16\]\*\* .*", r"[d x i16]** operation", r"[d x int]** operation", ], [ r"<%ID> = phi \[\d+ x i16\]\*\*\* .*", r"[d x i16]*** operation", r"[d x int]*** operation", ], [r"<%ID> = phi i32 .*", "i32 operation", "int operation"], [r"<%ID> = phi <\d+ x i32> .*", "<d x i32> operation", "<d x int> operation"], [ r"<%ID> = phi <\d+ x i32\*> .*", r"<d x i32*> operation", r"<d x int*> operation", ], [ r"<%ID> = phi <\d+ x i32>\* .*", r"<d x i32>* operation", r"<d x int>* operation", ], [ r"<%ID> = phi \[\d+ x i32\] .*", r"[d x i32] operation", r"[d x int] operation", ], [ r"<%ID> = phi \[\d+ x i32\]\* .*", r"[d x 
i32]* operation", r"[d x int]* operation", ], [ r"<%ID> = phi \[\d+ x i32\]\*\* .*", r"[d x i32]** operation", r"[d x int]** operation", ], [ r"<%ID> = phi \[\d+ x i32\]\*\*\* .*", r"[d x i32]*** operation", r"[d x int]*** operation", ], [r"<%ID> = phi i40 .*", "i32 operation", "int operation"], [r"<%ID> = phi <\d+ x i40> .*", "<d x i40> operation", "<d x int> operation"], [ r"<%ID> = phi <\d+ x i40\*> .*", r"<d x i40*> operation", r"<d x int*> operation", ], [ r"<%ID> = phi <\d+ x i40>\* .*", r"<d x i40>* operation", r"<d x int>* operation", ], [ r"<%ID> = phi \[\d+ x i40\] .*", r"[d x i40] operation", r"[d x int] operation", ], [ r"<%ID> = phi \[\d+ x i40\]\* .*", r"[d x i40]* operation", r"[d x int]* operation", ], [ r"<%ID> = phi \[\d+ x i40\]\*\* .*", r"[d x i40]** operation", r"[d x int]** operation", ], [ r"<%ID> = phi \[\d+ x i40\]\*\*\* .*", r"[d x i40]*** operation", r"[d x int]*** operation", ], [r"<%ID> = phi i64 .*", "i64 operation", "int operation"], [r"<%ID> = phi <\d+ x i64> .*", "<d x i64> operation", "<d x int> operation"], [ r"<%ID> = phi <\d+ x i64\*> .*", r"<d x i64*> operation", r"<d x int*> operation", ], [ r"<%ID> = phi <\d+ x i64>\* .*", r"<d x i64>* operation", r"<d x int>* operation", ], [ r"<%ID> = phi \[\d+ x i64\] .*", r"[d x i64] operation", r"[d x int] operation", ], [ r"<%ID> = phi \[\d+ x i64\]\* .*", r"[d x i64]* operation", r"[d x int]* operation", ], [ r"<%ID> = phi \[\d+ x i64\]\*\* .*", r"[d x i64]** operation", r"[d x int]** operation", ], [ r"<%ID> = phi \[\d+ x i64\]\*\*\* .*", r"[d x i64]*** operation", r"[d x int]*** operation", ], [r"<%ID> = phi i128 .*", "i128 operation", "int operation"], [ r"<%ID> = phi <\d+ x i128> .*", r"<d x i128> operation", r"<d x int> operation", ], [ r"<%ID> = phi <\d+ x i128\*> .*", r"<d x i128*> operation", r"<d x int*> operation", ], [ r"<%ID> = phi <\d+ x i128>\* .*", r"<d x i128>* operation", r"<d x int>* operation", ], [ r"<%ID> = phi \[\d+ x i128\] .*", r"[d x i128] operation", r"[d x int] operation", ], [ r"<%ID> = phi \[\d+ x i128\]\* .*", r"[d x i128]* operation", r"[d x int]* operation", ], [ r"<%ID> = phi \[\d+ x i126\]\*\* .*", r"[d x i128]** operation", r"[d x int]** operation", ], [ r"<%ID> = phi \[\d+ x i128\]\*\*\* .*", r"[d x i128]*** operation", r"[d x int]*** operation", ], [r"<%ID> = phi i1\* .*", "i1* operation", "int* operation"], [r"<%ID> = phi i2\* .*", "i2* operation", "int* operation"], [r"<%ID> = phi i4\* .*", "i4* operation", "int* operation"], [r"<%ID> = phi i8\* .*", "i8* operation", "int* operation"], [r"<%ID> = phi i16\* .*", "i16* operation", "int* operation"], [r"<%ID> = phi i32\* .*", "i32* operation", "int* operation"], [r"<%ID> = phi i40\* .*", "i40* operation", "int* operation"], [r"<%ID> = phi i64\* .*", "i64* operation", "int* operation"], [r"<%ID> = phi i128\* .*", "i128* operation", "int* operation"], [r"<%ID> = phi i1\*\* .*", "i1** operation", "int** operation"], [r"<%ID> = phi i2\*\* .*", "i2** operation", "int** operation"], [r"<%ID> = phi i4\*\* .*", "i4** operation", "int** operation"], [r"<%ID> = phi i8\*\* .*", "i8** operation", "int** operation"], [r"<%ID> = phi i16\*\* .*", "i16** operation", "int** operation"], [r"<%ID> = phi i32\*\* .*", "i32** operation", "int** operation"], [r"<%ID> = phi i40\*\* .*", "i40** operation", "int** operation"], [r"<%ID> = phi i64\*\* .*", "i64** operation", "int** operation"], [r"<%ID> = phi i128\*\* .*", "i128** operation", "int** operation"], [r"<%ID> = phi i1\*\*\* .*", "i1*** operation", "int*** operation"], [r"<%ID> = phi 
i2\*\*\* .*", "i2*** operation", "int*** operation"], [r"<%ID> = phi i4\*\*\* .*", "i4*** operation", "int*** operation"], [r"<%ID> = phi i8\*\*\* .*", "i8*** operation", "int*** operation"], [r"<%ID> = phi i16\*\*\* .*", "i16*** operation", "int*** operation"], [r"<%ID> = phi i32\*\*\* .*", "i32*** operation", "int*** operation"], [r"<%ID> = phi i64\*\*\* .*", "i64*** operation", "int*** operation"], [r"<%ID> = phi i128\*\*\* .*", "i128*** operation", "int*** operation"], [r"<%ID> = phi x86_fp80 .*", "float operation", "floating point operation"], [r"<%ID> = phi float .*", "float operation", "floating point operation"], [r"<%ID> = phi double .*", "double operation", "floating point operation"], [ r"<%ID> = phi <\d+ x x86_fp80> .*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"<%ID> = phi <\d+ x float> .*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"<%ID> = phi <\d+ x double> .*", r"<d x double> operation", r"<d x floating point> operation", ], [ r"<%ID> = phi x86_fp80\* .*", r"float* operation", r"floating point* operation", ], [ r"<%ID> = phi <\d+ x x86_fp80\*> .*", r"<d x float*> operation", r"<d x floating point*> operation", ], [ r"<%ID> = phi <\d+ x float\*> .*", r"<d x float*> operation", r"<d x floating point*> operation", ], [ r"<%ID> = phi <\d+ x double\*> .*", r"<d x double*> operation", r"<d x floating point*> operation", ], [ r"<%ID> = phi <\d+ x x86_fp80>\* .*", r"<d x float>* operation", r"<d x floating point>* operation", ], [ r"<%ID> = phi <\d+ x float>\* .*", r"<d x float>* operation", r"<d x floating point>* operation", ], [ r"<%ID> = phi <\d+ x double>\* .*", r"<d x double>* operation", r"<d x floating point>* operation", ], [ r"<%ID> = phi x86_fp80\* .*", r"float* operation", r"floating point* operation", ], [r"<%ID> = phi float\* .*", "float* operation", "floating point* operation"], [r"<%ID> = phi double\* .*", "double* operation", "floating point* operation"], [ r"<%ID> = phi x86_fp80\*\* .*", r"float** operation", r"floating point** operation", ], [ r"<%ID> = phi float\*\* .*", r"float** operation", r"floating point** operation", ], [ r"<%ID> = phi double\*\* .*", r"double** operation", r"floating point** operation", ], [ r"<%ID> = phi x86_fp80\*\*\* .*", r"float*** operation", r"floating point*** operation", ], [ r"<%ID> = phi float\*\*\* .*", r"float*** operation", r"floating point*** operation", ], [ r"<%ID> = phi double\*\*\* .*", r"double*** operation", r"floating point*** operation", ], [r"<%ID> = phi void \(.*\) \[.*", "function op", "function op"], [r"<%ID> = phi void \(.*\)\* \[.*", "function* op", "function* op"], [r"<%ID> = phi void \(.*\)\*\* \[.*", "function** op", "function** op"], [r"<%ID> = phi void \(.*\)\*\*\* \[.*", "function*** op", "function*** op"], [r"<%ID> = phi (<?{|opaque|<%ID>) .*", "struct/class op", "struct/class op"], [ r"<%ID> = phi (<?{|opaque|<%ID>)\* .*", r"struct/class* op", r"struct/class* op", ], [ r"<%ID> = phi (<?{|opaque|<%ID>)\*\* .*", r"struct/class** op", r"struct/class** op", ], [ r"<%ID> = phi (<?{|opaque|<%ID>)\*\*\* .*", r"struct/class*** op", r"struct/class*** op", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"i1, .*", r"i1 operation", r"int operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"i2, .*", r"i2 operation", r"int operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"i4, .*", r"i4 operation", r"int operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"i8, .*", r"i8 operation", r"int operation", ], [ r"<%ID> = 
getelementptr " + any_of(opt_GEP) + r"i16, .*", r"i16 operation", r"int operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"i32, .*", r"i32 operation", r"int operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"i64, .*", r"i64 operation", r"int operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"i128, .*", r"i128 operation", r"int operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"i1\*, .*", r"i1* operation", r"int* operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"i2\*, .*", r"i2* operation", r"int* operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"i4\*, .*", r"i4* operation", r"int* operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"i8\*, .*", r"i8* operation", r"int* operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"i16\*, .*", r"i16* operation", r"int* operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"i32\*, .*", r"i32* operation", r"int* operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"i64\*, .*", r"i64* operation", r"int* operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"i128\*, .*", r"i128* operation", r"int* operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"i1\*\*, .*", r"i1** operation", r"int** operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"i2\*\*, .*", r"i2** operation", r"int** operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"i4\*\*, .*", r"i4** operation", r"int** operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"i8\*\*, .*", r"i8** operation", r"int** operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"i16\*\*, .*", r"i16** operation", r"int** operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"i32\*\*, .*", r"i32** operation", r"int** operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"i64\*\*, .*", r"i64** operation", r"int** operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"i128\*\*, .*", r"i128** operation", r"int** operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"x86_fp80, .*", r"float operation", r"floating point operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"float, .*", r"float operation", r"floating point operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"double, .*", r"double operation", r"floating point operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"x86_fp80\*, .*", r"float* operation", r"floating point* operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"float\*, .*", r"float* operation", r"floating point* operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"double\*, .*", r"double* operation", r"floating point* operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"x86_fp80\*\*, .*", r"float** operation", r"floating point** operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"float\*\*, .*", r"float** operation", r"floating point** operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"double\*\*, .*", r"double** operation", r"floating point** operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r'%".*', r"struct/class op", r"struct/class op", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"<%.*", r"struct/class op", r"struct/class op", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"<?{.*", r"struct/class op", r"struct/class op", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"opaque.*", r"struct/class op", r"struct/class op", ], [ r"<%ID> = 
getelementptr " + any_of(opt_GEP) + r"<\d+ x i1>, .*", r"<d x i1> operation", r"<d x int> operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"<\d+ x i2>, .*", r"<d x i2> operation", r"<d x int> operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"<\d+ x i4>, .*", r"<d x i4> operation", r"<d x int> operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"<\d+ x i8>, .*", r"<d x i8> operation", r"<d x int> operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"<\d+ x i16>, .*", r"<d x i16> operation", r"<d x int> operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"<\d+ x i32>, .*", r"<d x i32> operation", r"<d x int> operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"<\d+ x i64>, .*", r"<d x i64> operation", r"<d x int> operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"<\d+ x i128>, .*", r"<d x i128> operation", r"<d x int> operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"<\d+ x x86_fp80>, .*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"<\d+ x float>, .*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"<\d+ x double>, .*", r"<d x double> operation", r"<d x floating point> operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"<\d+ x i1>\*, .*", r"<d x i1>* operation", r"<d x int>* operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"<\d+ x i2>\*, .*", r"<d x i2>* operation", r"<d x int>* operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"<\d+ x i4>\*, .*", r"<d x i4>* operation", r"<d x int>* operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"<\d+ x i8>\*, .*", r"<d x i8>* operation", r"<d x int>* operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"<\d+ x i16>\*, .*", r"<d x i16>* operation", r"<d x int>* operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"<\d+ x i32>\*, .*", r"<d x i32>* operation", r"<d x int>* operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"<\d+ x i64>\*, .*", r"<d x i64>* operation", r"<d x int>* operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"<\d+ x i128>\*, .*", r"<d x i128>* operation", r"<d x int>* operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"<\d+ x x86_fp80>\*, .*", r"<d x float>* operation", r"<d x floating point>* operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"<\d+ x float>\*, .*", r"<d x float>* operation", r"<d x floating point>* operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"<\d+ x double>\*, .*", r"<d x double>* operation", r"<d x floating point>* operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x i1\], .*", r"[d x i1] operation", r"[d x int] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x i2\], .*", r"[d x i2] operation", r"[d x int] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x i4\], .*", r"[d x i4] operation", r"[d x int] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x i8\], .*", r"[d x i8] operation", r"[d x int] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x i16\], .*", r"[d x i16] operation", r"[d x int] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x i32\], .*", r"[d x i32] operation", r"[d x int] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x i64\], .*", r"[d x i64] operation", r"[d 
x int] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x i128\], .*", r"[d x i128] operation", r"[d x int] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x x86_fp80\], .*", r"[d x float] operation", r"[d x floating point] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x float\], .*", r"[d x float] operation", r"[d x floating point] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x double\], .*", r"[d x double] operation", r"[d x floating point] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x .*\], .*", r"array of array operation", r"array of array operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x i1\]\*, .*", r"[d x i1]* operation", r"[d x int] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x i2\]\*, .*", r"[d x i2]* operation", r"[d x int] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x i4\]\*, .*", r"[d x i4]* operation", r"[d x int] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x i8\]\*, .*", r"[d x i8]* operation", r"[d x int] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x i16\]\*, .*", r"[d x i16]* operation", r"[d x int] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x i32\]\*, .*", r"[d x i32]* operation", r"[d x int] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x i64\]\*, .*", r"[d x i64]* operation", r"[d x int] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x i128\]\*, .*", r"[d x i128]* operation", r"[d x int] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x x86_fp80\]\*, .*", r"[d x float]* operation", r"[d x floating point] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x float\]\*, .*", r"[d x float]* operation", r"[d x floating point] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x double\]\*, .*", r"[d x double]* operation", r"[d x floating point] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x .*\]\*, .*", r"array of array* operation", r"array of array operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x i1\]\*\*, .*", r"[d x i1]** operation", r"[d x int] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x i2\]\*\*, .*", r"[d x i2]** operation", r"[d x int] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x i4\]\*\*, .*", r"[d x i4]** operation", r"[d x int] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x i8\]\*\*, .*", r"[d x i8]** operation", r"[d x int] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x i16\]\*\*, .*", r"[d x i16]** operation", r"[d x int] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x i32\]\*\*, .*", r"[d x i32]** operation", r"[d x int] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x i64\]\*\*, .*", r"[d x i64]** operation", r"[d x int] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x i128\]\*\*, .*", r"[d x i128]** operation", r"[d x int] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x x86_fp80\]\*\*, .*", r"[d x float]** operation", r"[d x floating point] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x float\]\*\*, .*", r"[d x float]** operation", r"[d x floating point] operation", ], [ r"<%ID> = getelementptr " 
+ any_of(opt_GEP) + r"\[\d+ x double\]\*\*, .*", r"[d x double]** operation", r"[d x floating point] operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r"\[\d+ x .*\], .*", r"array of array** operation", r"array of array operation", ], [ r"<%ID> = getelementptr " + any_of(opt_GEP) + r".*\(.*\)\*+, .*", r"function operation", r"function operation", ], [ r"<%ID> = invoke " + any_of(opt_invoke) + r"*i1 .*", r"i1 operation", r"int operation", ], [ r"<%ID> = invoke " + any_of(opt_invoke) + r"*i2 .*", r"i2 operation", r"int operation", ], [ r"<%ID> = invoke " + any_of(opt_invoke) + r"*i4 .*", r"i4 operation", r"int operation", ], [ r"<%ID> = invoke " + any_of(opt_invoke) + r"*i8 .*", r"i8 operation", r"int operation", ], [ r"<%ID> = invoke " + any_of(opt_invoke) + r"*i16 .*", r"i16 operation", r"int operation", ], [ r"<%ID> = invoke " + any_of(opt_invoke) + r"*i32 .*", r"i32 operation", r"int operation", ], [ r"<%ID> = invoke " + any_of(opt_invoke) + r"*i64 .*", r"i64 operation", r"int operation", ], [ r"<%ID> = invoke " + any_of(opt_invoke) + r"*i128 .*", r"i128 operation", r"int operation", ], [ r"<%ID> = invoke " + any_of(opt_invoke) + r"*i1\* .*", r"i1* operation", r"int* operation", ], [ r"<%ID> = invoke " + any_of(opt_invoke) + r"*i2\* .*", r"i2* operation", r"int* operation", ], [ r"<%ID> = invoke " + any_of(opt_invoke) + r"*i4\* .*", r"i4* operation", r"int* operation", ], [ r"<%ID> = invoke " + any_of(opt_invoke) + r"*i8\* .*", r"i8* operation", r"int* operation", ], [ r"<%ID> = invoke " + any_of(opt_invoke) + r"*i16\* .*", r"i16* operation", r"int* operation", ], [ r"<%ID> = invoke " + any_of(opt_invoke) + r"*i32\* .*", r"i32* operation", r"int* operation", ], [ r"<%ID> = invoke " + any_of(opt_invoke) + r"*i64\* .*", r"i64* operation", r"int* operation", ], [ r"<%ID> = invoke " + any_of(opt_invoke) + r"*i128\* .*", r"i128* operation", r"int* operation", ], [ r"<%ID> = invoke " + any_of(opt_invoke) + r"*x86_fp80 .*", r"float operation", r"floating point operation", ], [ r"<%ID> = invoke " + any_of(opt_invoke) + r"*float .*", r"float operation", r"floating point operation", ], [ r"<%ID> = invoke " + any_of(opt_invoke) + r"*double .*", r"double operation", r"floating point operation", ], [ r"<%ID> = invoke " + any_of(opt_invoke) + r"*x86_fp80\* .*", r"float* operation", r"floating point* operation", ], [ r"<%ID> = invoke " + any_of(opt_invoke) + r"*float\* .*", r"float* operation", r"floating point* operation", ], [ r"<%ID> = invoke " + any_of(opt_invoke) + r"*double\* .*", r"double* operation", r"floating point* operation", ], [ r"<%ID> = invoke " + any_of(opt_invoke) + r'*%".*', r"struct/class op", r"struct/class op", ], [ r"<%ID> = invoke " + any_of(opt_invoke) + r"*<?{.*", r"struct/class op", r"struct/class op", ], [ r"<%ID> = invoke " + any_of(opt_invoke) + r"*opaque.*", r"struct/class op", r"struct/class op", ], [ r"<%ID> = invoke " + any_of(opt_invoke) + r'*%".*\*.*', r"struct/class* op", r"struct/class op", ], [r"<%ID> = invoke " + any_of(opt_invoke) + r"*void .*", "void op", "void op"], [r"invoke " + any_of(opt_invoke) + r"*void .*", "void op", "void op"], [ r"<%ID> = extractelement <\d+ x i1> .*", r"<d x i1> operation", r"<d x int> operation", ], [ r"<%ID> = extractelement <\d+ x i1\*> .*", r"<d x i1*> operation", r"<d x int*> operation", ], [ r"<%ID> = extractelement <\d+ x i2> .*", r"<d x i2> operation", r"<d x int> operation", ], [ r"<%ID> = extractelement <\d+ x i2\*> .*", r"<d x i2*> operation", r"<d x int*> operation", ], [ r"<%ID> = extractelement <\d+ x i4> 
.*", r"<d x i4> operation", r"<d x int> operation", ], [ r"<%ID> = extractelement <\d+ x i4\*> .*", r"<d x i4*> operation", r"<d x int*> operation", ], [ r"<%ID> = extractelement <\d+ x i8> .*", r"<d x i8> operation", r"<d x int> operation", ], [ r"<%ID> = extractelement <\d+ x i8\*> .*", r"<d x i8*> operation", r"<d x int*> operation", ], [ r"<%ID> = extractelement <\d+ x i16> .*", r"<d x i16> operation", r"<d x int> operation", ], [ r"<%ID> = extractelement <\d+ x i16\*> .*", r"<d x i16*> operation", r"<d x int*> operation", ], [ r"<%ID> = extractelement <\d+ x i32> .*", r"<d x i32> operation", r"<d x int> operation", ], [ r"<%ID> = extractelement <\d+ x i32\*> .*", r"<d x i32*> operation", r"<d x int*> operation", ], [ r"<%ID> = extractelement <\d+ x i64> .*", r"<d x i64> operation", r"<d x int> operation", ], [ r"<%ID> = extractelement <\d+ x i64\*> .*", r"<d x i64*> operation", r"<d x int*> operation", ], [ r"<%ID> = extractelement <\d+ x i128> .*", r"<d x i128> operation", r"<d x int> operation", ], [ r"<%ID> = extractelement <\d+ x i128\*> .*", r"<d x i128*> operation", r"<d x int*> operation", ], [ r"<%ID> = extractelement <\d+ x x86_fp80> .*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"<%ID> = extractelement <\d+ x x86_fp80\*> .*", r"<d x float*> operation", r"<d x floating point*> operation", ], [ r"<%ID> = extractelement <\d+ x float> .*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"<%ID> = extractelement <\d+ x float\*> .*", r"<d x float*> operation", r"<d x floating point*> operation", ], [ r"<%ID> = extractelement <\d+ x double> .*", r"<d x double> operation", r"<d x floating point> operation", ], [ r"<%ID> = extractelement <\d+ x double\*> .*", r"<d x double*> operation", r"<d x floating point*> operation", ], [ r"<%ID> = extractelement <\d+ x \{.*\}> .*", r"<d x struct> operation", r"<d x struct> operation", ], [ r"<%ID> = extractelement <\d+ x \{.*\}\*> .*", r"<d x struct*> operation", r"<d x struct*> operation", ], [ r"<%ID> = insertelement <\d+ x i1> .*", r"<d x i1> operation", r"<d x int> operation", ], [ r"<%ID> = insertelement <\d+ x i1\*> .*", r"<d x i1*> operation", r"<d x int*> operation", ], [ r"<%ID> = insertelement <\d+ x i2> .*", r"<d x i2> operation", r"<d x int> operation", ], [ r"<%ID> = insertelement <\d+ x i2\*> .*", r"<d x i2*> operation", r"<d x int*> operation", ], [ r"<%ID> = insertelement <\d+ x i4> .*", r"<d x i4> operation", r"<d x int> operation", ], [ r"<%ID> = insertelement <\d+ x i4\*> .*", r"<d x i4*> operation", r"<d x int*> operation", ], [ r"<%ID> = insertelement <\d+ x i8> .*", r"<d x i8> operation", r"<d x int> operation", ], [ r"<%ID> = insertelement <\d+ x i8\*> .*", r"<d x i8*> operation", r"<d x int*> operation", ], [ r"<%ID> = insertelement <\d+ x i16> .*", r"<d x i16> operation", r"<d x int> operation", ], [ r"<%ID> = insertelement <\d+ x i16\*> .*", r"<d x i16*> operation", r"<d x int*> operation", ], [ r"<%ID> = insertelement <\d+ x i32> .*", r"<d x i32> operation", r"<d x int> operation", ], [ r"<%ID> = insertelement <\d+ x i32\*> .*", r"<d x i32*> operation", r"<d x int*> operation", ], [ r"<%ID> = insertelement <\d+ x i64> .*", r"<d x i64> operation", r"<d x int> operation", ], [ r"<%ID> = insertelement <\d+ x i64\*> .*", r"<d x i64*> operation", r"<d x int*> operation", ], [ r"<%ID> = insertelement <\d+ x i128> .*", r"<d x i128> operation", r"<d x int> operation", ], [ r"<%ID> = insertelement <\d+ x i128\*> .*", r"<d x i128*> operation", r"<d x int*> operation", ], [ r"<%ID> = 
insertelement <\d+ x x86_fp80> .*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"<%ID> = insertelement <\d+ x x86_fp80\*> .*", r"<d x float*> operation", r"<d x floating point*> operation", ], [ r"<%ID> = insertelement <\d+ x float> .*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"<%ID> = insertelement <\d+ x float\*> .*", r"<d x float*> operation", r"<d x floating point*> operation", ], [ r"<%ID> = insertelement <\d+ x double> .*", r"<d x double> operation", r"<d x floating point> operation", ], [ r"<%ID> = insertelement <\d+ x double\*> .*", r"<d x double*> operation", r"<d x floating point*> operation", ], [ r"<%ID> = insertelement <\d+ x \{.*\}> .*", r"<d x struct> operation", r"<d x struct> operation", ], [ r"<%ID> = insertelement <\d+ x \{.*\}\*> .*", r"<d x struct*> operation", r"<d x struct*> operation", ], [ r"<%ID> = shufflevector <\d+ x i1> .*", r"<d x i1> operation", r"<d x int> operation", ], [ r"<%ID> = shufflevector <\d+ x i1\*> .*", r"<d x i1*> operation", r"<d x int*> operation", ], [ r"<%ID> = shufflevector <\d+ x i2> .*", r"<d x i2> operation", r"<d x int> operation", ], [ r"<%ID> = shufflevector <\d+ x i2\*> .*", r"<d x i2*> operation", r"<d x int*> operation", ], [ r"<%ID> = shufflevector <\d+ x i4> .*", r"<d x i4> operation", r"<d x int> operation", ], [ r"<%ID> = shufflevector <\d+ x i4\*> .*", r"<d x i4*> operation", r"<d x int*> operation", ], [ r"<%ID> = shufflevector <\d+ x i8> .*", r"<d x i8> operation", r"<d x int> operation", ], [ r"<%ID> = shufflevector <\d+ x i8\*> .*", r"<d x i8*> operation", r"<d x int*> operation", ], [ r"<%ID> = shufflevector <\d+ x i16> .*", r"<d x i16> operation", r"<d x int> operation", ], [ r"<%ID> = shufflevector <\d+ x i16\*> .*", r"<d x i16*> operation", r"<d x int*> operation", ], [ r"<%ID> = shufflevector <\d+ x i32> .*", r"<d x i32> operation", r"<d x int> operation", ], [ r"<%ID> = shufflevector <\d+ x i32\*> .*", r"<d x i32*> operation", r"<d x int*> operation", ], [ r"<%ID> = shufflevector <\d+ x i64> .*", r"<d x i64> operation", r"<d x int> operation", ], [ r"<%ID> = shufflevector <\d+ x i64\*> .*", r"<d x i64*> operation", r"<d x int*> operation", ], [ r"<%ID> = shufflevector <\d+ x i128> .*", r"<d x i128> operation", r"<d x int> operation", ], [ r"<%ID> = shufflevector <\d+ x i128\*> .*", r"<d x i128*> operation", r"<d x int*> operation", ], [ r"<%ID> = shufflevector <\d+ x x86_fp80> .*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"<%ID> = shufflevector <\d+ x x86_fp80\*> .*", r"<d x float*> operation", r"<d x floating point*> operation", ], [ r"<%ID> = shufflevector <\d+ x float> .*", r"<d x float> operation", r"<d x floating point> operation", ], [ r"<%ID> = shufflevector <\d+ x float\*> .*", r"<d x float*> operation", r"<d x floating point*> operation", ], [ r"<%ID> = shufflevector <\d+ x double> .*", r"<d x double> operation", r"<d x floating point> operation", ], [ r"<%ID> = shufflevector <\d+ x double\*> .*", r"<d x double*> operation", r"<d x floating point*> operation", ], [ r"<%ID> = shufflevector <\d+ x \{.*\}> .*", r"<d x struct> operation", r"<d x struct> operation", ], [ r"<%ID> = shufflevector <\d+ x \{.*\}\*> .*", r"<d x struct*> operation", r"<d x struct*> operation", ], [ r"<%ID> = bitcast void \(.* to .*", r"in-between operation", r"in-between operation", ], [ r"<%ID> = bitcast (i\d+|float|double|x86_fp80|opaque) .* to .*", r"in-between operation", r"in-between operation", ], [ r"<%ID> = bitcast (i\d+|float|double|x86_fp80|opaque)\* 
.* to .*", r"in-between operation", r"in-between operation", ], [ r"<%ID> = bitcast (i\d+|float|double|x86_fp80|opaque)\*\* .* to .*", r"in-between operation", r"in-between operation", ], [ r"<%ID> = bitcast (i\d+|float|double|x86_fp80|opaque)\*\*\* .* to .*", r"in-between operation", r"in-between operation", ], [ r"<%ID> = bitcast \[\d+.* to .*", r"in-between operation", r"in-between operation", ], [ r"<%ID> = bitcast <\d+.* to .*", r"in-between operation", r"in-between operation", ], [ r'<%ID> = bitcast (%"|<%|<?{).* to .*', r"in-between operation", r"in-between operation", ], [r"<%ID> = fpext .*", "in-between operation", "in-between operation"], [r"<%ID> = fptrunc .*", "in-between operation", "in-between operation"], [r"<%ID> = sext .*", "in-between operation", "in-between operation"], [r"<%ID> = trunc .* to .*", "in-between operation", "in-between operation"], [r"<%ID> = zext .*", "in-between operation", "in-between operation"], [r"<%ID> = sitofp .*", "in-between operation", "in-between operation"], [r"<%ID> = uitofp .*", "in-between operation", "in-between operation"], [r"<%ID> = inttoptr .*", "in-between operation", "in-between operation"], [r"<%ID> = ptrtoint .*", "in-between operation", "in-between operation"], [r"<%ID> = fptosi .*", "in-between operation", "in-between operation"], [r"<%ID> = fptoui .*", "in-between operation", "in-between operation"], [r"<%ID> = extractvalue .*", "in-between operation", "in-between operation"], [r"<%ID> = insertvalue .*", "in-between operation", "in-between operation"], [r"resume .*", "in-between operation", "in-between operation"], [r"(tail |musttail |notail )?call( \w+)? void .*", "call void", "call void"], [r"i\d{1,2} <(INT|FLOAT)>, label <%ID>", "blob", "blob"], [r"<%ID> = select .*", "blob", "blob"], [r".*to label.*unwind label.*", "blob", "blob"], [r"catch .*", "blob", "blob"], [r"cleanup", "blob", "blob"], [r"<%ID> = landingpad .", "blob", "blob"], [r"; <label>:<LABEL>", "blob", "blob"], [r"<LABEL>:", "blob", "blob"], [r"br i1 .*", "blob", "blob"], [r"br label .*", "blob", "blob"], [r"indirectbr .*", "blob", "blob"], [r"switch .*", "blob", "blob"], [r"unreachable.*", "blob", "blob"], [r"ret void", "blob", "blob"], [r"!UNK", "blob", "blob"], ]
CompilerGym-development
compiler_gym/third_party/inst2vec/rgx_utils.py
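The triples above pair a statement-matching regex with its level-1 and level-2 abstraction tags. A minimal sketch of how one such triple might be applied during statement abstraction follows; the list name stmt_families, the helper abstract_statement, and the sample IR statement are illustrative assumptions, not part of rgx_utils.py.

import re

# Hypothetical excerpt: one [pattern, level-1 tag, level-2 tag] triple in the same
# shape as the table above (the surrounding list name is an assumption).
stmt_families = [
    [
        r"<%ID> = shufflevector <\d+ x float> .*",
        r"<d x float> operation",
        r"<d x floating point> operation",
    ],
]


def abstract_statement(stmt: str, level: int = 1) -> str:
    """Map a preprocessed IR statement to its family tag, or return it unchanged."""
    for pattern, tag_l1, tag_l2 in stmt_families:
        if re.match(pattern, stmt):
            return tag_l1 if level == 1 else tag_l2
    return stmt


# Identifiers are assumed to have already been rewritten to <%ID> by preprocessing.
print(abstract_statement("<%ID> = shufflevector <4 x float> <%ID>, <4 x float> <%ID>, <4 x i32> <%ID>"))
# -> "<d x float> operation"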
# NCC: Neural Code Comprehension # https://github.com/spcl/ncc # Copyright 2018 ETH Zurich # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, are permitted provided that the # following conditions are met: # 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following # disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote # products derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, # INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ============================================================================== # flake8: noqa """Preprocess LLVM IR code to XFG for inst2vec training""" import copy import os import pickle import re from typing import Dict import networkx as nx from compiler_gym.third_party.inst2vec import rgx_utils as rgx ######################################################################################################################## # LLVM IR preprocessing ######################################################################################################################## def GetFunctionsDeclaredInFile(bytecode_lines): functions_declared_in_file = [] # Loop on all lines in data for line in bytecode_lines: # Check whether it contains a function declaration if "declare" in line and not line.startswith("call void"): # Find the function name func = re.match(r"declare .*(" + rgx.func_name + r")", line) if func is None: raise ValueError(f"Could not match function name in {line}") func = func.group(1) # Add it to the list functions_declared_in_file.append(func) return functions_declared_in_file def get_functions_declared_in_files(data): """ For each file, construct a list of names of the functions declared in the file, before the corresponding statements are removed by pre-processing. The list is used later on in the graph construction to identify the names of functions declared in this file. :param data: input data as a list of files where each file is a list of strings :return: functions_declared_in_files: list of lists of names of the functions declared in this file """ return [GetFunctionsDeclaredInFile(file) for file in data] def keep(line): """ Determine whether a line of code is representative and should be kept in the data set or not. :param line: string representing the line of code to test :return: boolean: True if the line is to be kept, False if the line is to be discarded """ # Ignore empty lines. 
if line == "": return False # Ignore comment lines (except labels). if line[0] == ";" and not line[0:9] == "; <label>": return False if line[0] == "!" or line[0] == "\n": return False if ( line.strip()[0] == "{" or line.strip()[0] == "}" or line.strip()[0] == "[" or line.strip()[0] == "]" ): return False # Ignore empty lines (NOTE: possible dupe of `if line == ''` above?). if len(line) == 0: return False if "source_filename" in line: return False if "target triple" in line: return False if "target datalayout" in line: return False if "attributes" in line: return False if "module asm " in line: return False if "declare" in line: return False modif_line = re.sub(r"\".*\"", "", line) if re.match(rgx.global_id + r" = .*alias ", modif_line): return False if re.search("call void asm", line): return False match = re.search(r"\$.* = comdat any", line) if match: return False match = re.match(r"\s+;", line) if match: return False # If none of the above matched, keep the line. return True def remove_non_representative_code(data): """ Remove lines of code that aren't representative of LLVM-IR "language" and shouldn't be used for training the embeddings :param data: input data as a list of files where each file is a list of strings :return: input data with non-representative lines of code removed """ for i in range(len(data)): data[i] = [line for line in data[i] if keep(line)] return data def remove_leading_spaces(data): """ Remove the leading spaces (indentation) of lines of code :param data: input data as a list of files, where each file is a list of strings :return: input data with leading spaces removed """ for i in range(len(data)): for j in range(len(data[i])): data[i][j] = data[i][j].strip() return data def remove_trailing_comments_and_metadata(data): """ Remove comments, metadata and attribute groups trailing at the end of a line :param data: input data as a list of files where each file is a list of strings :return: modified input data """ for i in range(len(data)): for j in range(len(data[i])): line = data[i][j] # If the line contains a trailing metadata pos = line.find("!") if pos != -1: # Remove metadatas which are function arguments while re.search(r"\(.*metadata !.*\)", line) is not None: line = re.sub(r"(, )?metadata !\d+(, )?", "", line) line = re.sub(r"(, )?metadata !\w+(, )?", "", line) line = re.sub(r"metadata !\d+(, )?", "", line) line = re.sub(r"metadata !\w+(, )?", "", line) pos = line.find("!") if pos != -1: # Check whether the '!' 
is part of a string expression pos_string = line[:pos].find('c"') if ( pos_string == -1 ): # there is no string expression earlier on the line line = line[:pos].strip() # erase from here to the end of the line if line[-1] == ",": # can happen with !tbaa line = line[:-1].strip() else: # there is a string expression earlier on the line pos_endstring = line[pos_string + 2 : pos].find('"') if ( pos_endstring != -1 ): # the string has been terminated before the ; line = line[ :pos ].strip() # erase from here to the end of the line if line[-1] == ",": # can happen with !tbaa line = line[:-1].strip() # If the line contains a trailing attribute group pos = line.find("#") if pos != -1: # Check whether the ';' is part of a string expression s = re.search(r'c".*"', line[:pos]) if not s: # there is no string expression earlier on the line line = line[:pos].strip() # erase from here to the end of the line else: # there is a string expression earlier on the line pos_endstring = s.end() if ( pos_endstring != -1 ): # the string has been terminated before the ; line = line[ :pos ].strip() # erase from here to the end of the line data[i][j] = line return data def collapse_stmt_units_to_a_line(data): """ Some statements are written on several lines though they really are just one statement Detect and collapse these :param data: input data as a list of files where each file is a list of strings :return: modified input data """ to_track = "" erase_token = "to_erase" # Helper variable to mark lines to be erased separator = "\n " # Detect multi-line statements and collapse them for file in data: for i in range(len(file)): if file[i] == to_track: print("Found", to_track) if re.match(rgx.local_id + " = landingpad", file[i]): if i + 1 < len(file): if ( re.match(r"cleanup", file[i + 1]) or re.match(r"filter", file[i + 1]) or re.match(r"catch", file[i + 1]) ): file[i] += separator + file[i + 1] # collapse lines file[i + 1] = erase_token # mark as "to erase" else: continue if i + 2 < len(file): if ( re.match(r"cleanup", file[i + 2]) or re.match(r"filter", file[i + 2]) or re.match(r"catch", file[i + 2]) ): file[i] += separator + file[i + 2] # collapse lines file[i + 2] = erase_token # mark as "to erase" else: continue if i + 3 < len(file): if ( re.match(r"cleanup", file[i + 3]) or re.match(r"filter", file[i + 3]) or re.match(r"catch", file[i + 3]) ): file[i] += separator + file[i + 3] # collapse lines file[i + 3] = erase_token # mark as "to erase" else: continue elif re.match(r"switch", file[i]): for j in range(i + 1, len(file)): if re.search(r"i\d+ -?\d+, label " + rgx.local_id, file[j]): # if this statement is part of the switch, file[i] += separator + file[j] # collapse lines file[j] = erase_token # mark as "to erase" else: # if this statement isn't part of the switch file[i] += "]" # add closing bracket break elif re.search(r"invoke", file[i]): if i + 1 < len(file): if re.match( r"to label " + rgx.local_id + " unwind label " + rgx.local_id, file[i + 1], ): file[i] += separator + file[i + 1] # collapse lines file[i + 1] = erase_token # mark as "to erase" # Erase statements which have been rendered superfluous from collapsing for i in range(len(data)): data[i] = [line for line in data[i] if line != erase_token] return data def remove_structure_definitions(data): """ Remove lines of code that aren't representative of LLVM-IR "language" and shouldn't be used for training the embeddings :param data: input data as a list of files where each file is a list of strings :return: input data with non-representative lines of 
code removed """ for i in range(len(data)): data[i] = [ line for line in data[i] if not re.match("%.* = type (<?{ .* }|opaque|{})", line) ] return data def preprocess(data): """Pre-processing of source code: - remove non-representative lines of code - remove leading spaces (indentation) - remove trailing comments and metadata :param data: input data as a list of files where each file is a list of strings :return: preprocessed_data: modified input data functions_declared_in_files: """ functions_declared_in_files = get_functions_declared_in_files(data) data = remove_non_representative_code(data) data = remove_leading_spaces(data) data = remove_trailing_comments_and_metadata(data) data = collapse_stmt_units_to_a_line(data) preprocessed_data = copy.deepcopy(data) preprocessed_data = remove_structure_definitions(preprocessed_data) return preprocessed_data, functions_declared_in_files ######################################################################################################################## # XFG-transforming (inline and abstract statements) ######################################################################################################################## # Helper regexs for structure type inlining vector_type = r"<\d+ x " + rgx.first_class_type + r">" array_type = r"\[\d+ x " + rgx.first_class_type + r"\]" array_of_array_type = r"\[\d+ x " + r"\[\d+ x " + rgx.first_class_type + r"\]" + r"\]" function_type = ( rgx.first_class_type + r" \(" + rgx.any_of([rgx.first_class_type, vector_type, array_type, "..."], ",") + "*" + rgx.any_of([rgx.first_class_type, vector_type, array_type, "..."]) + r"\)\**" ) structure_entry = rgx.any_of( [ rgx.first_class_type, vector_type, array_type, array_of_array_type, function_type, ] ) structure_entry_with_comma = rgx.any_of( [ rgx.first_class_type, vector_type, array_type, array_of_array_type, function_type, ], ",", ) literal_structure = ( "(<?{ " + structure_entry_with_comma + "*" + structure_entry + " }>?|opaque|{})" ) literal_structure_with_comma = literal_structure + ", " def construct_struct_types_dictionary_for_file(data): """ Construct a dictionary of structure names :param data: list of strings representing the content of one file :return: data: modified input data ready: dictionary of structure names """ # Optional: tracking to_track = "" # Three dictionaries to_process = dict() # contains non-literal structures to_inline = dict() # contains literal structures to be inlined in "to_process" ready = dict() # contains literal structures which have already been inlined # Helper strings struct_prev = [structure_entry, literal_structure] struct_prev_with_comma = [ structure_entry_with_comma, literal_structure_with_comma, ] use_previously_inlined_stmts = False # Put all "type" expressions from "data" into "to_process" for stmt in data: if len(to_track) > 0: if to_track in stmt: print("Found statement ", to_track) if re.match(rgx.struct_name + r" = type <?{?", stmt): k = re.sub(r"(" + rgx.struct_name + r") = type <?{?.*}?>?$", r"\g<1>", stmt) v = re.sub(rgx.struct_name + " = type (<?{?.*}?>?)$", r"\g<1>", stmt) to_process[k] = v # Loop over contents of "to_process" for i in list(to_process.items()): # Move the literal structures to to_inline if re.match(literal_structure, i[1]): to_inline[i[0]] = i[1] del to_process[i[0]] # Helper variables for iteration checks counter = 0 prev_to_process_len = len(to_process) # While "to_process" is not empty while len(to_process) > 0: # Loop over contents of to_inline for i in list(to_inline.items()): # and 
inline these statements in to_process for p in list(to_process.items()): pattern = re.escape(i[0]) + rgx.struct_lookahead if re.search(pattern, p[1]): to_process[p[0]] = re.sub(pattern, i[1], p[1]) # Under certain circumstances if use_previously_inlined_stmts: # print("\t... using previously inlined statements") # Loop over contents of "to_process" for p in list(to_process.items()): # and inline these statements with structures from "ready" for i in list(ready.items()): pattern = re.escape(i[0]) + rgx.struct_lookahead if re.search(pattern, p[1]): print("bingo") to_process[p[0]] = re.sub(pattern, i[1], p[1]) # Move contents of to_inline to ready ready.update(to_inline) to_inline = {} # Update possible structure entries if counter < 3: comp_structure_entry = rgx.any_of(struct_prev) comp_structure_entry_with_comma = rgx.any_of(struct_prev_with_comma) comp_structure = ( "<?{ " + comp_structure_entry_with_comma + "*" + comp_structure_entry + " }>?" ) struct_prev.append(comp_structure) struct_prev_with_comma.append(comp_structure + ", ") else: comp_structure = r"<?{ [ <>{}\dx\[\]\(\)\.,\*%IDvfloatdubeipqcy]+}>?$" # Loop over contents of to_process for i in list(to_process.items()): if re.match(comp_structure, i[1]): to_inline[i[0]] = i[1] del to_process[i[0]] # Update counter counter += 1 # Make sure progress as been made since the last iteration if len(to_process) == prev_to_process_len and counter > 3: # If this isn't the case, check if there is a type defined cyclically cycle_found = False for i in list(to_process.items()): # - Recursive, eg %intlist = type { %intlist*, i32 } # tracking if len(to_track) > 0: if to_track in i[0]: print("Found", to_track) if re.search(re.escape(i[0]) + rgx.struct_lookahead, i[1]): cycle_found = True new_entry = i[0] + "_cyclic" to_inline[new_entry] = "opaque" to_process[i[0]] = re.sub( re.escape(i[0]) + rgx.struct_lookahead, new_entry, i[1] ) # break if not cycle_found: # - Cyclic, eg # %"class.std::ios_base": { i32 (...)**, i64, i32, %"struct.std::ios_base::_Callback_list"*, ...} # %"struct.std::ios_base::_Callback_list": { opaque*, void (i32, %"class.std::ios_base"*, i32)* } for j in list(to_process.items()): if i != j and re.search( re.escape(i[0]) + rgx.struct_lookahead, j[1] ): cycle_found = True new_entry = i[0] + "_cyclic" to_inline[new_entry] = "opaque" to_process[j[0]] = re.sub( re.escape(i[0]) + rgx.struct_lookahead, new_entry, j[1] ) # break # If no cyclic type definition was found although no progress was made since the last pass, abort if not cycle_found: if not use_previously_inlined_stmts: # Perhaps some stmts which should be inlined are hiding in "ready": use these at the next pass use_previously_inlined_stmts = True else: assert cycle_found, ( "Counter step: " + str(counter) + ", could not inline " + str(len(to_process)) + " statements : \n" + string_of_items(to_process) ) else: use_previously_inlined_stmts = False # reset prev_to_process_len = len(to_process) # Stopping condition in case we've been looping for a suspiciously long time assert counter < 1000, ( "Could not inline " + str(len(to_process)) + " statements after " + str(counter) + " steps: \n" + string_of_items(to_process) ) # Move contents of "to_inline" to "ready" ready.update(to_inline) return data, ready def GetStructTypes(ir: str) -> Dict[str, str]: """Extract a dictionary of struct definitions from the given IR. :param ir: A string of LLVM IR. :return: A dictionary of <name, def> entries, where <name> is the name of a struct definition (e.g. 
"%struct.foo"), and <def> is the definition of the member types, e.g. "{ i32 }". """ try: _, dict_temp = construct_struct_types_dictionary_for_file(ir.split("\n")) return dict_temp except AssertionError as e: raise ValueError(e) from e def PreprocessStatement(stmt: str) -> str: # Remove local identifiers stmt = re.sub(rgx.local_id, "<%ID>", stmt) # Global identifiers stmt = re.sub(rgx.global_id, "<@ID>", stmt) # Remove labels if re.match(r"; <label>:\d+:?(\s+; preds = )?", stmt): stmt = re.sub(r":\d+", ":<LABEL>", stmt) stmt = re.sub("<%ID>", "<LABEL>", stmt) elif re.match(rgx.local_id_no_perc + r":(\s+; preds = )?", stmt): stmt = re.sub(rgx.local_id_no_perc + ":", "<LABEL>:", stmt) stmt = re.sub("<%ID>", "<LABEL>", stmt) if "; preds = " in stmt: s = stmt.split(" ") if s[-1][0] == " ": stmt = s[0] + s[-1] else: stmt = s[0] + " " + s[-1] # Remove floating point values stmt = re.sub(rgx.immediate_value_float_hexa, "<FLOAT>", stmt) stmt = re.sub(rgx.immediate_value_float_sci, "<FLOAT>", stmt) # Remove integer values if ( re.match("<%ID> = extractelement", stmt) is None and re.match("<%ID> = extractvalue", stmt) is None and re.match("<%ID> = insertelement", stmt) is None and re.match("<%ID> = insertvalue", stmt) is None ): stmt = re.sub(r"(?<!align)(?<!\[) " + rgx.immediate_value_int, " <INT>", stmt) # Remove string values stmt = re.sub(rgx.immediate_value_string, " <STRING>", stmt) # Remove index types if ( re.match("<%ID> = extractelement", stmt) is not None or re.match("<%ID> = insertelement", stmt) is not None ): stmt = re.sub(r"i\d+ ", "<TYP> ", stmt) return stmt
CompilerGym-development
compiler_gym/third_party/inst2vec/inst2vec_preprocess.py
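The preprocessing pipeline above can be exercised end to end on a small hand-written module. The sketch below is illustrative only: the IR snippet is invented, and it assumes inst2vec_preprocess is importable as a package module.

from compiler_gym.third_party.inst2vec import inst2vec_preprocess

# A made-up, already-disassembled LLVM-IR function (not from the source).
ir_lines = [
    "; ModuleID = 'example.c'",
    "define i32 @add(i32 %a, i32 %b) {",
    "  %1 = add nsw i32 %a, %b",
    "  ret i32 %1",
    "}",
]

# preprocess() takes a list of files, each file being a list of IR lines. It strips
# non-representative lines and indentation, drops trailing metadata, and collapses
# multi-line statements, returning the cleaned statements plus the names of any
# functions *declared* (not defined) in each file.
preprocessed, declared = inst2vec_preprocess.preprocess([ir_lines])
print(preprocessed[0])  # the define, add and ret statements; comments and braces removed
print(declared[0])      # [] - nothing is declared in this snippet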
#!/usr/bin/env python3 # # This script compiles and links the sources for a cBench benchmark into a # single unoptimized LLVM module. # # Usage: # # $ make_cBench_llvm_module.py <in_dir> <outpath> [<cflag>...] # # This compiles the code from <in_dir> and generates an LLVM bitcode module at # the given <outpath>, using any additional <cflags> as clang arguments. import sys from pathlib import Path from typing import List from compiler_gym.envs.llvm.llvm_benchmark import make_benchmark def make_cbench_llvm_module( benchmark_dir: Path, cflags: List[str], output_path: Path ) -> str: """Compile a cBench benchmark into an unoptimized LLVM bitcode file.""" src_dir = benchmark_dir / "src" if not src_dir.is_dir(): src_dir = benchmark_dir assert src_dir.is_dir(), f"Source directory not found: {src_dir}" src_files = [path for path in src_dir.iterdir() if path.name.endswith(".c")] assert src_files, f"No source files in {src_dir}" benchmark = make_benchmark(inputs=src_files, copt=cflags or []) # Write just the bitcode to file. with open(output_path, "wb") as f: f.write(benchmark.proto.program.contents) def main(): """Main entry point.""" # Parse arguments. benchmark_dir, output_path, *cflags = sys.argv[1:] benchmark_dir = Path(benchmark_dir).absolute().resolve() output_path = Path(output_path).absolute().resolve() make_cbench_llvm_module(benchmark_dir, cflags, output_path) if __name__ == "__main__": main()
CompilerGym-development
compiler_gym/third_party/cbench/make_llvm_module.py
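Besides the command-line form shown in the script's header comment, the helper can be called directly. The sketch below is hypothetical: the benchmark directory, extra cflag, and output path are invented, and it assumes the script is importable as a module.

from pathlib import Path

from compiler_gym.third_party.cbench.make_llvm_module import make_cbench_llvm_module

# Compile every .c file under <benchmark_dir>/src into one unoptimized bitcode module.
make_cbench_llvm_module(
    benchmark_dir=Path("/tmp/cBench/crc32"),  # hypothetical cBench checkout
    cflags=["-DLARGE_DATASET"],               # forwarded to clang via make_benchmark()
    output_path=Path("/tmp/crc32.bc"),
)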
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. AUTOPHASE_FEATURE_NAMES = [ "BBNumArgsHi", "BBNumArgsLo", "onePred", "onePredOneSuc", "onePredTwoSuc", "oneSuccessor", "twoPred", "twoPredOneSuc", "twoEach", "twoSuccessor", "morePreds", "BB03Phi", "BBHiPhi", "BBNoPhi", "BeginPhi", "BranchCount", "returnInt", "CriticalCount", "NumEdges", "const32Bit", "const64Bit", "numConstZeroes", "numConstOnes", "UncondBranches", "binaryConstArg", "NumAShrInst", "NumAddInst", "NumAllocaInst", "NumAndInst", "BlockMid", "BlockLow", "NumBitCastInst", "NumBrInst", "NumCallInst", "NumGetElementPtrInst", "NumICmpInst", "NumLShrInst", "NumLoadInst", "NumMulInst", "NumOrInst", "NumPHIInst", "NumRetInst", "NumSExtInst", "NumSelectInst", "NumShlInst", "NumStoreInst", "NumSubInst", "NumTruncInst", "NumXorInst", "NumZExtInst", "TotalBlocks", "TotalInsts", "TotalMemInst", "TotalFuncs", "ArgsPhi", "testUnary", ] # The dimensionality of the autophase feature vector. AUTOPHASE_FEATURE_DIM: int = len(AUTOPHASE_FEATURE_NAMES)
CompilerGym-development
compiler_gym/third_party/autophase/__init__.py
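Since the feature names are ordered to match the Autophase observation vector, a raw vector can be labeled by zipping it with the list above. A small illustrative helper (not part of the module):

from compiler_gym.third_party.autophase import (
    AUTOPHASE_FEATURE_DIM,
    AUTOPHASE_FEATURE_NAMES,
)


def autophase_to_dict(vector):
    """Label a raw Autophase observation vector with its feature names."""
    assert len(vector) == AUTOPHASE_FEATURE_DIM, "unexpected vector length"
    return dict(zip(AUTOPHASE_FEATURE_NAMES, vector))


# Example with a zero vector; real values would come from an environment observation.
features = autophase_to_dict([0] * AUTOPHASE_FEATURE_DIM)
print(features["TotalInsts"], features["BranchCount"])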
# Copyright 2013 David Malcolm <dmalcolm@redhat.com> # Copyright 2013 Red Hat, Inc. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 # USA import argparse import os from pathlib import Path def cmdline_to_argv(cmdline): """ Reconstruct an argv list from a cmdline string """ # Convert the input str to a list of characters # and Quoted instances def iter_fragments(): class Quoted: def __init__(self, quotechar, text): self.quotechar = quotechar self.text = text def __repr__(self): return "Quoted(%r, %r)" % (self.quotechar, self.text) def __str__(self): return "%s%s%s" % (self.quotechar, self.text, self.quotechar) for i, fragment in enumerate(cmdline.split('"')): if i % 2: # within a quoted section: yield Quoted('"', fragment) else: for ch in fragment: yield ch # Now split these characters+Quoted by whitespace: result = [] pending_arg = "" for fragment in iter_fragments(): if fragment in (" ", "\t"): result.append(pending_arg) pending_arg = "" else: pending_arg += str(fragment) if pending_arg: result.append(pending_arg) return result class GccInvocation: """ Parse a command-line invocation of GCC and extract various options of interest """ def __init__(self, argv): # Store the original argv for logging / debugging. 
self.original_argv = argv # Strip `-Xclang` arguments now because the hyphenated parameters # confuse argparse: sanitized_argv = argv.copy() for i in range(len(argv) - 2, -1, -1): if argv[i] == "-Xclang": del argv[i + 1] del argv[i] self.argv = argv self.executable = argv[0] self.progname = os.path.basename(self.executable) DRIVER_NAMES = ( "c89", "c99", "cc", "gcc", "c++", "g++", "xgcc", "clang", "clang++", ) self.is_driver = self.progname in DRIVER_NAMES self.sources = [] self.defines = [] self.includepaths = [] self.otherargs = [] if self.progname == "collect2": # collect2 appears to have a (mostly) different set of # arguments to the rest: return parser = argparse.ArgumentParser(add_help=False) def add_flag_opt(flag): parser.add_argument(flag, action="store_true") def add_opt_with_param(flag): parser.add_argument(flag, type=str) def add_opt_NoDriverArg(flag): if self.is_driver: add_flag_opt(flag) else: add_opt_with_param(flag) parser.add_argument("-o", type=str) parser.add_argument("-D", type=str, action="append", default=[]) parser.add_argument("-U", type=str, action="append", default=[]) parser.add_argument("-I", type=str, action="append", default=[]) # Arguments that take a further param: parser.add_argument("-x", type=str) # (for now, drop them on the floor) # Arguments for dependency generation (in the order they appear # in gcc/c-family/c.opt) # (for now, drop them on the floor) add_flag_opt("-M") add_opt_NoDriverArg("-MD") add_opt_with_param("-MF") add_flag_opt("-MG") add_flag_opt("-MM") add_opt_NoDriverArg("-MMD") add_flag_opt("-MP") add_opt_with_param("-MQ") add_opt_with_param("-MT") # Additional arguments for clang: add_opt_with_param("-resource-dir") add_opt_with_param("-target") # Various other arguments that take a 2nd argument: for arg in [ "-include", "-imacros", "-idirafter", "-iprefix", "-iwithprefix", "-iwithprefixbefore", "-isysroot", "-imultilib", "-isystem", "-iquote", ]: parser.add_argument(arg, type=str) # (for now, drop them on the floor) # Various arguments to cc1 etc that take a 2nd argument: for arg in ["-dumpbase", "-auxbase-strip"]: parser.add_argument(arg, type=str) # (for now, drop them on the floor) args, remainder = parser.parse_known_args(sanitized_argv[1:]) self.parsed_args = args self.defines = args.D self.includepaths = args.I for arg in remainder: if arg.startswith("-") and arg != "-": self.otherargs.append(arg) else: self.sources.append(arg) # Determine the absolute path of the generated output. output = self.parsed_args.o or "a.out" self.output_path = Path(output).absolute() @classmethod def from_cmdline(cls, cmdline): return cls(cmdline_to_argv(cmdline)) def __repr__(self): return ( "GccInvocation(executable=%r, sources=%r," " defines=%r, includepaths=%r, otherargs=%r)" % ( self.executable, self.sources, self.defines, self.includepaths, self.otherargs, ) ) def restrict_to_one_source(self, source): """ Make a new GccInvocation, preserving most arguments, but restricting the compilation to just the given source file """ newargv = [self.executable] newargv += ["-D%s" % define for define in self.defines] newargv += ["-I%s" % include for include in self.includepaths] newargv += self.otherargs newargv += [source] return GccInvocation(newargv)
CompilerGym-development
compiler_gym/third_party/gccinvocation/gccinvocation.py
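A short usage sketch for the parser above; the compile command is invented, and the values in the comments follow from the parsing rules in GccInvocation.

from compiler_gym.third_party.gccinvocation.gccinvocation import GccInvocation

inv = GccInvocation.from_cmdline(
    "gcc -DNDEBUG -Iinclude -O2 -c src/foo.c src/bar.c -o foo.o"
)
print(inv.sources)       # ['src/foo.c', 'src/bar.c']
print(inv.defines)       # ['NDEBUG']
print(inv.includepaths)  # ['include']
print(inv.otherargs)     # ['-O2', '-c']
print(inv.output_path)   # absolute path ending in foo.o

# restrict_to_one_source() rebuilds an equivalent invocation for a single file:
print(inv.restrict_to_one_source("src/foo.c").argv)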
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Module for resolving paths to LLVM binaries and libraries.""" import io import logging import shutil import sys import tarfile from pathlib import Path from threading import Lock from typing import Optional from fasteners import InterProcessLock from compiler_gym.util.download import download from compiler_gym.util.filesystem import extract_tar from compiler_gym.util.runfiles_path import cache_path, runfiles_path, site_data_path logger = logging.getLogger(__name__) # The data archive containing LLVM binaries and libraries. _LLVM_URL, _LLVM_SHA256 = { "darwin": ( "https://dl.fbaipublicfiles.com/compiler_gym/llvm-v0-macos.tar.bz2", "731ae351b62c5713fb5043e0ccc56bfba4609e284dc816f0b2a5598fb809bf6b", ), "linux": ( "https://dl.fbaipublicfiles.com/compiler_gym/llvm-v0-linux.tar.bz2", "59c3f328efd51994a11168ca15e43a8d422233796c6bc167c9eb771c7bd6b57e", ), }[sys.platform] # Thread lock to prevent race on download_llvm_files() from multi-threading. # This works in tandem with the inter-process file lock - both are required. _LLVM_DOWNLOAD_LOCK = Lock() _LLVM_UNPACKED_LOCATION: Optional[Path] = None def _download_llvm_files(destination: Path) -> Path: """Download and unpack the LLVM data pack.""" logger.warning( "Installing the CompilerGym LLVM environment runtime. This may take a few moments ..." ) # Tidy up an incomplete unpack. shutil.rmtree(destination, ignore_errors=True) tar_contents = io.BytesIO(download(_LLVM_URL, sha256=_LLVM_SHA256)) destination.parent.mkdir(parents=True, exist_ok=True) with tarfile.open(fileobj=tar_contents, mode="r:bz2") as tar: extract_tar(tar, destination) assert destination.is_dir() assert (destination / "LICENSE").is_file() return destination def download_llvm_files() -> Path: """Download and unpack the LLVM data pack.""" global _LLVM_UNPACKED_LOCATION unpacked_location = site_data_path("llvm-v0") # Fast path for repeated calls. if _LLVM_UNPACKED_LOCATION == unpacked_location: return unpacked_location with _LLVM_DOWNLOAD_LOCK: # Fast path for first call. This check will be repeated inside the locked # region if required. if (unpacked_location / ".unpacked").is_file(): _LLVM_UNPACKED_LOCATION = unpacked_location return unpacked_location with InterProcessLock(cache_path(".llvm-v0-install.LOCK")): # Now that the lock is acquired, repeat the check to see if it is # necessary to download the dataset. if (unpacked_location / ".unpacked").is_file(): return unpacked_location _download_llvm_files(unpacked_location) # Create the marker file to indicate that the directory is unpacked # and ready to go. 
(unpacked_location / ".unpacked").touch() _LLVM_UNPACKED_LOCATION = unpacked_location return unpacked_location def clang_path() -> Path: """Return the path of clang.""" return download_llvm_files() / "bin/clang" def lli_path() -> Path: """Return the path of lli.""" return download_llvm_files() / "bin/lli" def llc_path() -> Path: """Return the path of llc.""" return download_llvm_files() / "bin/llc" def llvm_as_path() -> Path: """Return the path of llvm-as.""" return download_llvm_files() / "bin/llvm-as" def llvm_dis_path() -> Path: """Return the path of llvm-as.""" return download_llvm_files() / "bin/llvm-dis" def llvm_extract_one_path() -> Path: """Return the path of llvm-extract-one.""" return runfiles_path("compiler_gym/envs/llvm/service/llvm-extract-one") def llvm_link_path() -> Path: """Return the path of llvm-link.""" return download_llvm_files() / "bin/llvm-link" def llvm_stress_path() -> Path: """Return the path of llvm-stress.""" return download_llvm_files() / "bin/llvm-stress" def llvm_diff_path() -> Path: """Return the path of llvm-diff.""" return download_llvm_files() / "bin/llvm-diff" def opt_path() -> Path: """Return the path of opt.""" return download_llvm_files() / "bin/opt"
CompilerGym-development
compiler_gym/third_party/llvm/__init__.py
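A hypothetical use of the path helpers above: invoking the downloaded clang and opt binaries directly. The source file name is made up, and the first helper call triggers the one-time download and unpack of the LLVM data pack.

import subprocess

from compiler_gym.third_party.llvm import clang_path, opt_path

# Compile a C file (assumed to exist) to unoptimized LLVM bitcode ...
subprocess.check_call(
    [str(clang_path()), "-emit-llvm", "-c", "-O0", "example.c", "-o", "example.bc"]
)
# ... then optimize it with the bundled opt binary.
subprocess.check_call(
    [str(opt_path()), "-O2", "example.bc", "-o", "example_opt.bc"]
)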
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Constants for the InstCount feature space.""" # Generated using: # # $ bazel run //compiler_gym/third_party/llvm:PrintInstCountFeatureNames INST_COUNT_FEATURE_NAMES = [ "TotalInsts", "TotalBlocks", "TotalFuncs", "Ret", "Br", "Switch", "IndirectBr", "Invoke", "Resume", "Unreachable", "CleanupRet", "CatchRet", "CatchSwitch", "CallBr", "FNeg", "Add", "FAdd", "Sub", "FSub", "Mul", "FMul", "UDiv", "SDiv", "FDiv", "URem", "SRem", "FRem", "Shl", "LShr", "AShr", "And", "Or", "Xor", "Alloca", "Load", "Store", "GetElementPtr", "Fence", "AtomicCmpXchg", "AtomicRMW", "Trunc", "ZExt", "SExt", "FPToUI", "FPToSI", "UIToFP", "SIToFP", "FPTrunc", "FPExt", "PtrToInt", "IntToPtr", "BitCast", "AddrSpaceCast", "CleanupPad", "CatchPad", "ICmp", "FCmp", "PHI", "Call", "Select", "UserOp1", "UserOp2", "VAArg", "ExtractElement", "InsertElement", "ShuffleVector", "ExtractValue", "InsertValue", "LandingPad", "Freeze", ] INST_COUNT_FEATURE_DIMENSIONALITY = len(INST_COUNT_FEATURE_NAMES) INST_COUNT_FEATURE_SHAPE = (INST_COUNT_FEATURE_DIMENSIONALITY,)
CompilerGym-development
compiler_gym/third_party/llvm/instcount.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Test that the DeepDataFlow dataset contains the expected numbers of files.""" import pytest from compiler_gym.util.runfiles_path import runfiles_path from tests.test_main import main # The number of bitcode files in the DeepDataFlow dataset, grouped by source. EXPECTED_NUMBER_OF_BITCODE_FILES = { "blas": 300, "linux": 13920, "github": 50708, "npb": 122, "poj104": 49628, "tensorflow": 1985, } @pytest.fixture(scope="session", params=list(EXPECTED_NUMBER_OF_BITCODE_FILES.keys())) def subset(request): return request.param def test_deep_dataflow_file_count(subset: str): bitcode_dir = runfiles_path("compiler_gym/third_party/DeepDataFlow") / subset num_files = len([f for f in bitcode_dir.iterdir() if f.name.endswith(".bc")]) assert num_files == EXPECTED_NUMBER_OF_BITCODE_FILES[subset] if __name__ == "__main__": main()
CompilerGym-development
compiler_gym/third_party/DeepDataFlow/file_count_test.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This module defines validation errors."""
from typing import Any, Dict

from pydantic import BaseModel


class ValidationError(BaseModel):
    """A ValidationError describes an error encountered in a call to
    :meth:`env.validate() <compiler_gym.envs.CompilerEnv.validate>`.
    """

    type: str
    """A short name describing the type of error that occurred. E.g.
    :code:`"Runtime crash"`.
    """

    data: Dict[str, Any] = {}
    """A JSON-serializable dictionary of data that further describes the error.
    This data dictionary can contain any information that may be relevant for
    diagnosing the underlying issue, such as a stack trace or an error line
    number. There is no specified schema for this data, validators are free to
    return whatever data they like. Setting this field is optional.
    """

    def __lt__(self, rhs):
        # Implement the < operator so that lists of ValidationErrors can be
        # sorted.
        if not isinstance(rhs, ValidationError):
            return True
        return (self.type, self.data) <= (rhs.type, rhs.data)
CompilerGym-development
compiler_gym/errors/validation_errors.py
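A brief sketch of constructing ValidationError instances and relying on the ordering operator defined above; the error types and payloads are invented.

from compiler_gym.errors import ValidationError

errors = [
    ValidationError(type="Runtime crash", data={"returncode": 139}),
    ValidationError(type="Output mismatch", data={"expected": "4", "actual": "5"}),
]

# __lt__ compares (type, data) tuples, so lists of errors can be sorted by type.
for error in sorted(errors):
    print(error.type, error.data)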
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from compiler_gym.errors.dataset_errors import BenchmarkInitError, DatasetInitError
from compiler_gym.errors.download_errors import DownloadFailed, TooManyRequests
from compiler_gym.errors.service_errors import (
    EnvironmentNotSupported,
    ServiceError,
    ServiceInitError,
    ServiceIsClosed,
    ServiceOSError,
    ServiceTransportError,
    SessionNotFound,
)
from compiler_gym.errors.validation_errors import ValidationError

__all__ = [
    "ValidationError",
    "BenchmarkInitError",
    "ServiceError",
    "SessionNotFound",
    "ServiceOSError",
    "ServiceInitError",
    "EnvironmentNotSupported",
    "ServiceTransportError",
    "ServiceIsClosed",
    "DownloadFailed",
    "TooManyRequests",
    "DatasetInitError",
]
CompilerGym-development
compiler_gym/errors/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This module defines service related errors."""


class ServiceError(Exception):
    """Error raised from the service."""


class SessionNotFound(ServiceError):
    """Requested session ID not found in service."""


class ServiceOSError(ServiceError, OSError):
    """System error raised from the service."""


class ServiceInitError(ServiceError, OSError):
    """Error raised if the service fails to initialize."""


class EnvironmentNotSupported(ServiceInitError):
    """Error raised if the runtime requirements for an environment are not
    met on the current system."""


class ServiceTransportError(ServiceError, OSError):
    """Error that is raised if communication with the service fails."""


class ServiceIsClosed(ServiceError, TypeError):
    """Error that is raised if trying to interact with a closed service."""
CompilerGym-development
compiler_gym/errors/service_errors.py
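The class hierarchy above lets callers choose how broadly to handle service failures. Below is a hypothetical helper (not part of CompilerGym) that recovers from a lost session but gives up on anything else.

from compiler_gym.errors import ServiceError, ServiceIsClosed, SessionNotFound


def step_with_recovery(env, action):
    """Apply an action, restarting the episode if the backend lost our session."""
    try:
        return env.step(action)
    except SessionNotFound:
        env.reset()  # the service no longer knows this session: start a new episode
        return env.step(action)
    except ServiceIsClosed:
        raise  # nothing sensible to do once the service has been shut down
    except ServiceError as e:
        print(f"compiler service failed: {e}")
        return None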
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.


class DownloadFailed(IOError):
    """Error thrown if a download fails."""


class TooManyRequests(DownloadFailed):
    """Error thrown by HTTP 429 response."""
CompilerGym-development
compiler_gym/errors/download_errors.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.


class BenchmarkInitError(OSError, ValueError):
    """Base class for errors raised if a benchmark fails to initialize."""


class DatasetInitError(OSError):
    """Base class for errors raised if a dataset fails to initialize."""
CompilerGym-development
compiler_gym/errors/dataset_errors.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Contains an implementation of the :class:`CompilerEnv<compiler_gym.envs.CompilerEnv>` interface as a gRPC client service.""" import logging import numbers import warnings from collections.abc import Iterable as IterableType from copy import deepcopy from math import isclose from pathlib import Path from time import time from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union import numpy as np from gym.spaces import Space from compiler_gym.compiler_env_state import CompilerEnvState from compiler_gym.datasets import Benchmark, Dataset, Datasets from compiler_gym.datasets.uri import BenchmarkUri from compiler_gym.envs.compiler_env import CompilerEnv from compiler_gym.errors import ( ServiceError, ServiceIsClosed, ServiceOSError, ServiceTransportError, SessionNotFound, ValidationError, ) from compiler_gym.service import CompilerGymServiceConnection, ConnectionOpts from compiler_gym.service.proto import ActionSpace as ActionSpaceProto from compiler_gym.service.proto import AddBenchmarkRequest from compiler_gym.service.proto import Benchmark as BenchmarkProto from compiler_gym.service.proto import ( EndSessionReply, EndSessionRequest, Event, ForkSessionReply, ForkSessionRequest, GetVersionReply, GetVersionRequest, SendSessionParameterReply, SendSessionParameterRequest, SessionParameter, StartSessionRequest, StepReply, StepRequest, py_converters, ) from compiler_gym.spaces import ActionSpace, DefaultRewardFromObservation, Reward from compiler_gym.util.decorators import memoized_property from compiler_gym.util.gym_type_hints import ( ActionType, ObservationType, OptionalArgumentValue, RewardType, StepType, ) from compiler_gym.util.shell_format import plural from compiler_gym.util.timer import Timer from compiler_gym.validation_result import ValidationResult from compiler_gym.views import ObservationSpaceSpec, ObservationView, RewardView logger = logging.getLogger(__name__) def _wrapped_step( service: CompilerGymServiceConnection, request: StepRequest, timeout: float ) -> StepReply: """Call the Step() RPC endpoint.""" try: return service(service.stub.Step, request, timeout=timeout) except FileNotFoundError as e: if str(e).startswith("Session not found"): raise SessionNotFound(str(e)) raise class ServiceMessageConverters: """Allows for customization of conversion to/from gRPC messages for the :class:`ClientServiceCompilerEnv <compiler_gym.service.client_service_compiler_env.ClientServiceCompilerEnv>`. Supports conversion customizations: - :code:`compiler_gym.service.proto.ActionSpace` -> :code:`gym.spaces.Space`. - :code:`compiler_gym.util.gym_type_hints.ActionType` -> :code:`compiler_gym.service.proto.Event`. 
""" action_space_converter: Callable[[ActionSpaceProto], ActionSpace] action_converter: Callable[[ActionType], Event] def __init__( self, action_space_converter: Optional[ Callable[[ActionSpaceProto], ActionSpace] ] = None, action_converter: Optional[Callable[[Any], Event]] = None, ): """Constructor.""" self.action_space_converter = ( py_converters.make_action_space_wrapper( py_converters.make_message_default_converter() ) if action_space_converter is None else action_space_converter ) self.action_converter = ( py_converters.to_event_message_default_converter() if action_converter is None else action_converter ) class ClientServiceCompilerEnv(CompilerEnv): """Implementation of :class:`CompilerEnv <compiler_gym.envs.CompilerEnv>` using gRPC for client-server communication. :ivar service: A connection to the underlying compiler service. :vartype service: compiler_gym.service.CompilerGymServiceConnection """ def __init__( self, service: Union[str, Path], rewards: Optional[List[Reward]] = None, datasets: Optional[Iterable[Dataset]] = None, benchmark: Optional[Union[str, Benchmark]] = None, observation_space: Optional[Union[str, ObservationSpaceSpec]] = None, reward_space: Optional[Union[str, Reward]] = None, action_space: Optional[str] = None, derived_observation_spaces: Optional[List[Dict[str, Any]]] = None, service_message_converters: ServiceMessageConverters = None, connection_settings: Optional[ConnectionOpts] = None, service_connection: Optional[CompilerGymServiceConnection] = None, ): """Construct and initialize a CompilerGym environment. In normal use you should use :code:`gym.make(...)` rather than calling the constructor directly. :param service: The hostname and port of a service that implements the CompilerGym service interface, or the path of a binary file which provides the CompilerGym service interface when executed. See :doc:`/compiler_gym/service` for details. :param rewards: The reward spaces that this environment supports. Rewards are typically calculated based on observations generated by the service. See :class:`Reward <compiler_gym.spaces.Reward>` for details. :param benchmark: The benchmark to use for this environment. Either a URI string, or a :class:`Benchmark <compiler_gym.datasets.Benchmark>` instance. If not provided, the first benchmark as returned by :code:`next(env.datasets.benchmarks())` will be used as the default. :param observation_space: Compute and return observations at each :func:`step()` from this space. Accepts a string name or an :class:`ObservationSpaceSpec <compiler_gym.views.ObservationSpaceSpec>`. If not provided, :func:`step()` returns :code:`None` for the observation value. Can be set later using :meth:`env.observation_space <compiler_gym.envs.ClientServiceCompilerEnv.observation_space>`. For available spaces, see :class:`env.observation.spaces <compiler_gym.views.ObservationView>`. :param reward_space: Compute and return reward at each :func:`step()` from this space. Accepts a string name or a :class:`Reward <compiler_gym.spaces.Reward>`. If not provided, :func:`step()` returns :code:`None` for the reward value. Can be set later using :meth:`env.reward_space <compiler_gym.envs.ClientServiceCompilerEnv.reward_space>`. For available spaces, see :class:`env.reward.spaces <compiler_gym.views.RewardView>`. :param action_space: The name of the action space to use. If not specified, the default action space for this compiler is used. 
:param derived_observation_spaces: An optional list of arguments to be passed to :meth:`env.observation.add_derived_space() <compiler_gym.views.observation.Observation.add_derived_space>`. :param service_message_converters: Custom converters for action spaces and actions. :param connection_settings: The settings used to establish a connection with the remote service. :param service_connection: An existing compiler gym service connection to use. :raises FileNotFoundError: If service is a path to a file that is not found. :raises TimeoutError: If the compiler service fails to initialize within the parameters provided in :code:`connection_settings`. """ self.metadata = {"render.modes": ["human", "ansi"]} # A compiler service supports multiple simultaneous environments. This # session ID is used to identify this environment. self._session_id: Optional[int] = None self._service_endpoint: Union[str, Path] = service self._connection_settings = connection_settings or ConnectionOpts() self._params_to_send_on_reset: List[SessionParameter] = [] self.service = service_connection or CompilerGymServiceConnection( endpoint=self._service_endpoint, opts=self._connection_settings, ) self._datasets = Datasets(datasets or []) self.action_space_name = action_space # If no reward space is specified, generate some from numeric observation spaces rewards = rewards or [ DefaultRewardFromObservation(obs.name) for obs in self.service.observation_spaces if obs.default_observation.WhichOneof("value") and isinstance( getattr( obs.default_observation, obs.default_observation.WhichOneof("value") ), numbers.Number, ) ] # The benchmark that is currently being used, and the benchmark that # will be used on the next call to reset(). These are equal except in # the gap between the user setting the env.benchmark property while in # an episode and the next call to env.reset(). self._benchmark_in_use: Optional[Benchmark] = None self._benchmark_in_use_proto: BenchmarkProto = BenchmarkProto() self._next_benchmark: Optional[Benchmark] = None # Normally when the benchmark is changed the updated value is not # reflected until the next call to reset(). We make an exception for the # constructor-time benchmark as otherwise the behavior of the benchmark # property is counter-intuitive: # # >>> env = gym.make("example-compiler-v0", benchmark="foo") # >>> env.benchmark # None # >>> env.reset() # >>> env.benchmark # "foo" # # By forcing the _benchmark_in_use URI at constructor time, the first # env.benchmark above returns the benchmark as expected. try: self.benchmark = benchmark or next(self.datasets.benchmarks()) self._benchmark_in_use = self._next_benchmark except StopIteration: # StopIteration raised on next(self.datasets.benchmarks()) if there # are no benchmarks available. This is to allow ClientServiceCompilerEnv to be # used without any datasets by setting a benchmark before/during the # first reset() call. pass self.service_message_converters = ( ServiceMessageConverters() if service_message_converters is None else service_message_converters ) # Process the available action, observation, and reward spaces. self.action_spaces = [ self.service_message_converters.action_space_converter(space) for space in self.service.action_spaces ] self.observation = self._observation_view_type( raw_step=self.raw_step, spaces=self.service.observation_spaces, ) self.reward = self._reward_view_type(rewards, self.observation) # Register any derived observation spaces now so that the observation # space can be set below. 
for derived_observation_space in derived_observation_spaces or []: self.observation.add_derived_space(**derived_observation_space) self.action_space: Optional[Space] = None self.observation_space: Optional[Space] = None # Mutable state initialized in reset(). self._reward_range: Tuple[float, float] = (-np.inf, np.inf) self.episode_reward = None self.episode_start_time: float = time() self._actions: List[ActionType] = [] # Initialize the default observation/reward spaces. self.observation_space_spec = None self.reward_space_spec = None self.observation_space = observation_space self.reward_space = reward_space @property def observation_space_spec(self) -> ObservationSpaceSpec: return self._observation_space_spec @observation_space_spec.setter def observation_space_spec( self, observation_space_spec: Optional[ObservationSpaceSpec] ): self._observation_space_spec = observation_space_spec @property def observation(self) -> ObservationView: return self._observation @observation.setter def observation(self, observation: ObservationView) -> None: self._observation = observation @property def reward_space_spec(self) -> Optional[Reward]: return self._reward_space_spec @reward_space_spec.setter def reward_space_spec(self, val: Optional[Reward]): self._reward_space_spec = val @property def datasets(self) -> Iterable[Dataset]: return self._datasets @datasets.setter def datasets(self, datasets: Iterable[Dataset]): self._datastes = datasets @property def episode_reward(self) -> Optional[float]: return self._episode_reward @episode_reward.setter def episode_reward(self, episode_reward: Optional[float]): self._episode_reward = episode_reward @property def actions(self) -> List[ActionType]: return self._actions @memoized_property def versions(self) -> GetVersionReply: """Get the version numbers from the compiler service.""" return self.service(self.service.stub.GetVersion, GetVersionRequest()) @property def version(self) -> str: """The version string of the compiler service.""" return self.versions.service_version @property def compiler_version(self) -> str: """The version string of the underlying compiler that this service supports.""" return self.versions.compiler_version @property def episode_walltime(self) -> float: return time() - self.episode_start_time @property def state(self) -> CompilerEnvState: return CompilerEnvState( benchmark=str(self.benchmark) if self.benchmark else None, reward=self.episode_reward, walltime=self.episode_walltime, commandline=self.action_space.to_string(self.actions), ) @property def action_space(self) -> ActionSpace: return self._action_space @action_space.setter def action_space(self, action_space: Optional[str]) -> None: self.action_space_name = action_space index = ( [a.name for a in self.action_spaces].index(action_space) if self.action_space_name else 0 ) self._action_space: ActionSpace = self.action_spaces[index] @property def action_spaces(self) -> List[str]: return self._action_spaces @action_spaces.setter def action_spaces(self, action_spaces: List[str]): self._action_spaces = action_spaces @property def benchmark(self) -> Benchmark: return self._benchmark_in_use @benchmark.setter def benchmark(self, benchmark: Union[str, Benchmark, BenchmarkUri]): if self.in_episode: warnings.warn( "Changing the benchmark has no effect until reset() is called" ) if isinstance(benchmark, str): benchmark_object = self.datasets.benchmark(benchmark) logger.debug("Setting benchmark by name: %s", benchmark_object) self._next_benchmark = benchmark_object elif isinstance(benchmark, 
Benchmark): logger.debug("Setting benchmark: %s", benchmark.uri) self._next_benchmark = benchmark elif isinstance(benchmark, BenchmarkUri): benchmark_object = self.datasets.benchmark_from_parsed_uri(benchmark) logger.debug("Setting benchmark by name: %s", benchmark_object) self._next_benchmark = benchmark_object else: raise TypeError( f"Expected a Benchmark or str, received: '{type(benchmark).__name__}'" ) @property def reward_space(self) -> Optional[Reward]: return self.reward_space_spec @reward_space.setter def reward_space(self, reward_space: Optional[Union[str, Reward]]) -> None: # Coerce the observation space into a string. reward_space: Optional[str] = ( reward_space.name if isinstance(reward_space, Reward) else reward_space ) if reward_space: if reward_space not in self.reward.spaces: raise LookupError(f"Reward space not found: {reward_space}") # The reward space remains unchanged, nothing to do. if reward_space == self.reward_space: return self.reward_space_spec = self.reward.spaces[reward_space] self._reward_range = ( self.reward_space_spec.min, self.reward_space_spec.max, ) # Reset any cumulative rewards, if we're in an episode. if self.in_episode: self.episode_reward = 0 else: # If no reward space is being used then set the reward range to # unbounded. self.reward_space_spec = None self._reward_range = (-np.inf, np.inf) @property def reward_range(self) -> Tuple[float, float]: return self._reward_range @property def reward(self) -> RewardView: return self._reward @reward.setter def reward(self, reward: RewardView) -> None: self._reward = reward @property def in_episode(self) -> bool: return self._session_id is not None @property def observation_space(self) -> Optional[Space]: if self.observation_space_spec: return self.observation_space_spec.space @observation_space.setter def observation_space( self, observation_space: Optional[Union[str, ObservationSpaceSpec]] ) -> None: # Coerce the observation space into a string. observation_space: Optional[str] = ( observation_space.id if isinstance(observation_space, ObservationSpaceSpec) else observation_space ) if observation_space: if observation_space not in self.observation.spaces: raise LookupError(f"Observation space not found: {observation_space}") self.observation_space_spec = self.observation.spaces[observation_space] else: self.observation_space_spec = None def _init_kwargs(self) -> Dict[str, Any]: """Retturn a dictionary of keyword arguments used to initialize the environment. """ return { "action_space": self.action_space, "benchmark": self.benchmark, "connection_settings": self._connection_settings, "service": self._service_endpoint, } def fork(self) -> "ClientServiceCompilerEnv": if not self.in_episode: actions = self.actions.copy() self.reset() if actions: logger.warning("Parent service of fork() has died, replaying state") _, _, done, _ = self.multistep(actions) assert not done, "Failed to replay action sequence" request = ForkSessionRequest(session_id=self._session_id) try: reply: ForkSessionReply = self.service( self.service.stub.ForkSession, request ) # Create a new environment that shares the connection. new_env = type(self)(**self._init_kwargs(), service_connection=self.service) # Set the session ID. new_env._session_id = reply.session_id # pylint: disable=protected-access new_env.observation.session_id = reply.session_id # Now that we have initialized the environment with the current # state, set the benchmark so that calls to new_env.reset() will # correctly revert the environment to the initial benchmark state. 
# # pylint: disable=protected-access new_env._next_benchmark = self._benchmark_in_use # Set the "visible" name of the current benchmark to hide the fact # that we loaded from a custom benchmark file. new_env._benchmark_in_use = self._benchmark_in_use except NotImplementedError: # Fallback implementation. If the compiler service does not support # the Fork() operator then we create a new independent environment # and apply the sequence of actions in the current environment to # replay the state. new_env = type(self)(**self._init_kwargs()) new_env.reset() _, _, done, _ = new_env.multistep(self.actions) assert not done, "Failed to replay action sequence in forked environment" # Create copies of the mutable reward and observation spaces. This # is required to correctly calculate incremental updates. new_env.reward.spaces = deepcopy(self.reward.spaces) new_env.observation.spaces = deepcopy(self.observation.spaces) # Set the default observation and reward types. Note the use of IDs here # to prevent passing the spaces by reference. if self.observation_space: new_env.observation_space = self.observation_space_spec.id if self.reward_space: new_env.reward_space = self.reward_space.name # Copy over the mutable episode state. new_env.episode_reward = self.episode_reward new_env.episode_start_time = self.episode_start_time new_env._actions = self.actions.copy() return new_env def close(self): # Try and close out the episode, but errors are okay. close_service = True if self.in_episode: try: reply: EndSessionReply = self.service( self.service.stub.EndSession, EndSessionRequest(session_id=self._session_id), ) # The service still has other sessions attached so we should # not kill it. if reply.remaining_sessions: close_service = False except ServiceIsClosed: # This error can be safely ignored as it means that the service # is already offline. pass except Exception as e: logger.warning( "Failed to end active compiler session on close(): %s (%s)", e, type(e).__name__, ) self._session_id = None if self.service and close_service: self.service.close() self.service = None def __del__(self): # Don't let the service be orphaned if user forgot to close(), or # if an exception was thrown. The conditional guard is because this # may be called in case of early error. if hasattr(self, "service") and getattr(self, "service"): self.close() def reset( self, benchmark: Optional[Union[str, Benchmark]] = None, action_space: Optional[str] = None, reward_space: Union[ OptionalArgumentValue, str, Reward ] = OptionalArgumentValue.UNCHANGED, observation_space: Union[ OptionalArgumentValue, str, ObservationSpaceSpec ] = OptionalArgumentValue.UNCHANGED, timeout: float = 300, ) -> Optional[ObservationType]: return self._reset( benchmark=benchmark, action_space=action_space, observation_space=observation_space, reward_space=reward_space, timeout=timeout, retry_count=0, ) def _reset( # pylint: disable=arguments-differ self, benchmark: Optional[Union[str, Benchmark]], action_space: Optional[str], observation_space: Union[OptionalArgumentValue, str, ObservationSpaceSpec], reward_space: Union[OptionalArgumentValue, str, Reward], timeout: float, retry_count: int, ) -> Optional[ObservationType]: """Private implementation detail. 
Call `reset()`, not this.""" if observation_space != OptionalArgumentValue.UNCHANGED: self.observation_space = observation_space if reward_space != OptionalArgumentValue.UNCHANGED: self.reward_space = reward_space def _retry(error) -> Optional[ObservationType]: """Abort and retry on error.""" # Log the error that we are recovering from, but treat # ServiceIsClosed errors as unimportant since we know what causes # them. log_severity = ( logger.debug if isinstance(error, ServiceIsClosed) else logger.warning ) log_severity("%s during reset(): %s", type(error).__name__, error) if self.service: try: self.service.close() except ServiceError as e: # close() can raise ServiceError if the service exists with # a non-zero return code. We swallow the error here as we # are about to retry. logger.debug( "Ignoring service error during reset() attempt: %s (%s)", e, type(e).__name__, ) self.service = None if retry_count >= self._connection_settings.init_max_attempts: raise OSError( "Failed to reset environment using benchmark " f"{self.benchmark} after {retry_count - 1} attempts.\n" f"Last error ({type(error).__name__}): {error}" ) from error else: return self._reset( benchmark=benchmark, action_space=action_space, observation_space=observation_space, reward_space=reward_space, timeout=timeout, retry_count=retry_count + 1, ) def _call_with_error( stub_method, *args, **kwargs ) -> Tuple[Optional[Exception], Optional[Any]]: """Call the given stub method. And return an <error, return> tuple.""" try: return None, self.service(stub_method, *args, **kwargs) except (ServiceError, ServiceTransportError, TimeoutError) as e: return e, None if not self._next_benchmark: raise TypeError( "No benchmark set. Set a benchmark using " "`env.reset(benchmark=benchmark)`. Use `env.datasets` to " "access the available benchmarks." ) # Start a new service if required. if self.service is None: self.service = CompilerGymServiceConnection( self._service_endpoint, self._connection_settings ) self.action_space_name = action_space or self.action_space_name # Stop an existing episode. if self.in_episode: logger.debug("Ending session %d", self._session_id) error, _ = _call_with_error( self.service.stub.EndSession, EndSessionRequest(session_id=self._session_id), ) if error: logger.warning( "Failed to stop session %d with %s: %s", self._session_id, type(error).__name__, error, ) self._session_id = None # Update the user requested benchmark, if provided. if benchmark: self.benchmark = benchmark self._benchmark_in_use = self._next_benchmark # When always_send_benchmark_on_reset option is enabled, the entire # benchmark program is sent with every StartEpisode request. Otherwise # only the URI of the benchmark is sent. In cases where benchmarks are # reused between calls to reset(), sending the URI is more efficient as # the service can cache the benchmark. In cases where reset() is always # called with a different benchmark, this causes unnecessary roundtrips # as every StartEpisodeRequest receives a FileNotFound response. 
if self.service.opts.always_send_benchmark_on_reset: self._benchmark_in_use_proto = self._benchmark_in_use.proto else: self._benchmark_in_use_proto.uri = str(self._benchmark_in_use.uri) start_session_request = StartSessionRequest( benchmark=self._benchmark_in_use_proto, action_space=( [a.name for a in self.action_spaces].index(self.action_space_name) if self.action_space_name else 0 ), observation_space=( [self.observation_space_spec.index] if self.observation_space else None ), ) try: error, reply = _call_with_error( self.service.stub.StartSession, start_session_request ) if error: return _retry(error) except FileNotFoundError: # The benchmark was not found, so try adding it and then repeating # the request. error, _ = _call_with_error( self.service.stub.AddBenchmark, AddBenchmarkRequest(benchmark=[self._benchmark_in_use.proto]), ) if error: return _retry(error) error, reply = _call_with_error( self.service.stub.StartSession, start_session_request ) if error: return _retry(error) self._session_id = reply.session_id self.observation.session_id = reply.session_id self.reward.get_cost = self.observation.__getitem__ self.episode_start_time = time() self._actions = [] # If the action space has changed, update it. if reply.HasField("new_action_space"): self._action_space = self.service_message_converters.action_space_converter( reply.new_action_space ) # Re-send any session parameters that we marked as needing to be # re-sent on reset(). Do this before any other initialization as they # may affect the behavior of subsequent service calls. if self._params_to_send_on_reset: self.send_params(*[(p.key, p.value) for p in self._params_to_send_on_reset]) self.reward.reset(benchmark=self.benchmark, observation_view=self.observation) if self.reward_space: self.episode_reward = 0.0 if self.observation_space: if len(reply.observation) != 1: raise OSError( f"Expected one observation from service, received {len(reply.observation)}" ) return self.observation.spaces[self.observation_space_spec.id].translate( reply.observation[0] ) def raw_step( self, actions: Iterable[ActionType], observation_spaces: List[ObservationSpaceSpec], reward_spaces: List[Reward], timeout: float = 300, ) -> StepType: """Take a step. :param actions: A list of actions to be applied. :param observations: A list of observations spaces to compute observations from. These are evaluated after the actions are applied. :param rewards: A list of reward spaces to compute rewards from. These are evaluated after the actions are applied. :return: A tuple of observations, rewards, done, and info. Observations and rewards are lists. :raises SessionNotFound: If :meth:`reset() <compiler_gym.envs.ClientServiceCompilerEnv.reset>` has not been called. .. warning:: Don't call this method directly, use :meth:`step() <compiler_gym.envs.ClientServiceCompilerEnv.step>` or :meth:`multistep() <compiler_gym.envs.ClientServiceCompilerEnv.multistep>` instead. The :meth:`raw_step() <compiler_gym.envs.ClientServiceCompilerEnv.step>` method is an implementation detail. 
""" if not self.in_episode: raise SessionNotFound("Must call reset() before step()") reward_observation_spaces: List[ObservationSpaceSpec] = [] for reward_space in reward_spaces: reward_observation_spaces += [ self.observation.spaces[obs] for obs in reward_space.observation_spaces ] observations_to_compute: List[ObservationSpaceSpec] = list( set(observation_spaces).union(set(reward_observation_spaces)) ) observation_space_index_map: Dict[ObservationSpaceSpec, int] = { observation_space: i for i, observation_space in enumerate(observations_to_compute) } # Record the actions. self._actions += actions # Send the request to the backend service. request = StepRequest( session_id=self._session_id, action=[ self.service_message_converters.action_converter(a) for a in actions ], observation_space=[ observation_space.index for observation_space in observations_to_compute ], ) try: reply = _wrapped_step(self.service, request, timeout) except ( ServiceError, ServiceTransportError, ServiceOSError, TimeoutError, SessionNotFound, ) as e: # Gracefully handle "expected" error types. These non-fatal errors # end the current episode and provide some diagnostic information to # the user through the `info` dict. info = { "error_type": type(e).__name__, "error_details": str(e), } try: self.close() except ServiceError as e: # close() can raise ServiceError if the service exists with a # non-zero return code. We swallow the error here but propagate # the diagnostic message. info[ "error_details" ] += f". Additional error during environment closing: {e}" default_observations = [ observation_space.default_value for observation_space in observation_spaces ] default_rewards = [ float(reward_space.reward_on_error(self.episode_reward)) for reward_space in reward_spaces ] return default_observations, default_rewards, True, info # If the action space has changed, update it. if reply.HasField("new_action_space"): self._action_space = self.service_message_converters.action_space_converter( reply.new_action_space ) # Translate observations to python representations. if len(reply.observation) != len(observations_to_compute): raise ServiceError( f"Requested {len(observations_to_compute)} observations " f"but received {len(reply.observation)}" ) computed_observations = [ observation_space.translate(value) for observation_space, value in zip( observations_to_compute, reply.observation ) ] # Get the user-requested observation. observations: List[ObservationType] = [ computed_observations[observation_space_index_map[observation_space]] for observation_space in observation_spaces ] # Update and compute the rewards. 
rewards: List[RewardType] = [] for reward_space in reward_spaces: reward_observations = [ computed_observations[ observation_space_index_map[ self.observation.spaces[observation_space] ] ] for observation_space in reward_space.observation_spaces ] rewards.append( float( reward_space.update(actions, reward_observations, self.observation) ) ) info = { "action_had_no_effect": reply.action_had_no_effect, "new_action_space": reply.HasField("new_action_space"), } return observations, rewards, reply.end_of_session, info def step( self, action: ActionType, observation_spaces: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None, reward_spaces: Optional[Iterable[Union[str, Reward]]] = None, observations: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None, rewards: Optional[Iterable[Union[str, Reward]]] = None, timeout: float = 300, ) -> StepType: """:raises SessionNotFound: If :meth:`reset() <compiler_gym.envs.ClientServiceCompilerEnv.reset>` has not been called. """ if isinstance(action, IterableType): warnings.warn( "Argument `action` of ClientServiceCompilerEnv.step no longer accepts a list " " of actions. Please use ClientServiceCompilerEnv.multistep instead", category=DeprecationWarning, ) return self.multistep( action, observation_spaces=observation_spaces, reward_spaces=reward_spaces, observations=observations, rewards=rewards, ) if observations is not None: warnings.warn( "Argument `observations` of ClientServiceCompilerEnv.step has been " "renamed `observation_spaces`. Please update your code", category=DeprecationWarning, ) observation_spaces = observations if rewards is not None: warnings.warn( "Argument `rewards` of ClientServiceCompilerEnv.step has been renamed " "`reward_spaces`. Please update your code", category=DeprecationWarning, ) reward_spaces = rewards return self.multistep( actions=[action], observation_spaces=observation_spaces, reward_spaces=reward_spaces, timeout=timeout, ) def multistep( self, actions: Iterable[ActionType], observation_spaces: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None, reward_spaces: Optional[Iterable[Union[str, Reward]]] = None, observations: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None, rewards: Optional[Iterable[Union[str, Reward]]] = None, timeout: float = 300, ): """:raises SessionNotFound: If :meth:`reset() <compiler_gym.envs.ClientServiceCompilerEnv.reset>` has not been called. """ if observations is not None: warnings.warn( "Argument `observations` of ClientServiceCompilerEnv.multistep has been " "renamed `observation_spaces`. Please update your code", category=DeprecationWarning, ) observation_spaces = observations if rewards is not None: warnings.warn( "Argument `rewards` of ClientServiceCompilerEnv.multistep has been renamed " "`reward_spaces`. Please update your code", category=DeprecationWarning, ) reward_spaces = rewards # Coerce observation spaces into a list of ObservationSpaceSpec instances. if observation_spaces: observation_spaces_to_compute: List[ObservationSpaceSpec] = [ obs if isinstance(obs, ObservationSpaceSpec) else self.observation.spaces[obs] for obs in observation_spaces ] elif self.observation_space_spec: observation_spaces_to_compute: List[ObservationSpaceSpec] = [ self.observation_space_spec ] else: observation_spaces_to_compute: List[ObservationSpaceSpec] = [] # Coerce reward spaces into a list of Reward instances. 
if reward_spaces: reward_spaces_to_compute: List[Reward] = [ rew if isinstance(rew, Reward) else self.reward.spaces[rew] for rew in reward_spaces ] elif self.reward_space: reward_spaces_to_compute: List[Reward] = [self.reward_space] else: reward_spaces_to_compute: List[Reward] = [] # Perform the underlying environment step. observation_values, reward_values, done, info = self.raw_step( actions, observation_spaces_to_compute, reward_spaces_to_compute, timeout=timeout, ) # Translate observations lists back to the appropriate types. if observation_spaces is None and self.observation_space_spec: observation_values = observation_values[0] elif not observation_spaces_to_compute: observation_values = None # Translate reward lists back to the appropriate types. if reward_spaces is None and self.reward_space: reward_values = reward_values[0] # Update the cumulative episode reward self.episode_reward += reward_values elif not reward_spaces_to_compute: reward_values = None return observation_values, reward_values, done, info def render( self, mode="human", ) -> Optional[str]: """Render the environment. ClientServiceCompilerEnv instances support two render modes: "human", which prints the current environment state to the terminal and return nothing; and "ansi", which returns a string representation of the current environment state. :param mode: The render mode to use. :raises TypeError: If a default observation space is not set, or if the requested render mode does not exist. """ if not self.observation_space: raise ValueError("Cannot call render() when no observation space is used") observation = self.observation[self.observation_space_spec.id] if mode == "human": print(observation) elif mode == "ansi": return str(observation) else: raise ValueError(f"Invalid mode: {mode}") @property def _observation_view_type(self): """Returns the type for observation views. Subclasses may override this to extend the default observation view. """ return ObservationView @property def _reward_view_type(self): """Returns the type for reward views. Subclasses may override this to extend the default reward view. """ return RewardView def apply(self, state: CompilerEnvState) -> None: # noqa if not self.in_episode: self.reset(benchmark=state.benchmark) # TODO(cummins): Does this behavior make sense? Take, for example: # # >>> env.apply(state) # >>> env.benchmark == state.benchmark # False # # I think most users would reasonable expect `env.apply(state)` to fully # apply the state, not just the sequence of actions. And what about the # choice of observation space, reward space, etc? if self.benchmark != state.benchmark: warnings.warn( f"Applying state from environment for benchmark '{state.benchmark}' " f"to environment for benchmark '{self.benchmark}'" ) actions = self.action_space.from_string(state.commandline) done = False for action in actions: _, _, done, info = self.step(action) if done: raise ValueError( f"Environment terminated with error: `{info.get('error_details')}`" ) def validate(self, state: Optional[CompilerEnvState] = None) -> ValidationResult: if state: self.reset(benchmark=state.benchmark) in_place = False benchmark: str = state.benchmark else: state = self.state in_place = True # Record the actual benchmark object to accommodate custom # benchmarks. 
benchmark: Benchmark = self.benchmark assert self.in_episode errors: ValidationError = [] validation = { "state": state, "actions_replay_failed": False, "reward_validated": False, "reward_validation_failed": False, "benchmark_semantics_validated": False, "benchmark_semantics_validation_failed": False, } fkd = self.fork() try: with Timer() as walltime: replay_target = self if in_place else fkd replay_target.reset(benchmark=benchmark) # Use a while loop here so that we can `break` early out of the # validation process in case a step fails. while True: try: replay_target.apply(state) except (ValueError, OSError) as e: validation["actions_replay_failed"] = True errors.append( ValidationError( type="Action replay failed", data={ "exception": str(e), "exception_type": type(e).__name__, }, ) ) break if state.reward is not None and self.reward_space is None: warnings.warn( "Validating state with reward, but " "environment has no reward space set" ) elif ( state.reward is not None and self.reward_space and self.reward_space.deterministic ): validation["reward_validated"] = True # If reward deviates from the expected amount record the # error but continue with the remainder of the validation. if not isclose( state.reward, replay_target.episode_reward, rel_tol=1e-5, abs_tol=1e-10, ): validation["reward_validation_failed"] = True errors.append( ValidationError( type=( f"Expected reward {state.reward} but " f"received reward {replay_target.episode_reward}" ), data={ "expected_reward": state.reward, "actual_reward": replay_target.episode_reward, }, ) ) benchmark = replay_target.benchmark if benchmark.is_validatable(): validation["benchmark_semantics_validated"] = True semantics_errors = benchmark.validate(replay_target) if semantics_errors: validation["benchmark_semantics_validation_failed"] = True errors += semantics_errors # Finished all checks, break the loop. break finally: fkd.close() return ValidationResult.construct( walltime=walltime.time, errors=errors, **validation, ) def send_param(self, key: str, value: str, resend_on_reset: bool = False) -> str: """Send a single <key, value> parameter to the compiler service. See :meth:`send_params() <compiler_gym.envs.ClientServiceCompilerEnv.send_params>` for more information. :param key: The parameter key. :param value: The parameter value. :param resend_on_reset: Whether to resend this parameter to the compiler service on :code:`reset()`. :return: The response from the compiler service. :raises SessionNotFound: If called before :meth:`reset() <compiler_gym.envs.ClientServiceCompilerEnv.reset>`. """ return self.send_params((key, value), resend_on_reset=resend_on_reset)[0] def send_params( self, *params: Iterable[Tuple[str, str]], resend_on_reset: bool = False ) -> List[str]: """Send a list of <key, value> parameters to the compiler service. This provides a mechanism to send messages to the backend compilation session in a way that doesn't conform to the normal communication pattern. This can be useful for things like configuring runtime debugging settings, or applying "meta actions" to the compiler that are not exposed in the compiler's action space. Consult the documentation for a specific compiler service to see what parameters, if any, are supported. Must have called :meth:`reset() <compiler_gym.envs.ClientServiceCompilerEnv.reset>` first. :param params: A list of parameters, where each parameter is a :code:`(key, value)` tuple. :param resend_on_reset: Whether to resend this parameter to the compiler service on :code:`reset()`. 
:return: A list of string responses, one per parameter. :raises SessionNotFound: If called before :meth:`reset() <compiler_gym.envs.ClientServiceCompilerEnv.reset>`. """ params_to_send = [SessionParameter(key=k, value=v) for (k, v) in params] if resend_on_reset: self._params_to_send_on_reset += params_to_send if not self.in_episode: raise SessionNotFound("Must call reset() before send_params()") request = SendSessionParameterRequest( session_id=self._session_id, parameter=params_to_send, ) reply: SendSessionParameterReply = self.service( self.service.stub.SendSessionParameter, request ) if len(params) != len(reply.reply): raise OSError( f"Sent {len(params)} {plural(len(params), 'parameter', 'parameters')} but received " f"{len(reply.reply)} {plural(len(reply.reply), 'response', 'responses')} from the " "service" ) return list(reply.reply) def __copy__(self) -> "ClientServiceCompilerEnv": raise TypeError( "ClientServiceCompilerEnv instances do not support shallow copies. Use deepcopy()" ) def __deepcopy__(self, memo) -> "ClientServiceCompilerEnv": del memo # unused return self.fork()
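

# Illustrative sketch: how the send_param()/send_params() and fork() APIs
# documented above might be used together. It assumes the "llvm-v0"
# environment is installed; "example.param" is a hypothetical parameter key,
# not one that any real compiler service is known to support.
def _example_session_parameter_usage():  # pragma: no cover
    import compiler_gym  # deferred import, only needed by this sketch

    env = compiler_gym.make("llvm-v0")
    try:
        env.reset()
        # Send an ad-hoc key/value message to the backend compilation session.
        reply = env.send_param("example.param", "example-value")
        print("service replied:", reply)
        # fork() yields an independent environment sharing the current state.
        forked = env.fork()
        try:
            assert forked.actions == env.actions
        finally:
            forked.close()
    finally:
        env.close()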
CompilerGym-development
compiler_gym/service/client_service_compiler_env.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from compiler_gym.service.compilation_session import CompilationSession from compiler_gym.service.connection import ( CompilerGymServiceConnection, ConnectionOpts, EnvironmentNotSupported, ServiceError, ServiceInitError, ServiceIsClosed, ServiceOSError, ServiceTransportError, SessionNotFound, ) __all__ = [ "CompilerGymServiceConnection", "CompilationSession", "ConnectionOpts", "EnvironmentNotSupported", "ServiceError", "ServiceInitError", "ServiceIsClosed", "ServiceOSError", "ServiceTransportError", "SessionNotFound", ]
CompilerGym-development
compiler_gym/service/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """This module contains the logic for connecting to services.""" import logging import os import subprocess import sys from pathlib import Path from signal import Signals from time import sleep, time from typing import Dict, Iterable, List, Optional, TypeVar, Union import grpc from deprecated.sphinx import deprecated from pydantic import BaseModel import compiler_gym.errors from compiler_gym.service.proto import ( ActionSpace, CompilerGymServiceStub, GetSpacesReply, GetSpacesRequest, ObservationSpace, ) from compiler_gym.service.service_cache import ServiceCache from compiler_gym.util.debug_util import get_debug_level, logging_level_to_debug_level from compiler_gym.util.runfiles_path import runfiles_path, site_data_path from compiler_gym.util.shell_format import join_cmd, plural from compiler_gym.util.truncate import truncate_lines GRPC_CHANNEL_OPTIONS = [ # Disable the inbound message length filter to allow for large messages such # as observations. ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), # Fix for "received initial metadata size exceeds limit" ("grpc.max_metadata_size", 512 * 1024), # Spurious error UNAVAILABLE "Trying to connect an http1.x server". # https://putridparrot.com/blog/the-unavailable-trying-to-connect-an-http1-x-server-grpc-error/ ("grpc.enable_http_proxy", 0), # Disable TCP port re-use to mitigate port conflict errors when starting # many services in parallel. Context: # https://github.com/facebookresearch/CompilerGym/issues/572 ("grpc.so_reuseport", 0), ] logger = logging.getLogger(__name__) class ConnectionOpts(BaseModel): """The options used to configure a connection to a service.""" rpc_max_retries: int = 5 """The maximum number of failed attempts to communicate with the RPC service before raising an error. Retries are made only for communication errors. Failures from other causes such as error signals raised by the service are not retried.""" retry_wait_seconds: float = 0.1 """The number of seconds to wait between successive attempts to communicate with the RPC service.""" retry_wait_backoff_exponent: float = 1.5 """The exponential backoff scaling between successive attempts to communicate with the RPC service.""" init_max_seconds: float = 30 """The maximum number of seconds to spend attempting to establish a connection to the service before failing. """ init_max_attempts: int = 5 """The maximum number of attempts to make to establish a connection to the service before failing. """ local_service_port_init_max_seconds: float = 30 """The maximum number of seconds to wait for a local service to write the port.txt file.""" local_service_exit_max_seconds: float = 30 """The maximum number of seconds to wait for a local service to terminate on close.""" rpc_init_max_seconds: float = 3 """The maximum number of seconds to wait for an RPC connection to establish.""" always_send_benchmark_on_reset: bool = False """Send the full benchmark program data to the compiler service on ever call to :meth:`env.reset() <compiler_gym.envs.CompilerEnv.reset>`. This is more efficient in cases where the majority of calls to :meth:`env.reset() <compiler_gym.envs.CompilerEnv.reset>` uses a different benchmark. In case of benchmark re-use, leave this :code:`False`. 
""" script_args: List[str] = [] """If the service is started from a local script, this set of args is used on the command line. No effect when used for existing sockets.""" script_env: Dict[str, str] = {} """If the service is started from a local script, this set of env vars is used on the command line. No effect when used for existing sockets.""" # Deprecated since v0.2.4. # This type is for backwards compatibility that will be removed in a future release. # Please, use errors from `compiler_gym.errors`. ServiceError = compiler_gym.errors.ServiceError # Deprecated since v0.2.4. # This type is for backwards compatibility that will be removed in a future release. # Please, use errors from `compiler_gym.errors`. SessionNotFound = compiler_gym.errors.SessionNotFound # Deprecated since v0.2.4. # This type is for backwards compatibility that will be removed in a future release. # Please, use errors from `compiler_gym.errors`. ServiceOSError = compiler_gym.errors.ServiceOSError # Deprecated since v0.2.4. # This type is for backwards compatibility that will be removed in a future release. # Please, use errors from `compiler_gym.errors`. ServiceInitError = compiler_gym.errors.ServiceInitError # Deprecated since v0.2.4. # This type is for backwards compatibility that will be removed in a future release. # Please, use errors from `compiler_gym.errors`. EnvironmentNotSupported = compiler_gym.errors.EnvironmentNotSupported # Deprecated since v0.2.4. # This type is for backwards compatibility that will be removed in a future release. # Please, use errors from `compiler_gym.errors`. ServiceTransportError = compiler_gym.errors.ServiceTransportError # Deprecated since v0.2.4. # This type is for backwards compatibility that will be removed in a future release. # Please, use errors from `compiler_gym.errors`. ServiceIsClosed = compiler_gym.errors.ServiceIsClosed Request = TypeVar("Request") Reply = TypeVar("Reply") if sys.version_info > (3, 8, 0): from typing import Protocol class StubMethod(Protocol): """Type annotation for an RPC stub method that accepts a request message and returns a reply. """ Request = TypeVar("Request") Reply = TypeVar("Reply") def __call__( self, a: Request, timeout: float ) -> Reply: # pylint: disable=undefined-variable ... else: # Legacy support for Python < 3.8. from typing import Callable StubMethod = Callable[[Request], Reply] class Connection: """Base class for service connections.""" def __init__(self, channel, url: str): """Constructor. Don't instantiate this directly, use the subclasses. :param channel: The RPC channel to use. :param url: The URL of the RPC service. """ self.channel = channel self.url = url self.stub = CompilerGymServiceStub(self.channel) self.spaces: GetSpacesReply = self(self.stub.GetSpaces, GetSpacesRequest()) def close(self): self.channel.close() def __call__( self, stub_method: StubMethod, request: Request, timeout: float = 60, max_retries=5, retry_wait_seconds=0.1, retry_wait_backoff_exponent=1.5, ) -> Reply: """Call the service with the given arguments.""" # pylint: disable=no-member # # House keeping note: if you modify the exceptions that this method # raises, please update the CompilerGymServiceConnection.__call__() # docstring. 
attempt = 0 while True: try: return stub_method(request, timeout=timeout) except ValueError as e: if str(e) == "Cannot invoke RPC on closed channel!": raise ServiceIsClosed( "RPC communication failed because channel is closed" ) from None raise e except grpc.RpcError as e: # We raise "from None" to discard the gRPC stack trace, with the # remaining stack trace correctly pointing to the CompilerGym # calling code. if e.code() == grpc.StatusCode.INVALID_ARGUMENT: raise ValueError(e.details()) from None elif e.code() == grpc.StatusCode.UNIMPLEMENTED: raise NotImplementedError(e.details()) from None elif e.code() == grpc.StatusCode.NOT_FOUND: raise FileNotFoundError(e.details()) from None elif e.code() == grpc.StatusCode.RESOURCE_EXHAUSTED: raise ServiceOSError(e.details()) from None elif e.code() == grpc.StatusCode.FAILED_PRECONDITION: raise TypeError(str(e.details())) from None elif e.code() == grpc.StatusCode.UNAVAILABLE: # For "unavailable" errors we retry with exponential # backoff. This is because this error can be caused by an # overloaded service, a flaky connection, etc. # Early exit in case we can detect that the service is down # and so there is no use in retrying the RPC call. if self.service_is_down(): raise ServiceIsClosed("Service is offline") attempt += 1 if attempt > max_retries: raise ServiceTransportError( f"{self.url} {e.details()} ({max_retries} retries)" ) from None remaining = max_retries - attempt logger.warning( "%s %s (%d %s remaining)", self.url, e.details(), remaining, plural(remaining, "attempt", "attempts"), ) sleep(retry_wait_seconds) retry_wait_seconds *= retry_wait_backoff_exponent elif ( e.code() == grpc.StatusCode.INTERNAL and e.details() == "Exception serializing request!" ): raise TypeError( f"{e.details()} Request type: {type(request).__name__}" ) from None elif e.code() == grpc.StatusCode.DEADLINE_EXCEEDED: raise TimeoutError( f"{e.details()} ({timeout:.1f} seconds)" ) from None elif e.code() == grpc.StatusCode.DATA_LOSS: raise ServiceError(e.details()) from None elif e.code() == grpc.StatusCode.UNKNOWN: # By default, GRPC provides no context if an exception is # raised in an RPC handler as this could lead to an # information leak. Unfortunately for us this makes # debugging a little more difficult, so be verbose about the # possible causes of this error. raise ServiceError( "Service returned an unknown error. Possibly an " "unhandled exception in a C++ RPC handler, see " "<https://github.com/grpc/grpc/issues/13706>." ) from None else: raise ServiceError( f"RPC call returned status code {e.code()} and error `{e.details()}`" ) from None def loglines(self) -> Iterable[str]: """Fetch any available log lines from the service backend. :return: An iterator over lines of logs. """ yield from () def service_is_down(self) -> bool: """Return true if the service is known to be dead. Subclasses can use this for fast checks that a service is down to avoid retry loops. """ return False class ManagedConnection(Connection): """A connection to a service using a managed subprocess.""" def __init__( self, local_service_binary: Path, port_init_max_seconds: float, rpc_init_max_seconds: float, process_exit_max_seconds: float, script_args: List[str], script_env: Dict[str, str], ): """Constructor. :param local_service_binary: The path of the service binary. :raises TimeoutError: If fails to establish connection within a specified time limit. 
""" self.process_exit_max_seconds = process_exit_max_seconds if not Path(local_service_binary).is_file(): raise FileNotFoundError(f"File not found: {local_service_binary}") self.cache = ServiceCache() # The command that will be executed. The working directory of this # command will be set to the local_service_binary's parent, so we can # use the relpath for a neater `ps aux` view. cmd = [ f"./{local_service_binary.name}", f"--working_dir={self.cache.path}", ] # Add any custom arguments cmd += script_args # Set the root of the runfiles directory. env = os.environ.copy() env["COMPILER_GYM_RUNFILES"] = str(runfiles_path(".")) env["COMPILER_GYM_SITE_DATA"] = str(site_data_path(".")) # Set the pythonpath so that executable python scripts can use absolute # import paths like `from compiler_gym.envs.foo import bar`. if "PYTHONPATH" in env: env["PYTHONPATH"] = f'{env["PYTHONPATH"]}:{env["COMPILER_GYM_RUNFILES"]}' else: env["PYTHONPATH"] = env["COMPILER_GYM_RUNFILES"] # Set the verbosity of the service. The logging level of the service is # the debug level - 1, so that COMPILER_GYM_DEBUG=3 will cause VLOG(2) # and lower to be logged to stdout. debug_level = max( get_debug_level(), logging_level_to_debug_level(logger.getEffectiveLevel()) ) if debug_level > 0: cmd.append("--alsologtostderr") cmd.append(f"-v={debug_level - 1}") # If we are debugging the backend, set the logbuflevel to a low # value to disable buffering of logging messages. This removes any # buffering between `LOG(INFO) << "..."` and the message being # emited to stderr. cmd.append("--logbuflevel=-1") else: # Silence the gRPC logs as we will do our own error reporting, but # don't override any existing value so that the user may debug the # gRPC backend by setting GRPC_VERBOSITY to ERROR, INFO, or DEBUG. if not os.environ.get("GRPC_VERBOSITY"): env["GRPC_VERBOSITY"] = "NONE" # Set environment variable COMPILER_GYM_SERVICE_ARGS to pass # additional arguments to the service. args = os.environ.get("COMPILER_GYM_SERVICE_ARGS", "") if args: cmd.append(args) # Add any custom environment variables env.update(script_env) logger.debug( "Exec `%s%s`", " ".join(f"{k}={v}" for k, v in script_env.items()) + " " if script_env else "", join_cmd(cmd), ) self.process = subprocess.Popen( cmd, env=env, cwd=local_service_binary.parent, ) self._process_returncode_exception_raised = False # Read the port from a file generated by the service. wait_secs = 0.1 port_path = self.cache / "port.txt" end_time = time() + port_init_max_seconds while time() < end_time: returncode = self.process.poll() if returncode is not None: try: # Try and decode the name of a signal. Signal returncodes # are negative. returncode = f"{returncode} ({Signals(abs(returncode)).name})" except ValueError: pass msg = f"Service terminated with returncode: {returncode}" # Attach any logs from the service if available. logs = truncate_lines( self.loglines(), max_line_len=100, max_lines=25, tail=True ) if logs: msg = f"{msg}\nService logs:\n{logs}" self.cache.close() raise ServiceError(msg) if port_path.is_file(): try: with open(port_path) as f: self.port = int(f.read().rstrip()) break except ValueError: # ValueError is raised by int(...) on invalid input. In that # case, wait for longer. pass sleep(wait_secs) wait_secs *= 1.2 else: # kill() was added in Python 3.7. 
if sys.version_info >= (3, 7, 0): self.process.kill() else: self.process.terminate() self.process.communicate(timeout=rpc_init_max_seconds) self.cache.close() raise TimeoutError( "Service failed to produce port file after " f"{port_init_max_seconds:.1f} seconds" ) url = f"localhost:{self.port}" wait_secs = 0.1 attempts = 0 end_time = time() + rpc_init_max_seconds while time() < end_time: try: channel = grpc.insecure_channel( url, options=GRPC_CHANNEL_OPTIONS, ) channel_ready = grpc.channel_ready_future(channel) attempts += 1 channel_ready.result(timeout=wait_secs) break except (grpc.FutureTimeoutError, grpc.RpcError) as e: logger.debug( "Connection attempt %d = %s %s", attempts, type(e).__name__, str(e) ) wait_secs *= 1.2 else: # kill() was added in Python 3.7. if sys.version_info >= (3, 7, 0): self.process.kill() else: self.process.terminate() self.process.communicate(timeout=process_exit_max_seconds) # Include the last few lines of logs generated by the compiler # service, if any. logs = truncate_lines( self.loglines(), max_line_len=100, max_lines=25, tail=True ) logs_message = f" Service logs:\n{logs}" if logs else "" self.cache.close() raise TimeoutError( "Failed to connect to RPC service after " f"{rpc_init_max_seconds:.1f} seconds.{logs_message}" ) super().__init__(channel, url) @property @deprecated(version="0.2.4", reason="Replace `working_directory` with `cache.path`") def working_dir(self) -> Path: return self.cache.path def service_is_down(self) -> bool: """Return true if the service subprocess has terminated.""" return self.process.poll() is not None def loglines(self) -> Iterable[str]: """Fetch any available log lines from the service backend. :return: An iterator over lines of logs. """ # Compiler services write log files in the logs directory. Iterate over # them and return their contents. if not (self.cache / "logs").is_dir(): return () for path in sorted((self.cache / "logs").iterdir()): if not path.is_file(): continue with open(path) as f: yield from f.readlines() def close(self): """Terminate a local subprocess and close the connection.""" try: self.process.terminate() self.process.communicate(timeout=self.process_exit_max_seconds) if ( self.process.returncode and not self._process_returncode_exception_raised ): # You can call close() multiple times but we only want to emit # the exception once. self._process_returncode_exception_raised = True raise ServiceError( f"Service exited with returncode {self.process.returncode}" ) except ServiceIsClosed: # The service has already been closed, nothing to do. pass except ProcessLookupError: logger.warning("Service process not found at %s", self.cache) except subprocess.TimeoutExpired: # Try and kill it and then walk away. try: # kill() was added in Python 3.7. if sys.version_info >= (3, 7, 0): self.process.kill() else: self.process.terminate() self.process.communicate(timeout=60) except: # noqa pass logger.warning("Abandoning orphan service at %s", self.cache) finally: self.cache.close() super().close() def __repr__(self): if self.process.poll() is None: return ( f"Connection to service at {self.url} running on PID {self.process.pid}" ) return f"Connection to dead service at {self.url}" class UnmanagedConnection(Connection): """A connection to a service that is not managed by this process.""" def __init__(self, url: str, rpc_init_max_seconds: float): """Constructor. :param url: The URL of the service to connect to. :raises TimeoutError: If fails to establish connection within a specified time limit. 
""" wait_secs = 0.1 attempts = 0 end_time = time() + rpc_init_max_seconds while time() < end_time: try: channel = grpc.insecure_channel( url, options=GRPC_CHANNEL_OPTIONS, ) channel_ready = grpc.channel_ready_future(channel) attempts += 1 channel_ready.result(timeout=wait_secs) break except (grpc.FutureTimeoutError, grpc.RpcError) as e: logger.debug( "Connection attempt %d = %s %s", attempts, type(e).__name__, str(e) ) wait_secs *= 1.2 else: raise TimeoutError( f"Failed to connect to {url} after " f"{rpc_init_max_seconds:.1f} seconds" ) super().__init__(channel, url) def __repr__(self): return f"Connection to unmanaged service {self.url}" class CompilerGymServiceConnection: """A connection to a compiler gym service. There are two types of service connections: managed and unmanaged. The type of connection is determined by the endpoint. If a "host:port" URL is provided, an unmanaged connection is created. If the path of a file is provided, a managed connection is used. The difference between a managed and unmanaged connection is that with a managed connection, the lifecycle of the service if controlled by the client connection. That is, when a managed connection is created, a service subprocess is started by executing the specified path. When the connection is closed, the subprocess is terminated. With an unmanaged connection, if the service fails is goes offline, the client will fail. This class provides a common abstraction between the two types of connection, and provides a call method for invoking remote procedures on the service. Example usage of an unmanaged service connection: .. code-block:: python # Connect to a service running on localhost:8080. The user must # started a process running on port 8080. connection = CompilerGymServiceConnection("localhost:8080") # Invoke an RPC method. connection(connection.stub.StartSession, StartSessionRequest()) # Close the connection. The service running on port 8080 is # left running. connection.close() Example usage of a managed service connection: .. code-block:: python # Start a subprocess using the binary located at /path/to/my/service. connection = CompilerGymServiceConnection(Path("/path/to/my/service")) # Invoke an RPC method. connection(connection.stub.StartSession, StartSessionRequest()) # Close the connection. The subprocess is terminated. connection.close() :ivar stub: A CompilerGymServiceStub that can be used as the first argument to :py:meth:`__call__()` to specify an RPC method to call. :ivar action_spaces: A list of action spaces provided by the service. :ivar observation_spaces: A list of observation spaces provided by the service. """ def __init__( self, endpoint: Union[str, Path], opts: ConnectionOpts = None, ): """Constructor. :param endpoint: The connection endpoint. Either the URL of a service, e.g. "localhost:8080", or the path of a local service binary. :param opts: The connection options. :raises ValueError: If the provided options are invalid. :raises FileNotFoundError: In case opts.local_service_binary is not found. :raises TimeoutError: In case the service failed to start within opts.init_max_seconds seconds. 
""" self.endpoint = endpoint self.opts = opts or ConnectionOpts() self.connection = None self.stub = None self._establish_connection() self.action_spaces: List[ActionSpace] = list( self.connection.spaces.action_space_list ) self.observation_spaces: List[ObservationSpace] = list( self.connection.spaces.observation_space_list ) def _establish_connection(self) -> None: """Create and establish a connection.""" self.connection = self._create_connection(self.endpoint, self.opts) self.stub = self.connection.stub @classmethod def _create_connection( cls, endpoint: Union[str, Path], opts: ConnectionOpts, ) -> Connection: """Initialize the service connection, either by connecting to an RPC service or by starting a locally-managed subprocess. :param endpoint: The connection endpoint. Either the URL of a service, e.g. "localhost:8080", or the path of a local service binary. :param opts: The connection options. :raises ValueError: If the provided options are invalid. :raises FileNotFoundError: In case opts.local_service_binary is not found. :raises ServiceError: In case opts.init_max_attempts failures are made without successfully starting the connection. :raises TimeoutError: In case the service failed to start within opts.init_max_seconds seconds. """ if not endpoint: raise TypeError("No endpoint provided for service connection") start_time = time() end_time = start_time + opts.init_max_seconds attempts = 0 last_exception = None while time() < end_time and attempts < opts.init_max_attempts: attempts += 1 try: if isinstance(endpoint, Path): endpoint_name = endpoint.name return ManagedConnection( local_service_binary=endpoint, process_exit_max_seconds=opts.local_service_exit_max_seconds, rpc_init_max_seconds=opts.rpc_init_max_seconds, port_init_max_seconds=opts.local_service_port_init_max_seconds, script_args=opts.script_args, script_env=opts.script_env, ) else: endpoint_name = endpoint return UnmanagedConnection( url=endpoint, rpc_init_max_seconds=opts.rpc_init_max_seconds, ) except (TimeoutError, ServiceError, NotImplementedError) as e: # Catch preventable errors so that we can retry: # TimeoutError: raised if a service does not produce a port file establish a # connection without a deadline. # ServiceError: raised by an RPC method returning an error status. # NotImplementedError: raised if an RPC method is accessed before the RPC service # has initialized. last_exception = e logger.warning("%s %s (attempt %d)", type(e).__name__, e, attempts) exception_class = ( ServiceError if attempts >= opts.init_max_attempts else TimeoutError ) raise exception_class( f"Failed to create connection to {endpoint_name} after " f"{time() - start_time:.1f} seconds " f"({attempts} {plural(attempts, 'attempt', 'attempts')} made).\n" f"Last error ({type(last_exception).__name__}): {last_exception}" ) def __repr__(self): if self.connection is None: return f"Closed connection to {self.endpoint}" return str(self.endpoint) @property def closed(self) -> bool: """Whether the connection is closed.""" return self.connection is None def close(self): if self.closed: return self.connection.close() self.connection = None def __del__(self): # Don't let the subprocess be orphaned if user forgot to close(), or # if an exception was thrown. self.close() def restart(self): """Restart a connection a service. If the service is managed by this connection (i.e. it is a local binary), the existing service process will be killed and replaced. Else, only the connection to the unmanaged service process is replaced. 
""" if self.connection: self.connection.close() self._establish_connection() def __call__( self, stub_method: StubMethod, request: Request, timeout: Optional[float] = 300, max_retries: Optional[int] = None, retry_wait_seconds: Optional[float] = None, retry_wait_backoff_exponent: Optional[float] = None, ) -> Reply: """Invoke an RPC method on the service and return its response. All RPC methods accept a single `request` message, and respond with a response message. Example usage: .. code-block:: python connection = CompilerGymServiceConnection("localhost:8080") request = compiler_gym.service.proto.GetSpacesRequest() reply = connection(connection.stub.GetSpaces, request) In the above example, the `GetSpaces` RPC method is invoked on a connection, yielding a `GetSpacesReply` message. :param stub_method: An RPC method attribute on `CompilerGymServiceStub`. :param request: A request message. :param timeout: The maximum number of seconds to await a reply. :param max_retries: The maximum number of failed attempts to communicate with the RPC service before raising an error. Retries are made only for communication errors. Failures from other causes such as error signals raised by the service are not retried. :param retry_wait_seconds: The number of seconds to wait between successive attempts to communicate with the RPC service. :param retry_wait_backoff_exponent: The exponential backoff scaling between successive attempts to communicate with the RPC service. :raises ValueError: If the service responds with an error indicating an invalid argument. :raises NotImplementedError: If the service responds with an error indicating that the requested functionality is not implemented. :raises FileNotFoundError: If the service responds with an error indicating that a requested resource was not found. :raises OSError: If the service responds with an error indicating that it ran out of resources. :raises TypeError: If the provided `request` parameter is of incorrect type or cannot be serialized, or if the service responds with an error indicating that a precondition failed. :raises TimeoutError: If the service failed to respond to the query within the specified `timeout`. :raises ServiceTransportError: If the client failed to communicate with the service. :raises ServiceIsClosed: If the connection to the service is closed. :raises ServiceError: If the service raised an error not covered by any of the above conditions. :return: A reply message. """ if self.closed: self._establish_connection() return self.connection( stub_method, request, timeout=timeout, max_retries=max_retries or self.opts.rpc_max_retries, retry_wait_seconds=retry_wait_seconds or self.opts.retry_wait_seconds, retry_wait_backoff_exponent=( retry_wait_backoff_exponent or self.opts.retry_wait_backoff_exponent ), )
CompilerGym-development
compiler_gym/service/connection.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from typing import List, Optional, Tuple

from compiler_gym.service.proto import ActionSpace, Benchmark
from compiler_gym.service.proto import Event as Action
from compiler_gym.service.proto import Event as Observation
from compiler_gym.service.proto import ObservationSpace


class CompilationSession:
    """Base class for encapsulating an incremental compilation session.

    To add support for a new compiler, subclass from this base and provide
    implementations of the abstract methods, then call
    :func:`create_and_run_compiler_service
    <compiler_gym.service.runtime.create_and_run_compiler_service>` and pass in
    your class type:

    .. code-block:: python

        from compiler_gym.service import CompilationSession
        from compiler_gym.service import runtime

        class MyCompilationSession(CompilationSession):
            ...

        if __name__ == "__main__":
            runtime.create_and_run_compiler_service(MyCompilationSession)
    """

    compiler_version: str = ""
    """The compiler version."""

    action_spaces: List[ActionSpace] = []
    """A list of action spaces describing the capabilities of the compiler."""

    observation_spaces: List[ObservationSpace] = []
    """A list of feature vectors that this compiler provides."""

    def __init__(
        self, working_dir: Path, action_space: ActionSpace, benchmark: Benchmark
    ):
        """Start a CompilationSession.

        Subclasses should initialize the parent class first.

        :param working_dir: A directory on the local filesystem that can be
            used to store temporary files such as build artifacts.

        :param action_space: The action space to use.

        :param benchmark: The benchmark to use.
        """
        del action_space  # Subclasses must use this.
        del benchmark  # Subclasses must use this.
        self.working_dir = working_dir

    def apply_action(self, action: Action) -> Tuple[bool, Optional[ActionSpace], bool]:
        """Apply an action.

        :param action: The action to apply.

        :return: A tuple: :code:`(end_of_session, new_action_space,
            action_had_no_effect)`.
        """
        raise NotImplementedError

    def get_observation(self, observation_space: ObservationSpace) -> Observation:
        """Compute an observation.

        :param observation_space: The observation space.

        :return: An observation.
        """
        raise NotImplementedError

    def fork(self) -> "CompilationSession":
        """Create a copy of the current session state.

        Implementing this method is optional.

        :return: A new CompilationSession with the same state.
        """
        # No need to override this if you are not adding support for fork().
        raise NotImplementedError("CompilationSession.fork() not supported")

    def handle_session_parameter(self, key: str, value: str) -> Optional[str]:
        """Handle a session parameter sent by the frontend.

        Session parameters provide a method to send ad-hoc key-value messages
        to a compilation session through the :meth:`env.send_params()
        <compiler_gym.envs.ClientServiceCompilerEnv.send_params>`
        method. It is up to the client/service to agree on a common schema for
        encoding and decoding these parameters.

        Implementing this method is optional.

        :param key: The parameter key.

        :param value: The parameter value.

        :return: A string response message if the parameter was understood. Else
            :code:`None` to indicate that the message could not be interpreted.
        """
        pass
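

# Illustrative sketch: a minimal CompilationSession subclass that simply counts
# the actions applied to it. The action/observation space declarations are left
# empty here to avoid guessing at a particular proto layout; a real service
# must populate them (see the class docstring above).
class _CountingSession(CompilationSession):
    compiler_version: str = "0.0.0"
    action_spaces: List[ActionSpace] = []
    observation_spaces: List[ObservationSpace] = []

    def __init__(
        self, working_dir: Path, action_space: ActionSpace, benchmark: Benchmark
    ):
        super().__init__(working_dir, action_space, benchmark)
        self.action_count = 0

    def apply_action(self, action: Action) -> Tuple[bool, Optional[ActionSpace], bool]:
        self.action_count += 1
        # (end_of_session, new_action_space, action_had_no_effect)
        return False, None, False

    def get_observation(self, observation_space: ObservationSpace) -> Observation:
        # Observation is an alias of the Event message; int64_value is one of
        # its scalar fields.
        return Observation(int64_value=self.action_count)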
CompilerGym-development
compiler_gym/service/compilation_session.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """This module defines a filesystem cache for services.""" import os import random import shutil from datetime import datetime from pathlib import Path from compiler_gym.util.filesystem import is_in_memory from compiler_gym.util.runfiles_path import cache_path, transient_cache_path MAX_CACHE_CONFLICT_RETIRES: int = 1000 def _create_timestamped_unique_service_dir(root: Path) -> Path: for _ in range(MAX_CACHE_CONFLICT_RETIRES): random_hash = random.getrandbits(16) service_name = datetime.now().strftime(f"s/%m%dT%H%M%S-%f-{random_hash:04x}") path: Path = root / service_name # Guard against the unlikely scenario that there is a collision between # the randomly generated working directories of multiple ServiceCache # constructors. try: path.mkdir(parents=True, exist_ok=False) break except FileExistsError: pass else: raise OSError( "Could not create a unique cache directory " f"after {MAX_CACHE_CONFLICT_RETIRES} retries." ) return path class ServiceCache: """A filesystem cache for use by managed services. This provides a directory in which a service can store temporary files and artifacts. A service can assume exclusive use of this cache. When supported, the cache will be in an in-memory filesystem. The cache contains two subdirectories: "logs", which may be used for storing log files, and "disk", which may be used for storing files that require being stored on a traditional filesystem. On some Linux distributions, in-memory filesystems do not permit executing files. See: <github.com/facebookresearch/CompilerGym/issues/465> """ def __init__(self): self.path = _create_timestamped_unique_service_dir(transient_cache_path(".")) (self.path / "logs").mkdir() self._directories_to_remove = [self.path] if is_in_memory(self.path): disk = _create_timestamped_unique_service_dir(cache_path(".")) self._directories_to_remove.append(disk) os.symlink(disk, self.path / "disk") else: (self.path / "disk").mkdir() def __truediv__(self, rhs) -> Path: """Supports 'cache / "path"' syntax.""" return self.path / rhs def close(self): """Remove the cache directory. This must be called.""" for directory in self._directories_to_remove: shutil.rmtree(directory, ignore_errors=True) def __repr__(self) -> str: return str(self.path)
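

# Illustrative sketch of the ServiceCache lifecycle described above: create a
# cache, write a file into its "logs" subdirectory using the '/' operator, and
# remove everything with close(). The file name is arbitrary.
def _example_service_cache_usage():  # pragma: no cover
    cache = ServiceCache()
    try:
        log_file = cache / "logs" / "example.log"  # cache / "logs" yields a Path
        log_file.write_text("service started\n")
        print("cache directory:", cache)
    finally:
        cache.close()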
CompilerGym-development
compiler_gym/service/service_cache.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from compiler_gym.service.proto.compiler_gym_service_pb2 import ( ActionSpace, AddBenchmarkReply, AddBenchmarkRequest, Benchmark, BenchmarkDynamicConfig, BooleanBox, BooleanRange, BooleanSequenceSpace, BooleanTensor, ByteBox, ByteSequenceSpace, BytesSequenceSpace, ByteTensor, Command, CommandlineSpace, DictEvent, DictSpace, DiscreteSpace, DoubleBox, DoubleRange, DoubleSequenceSpace, DoubleTensor, EndSessionReply, EndSessionRequest, Event, File, FloatBox, FloatRange, FloatSequenceSpace, FloatTensor, ForkSessionReply, ForkSessionRequest, GetSpacesReply, GetSpacesRequest, GetVersionReply, GetVersionRequest, Int64Box, Int64Range, Int64SequenceSpace, Int64Tensor, ListEvent, ListSpace, NamedDiscreteSpace, ObservationSpace, Opaque, SendSessionParameterReply, SendSessionParameterRequest, SessionParameter, Space, SpaceSequenceSpace, StartSessionReply, StartSessionRequest, StepReply, StepRequest, StringSequenceSpace, StringSpace, StringTensor, ) from compiler_gym.service.proto.compiler_gym_service_pb2_grpc import ( CompilerGymServiceServicer, CompilerGymServiceStub, ) __all__ = [ "ActionSpace", "AddBenchmarkReply", "AddBenchmarkRequest", "Benchmark", "BenchmarkDynamicConfig", "BooleanBox", "BooleanRange", "BooleanSequenceSpace", "BooleanTensor", "ByteBox", "ByteSequenceSpace", "ByteTensor", "BytesSequenceSpace", "Command", "CommandlineSpace", "CompilerGymServiceConnection", "CompilerGymServiceServicer", "CompilerGymServiceStub", "ConnectionOpts", "DictEvent", "DictSpace", "DiscreteSpace", "DoubleBox", "DoubleRange", "DoubleRange", "DoubleSequenceSpace", "DoubleTensor", "EndSessionReply", "EndSessionRequest", "Event", "File", "FloatBox", "FloatRange", "FloatSequenceSpace", "FloatTensor", "ForkSessionReply", "ForkSessionRequest", "GetSpacesReply", "GetSpacesRequest", "GetVersionReply", "GetVersionRequest", "Int64Box", "Int64Range", "Int64SequenceSpace", "Int64Tensor", "ListEvent", "ListSpace", "NamedDiscreteSpace", "NamedDiscreteSpace", "ObservationSpace", "Opaque", "SendSessionParameterReply", "SendSessionParameterRequest", "SessionParameter", "Space", "SpaceSequenceSpace", "StartSessionReply", "StartSessionRequest", "StepReply", "StepRequest", "StringSequenceSpace", "StringSpace", "StringTensor", ]
CompilerGym-development
compiler_gym/service/proto/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This module contains converters to/from protobuf messages.

For example <compiler_gym.service.proto.ActionSpace>/<compiler_gym.service.proto.ObservationSpace> <->
<compiler_gym.spaces>,
or <compiler_gym.service.proto.Event> <-> actions/observations.

When defining new environments <compiler_gym.service.proto.py_converters.make_message_default_converter>
and <compiler_gym.service.proto.py_converters.to_event_message_default_converter>
can be used as a starting point for custom converters.
"""
import json
from builtins import getattr
from collections import OrderedDict
from typing import Any, Callable
from typing import Dict as DictType
from typing import List, Type, Union

import google.protobuf.any_pb2 as any_pb2
import networkx as nx
import numpy as np
from google.protobuf.message import Message
from gym.spaces import Space as GymSpace

from compiler_gym.service.proto.compiler_gym_service_pb2 import (
    ActionSpace as ActionSpaceProto,
)
from compiler_gym.service.proto.compiler_gym_service_pb2 import (
    BooleanBox,
    BooleanRange,
    BooleanSequenceSpace,
    BooleanTensor,
    ByteBox,
    ByteSequenceSpace,
    BytesSequenceSpace,
    ByteTensor,
    CommandlineSpace,
    DictEvent,
    DictSpace,
    DiscreteSpace,
    DoubleBox,
    DoubleRange,
    DoubleSequenceSpace,
    DoubleTensor,
    Event,
    FloatBox,
    FloatRange,
    FloatSequenceSpace,
    FloatTensor,
    Int64Box,
    Int64Range,
    Int64SequenceSpace,
    Int64Tensor,
    ListEvent,
    ListSpace,
    NamedDiscreteSpace,
    ObservationSpace,
    Opaque,
    Space,
    SpaceSequenceSpace,
    StringSequenceSpace,
    StringSpace,
    StringTensor,
)
from compiler_gym.spaces.action_space import ActionSpace
from compiler_gym.spaces.box import Box
from compiler_gym.spaces.commandline import Commandline, CommandlineFlag
from compiler_gym.spaces.dict import Dict
from compiler_gym.spaces.discrete import Discrete
from compiler_gym.spaces.named_discrete import NamedDiscrete
from compiler_gym.spaces.permutation import Permutation
from compiler_gym.spaces.scalar import Scalar
from compiler_gym.spaces.sequence import Sequence
from compiler_gym.spaces.space_sequence import SpaceSequence
from compiler_gym.spaces.tuple import Tuple


class TypeBasedConverter:
    """Converter that dispatches based on the exact type of the parameter.

    >>> converter = TypeBasedConverter({ int: lambda x: float(x)})
    >>> val: float = converter(5)
    """

    conversion_map: DictType[Type, Callable[[Any], Any]]

    def __init__(self, conversion_map: DictType[Type, Callable[[Any], Any]] = None):
        self.conversion_map = {} if conversion_map is None else conversion_map

    def __call__(self, val: Any) -> Any:
        return self.conversion_map[type(val)](val)


class TypeIdDispatchConverter:
    """Dispatches conversion of a <google.protobuf.message.Message> based on the
    value of its field "type_id".

    If the "type_id" field is not present the conversion falls back on
    `default_converter`.

    Example:

    ..
code-block:: python from compiler_gym.service.proto import Event, py_converters def default_converter(msg): return msg.string_value + "_default" conversion_map = { "type_1": lambda msg: msg.string_value + "_type_1", "type_2": lambda msg: msg.string_value + "_type_2", } type_id_converter = py_converters.TypeIdDispatchConverter( default_converter=default_converter, conversion_map=conversion_map ) assert type_id_converter(Event(string_value="msg_val")) == "msg_val_default" assert ( type_id_converter(Event(string_value="msg_val", type_id="type_1")) == "msg_val_type_1" ) assert ( type_id_converter(Event(string_value="msg_val", type_id="type_2")) == "msg_val_type_2" ) """ conversion_map: DictType[str, Callable[[Message], Any]] default_converter: Callable[[Message], Any] def __init__( self, default_converter: Callable[[Message], Any], conversion_map: DictType[str, Callable[[Message], Any]] = None, ): self.conversion_map = {} if conversion_map is None else conversion_map self.default_converter = default_converter def __call__(self, message: Message) -> Any: if message.HasField("type_id"): return self.conversion_map[message.type_id](message) else: return self.default_converter(message) proto_type_to_dtype_map = { BooleanTensor: bool, ByteTensor: np.int8, Int64Tensor: np.int64, FloatTensor: np.float32, DoubleTensor: np.float64, StringTensor: object, BooleanBox: bool, ByteBox: np.int8, Int64Box: np.int64, FloatBox: np.float32, DoubleBox: float, BooleanRange: bool, Int64Range: np.int64, FloatRange: np.float32, DoubleRange: float, BooleanSequenceSpace: bool, BytesSequenceSpace: bytes, ByteSequenceSpace: np.int8, Int64SequenceSpace: np.int64, FloatSequenceSpace: np.float32, DoubleSequenceSpace: float, StringSpace: str, } def convert_standard_tensor_message_to_numpy( tensor: Union[BooleanTensor, Int64Tensor, FloatTensor, DoubleTensor, StringTensor] ): res = np.array(tensor.value, dtype=proto_type_to_dtype_map[type(tensor)]) res = res.reshape(tensor.shape) return res def convert_numpy_to_boolean_tensor_message(tensor: np.ndarray): return BooleanTensor(value=tensor.flatten().tolist(), shape=tensor.shape) def convert_byte_tensor_message_to_numpy(tensor: ByteTensor): res = np.frombuffer(tensor.value, dtype=np.byte) res = res.reshape(tensor.shape) return res def convert_numpy_to_byte_tensor_message(tensor: np.ndarray): return ByteTensor(value=tensor.tobytes(), shape=tensor.shape) def convert_numpy_to_int64_tensor_message(tensor: np.ndarray): return Int64Tensor(value=tensor.flatten(), shape=tensor.shape) def convert_numpy_to_float_tensor_message(tensor: np.ndarray): return FloatTensor(value=tensor.flatten(), shape=tensor.shape) def convert_numpy_to_double_tensor_message(tensor: np.ndarray): return DoubleTensor(value=tensor.flatten(), shape=tensor.shape) def convert_numpy_to_string_tensor_message(tensor: np.ndarray): return StringTensor(value=tensor.flatten(), shape=tensor.shape) convert_tensor_message_to_numpy = TypeBasedConverter( conversion_map={ BooleanTensor: convert_standard_tensor_message_to_numpy, ByteTensor: convert_byte_tensor_message_to_numpy, Int64Tensor: convert_standard_tensor_message_to_numpy, FloatTensor: convert_standard_tensor_message_to_numpy, DoubleTensor: convert_standard_tensor_message_to_numpy, StringTensor: convert_standard_tensor_message_to_numpy, } ) def convert_bytes_to_numpy(arr: bytes) -> np.ndarray: return np.frombuffer(arr, dtype=np.int8) def convert_permutation_space_message(space: Space) -> Permutation: if ( space.int64_sequence.scalar_range.max - 
space.int64_sequence.scalar_range.min + 1 != space.int64_sequence.length_range.min or space.int64_sequence.length_range.min != space.int64_sequence.length_range.max ): raise ValueError( f"Invalid permutation space message:\n{space}." " Variable sequence length is not allowed." " A permutation must also include all integers in its range " "[min, min + length)." ) return Permutation( name=None, scalar_range=convert_range_message(space.int64_sequence.scalar_range), ) class NumpyToTensorMessageConverter: dtype_conversion_map: DictType[Type, Callable[[Any], Message]] def __init__(self): self.dtype_conversion_map = { np.bool_: convert_numpy_to_boolean_tensor_message, np.int8: convert_numpy_to_byte_tensor_message, np.int64: convert_numpy_to_int64_tensor_message, np.float32: convert_numpy_to_float_tensor_message, np.float64: convert_numpy_to_double_tensor_message, np.dtype(object): convert_numpy_to_string_tensor_message, } def __call__( self, tensor: np.ndarray ) -> Union[ BooleanTensor, ByteTensor, Int64Tensor, FloatTensor, DoubleTensor, StringTensor ]: return self.dtype_conversion_map[tensor.dtype.type](tensor) convert_numpy_to_tensor_message = NumpyToTensorMessageConverter() def convert_trivial(val: Any): return val class FromMessageConverter: """Convert a protobuf message to an object. The conversion function is chosen based on the message descriptor. """ conversion_map: DictType[str, Callable[[Message], Any]] def __init__(self, conversion_map: DictType[str, Callable[[Message], Any]] = None): self.conversion_map = {} if conversion_map is None else conversion_map def __call__(self, message: Message) -> Any: return self.conversion_map[message.DESCRIPTOR.full_name](message) class EventMessageDefaultConverter: message_converter: Callable[[Any], Any] def __init__(self, message_converter: Callable[[Any], Any]): self.message_converter = message_converter def __call__(self, event: Event): field = event.WhichOneof("value") if field is None: return None return self.message_converter(getattr(event, field)) class ToEventMessageConverter: converter: TypeBasedConverter type_field_map: DictType[Type, str] def __init__(self, converter: TypeBasedConverter): self.converter = converter self.type_field_map = { ListEvent: "event_list", DictEvent: "event_dict", bool: "boolean_value", int: "int64_value", np.int32: "int64_value", np.float32: "float_value", float: "double_value", str: "string_value", BooleanTensor: "boolean_tensor", ByteTensor: "byte_tensor", Int64Tensor: "int64_tensor", FloatTensor: "float_tensor", DoubleTensor: "double_tensor", StringTensor: "string_tensor", any_pb2.Any: "any_value", } def __call__(self, val: Any) -> Event: converted_val = self.converter(val) res = Event() if isinstance(converted_val, Message): getattr(res, self.type_field_map[type(converted_val)]).CopyFrom( converted_val ) else: setattr(res, self.type_field_map[type(converted_val)], converted_val) return res class ListEventMessageConverter: event_message_converter: Callable[[Event], Any] def __init__(self, event_message_converter: Callable[[Event], Any]): self.event_message_converter = event_message_converter def __call__(self, list_event: ListEvent) -> List[Any]: return [self.event_message_converter(event) for event in list_event.event] class ToListEventMessageConverter: to_event_converter: ToEventMessageConverter def __init__(self, to_event_converter: ToEventMessageConverter): self.to_event_converter = to_event_converter def __call__(self, event_list: List) -> ListEvent: return ListEvent(event=[self.to_event_converter(event) for 
event in event_list]) class DictEventMessageConverter: event_message_converter: Callable[[Event], Any] def __init__(self, event_message_converter: Callable[[Event], Any]): self.event_message_converter = event_message_converter def __call__(self, dict_event: DictEvent) -> DictType[str, Any]: return { key: self.event_message_converter(event) for key, event in dict_event.event.items() } class ToDictEventMessageConverter: to_event_converter: ToEventMessageConverter def __init__(self, to_event_converter: ToEventMessageConverter): self.to_event_converter = to_event_converter def __call__(self, d: DictType) -> DictEvent: return DictEvent( event={key: self.to_event_converter(val) for key, val in d.items()} ) class ProtobufAnyUnpacker: # message type string to message class map type_str_to_class_map: DictType[str, Type] def __init__(self, type_str_to_class_map: DictType[str, Type] = None): self.type_str_to_class_map = ( { "compiler_gym.Opaque": Opaque, "compiler_gym.CommandlineSpace": CommandlineSpace, } if type_str_to_class_map is None else type_str_to_class_map ) def __call__(self, msg: any_pb2.Any) -> Message: message_cls = self.type_str_to_class_map[msg.TypeName()] unpacked_message = message_cls() status = msg.Unpack(unpacked_message) if not status: raise ValueError( f'Failed unpacking prtobuf Any message with type url "{msg.TypeName()}".' ) return unpacked_message class ProtobufAnyConverter: unpacker: ProtobufAnyUnpacker message_converter: Callable[[Message], Any] def __init__( self, unpacker: ProtobufAnyUnpacker, message_converter: Callable[[Message], Any] ): self.unpacker = unpacker self.message_converter = message_converter def __call__(self, msg: any_pb2.Any) -> Any: unpacked_message = self.unpacker(msg) return self.message_converter(unpacked_message) class ActionSpaceMessageConverter: message_converter: Callable[[Any], Any] def __init__(self, message_converter: Callable[[Any], Any]): self.message_converter = message_converter def __call__(self, message: ActionSpace) -> GymSpace: res = self.message_converter(message.space) res.name = message.name return res class ObservationSpaceMessageConverter: message_converter: Callable[[Any], Any] def __init__(self, message_converter: Callable[[Any], Any]): self.message_converter = message_converter def __call__(self, message: ObservationSpace) -> GymSpace: res = self.message_converter(message.space) res.name = message.name return res def make_action_space_wrapper( converter: Callable[[Any], Any] ) -> Callable[[Any], ActionSpace]: return lambda msg: ActionSpace(space=converter(msg)) def make_message_default_converter() -> Callable[[Any], Any]: conversion_map = { bool: convert_trivial, int: convert_trivial, np.int32: convert_trivial, float: convert_trivial, np.float32: convert_trivial, str: convert_trivial, bytes: convert_bytes_to_numpy, BooleanTensor: convert_tensor_message_to_numpy, ByteTensor: convert_tensor_message_to_numpy, Int64Tensor: convert_tensor_message_to_numpy, FloatTensor: convert_tensor_message_to_numpy, DoubleTensor: convert_tensor_message_to_numpy, StringTensor: convert_tensor_message_to_numpy, DiscreteSpace: convert_discrete_space_message, NamedDiscreteSpace: convert_named_discrete_space_message, CommandlineSpace: convert_commandline_space_message, BooleanRange: convert_range_message, Int64Range: convert_range_message, FloatRange: convert_range_message, DoubleRange: convert_range_message, StringSpace: convert_string_space, BooleanSequenceSpace: convert_sequence_space, ByteSequenceSpace: convert_sequence_space, BytesSequenceSpace: 
convert_sequence_space, Int64SequenceSpace: convert_sequence_space, FloatSequenceSpace: convert_sequence_space, DoubleSequenceSpace: convert_sequence_space, StringSequenceSpace: convert_sequence_space, BooleanBox: convert_box_message, ByteBox: convert_box_message, Int64Box: convert_box_message, FloatBox: convert_box_message, DoubleBox: convert_box_message, } res = TypeBasedConverter(conversion_map) conversion_map[Event] = TypeIdDispatchConverter( default_converter=EventMessageDefaultConverter(res) ) conversion_map[ListEvent] = ListEventMessageConverter(conversion_map[Event]) conversion_map[DictEvent] = DictEventMessageConverter(conversion_map[Event]) conversion_map[Space] = TypeIdDispatchConverter( default_converter=SpaceMessageDefaultConverter(res), conversion_map={"permutation": convert_permutation_space_message}, ) conversion_map[ListSpace] = ListSpaceMessageConverter(conversion_map[Space]) conversion_map[DictSpace] = DictSpaceMessageConverter(conversion_map[Space]) conversion_map[SpaceSequenceSpace] = SpaceSequenceSpaceMessageConverter(res) conversion_map[ActionSpaceProto] = ActionSpaceMessageConverter(res) conversion_map[ObservationSpace] = ObservationSpaceMessageConverter(res) conversion_map[any_pb2.Any] = ProtobufAnyConverter( unpacker=ProtobufAnyUnpacker(), message_converter=res ) conversion_map[Opaque] = make_opaque_message_default_converter() return res def to_event_message_default_converter() -> ToEventMessageConverter: conversion_map = { bool: convert_trivial, int: convert_trivial, np.int32: convert_trivial, float: convert_trivial, np.float32: convert_trivial, str: convert_trivial, np.int32: convert_trivial, np.ndarray: NumpyToTensorMessageConverter(), } type_based_converter = TypeBasedConverter(conversion_map) res = ToEventMessageConverter(type_based_converter) conversion_map[list] = ToListEventMessageConverter(res) conversion_map[dict] = ToDictEventMessageConverter(res) conversion_map[OrderedDict] = ToDictEventMessageConverter(res) return res range_type_default_min_map: DictType[Type, Any] = { BooleanRange: False, Int64Range: np.iinfo(np.int64).min, FloatRange: np.float32(np.NINF), DoubleRange: np.float64(np.NINF), } range_type_default_max_map: DictType[Type, Any] = { BooleanRange: True, Int64Range: np.iinfo(np.int64).max, FloatRange: np.float32(np.PINF), DoubleRange: np.float64(np.PINF), } def convert_range_message( range: Union[BooleanRange, Int64Range, FloatRange, DoubleRange] ) -> Scalar: range_type = type(range) min = range.min if range.HasField("min") else range_type_default_min_map[range_type] max = range.max if range.HasField("max") else range_type_default_max_map[range_type] return Scalar( name=None, min=min, max=max, dtype=proto_type_to_dtype_map[range_type] ) class ToRangeMessageConverter: dtype_to_type_map: DictType[Type, Type] def __init__(self): self.dtype_to_type_map = { np.bool_: BooleanRange, np.int8: Int64Range, np.int64: Int64Range, np.float32: FloatRange, np.float64: DoubleRange, } def __call__( self, scalar: Scalar ) -> Union[BooleanRange, Int64Range, FloatRange, DoubleRange]: return self.dtype_to_type_map[np.dtype(scalar.dtype).type]( min=scalar.min, max=scalar.max ) convert_to_range_message = ToRangeMessageConverter() def convert_box_message( box: Union[BooleanBox, ByteBox, Int64Box, FloatBox, DoubleBox] ) -> Box: return Box( low=convert_tensor_message_to_numpy(box.low), high=convert_tensor_message_to_numpy(box.high), name=None, dtype=proto_type_to_dtype_map[type(box)], ) class ToBoxMessageConverter: dtype_to_type_map: DictType[Type, Type] def 
__init__(self): self.dtype_to_type_map = { np.bool_: BooleanBox, np.int8: ByteBox, np.int64: Int64Box, np.float32: FloatBox, np.float64: DoubleBox, } def __call__( self, box: Box ) -> Union[BooleanBox, ByteBox, Int64Box, FloatBox, DoubleBox]: return self.dtype_to_type_map[np.dtype(box.dtype).type]( low=convert_numpy_to_tensor_message(box.low), high=convert_numpy_to_tensor_message(box.high), ) convert_to_box_message = ToBoxMessageConverter() def convert_discrete_space_message(message: DiscreteSpace) -> Discrete: return Discrete(n=message.n, name=None) def convert_to_discrete_space_message(space: Discrete) -> DiscreteSpace: return DiscreteSpace(n=space.n) def convert_named_discrete_space_message(message: NamedDiscreteSpace) -> NamedDiscrete: return NamedDiscrete(items=message.name, name=None) def convert_commandline_space_message(message: CommandlineSpace) -> Commandline: return Commandline( items=[ CommandlineFlag(name=name, flag=name, description="") for name in message.name ], name=None, ) def convert_to_named_discrete_space_message(space: NamedDiscrete) -> NamedDiscreteSpace: return NamedDiscreteSpace(name=space.names) def convert_sequence_space( seq: Union[ BooleanSequenceSpace, Int64SequenceSpace, FloatSequenceSpace, DoubleSequenceSpace, BytesSequenceSpace, StringSequenceSpace, ] ) -> Sequence: scalar_range = ( convert_range_message(seq.scalar_range) if hasattr(seq, "scalar_range") else None ) length_range = convert_range_message(seq.length_range) return Sequence( name=None, size_range=(length_range.min, length_range.max), dtype=proto_type_to_dtype_map[type(seq)], scalar_range=scalar_range, ) class ToRangedSequenceMessageConverter: dtype_to_type_map: DictType[Type, Type] def __init__(self): self.dtype_to_type_map = { np.bool_: BooleanSequenceSpace, np.int8: ByteSequenceSpace, np.int64: Int64SequenceSpace, np.float32: FloatSequenceSpace, np.float64: DoubleSequenceSpace, } def __call__( self, seq: Sequence ) -> Union[ BooleanSequenceSpace, Int64SequenceSpace, FloatSequenceSpace, DoubleSequenceSpace, ]: return self.dtype_to_type_map[np.dtype(seq.dtype).type]( length_range=Int64Range(min=seq.size_range[0], max=seq.size_range[1]), scalar_range=convert_to_range_message(seq.scalar_range), ) convert_to_ranged_sequence_space = ToRangedSequenceMessageConverter() def convert_to_string_sequence_space(seq: Sequence) -> StringSequenceSpace: return StringSpace( length_range=Int64Range(min=seq.size_range[0], max=seq.size_range[1]) ) def convert_to_bytes_sequence_space(seq: Sequence) -> BytesSequenceSpace: return BytesSequenceSpace( length_range=Int64Range(min=seq.size_range[0], max=seq.size_range[1]) ) def convert_string_space(s: StringSpace) -> Sequence: return convert_sequence_space(s) def convert_to_string_space(s: Sequence) -> StringSpace: return StringSpace( length_range=Int64Range(min=s.size_range[0], max=s.size_range[1]) ) class ToSequenceSpaceMessageConverter: dtype_map: DictType[ Type, Callable[ [Sequence], Union[ BooleanSequenceSpace, BytesSequenceSpace, Int64SequenceSpace, FloatSequenceSpace, DoubleSequenceSpace, StringSequenceSpace, ], ], ] def __init__(self): self.dtype_map = { bool: convert_to_ranged_sequence_space, np.bool_: convert_to_ranged_sequence_space, np.int8: convert_to_bytes_sequence_space, np.int64: convert_to_ranged_sequence_space, int: convert_to_ranged_sequence_space, np.float32: convert_to_ranged_sequence_space, np.float64: convert_to_ranged_sequence_space, float: convert_to_ranged_sequence_space, str: convert_to_string_space, } def __call__( self, seq: Sequence ) -> 
Union[ BooleanSequenceSpace, BytesSequenceSpace, Int64SequenceSpace, FloatSequenceSpace, DoubleSequenceSpace, StringSequenceSpace, ]: return self.dtype_map[seq.dtype](seq) convert_to_sequence_space_message = ToSequenceSpaceMessageConverter() class SpaceSequenceSpaceMessageConverter: space_message_converter: Callable[[Space], GymSpace] def __init__(self, space_message_converter): self.space_message_converter = space_message_converter def __call__(self, seq: SpaceSequenceSpace) -> GymSpace: return SpaceSequence( name=None, space=self.space_message_converter(seq.space), size_range=(seq.length_range.min, seq.length_range.max), ) class SpaceMessageDefaultConverter: message_converter: TypeBasedConverter def __init__(self, message_converter: TypeBasedConverter): self.message_converter = message_converter def __call__( self, space: Space ) -> Union[Dict, Discrete, NamedDiscrete, Scalar, Tuple, Box, Sequence]: field = space.WhichOneof("value") if field is None: return None res = self.message_converter(getattr(space, field)) return res class ToSpaceMessageConverter: converter: TypeBasedConverter type_field_map: DictType[Type, str] def __init__(self, converter: TypeBasedConverter): self.converter = converter self.type_field_map = { ListSpace: "space_list", DictSpace: "space_dict", DiscreteSpace: "discrete", NamedDiscreteSpace: "named_discrete", BooleanRange: "boolean_value", Int64Range: "int64_value", FloatRange: "float_value", DoubleRange: "double_value", StringSpace: "string_value", BooleanSequenceSpace: "boolean_sequence", BytesSequenceSpace: "bytes_sequence", ByteSequenceSpace: "byte_sequence", Int64SequenceSpace: "int64_sequence", FloatSequenceSpace: "float_sequence", DoubleSequenceSpace: "double_sequence", StringSequenceSpace: "string_sequence", BooleanBox: "boolean_box", ByteBox: "byte_box", Int64Box: "int64_box", FloatBox: "float_box", DoubleBox: "double_box", any_pb2.Any: "any_value", } def __call__( self, space: Union[Tuple, Dict, Discrete, NamedDiscrete, Sequence, Box, Scalar] ) -> Space: converted_space = self.converter(space) res = Space() if isinstance(converted_space, Message): getattr(res, self.type_field_map[type(converted_space)]).CopyFrom( converted_space ) else: setattr(res, self.type_field_map[type(converted_space)], converted_space) return res class ListSpaceMessageConverter: space_message_converter: Callable[[Space], Any] def __init__(self, space_message_converter: Callable[[Space], Any]): self.space_message_converter = space_message_converter def __call__(self, list_space: ListSpace) -> Tuple: return Tuple( spaces=[self.space_message_converter(space) for space in list_space.space], name=None, ) class ToListSpaceMessageConverter: to_space_converter: ToSpaceMessageConverter def __init__(self, to_space_converter: ToSpaceMessageConverter): self.to_space_converter = to_space_converter def __call__(self, spaces: Tuple) -> ListSpace: return ListSpace( space=[self.to_space_converter(space) for space in spaces.spaces] ) class DictSpaceMessageConverter: space_message_converter: Callable[[Space], Any] def __init__(self, space_message_converter: Callable[[Space], Any]): self.space_message_converter = space_message_converter def __call__(self, dict_space: DictSpace) -> Dict: return Dict( spaces={ key: self.space_message_converter(space) for key, space in dict_space.space.items() }, name=None, ) class ToDictSpaceMessageConverter: to_space_converter: ToSpaceMessageConverter def __init__(self, to_space_converter: ToSpaceMessageConverter): self.to_space_converter = to_space_converter def 
__call__(self, d: Dict) -> DictSpace: return DictSpace( space={key: self.to_space_converter(val) for key, val in d.spaces.items()} ) def to_space_message_default_converter() -> ToSpaceMessageConverter: conversion_map = { Discrete: convert_to_discrete_space_message, NamedDiscrete: convert_to_named_discrete_space_message, Scalar: convert_to_range_message, Sequence: convert_to_sequence_space_message, Box: convert_to_box_message, } type_based_converter = TypeBasedConverter(conversion_map) res = ToSpaceMessageConverter(type_based_converter) conversion_map[Tuple] = ToListSpaceMessageConverter(res) conversion_map[Dict] = ToDictSpaceMessageConverter(res) return res class OpaqueMessageConverter: """Converts <compiler_gym.service.proto.Opaque> message based on its format descriptor.""" format_coverter_map: DictType[str, Callable[[bytes], Any]] def __init__(self, format_coverter_map=None): self.format_coverter_map = ( {} if format_coverter_map is None else format_coverter_map ) def __call__(self, message: Opaque) -> Any: return self.format_coverter_map[message.format](message.data) def make_opaque_message_default_converter(): return OpaqueMessageConverter( {"json://networkx/MultiDiGraph": _json2nx, "json://": bytes_to_json} ) def bytes_to_json(data: bytes): return json.loads(data.decode("utf-8")) def _json2nx(data: bytes): json_data = json.loads(data.decode("utf-8")) return nx.readwrite.json_graph.node_link_graph( json_data, multigraph=True, directed=True ) message_default_converter: TypeBasedConverter = make_message_default_converter()
CompilerGym-development
compiler_gym/service/proto/py_converters.py
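A minimal sketch (not part of the repository) of the round trip that the converters above implement: native Python/NumPy values are packed into Event messages for the RPC layer, and Event messages coming back are unpacked into native values again.

import numpy as np

from compiler_gym.service.proto.py_converters import (
    make_message_default_converter,
    to_event_message_default_converter,
)

to_event = to_event_message_default_converter()
from_event = make_message_default_converter()

# Pack a numpy tensor into an Event message (an Int64Tensor under the hood) ...
event = to_event(np.arange(6, dtype=np.int64).reshape(2, 3))
# ... and unpack it again. The result is a numpy array with the same contents.
roundtrip = from_event(event)
assert np.array_equal(roundtrip, np.arange(6).reshape(2, 3))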
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from typing import Dict, Optional import numpy as np from compiler_gym.service.proto import Benchmark MAX_SIZE_IN_BYTES = 512 * 104 * 1024 logger = logging.getLogger(__name__) class BenchmarkCache: """An in-memory cache of Benchmark messages. This object caches Benchmark messages by URI. Once the cache reaches a predetermined size, benchmarks are evicted randomly until the capacity is reduced to 50%. """ def __init__( self, max_size_in_bytes: int = MAX_SIZE_IN_BYTES, rng: Optional[np.random.Generator] = None, ): self._max_size_in_bytes = max_size_in_bytes self.rng = rng or np.random.default_rng() self._benchmarks: Dict[str, Benchmark] = {} self._size_in_bytes = 0 def __getitem__(self, uri: str) -> Benchmark: """Get a benchmark by URI. Raises KeyError.""" item = self._benchmarks.get(uri) if item is None: raise KeyError(uri) return item def __contains__(self, uri: str): """Whether URI is in cache.""" return uri in self._benchmarks def __setitem__(self, uri: str, benchmark: Benchmark): """Add benchmark to cache.""" # Remove any existing value to keep the cache size consistent. if uri in self._benchmarks: self._size_in_bytes -= self._benchmarks[uri].ByteSize() del self._benchmarks[uri] size = benchmark.ByteSize() if self.size_in_bytes + size > self.max_size_in_bytes: if size > self.max_size_in_bytes: logger.warning( "Adding new benchmark with size %d bytes exceeds total " "target cache size of %d bytes", size, self.max_size_in_bytes, ) else: logger.debug( "Adding new benchmark with size %d bytes " "exceeds maximum size %d bytes, %d items", size, self.max_size_in_bytes, self.size, ) self.evict_to_capacity() self._benchmarks[uri] = benchmark self._size_in_bytes += size logger.debug( "Cached benchmark %s. Cache size = %d bytes, %d items", uri, self.size_in_bytes, self.size, ) def evict_to_capacity(self, target_size_in_bytes: Optional[int] = None) -> None: """Evict benchmarks randomly to reduce the capacity below 50%.""" evicted = 0 target_size_in_bytes = ( self.max_size_in_bytes // 2 if target_size_in_bytes is None else target_size_in_bytes ) while self.size and self.size_in_bytes > target_size_in_bytes: evicted += 1 key = self.rng.choice(list(self._benchmarks.keys())) self._size_in_bytes -= self._benchmarks[key].ByteSize() del self._benchmarks[key] if evicted: logger.info( "Evicted %d benchmarks from cache. " "Benchmark cache size now %d bytes, %d items", evicted, self.size_in_bytes, self.size, ) @property def size(self) -> int: """The number of items in the cache.""" return len(self._benchmarks) @property def size_in_bytes(self) -> int: """The combined size of the elements in the cache, excluding the cache overhead. """ return self._size_in_bytes @property def max_size_in_bytes(self) -> int: """The maximum size of the cache.""" return self._max_size_in_bytes @max_size_in_bytes.setter def max_size_in_bytes(self, value: int) -> None: """Set a new maximum cache size.""" self._max_size_in_bytes = value self.evict_to_capacity(target_size_in_bytes=value)
CompilerGym-development
compiler_gym/service/runtime/benchmark_cache.py
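A minimal sketch (not part of the repository) of how the cache above behaves: with a deliberately tiny capacity, inserting benchmarks past the limit triggers random eviction back down to roughly half of the maximum size. The benchmark URIs are made up for illustration.

from compiler_gym.service.proto import Benchmark
from compiler_gym.service.runtime.benchmark_cache import BenchmarkCache

cache = BenchmarkCache(max_size_in_bytes=256)  # tiny capacity, for illustration only
for i in range(100):
    uri = f"benchmark://example-v0/{i}"
    cache[uri] = Benchmark(uri=uri)

# The cache never holds all 100 entries; earlier entries were evicted at random,
# and the total size stays within the configured limit.
assert cache.size < 100
assert cache.size_in_bytes <= cache.max_size_in_bytes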
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from compiler_gym.service.runtime.create_and_run_compiler_gym_service import ( create_and_run_compiler_gym_service, ) __all__ = [ "create_and_run_compiler_gym_service", ]
CompilerGym-development
compiler_gym/service/runtime/__init__.py
#! /usr/bin/env python3 # # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """An example CompilerGym service in python.""" import os import sys from concurrent import futures from multiprocessing import cpu_count from pathlib import Path from signal import SIGTERM, signal from tempfile import mkdtemp from threading import Event, Thread from typing import Type import grpc from absl import app, flags, logging from compiler_gym.service import connection from compiler_gym.service.compilation_session import CompilationSession from compiler_gym.service.proto import compiler_gym_service_pb2_grpc from compiler_gym.service.runtime.compiler_gym_service import CompilerGymService from compiler_gym.util import debug_util as dbg from compiler_gym.util.filesystem import atomic_file_write from compiler_gym.util.shell_format import plural flags.DEFINE_string("working_dir", "", "Path to use as service working directory") flags.DEFINE_integer("port", 0, "The service listening port") flags.DEFINE_integer( "rpc_service_threads", cpu_count(), "The number of server worker threads" ) flags.DEFINE_integer("logbuflevel", 0, "Flag for compatability with C++ service.") FLAGS = flags.FLAGS MAX_MESSAGE_SIZE_IN_BYTES = 512 * 1024 * 1024 shutdown_signal = Event() # NOTE(cummins): This script is executed in a subprocess, so code coverage # tracking does not work. As such we use "# pragma: no cover" annotation for all # functions. def _shutdown_handler(signal_number, stack_frame): # pragma: no cover del stack_frame # Unused logging.info("Service received signal: %d", signal_number) shutdown_signal.set() def create_and_run_compiler_gym_service( compilation_session_type: Type[CompilationSession], ): # pragma: no cover """Create and run an RPC service for the given compilation session. This should be called on its own in a self contained script to implement a compilation service. Example: .. code-block:: python from compiler_gym.service import runtime from my_compiler_service import MyCompilationSession if __name__ == "__main__": runtime.create_and_run_compiler_gym_service(MyCompilationSession) This function never returns. :param compilation_session_type: A sublass of :class:`CompilationSession <compiler_gym.service.CompilationSession>` that provides implementations of the abstract methods. """ def main(argv): # Register a signal handler for SIGTERM that will set the shutdownSignal # future value. signal(SIGTERM, _shutdown_handler) argv = [x for x in argv if x.strip()] if len(argv) > 1: print( f"ERROR: Unrecognized command line argument '{argv[1]}'", file=sys.stderr, ) sys.exit(1) working_dir = Path(FLAGS.working_dir or mkdtemp(prefix="compiler_gym-service-")) (working_dir / "logs").mkdir(exist_ok=True, parents=True) FLAGS.log_dir = str(working_dir / "logs") logging.get_absl_handler().use_absl_log_file() logging.set_verbosity(dbg.get_logging_level()) # Create the service. 
server = grpc.server( futures.ThreadPoolExecutor(max_workers=FLAGS.rpc_service_threads), options=connection.GRPC_CHANNEL_OPTIONS, ) service = CompilerGymService( working_directory=working_dir, compilation_session_type=compilation_session_type, ) compiler_gym_service_pb2_grpc.add_CompilerGymServiceServicer_to_server( service, server ) address = f"0.0.0.0:{FLAGS.port}" if FLAGS.port else "0.0.0.0:0" port = server.add_insecure_port(address) with atomic_file_write(working_dir / "port.txt", fileobj=True, mode="w") as f: f.write(str(port)) with atomic_file_write(working_dir / "pid.txt", fileobj=True, mode="w") as f: f.write(str(os.getpid())) logging.info( "Service %s listening on %d, PID = %d", working_dir, port, os.getpid() ) server.start() # Block on the RPC service in a separate thread. This enables the # current thread to handle the shutdown routine. server_thread = Thread(target=server.wait_for_termination) server_thread.start() # Block until the shutdown signal is received. shutdown_signal.wait() logging.info("Shutting down the RPC service") server.stop(60).wait() server_thread.join() logging.info("Service closed") if len(service.sessions): print( "ERROR: Killing a service with", plural(len(service.sessions), "active session", "active sessions"), file=sys.stderr, ) sys.exit(6) app.run(main)
CompilerGym-development
compiler_gym/service/runtime/create_and_run_compiler_gym_service.py
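A minimal sketch (not part of the repository) of how a client can discover a locally launched service: the runtime above writes the bound port and process id into "port.txt" and "pid.txt" under its working directory, so reading those files is enough to build a gRPC address. The helper name and the example path are hypothetical.

from pathlib import Path

def read_service_address(working_dir: Path) -> str:
    """Return a "host:port" address for a service started in `working_dir`."""
    port = int((working_dir / "port.txt").read_text())  # written by the service on startup
    return f"localhost:{port}"

# Example (assumes a service was started with --working_dir=/tmp/example-service):
# address = read_service_address(Path("/tmp/example-service"))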
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import traceback from contextlib import contextmanager from pathlib import Path from threading import Lock from typing import Dict, Optional from grpc import StatusCode from compiler_gym.service.compilation_session import CompilationSession from compiler_gym.service.proto import AddBenchmarkReply, AddBenchmarkRequest from compiler_gym.service.proto import ( CompilerGymServiceServicer as CompilerGymServiceServicerStub, ) from compiler_gym.service.proto import ( EndSessionReply, EndSessionRequest, ForkSessionReply, ForkSessionRequest, GetSpacesReply, GetSpacesRequest, GetVersionReply, GetVersionRequest, SendSessionParameterReply, SendSessionParameterRequest, StartSessionReply, StartSessionRequest, StepReply, StepRequest, ) from compiler_gym.service.runtime.benchmark_cache import BenchmarkCache from compiler_gym.util.version import __version__ logger = logging.getLogger(__name__) # NOTE(cummins): The CompilerGymService class is used in a subprocess by a # compiler service, so code coverage tracking does not work. As such we use "# # pragma: no cover" annotation for all definitions in this file. @contextmanager def exception_to_grpc_status(context): # pragma: no cover def handle_exception_as(exception, code): exception_trace = "".join( traceback.TracebackException.from_exception(exception).format() ) logger.warning("%s", exception_trace) context.set_code(code) context.set_details(str(exception)) try: yield except ValueError as e: handle_exception_as(e, StatusCode.INVALID_ARGUMENT) except LookupError as e: handle_exception_as(e, StatusCode.NOT_FOUND) except NotImplementedError as e: handle_exception_as(e, StatusCode.UNIMPLEMENTED) except FileNotFoundError as e: handle_exception_as(e, StatusCode.UNIMPLEMENTED) except TypeError as e: handle_exception_as(e, StatusCode.FAILED_PRECONDITION) except TimeoutError as e: handle_exception_as(e, StatusCode.DEADLINE_EXCEEDED) except Exception as e: # pylint: disable=broad-except handle_exception_as(e, StatusCode.INTERNAL) class CompilerGymService(CompilerGymServiceServicerStub): # pragma: no cover def __init__(self, working_directory: Path, compilation_session_type): """Constructor. :param working_directory: The working directory for this service. :param compilation_session_type: The :class:`CompilationSession <compiler_gym.service.CompilationSession>` type that this service implements. 
""" self.working_directory = working_directory self.benchmarks = BenchmarkCache() self.compilation_session_type = compilation_session_type self.sessions: Dict[int, CompilationSession] = {} self.sessions_lock = Lock() self.next_session_id: int = 0 self.action_spaces = compilation_session_type.action_spaces self.observation_spaces = compilation_session_type.observation_spaces def GetVersion(self, request: GetVersionRequest, context) -> GetVersionReply: del context # Unused del request # Unused logger.debug("GetVersion()") return GetVersionReply( service_version=__version__, compiler_version=self.compilation_session_type.compiler_version, ) def GetSpaces(self, request: GetSpacesRequest, context) -> GetSpacesReply: del request # Unused logger.debug("GetSpaces()") with exception_to_grpc_status(context): return GetSpacesReply( action_space_list=self.action_spaces, observation_space_list=self.observation_spaces, ) def StartSession(self, request: StartSessionRequest, context) -> StartSessionReply: """Create a new compilation session.""" logger.debug( "StartSession(id=%d, benchmark=%s), %d active sessions", self.next_session_id, request.benchmark.uri, len(self.sessions) + 1, ) reply = StartSessionReply() if not request.benchmark: context.set_code(StatusCode.INVALID_ARGUMENT) context.set_details("No benchmark URI set for StartSession()") return reply with self.sessions_lock, exception_to_grpc_status(context): # If a benchmark definition was provided, add it. if request.benchmark.HasField("program"): self.benchmarks[request.benchmark.uri] = request.benchmark # Lookup the requested benchmark. if request.benchmark.uri not in self.benchmarks: context.set_code(StatusCode.NOT_FOUND) context.set_details("Benchmark not found") return reply session = self.compilation_session_type( working_directory=self.working_directory, action_space=self.action_spaces[request.action_space], benchmark=self.benchmarks[request.benchmark.uri], ) # Generate the initial observations. 
reply.observation.extend( [ session.get_observation(self.observation_spaces[obs]) for obs in request.observation_space ] ) reply.session_id = self.next_session_id self.sessions[reply.session_id] = session self.next_session_id += 1 return reply def ForkSession(self, request: ForkSessionRequest, context) -> ForkSessionReply: logger.debug( "ForkSession(id=%d), [%s]", request.session_id, self.next_session_id, ) reply = ForkSessionReply() with exception_to_grpc_status(context): session = self.sessions[request.session_id] self.sessions[reply.session_id] = session.fork() reply.session_id = self.next_session_id self.next_session_id += 1 return reply def EndSession(self, request: EndSessionRequest, context) -> EndSessionReply: del context # Unused logger.debug( "EndSession(id=%d), %d sessions remaining", request.session_id, len(self.sessions) - 1, ) with self.sessions_lock: if request.session_id in self.sessions: del self.sessions[request.session_id] return EndSessionReply(remaining_sessions=len(self.sessions)) def Step(self, request: StepRequest, context) -> StepReply: logger.debug("Step()") reply = StepReply() if request.session_id not in self.sessions: context.set_code(StatusCode.NOT_FOUND) context.set_details(f"Session not found: {request.session_id}") return reply reply.action_had_no_effect = True with exception_to_grpc_status(context): session = self.sessions[request.session_id] for action in request.action: reply.end_of_session, nas, ahne = session.apply_action(action) reply.action_had_no_effect &= ahne if nas: reply.new_action_space.CopyFrom(nas) reply.observation.extend( [ session.get_observation(self.observation_spaces[obs]) for obs in request.observation_space ] ) return reply def AddBenchmark(self, request: AddBenchmarkRequest, context) -> AddBenchmarkReply: del context # Unused reply = AddBenchmarkReply() with self.sessions_lock: for benchmark in request.benchmark: self.benchmarks[benchmark.uri] = benchmark return reply def SendSessionParameter( self, request: SendSessionParameterRequest, context ) -> SendSessionParameterReply: reply = SendSessionParameterReply() if request.session_id not in self.sessions: context.set_code(StatusCode.NOT_FOUND) context.set_details(f"Session not found: {request.session_id}") return reply session = self.sessions[request.session_id] with exception_to_grpc_status(context): for param in request.parameter: # Handle each parameter in the session and generate a response. message = session.handle_session_parameter(param.key, param.value) # Use the builtin parameter handlers if not handled by a session. if message is None: message = self._handle_builtin_session_parameter( param.key, param.value ) if message is None: context.set_code(StatusCode.INVALID_ARGUMENT) context.set_details(f"Unknown parameter: {param.key}") return reply reply.reply.append(message) return reply def _handle_builtin_session_parameter(self, key: str, value: str) -> Optional[str]: """Handle a built-in session parameter. :param key: The parameter key. :param value: The parameter value. :return: The response message, or :code:`None` if the key is not understood. """ if key == "service.benchmark_cache.set_max_size_in_bytes": self.benchmarks.set_max_size_in_bytes = int(value) return value elif key == "service.benchmark_cache.get_max_size_in_bytes": return str(self.benchmarks.max_size_in_bytes) elif key == "service.benchmark_cache.get_size_in_bytes": return str(self.benchmarks.size_in_bytes) return None
CompilerGym-development
compiler_gym/service/runtime/compiler_gym_service.py
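A minimal sketch (not part of the repository) showing how the exception_to_grpc_status context manager above maps Python exceptions onto gRPC status codes. The FakeContext class is a made-up stand-in for the grpc.ServicerContext that the real server passes in.

from grpc import StatusCode

from compiler_gym.service.runtime.compiler_gym_service import exception_to_grpc_status

class FakeContext:
    """A stand-in for grpc.ServicerContext that records the reported status."""

    def __init__(self):
        self.code, self.details = None, None

    def set_code(self, code):
        self.code = code

    def set_details(self, details):
        self.details = details

context = FakeContext()
with exception_to_grpc_status(context):
    raise ValueError("bad argument")  # swallowed and reported via the context

assert context.code == StatusCode.INVALID_ARGUMENT
assert context.details == "bad argument"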
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import compiler_gym.envs.loop_tool # noqa from compiler_gym import config from compiler_gym.envs.compiler_env import CompilerEnv from compiler_gym.envs.gcc import GccEnv if config.enable_llvm_env: from compiler_gym.envs.llvm.llvm_env import LlvmEnv # noqa: F401 if config.enable_mlir_env: from compiler_gym.envs.mlir.mlir_env import MlirEnv # noqa: F401 from compiler_gym.util.registration import COMPILER_GYM_ENVS __all__ = [ "COMPILER_GYM_ENVS", "CompilerEnv", "GccEnv", ] if config.enable_llvm_env: __all__.append("LlvmEnv") if config.enable_mlir_env: __all__.append("MlirEnv")
CompilerGym-development
compiler_gym/envs/__init__.py
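A minimal sketch (not part of the repository): because the LLVM and MLIR environment classes are only exported when the corresponding build-time config flags are enabled, portable client code can consult __all__ before importing them.

import compiler_gym.envs as envs

if "LlvmEnv" in envs.__all__:
    from compiler_gym.envs import LlvmEnv  # noqa: F401  only present in LLVM-enabled builds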
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """This module defines the OpenAI gym interface for compilers.""" from abc import ABC, abstractmethod from typing import Iterable, List, Optional, Tuple, Union import gym from deprecated.sphinx import deprecated from gym.spaces import Space from compiler_gym.compiler_env_state import CompilerEnvState from compiler_gym.datasets import Benchmark, BenchmarkUri, Dataset from compiler_gym.spaces import ActionSpace, Reward from compiler_gym.util.gym_type_hints import ( ActionType, ObservationType, OptionalArgumentValue, StepType, ) from compiler_gym.validation_result import ValidationResult from compiler_gym.views import ObservationSpaceSpec, ObservationView, RewardView class CompilerEnv(gym.Env, ABC): """An OpenAI gym environment for compiler optimizations. The easiest way to create a CompilerGym environment is to call :code:`gym.make()` on one of the registered environments: >>> env = gym.make("llvm-v0") See :code:`compiler_gym.COMPILER_GYM_ENVS` for a list of registered environment names. Alternatively, an environment can be constructed directly, such as by connecting to a running compiler service at :code:`localhost:8080` (see :doc:`this document </compiler_gym/service>` for more details): >>> env = ClientServiceCompilerEnv( ... service="localhost:8080", ... observation_space="features", ... reward_space="runtime", ... rewards=[env_reward_spaces], ... ) Once constructed, an environment can be used in exactly the same way as a regular :code:`gym.Env`, e.g. >>> observation = env.reset() >>> cumulative_reward = 0 >>> for i in range(100): >>> action = env.action_space.sample() >>> observation, reward, done, info = env.step(action) >>> cumulative_reward += reward >>> if done: >>> break >>> print(f"Reward after {i} steps: {cumulative_reward}") Reward after 100 steps: -0.32123 """ @abstractmethod def __init__(self): """Construct an environment. Do not construct an environment directly. Use :code:`gym.make()` on one of the registered environments: >>> with gym.make("llvm-v0") as env: ... pass # Use environment """ raise NotImplementedError("abstract class") @abstractmethod def close(self): """Close the environment. Once closed, :func:`reset` must be called before the environment is used again. .. note:: You must make sure to call :code:`env.close()` on a CompilerGym environment when you are done with it. This is needed to perform manual tidying up of temporary files and processes. See :ref:`the FAQ <faq:Do I need to call env.close()?>` for more details. """ raise NotImplementedError("abstract method") @property @abstractmethod def observation_space_spec(self) -> ObservationSpaceSpec: raise NotImplementedError("abstract method") @observation_space_spec.setter @abstractmethod def observation_space_spec( self, observation_space_spec: Optional[ObservationSpaceSpec] ): raise NotImplementedError("abstract method") @property @abstractmethod def reward_space_spec(self) -> Optional[Reward]: raise NotImplementedError("abstract method") @reward_space_spec.setter @abstractmethod def reward_space_spec(self, val: Optional[Reward]): raise NotImplementedError("abstract method") @property @abstractmethod def benchmark(self) -> Benchmark: """Get or set the benchmark to use. :getter: Get :class:`Benchmark <compiler_gym.datasets.Benchmark>` that is currently in use. :setter: Set the benchmark to use. 
Either a :class:`Benchmark <compiler_gym.datasets.Benchmark>` instance, or the URI of a benchmark as in :meth:`env.datasets.benchmark_uris() <compiler_gym.datasets.Datasets.benchmark_uris>`. .. note:: Setting a new benchmark has no effect until :func:`env.reset() <compiler_gym.envs.CompilerEnv.reset>` is called. """ raise NotImplementedError("abstract method") @benchmark.setter @abstractmethod def benchmark(self, benchmark: Optional[Union[str, Benchmark, BenchmarkUri]]): raise NotImplementedError("abstract method") @property @abstractmethod def datasets(self) -> Iterable[Dataset]: raise NotImplementedError("abstract method") @datasets.setter @abstractmethod def datasets(self, datasets: Iterable[Dataset]): raise NotImplementedError("abstract method") @property @abstractmethod def episode_walltime(self) -> float: """Return the amount of time in seconds since the last call to :meth:`reset() <compiler_gym.envs.CompilerEnv.reset>`. """ raise NotImplementedError("abstract method") @property @abstractmethod def in_episode(self) -> bool: """Whether the service is ready for :func:`step` to be called, i.e. :func:`reset` has been called and :func:`close` has not. :return: :code:`True` if in an episode, else :code:`False`. """ raise NotImplementedError("abstract method") @property @abstractmethod def episode_reward(self) -> Optional[float]: """If :func:`CompilerEnv.reward_space <compiler_gym.envs.CompilerGym.reward_space>` is set, this value is the sum of all rewards for the current episode. """ raise NotImplementedError("abstract method") @episode_reward.setter @abstractmethod def episode_reward(self, episode_reward: Optional[float]): raise NotImplementedError("abstract method") @property @abstractmethod def actions(self) -> List[ActionType]: raise NotImplementedError("abstract method") @property @abstractmethod def version(self) -> str: """The version string of the compiler service.""" raise NotImplementedError("abstract method") @property @abstractmethod def compiler_version(self) -> str: """The version string of the underlying compiler that this service supports.""" raise NotImplementedError("abstract method") @property @abstractmethod def state(self) -> CompilerEnvState: """The tuple representation of the current environment state.""" raise NotImplementedError("abstract method") @property @abstractmethod def action_space(self) -> ActionSpace: """The current action space. :getter: Get the current action space. :setter: Set the action space to use. Must be an entry in :code:`action_spaces`. If :code:`None`, the default action space is selected. """ raise NotImplementedError("abstract method") @action_space.setter @abstractmethod def action_space(self, action_space: Optional[str]): raise NotImplementedError("abstract method") @property @abstractmethod def action_spaces(self) -> List[ActionSpace]: """A list of supported action space names.""" raise NotImplementedError("abstract method") @action_spaces.setter @abstractmethod def action_spaces(self, action_spaces: List[str]): raise NotImplementedError("abstract method") @property @abstractmethod def reward_space(self) -> Optional[Reward]: """The default reward space that is used to return a reward value from :func:`~step()`. :getter: Returns a :class:`Reward <compiler_gym.spaces.Reward>`, or :code:`None` if not set. :setter: Set the default reward space. 
""" raise NotImplementedError("abstract method") @reward_space.setter @abstractmethod def reward_space(self, reward_space: Optional[Union[str, Reward]]) -> None: raise NotImplementedError("abstract method") @property @abstractmethod def observation_space(self) -> Optional[Space]: """The observation space that is used to return an observation value in :func:`~step()`. :getter: Returns the underlying observation space, or :code:`None` if not set. :setter: Set the default observation space. """ raise NotImplementedError("abstract method") @observation_space.setter @abstractmethod def observation_space( self, observation_space: Optional[Union[str, ObservationSpaceSpec]] ) -> None: raise NotImplementedError("abstract method") @property @abstractmethod def observation(self) -> ObservationView: """A view of the available observation spaces that permits on-demand computation of observations. """ raise NotImplementedError("abstract method") @observation.setter @abstractmethod def observation(self, observation: ObservationView) -> None: raise NotImplementedError("abstract method") @property @abstractmethod def reward_range(self) -> Tuple[float, float]: """A tuple indicating the range of reward values. Default range is (-inf, +inf). """ raise NotImplementedError("abstract method") @property @abstractmethod def reward(self) -> RewardView: """A view of the available reward spaces that permits on-demand computation of rewards. """ raise NotImplementedError("abstract method") @reward.setter @abstractmethod def reward(self, reward: RewardView) -> None: raise NotImplementedError("abstract method") @abstractmethod def fork(self) -> "CompilerEnv": """Fork a new environment with exactly the same state. This creates a duplicate environment instance with the current state. The new environment is entirely independently of the source environment. The user must call :meth:`close() <compiler_gym.envs.CompilerEnv.close>` on the original and new environments. If not already in an episode, :meth:`reset() <compiler_gym.envs.CompilerEnv.reset>` is called. Example usage: >>> env = gym.make("llvm-v0") >>> env.reset() # ... use env >>> new_env = env.fork() >>> new_env.state == env.state True >>> new_env.step(1) == env.step(1) True .. note:: The client/service implementation of CompilerGym means that the forked and base environments share a common backend resource. This means that if either of them crash, such as due to a compiler assertion, both environments must be reset. :return: A new environment instance. """ raise NotImplementedError("abstract method") @abstractmethod def reset( # pylint: disable=arguments-differ self, benchmark: Optional[Union[str, Benchmark]] = None, action_space: Optional[str] = None, observation_space: Union[ OptionalArgumentValue, str, ObservationSpaceSpec ] = OptionalArgumentValue.UNCHANGED, reward_space: Union[ OptionalArgumentValue, str, Reward ] = OptionalArgumentValue.UNCHANGED, timeout: float = 300, ) -> Optional[ObservationType]: """Reset the environment state. This method must be called before :func:`step()`. :param benchmark: The name of the benchmark to use. If provided, it overrides any value that was set during :func:`__init__`, and becomes subsequent calls to :code:`reset()` will use this benchmark. If no benchmark is provided, and no benchmark was provided to :func:`__init___`, the service will randomly select a benchmark to use. :param action_space: The name of the action space to use. 
If provided, it overrides any value that set during :func:`__init__`, and subsequent calls to :code:`reset()` will use this action space. If no action space is provided, the default action space is used. :param observation_space: Compute and return observations at each :func:`step()` from this space. Accepts a string name or an :class:`ObservationSpaceSpec <compiler_gym.views.ObservationSpaceSpec>`. If :code:`None`, :func:`step()` returns :code:`None` for the observation value. If :code:`OptionalArgumentValue.UNCHANGED` (the default value), the observation space remains unchanged from the previous episode. For available spaces, see :class:`env.observation.spaces <compiler_gym.views.ObservationView>`. :param reward_space: Compute and return reward at each :func:`step()` from this space. Accepts a string name or a :class:`Reward <compiler_gym.spaces.Reward>`. If :code:`None`, :func:`step()` returns :code:`None` for the reward value. If :code:`OptionalArgumentValue.UNCHANGED` (the default value), the observation space remains unchanged from the previous episode. For available spaces, see :class:`env.reward.spaces <compiler_gym.views.RewardView>`. :param timeout: The maximum number of seconds to wait for reset to succeed. :return: The initial observation. :raises BenchmarkInitError: If the benchmark is invalid. This can happen if the benchmark contains code that the compiler does not support, or because of some internal error within the compiler. In this case, another benchmark must be used. :raises TypeError: If no benchmark has been set, and the environment does not have a default benchmark to select from. """ raise NotImplementedError("abstract method") @abstractmethod def step( self, action: ActionType, observation_spaces: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None, reward_spaces: Optional[Iterable[Union[str, Reward]]] = None, observations: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None, rewards: Optional[Iterable[Union[str, Reward]]] = None, timeout: float = 300, ) -> StepType: """Take a step. :param action: An action. :param observation_spaces: A list of observation spaces to compute observations from. If provided, this changes the :code:`observation` element of the return tuple to be a list of observations from the requested spaces. The default :code:`env.observation_space` is not returned. :param reward_spaces: A list of reward spaces to compute rewards from. If provided, this changes the :code:`reward` element of the return tuple to be a list of rewards from the requested spaces. The default :code:`env.reward_space` is not returned. :param timeout: The maximum number of seconds to wait for the step to succeed. Accepts a float value. The default is 300 seconds. :return: A tuple of observation, reward, done, and info. Observation and reward are None if default observation/reward is not set. """ raise NotImplementedError("abstract method") @abstractmethod def multistep( self, actions: Iterable[ActionType], observation_spaces: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None, reward_spaces: Optional[Iterable[Union[str, Reward]]] = None, observations: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None, rewards: Optional[Iterable[Union[str, Reward]]] = None, timeout: float = 300, ): """Take a sequence of steps and return the final observation and reward. :param action: A sequence of actions to apply in order. :param observation_spaces: A list of observation spaces to compute observations from. 
If provided, this changes the :code:`observation` element of the return tuple to be a list of observations from the requested spaces. The default :code:`env.observation_space` is not returned. :param reward_spaces: A list of reward spaces to compute rewards from. If provided, this changes the :code:`reward` element of the return tuple to be a list of rewards from the requested spaces. The default :code:`env.reward_space` is not returned. :param timeout: The maximum number of seconds to wait for the steps to succeed. Accepts a float value. The default is 300 seconds. :return: A tuple of observation, reward, done, and info. Observation and reward are None if default observation/reward is not set. """ raise NotImplementedError("abstract method") @abstractmethod def render( self, mode="human", ) -> Optional[str]: """Render the environment. :param mode: The render mode to use. :raises TypeError: If a default observation space is not set, or if the requested render mode does not exist. """ raise NotImplementedError("abstract method") @deprecated( version="0.2.5", reason="Use env.action_space.to_string(env.actions) instead" ) def commandline(self) -> str: """Interface for :class:`CompilerEnv <compiler_gym.envs.CompilerEnv>` subclasses to provide an equivalent commandline invocation to the current environment state. See also :meth:`commandline_to_actions() <compiler_gym.envs.CompilerEnv.commandline_to_actions>`. :return: A string commandline invocation. """ raise NotImplementedError("abstract method") @deprecated( version="0.2.5", reason='Use env.action_space.from_string("...") instead' ) def commandline_to_actions(self, commandline: str) -> List[ActionType]: """Interface for :class:`CompilerEnv <compiler_gym.envs.CompilerEnv>` subclasses to convert from a commandline invocation to a sequence of actions. See also :meth:`commandline() <compiler_gym.envs.CompilerEnv.commandline>`. :return: A list of actions. """ raise NotImplementedError("abstract method") @abstractmethod def apply(self, state: CompilerEnvState) -> None: # noqa """Replay this state on the given environment. :param state: A :class:`CompilerEnvState <compiler_gym.CompilerEnvState>` instance. :raises ValueError: If this state cannot be applied. """ raise NotImplementedError("abstract method") @abstractmethod def validate(self, state: Optional[CompilerEnvState] = None) -> ValidationResult: """Validate an environment's state. :param state: A state to environment. If not provided, the current state is validated. :returns: A :class:`ValidationResult <compiler_gym.ValidationResult>`. """ raise NotImplementedError("abstract method")
CompilerGym-development
compiler_gym/envs/compiler_env.py
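A minimal sketch (not part of the repository) of the step()/multistep() contract documented above: when observation_spaces or reward_spaces are passed, the observation and reward elements of the returned tuple become lists with one entry per requested space. It assumes an LLVM-enabled build of CompilerGym; the space names "Ir" and "IrInstructionCount" are examples from the LLVM environment.

import gym

import compiler_gym  # noqa: F401  registers the CompilerGym environments

with gym.make("llvm-v0") as env:
    env.reset()
    # Apply three actions in one round trip and request two observations and one reward.
    observations, rewards, done, info = env.multistep(
        actions=[0, 1, 2],
        observation_spaces=["Ir", "IrInstructionCount"],
        reward_spaces=["IrInstructionCount"],
    )
    assert len(observations) == 2 and len(rewards) == 1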
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Register the loop_tool environment and reward.""" from pathlib import Path from typing import Iterable from compiler_gym.datasets import Benchmark, Dataset, benchmark from compiler_gym.datasets.uri import BenchmarkUri from compiler_gym.spaces import Reward from compiler_gym.util.registration import register from compiler_gym.util.runfiles_path import runfiles_path LOOP_TOOL_SERVICE_BINARY: Path = runfiles_path( "compiler_gym/envs/loop_tool/service/compiler_gym-loop_tool-service" ) class FLOPSReward(Reward): """ `loop_tool` uses "floating point operations per second" as its default reward space. """ def __init__(self): super().__init__( name="flops", observation_spaces=["flops"], default_value=0, default_negates_returns=True, deterministic=False, platform_dependent=True, ) self.previous_flops = None def reset(self, benchmark: str, observation_view): del benchmark # unused self.previous_flops = observation_view["flops"] def update(self, action, observations, observation_view): del action del observation_view if self.previous_flops is None: self.previous_flops = observations[0] return self.previous_flops reward = float(observations[0] - self.previous_flops) self.previous_flops = observations[0] return reward class LoopToolCUDADataset(Dataset): def __init__(self, *args, **kwargs): super().__init__( name="benchmark://loop_tool-cuda-v0", license="MIT", description="loop_tool dataset", ) def benchmark_uris(self) -> Iterable[str]: return (f"loop_tool-cuda-v0/{i}" for i in range(1, 1024 * 1024 * 8)) def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark: return Benchmark(proto=benchmark.BenchmarkProto(uri=str(uri))) class LoopToolCPUDataset(Dataset): def __init__(self, *args, **kwargs): super().__init__( name="benchmark://loop_tool-cpu-v0", license="MIT", description="loop_tool dataset", ) def benchmark_uris(self) -> Iterable[str]: return (f"loop_tool-cpu-v0/{i}" for i in range(1, 1024 * 1024 * 8)) def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark: return Benchmark(proto=benchmark.BenchmarkProto(uri=str(uri))) register( id="loop_tool-v0", entry_point="compiler_gym.service.client_service_compiler_env:ClientServiceCompilerEnv", kwargs={ "datasets": [LoopToolCPUDataset(), LoopToolCUDADataset()], "observation_space": "action_state", "reward_space": "flops", "rewards": [FLOPSReward()], "service": LOOP_TOOL_SERVICE_BINARY, }, )
CompilerGym-development
compiler_gym/envs/loop_tool/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Define the loop_tool environment.""" import logging import time from functools import reduce from pathlib import Path from typing import Optional, Tuple import loop_tool_py as lt import numpy as np import pkg_resources from compiler_gym.errors import EnvironmentNotSupported from compiler_gym.service import CompilationSession from compiler_gym.service.proto import ( ActionSpace, Benchmark, DoubleRange, Event, Int64Box, Int64Range, Int64Tensor, NamedDiscreteSpace, ObservationSpace, Space, StringSpace, ) logger = logging.getLogger(__name__) class LoopToolCompilationSession(CompilationSession): """Represents an instance of an interactive loop_tool session.""" compiler_version: str = pkg_resources.get_distribution("loop-tool-py").version # keep it simple for now: 1 variable, 1 nest action_spaces = [ ActionSpace( name="simple", space=Space( # shift around a single pre-split order, changing the size of splits named_discrete=NamedDiscreteSpace( name=["toggle_mode", "up", "down", "toggle_thread"], ), ), ), ActionSpace( name="split", space=Space( # potentially define new splits named_discrete=NamedDiscreteSpace( name=["toggle_mode", "up", "down", "toggle_thread", "split"], ), ), ), ] observation_spaces = [ ObservationSpace( name="flops", space=Space(double_value=DoubleRange()), deterministic=False, platform_dependent=True, default_observation=Event( double_value=0, ), ), ObservationSpace( name="loop_tree", space=Space( string_value=StringSpace(length_range=Int64Range(min=0)), ), deterministic=True, platform_dependent=False, default_observation=Event( string_value="", ), ), ObservationSpace( name="action_state", space=Space( int64_box=Int64Box( low=Int64Tensor(shape=[1], value=[0]), high=Int64Tensor(shape=[1], value=[2**36]), ), ), deterministic=True, platform_dependent=False, default_observation=Event( int64_tensor=Int64Tensor(shape=[1], value=[0]), ), ), ] def __init__( self, working_directory: Path, action_space: ActionSpace, benchmark: Benchmark ): super().__init__(working_directory, action_space, benchmark) self.action_space = action_space if "cuda" in benchmark.uri: self.backend = "cuda" lt.set_default_hardware("cuda") else: self.backend = "cpu" if self.backend not in lt.backends(): raise EnvironmentNotSupported( f"Failed to load {self.backend} dataset for loop_tool. Have you installed all required dependecies? See <https://facebookresearch.github.io/CompilerGym/envs/loop_tool.html#installation> for details. " ) self.ir = lt.IR() self.var = self.ir.create_var("a") r0 = self.ir.create_node("read", [], [self.var]) r1 = self.ir.create_node("read", [], [self.var]) add = self.ir.create_node("add", [r0, r1], [self.var]) w = self.ir.create_node("write", [add], [self.var]) self.ir.set_inputs([r0, r1]) self.ir.set_outputs([w]) self.size = int(benchmark.uri.split("/")[-1]) self.Ap = np.random.randn(self.size) self.Bp = np.random.randn(self.size) self.order = [(self.size, 0), (1, 0), (1, 0)] self.thread = [1, 0, 0] self.cursor = 0 self.mode = "size" logger.info("Started a compilation session for %s", benchmark.uri) def resize(self, increment): """ The idea is pull from or add to the parent loop. 
Three mutations possible to any size: A) x, y -> x + 1, 0 remove the tail, increase loop size, shrink parent B) x, y -> x, 0 only remove the tail, add to parent C) x, 0 -> x - 1, 0 if no tail, shrink the loop size, increase parent note: this means tails can never exist on innermost loops. this makes good sense :) A) [(a, b), (x, y), ...k] -> [(a', b'), (x + 1, 0), ...k] a * (x * k + y) + b = a' * (x + 1) * k + b' a' = (a * (x * k + y) + b) // ((x + 1) * k) b' = " " % " " B) [(a, b), (x, y), ...k] -> [(a', b'), (x, 0), ...k] a * (x * k + y) + b = a' * (x) * k + b' a' = (a * (x * k + y) + b) // ((x) * k) b' = " " % " " C) [(a, b), (x, y), ...k] -> [(a', b'), (x - 1, 0), ...k] a * (x * k + y) + b = a' * (x - 1) * k + b' a' = (a * (x * k + y) + b) // ((x - 1) * k) b' = " " % " " example interaction model: 1. cursor = 1 [1024, 1, 1] 2. up [512, 2, 1] 3. up [(341,1), 3, 1] 4. up [256, 4, 1] 5. cursor = 2, up [256, 2, 2] 6. up [256, (1, 1), 3] 7. cursor = 1, down [(341, 1), 1, 3] 8. cursor = 2, down [(341, 1), (1, 1), 2] 9. cursor = 1, down [512, 1, 2]""" if self.cursor == 0: return parent_size = self.order[self.cursor - 1] a = parent_size[0] b = parent_size[1] size = self.order[self.cursor] x = size[0] y = size[1] def lam(v, x): return v * x[0] + x[1] k = reduce(lam, self.order[self.cursor + 1 :][::-1], 1) if increment == -1 and y: increment = 0 if (x + increment) < 1: return if (x + increment) > self.size: return n = a * x * k + b d = (x + increment) * k a_ = n // d b_ = n % d if a_ < 1: return if a_ > self.size: return self.order[self.cursor - 1] = (a_, b_) self.order[self.cursor] = (x + increment, 0) end_size = reduce(lam, self.order[::-1], 1) assert ( end_size == self.size ), f"{end_size} != {self.size} ({a}, {b}), ({x}, {y}) -> ({a_}, {b_}), ({x + increment}, 0)" def apply_action(self, action: Event) -> Tuple[bool, Optional[ActionSpace], bool]: if not action.HasField("int64_value"): raise ValueError("Invalid action. 
int64_value expected.") choice_index = action.int64_value if choice_index < 0 or choice_index >= len( self.action_space.space.named_discrete.name ): raise ValueError("Out-of-range") logger.info("Applied action %d", choice_index) act = self.action_space.space.named_discrete.name[choice_index] if self.mode not in ["size", "select"]: raise RuntimeError("Invalid mode set: {}".format(self.mode)) if act == "toggle_mode": if self.mode == "size": self.mode = "select" elif self.mode == "select": self.mode = "size" if act == "toggle_thread": self.thread[self.cursor] = not self.thread[self.cursor] if act == "down": # always loop around if self.mode == "size": self.resize(-1) elif self.mode == "select": next_cursor = (self.cursor - 1) % len(self.order) self.cursor = next_cursor if act == "up": # always loop around if self.mode == "size": self.resize(1) elif self.mode == "select": next_cursor = (self.cursor + 1) % len(self.order) self.cursor = next_cursor return False, None, False def lower(self): for n in self.ir.nodes: o = [(self.var, k) for k in self.order] self.ir.set_order(n, o) # always disable innermost self.ir.disable_reuse(n, len(o) - 1) loop_tree = lt.LoopTree(self.ir) parallel = set() t = loop_tree.roots[0] for b in self.thread: if b: parallel.add(t) if self.backend == "cpu": loop_tree.annotate(t, "cpu_parallel") t = loop_tree.children(t)[0] return loop_tree, parallel def flops(self): loop_tree, parallel = self.lower() if self.backend == "cuda": c = lt.cuda(loop_tree, parallel) else: c = lt.cpu(loop_tree) A = lt.Tensor(self.size) B = lt.Tensor(self.size) C = lt.Tensor(self.size) A.set(self.Ap) B.set(self.Bp) iters = 1000 warmup = 50 for i in range(warmup): c([A, B, C]) t = time.time() for i in range(iters - 1): c([A, B, C], False) c([A, B, C]) t_ = time.time() flops = self.size * iters / (t_ - t) / 1e9 return flops def get_observation(self, observation_space: ObservationSpace) -> Event: if observation_space.name == "action_state": # cursor, (size, tail) o = self.order[self.cursor] return Event( int64_tensor=Int64Tensor(shape=[3], value=[self.cursor, o[0], o[1]]) ) elif observation_space.name == "flops": return Event(double_value=self.flops()) elif observation_space.name == "loop_tree": loop_tree, parallel = self.lower() return Event( string_value=loop_tree.dump( lambda x: "[thread]" if x in parallel else "" ) ) else: raise KeyError(observation_space.name)
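# A small self-contained check of the arithmetic behind resize() above: the
# loop order is a list of (size, tail) pairs whose reduction with
# v * size + tail must always recover the total problem size. The concrete
# numbers follow the interaction model in the resize() docstring.
if __name__ == "__main__":
    from functools import reduce

    def total(order):
        return reduce(lambda v, x: v * x[0] + x[1], order[::-1], 1)

    size = 1024
    assert total([(size, 0), (1, 0), (1, 0)]) == size   # initial order
    assert total([(512, 0), (2, 0), (1, 0)]) == size    # after one "up" at cursor 1
    assert total([(341, 1), (3, 0), (1, 0)]) == size    # after a second "up"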
CompilerGym-development
compiler_gym/envs/loop_tool/service/loop_tool_compilation_session.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from enum import Enum class observation_spaces(Enum): Ir = "Ir" IrSha1 = "IrSha1" Bitcode = "Bitcode" BitcodeFile = "BitcodeFile" InstCount = "InstCount" Autophase = "Autophase" Programl = "Programl" ProgramlJson = "ProgramlJson" CpuInfo = "CpuInfo" IrInstructionCount = "IrInstructionCount" IrInstructionCountO0 = "IrInstructionCountO0" IrInstructionCountO3 = "IrInstructionCountO3" IrInstructionCountOz = "IrInstructionCountOz" ObjectTextSizeBytes = "ObjectTextSizeBytes" ObjectTextSizeO0 = "ObjectTextSizeO0" ObjectTextSizeO3 = "ObjectTextSizeO3" ObjectTextSizeOz = "ObjectTextSizeOz" TextSizeBytes = "TextSizeBytes" TextSizeO0 = "TextSizeO0" TextSizeO3 = "TextSizeO3" TextSizeOz = "TextSizeOz" IsBuildable = "IsBuildable" IsRunnable = "IsRunnable" Runtime = "Runtime" Buildtime = "Buildtime" Inst2vecPreprocessedText = "Inst2vecPreprocessedText" Inst2vecEmbeddingIndices = "Inst2vecEmbeddingIndices" Inst2vec = "Inst2vec" InstCountDict = "InstCountDict" InstCountNorm = "InstCountNorm" InstCountNormDict = "InstCountNormDict" AutophaseDict = "AutophaseDict" LexedIr = "LexedIr" class reward_spaces(Enum): IrInstructionCount = "IrInstructionCount" IrInstructionCountNorm = "IrInstructionCountNorm" IrInstructionCountO3 = "IrInstructionCountO3" IrInstructionCountOz = "IrInstructionCountOz" ObjectTextSizeBytes = "ObjectTextSizeBytes" ObjectTextSizeNorm = "ObjectTextSizeNorm" ObjectTextSizeO3 = "ObjectTextSizeO3" ObjectTextSizeOz = "ObjectTextSizeOz" TextSizeBytes = "TextSizeBytes" TextSizeNorm = "TextSizeNorm" TextSizeO3 = "TextSizeO3" TextSizeOz = "TextSizeOz" class actions(Enum): AddDiscriminators = "-add-discriminators" Adce = "-adce" AggressiveInstcombine = "-aggressive-instcombine" AlignmentFromAssumptions = "-alignment-from-assumptions" AlwaysInline = "-always-inline" Argpromotion = "-argpromotion" Attributor = "-attributor" Barrier = "-barrier" Bdce = "-bdce" BreakCritEdges = "-break-crit-edges" Simplifycfg = "-simplifycfg" CallsiteSplitting = "-callsite-splitting" CalledValuePropagation = "-called-value-propagation" CanonicalizeAliases = "-canonicalize-aliases" Consthoist = "-consthoist" Constmerge = "-constmerge" Constprop = "-constprop" CoroCleanup = "-coro-cleanup" CoroEarly = "-coro-early" CoroElide = "-coro-elide" CoroSplit = "-coro-split" CorrelatedPropagation = "-correlated-propagation" CrossDsoCfi = "-cross-dso-cfi" Deadargelim = "-deadargelim" Dce = "-dce" Die = "-die" Dse = "-dse" Reg2mem = "-reg2mem" DivRemPairs = "-div-rem-pairs" EarlyCseMemssa = "-early-cse-memssa" EarlyCse = "-early-cse" ElimAvailExtern = "-elim-avail-extern" EeInstrument = "-ee-instrument" Flattencfg = "-flattencfg" Float2int = "-float2int" Forceattrs = "-forceattrs" Inline = "-inline" InsertGcovProfiling = "-insert-gcov-profiling" GvnHoist = "-gvn-hoist" Gvn = "-gvn" Globaldce = "-globaldce" Globalopt = "-globalopt" Globalsplit = "-globalsplit" GuardWidening = "-guard-widening" Hotcoldsplit = "-hotcoldsplit" Ipconstprop = "-ipconstprop" Ipsccp = "-ipsccp" Indvars = "-indvars" Irce = "-irce" InferAddressSpaces = "-infer-address-spaces" Inferattrs = "-inferattrs" InjectTliMappings = "-inject-tli-mappings" Instsimplify = "-instsimplify" Instcombine = "-instcombine" Instnamer = "-instnamer" JumpThreading = "-jump-threading" Lcssa = "-lcssa" Licm = "-licm" LibcallsShrinkwrap = "-libcalls-shrinkwrap" LoadStoreVectorizer = "-load-store-vectorizer" 
LoopDataPrefetch = "-loop-data-prefetch" LoopDeletion = "-loop-deletion" LoopDistribute = "-loop-distribute" LoopFusion = "-loop-fusion" LoopGuardWidening = "-loop-guard-widening" LoopIdiom = "-loop-idiom" LoopInstsimplify = "-loop-instsimplify" LoopInterchange = "-loop-interchange" LoopLoadElim = "-loop-load-elim" LoopPredication = "-loop-predication" LoopReroll = "-loop-reroll" LoopRotate = "-loop-rotate" LoopSimplifycfg = "-loop-simplifycfg" LoopSimplify = "-loop-simplify" LoopSink = "-loop-sink" LoopReduce = "-loop-reduce" LoopUnrollAndJam = "-loop-unroll-and-jam" LoopUnroll = "-loop-unroll" LoopUnswitch = "-loop-unswitch" LoopVectorize = "-loop-vectorize" LoopVersioningLicm = "-loop-versioning-licm" LoopVersioning = "-loop-versioning" Loweratomic = "-loweratomic" LowerConstantIntrinsics = "-lower-constant-intrinsics" LowerExpect = "-lower-expect" LowerGuardIntrinsic = "-lower-guard-intrinsic" Lowerinvoke = "-lowerinvoke" LowerMatrixIntrinsics = "-lower-matrix-intrinsics" Lowerswitch = "-lowerswitch" LowerWidenableCondition = "-lower-widenable-condition" Memcpyopt = "-memcpyopt" Mergefunc = "-mergefunc" Mergeicmps = "-mergeicmps" MldstMotion = "-mldst-motion" Sancov = "-sancov" NameAnonGlobals = "-name-anon-globals" NaryReassociate = "-nary-reassociate" Newgvn = "-newgvn" PgoMemopOpt = "-pgo-memop-opt" PartialInliner = "-partial-inliner" PartiallyInlineLibcalls = "-partially-inline-libcalls" PostInlineEeInstrument = "-post-inline-ee-instrument" Functionattrs = "-functionattrs" Mem2reg = "-mem2reg" PruneEh = "-prune-eh" Reassociate = "-reassociate" RedundantDbgInstElim = "-redundant-dbg-inst-elim" RpoFunctionattrs = "-rpo-functionattrs" RewriteStatepointsForGc = "-rewrite-statepoints-for-gc" Sccp = "-sccp" SlpVectorizer = "-slp-vectorizer" Sroa = "-sroa" Scalarizer = "-scalarizer" SeparateConstOffsetFromGep = "-separate-const-offset-from-gep" SimpleLoopUnswitch = "-simple-loop-unswitch" Sink = "-sink" SpeculativeExecution = "-speculative-execution" Slsr = "-slsr" StripDeadPrototypes = "-strip-dead-prototypes" StripDebugDeclare = "-strip-debug-declare" StripNondebug = "-strip-nondebug" Strip = "-strip" Tailcallelim = "-tailcallelim" Mergereturn = "-mergereturn" class action_descriptions(Enum): AddDiscriminators = "Add DWARF path discriminators" Adce = "Aggressive Dead Code Elimination" AggressiveInstcombine = "Combine pattern based expressions" AlignmentFromAssumptions = "Alignment from assumptions" AlwaysInline = "Inliner for always_inline functions" Argpromotion = "Promote 'by reference' arguments to scalars" Attributor = "Deduce and propagate attributes" Barrier = "A No-Op Barrier Pass" Bdce = "Bit-Tracking Dead Code Elimination" BreakCritEdges = "Break critical edges in CFG" Simplifycfg = "Simplify the CFG" CallsiteSplitting = "Call-site splitting" CalledValuePropagation = "Called Value Propagation" CanonicalizeAliases = "Canonicalize aliases" Consthoist = "Constant Hoisting" Constmerge = "Merge Duplicate Global Constants" Constprop = "Simple constant propagation" CoroCleanup = "Lower all coroutine related intrinsics" CoroEarly = "Lower early coroutine intrinsics" CoroElide = "Coroutine frame allocation elision and indirect calls replacement" CoroSplit = "Split coroutine into a set of functions driving its state machine" CorrelatedPropagation = "Value Propagation" CrossDsoCfi = "Cross-DSO CFI" Deadargelim = "Dead Argument Elimination" Dce = "Dead Code Elimination" Die = "Dead Instruction Elimination" Dse = "Dead Store Elimination" Reg2mem = "Demote all values to stack slots" 
DivRemPairs = "Hoist/decompose integer division and remainder" EarlyCseMemssa = "Early CSE w/ MemorySSA" EarlyCse = "Early CSE" ElimAvailExtern = "Eliminate Available Externally Globals" EeInstrument = ( "Instrument function entry/exit with calls to e.g. mcount()(pre inlining)" ) Flattencfg = "Flatten the CFG" Float2int = "Float to int" Forceattrs = "Force set function attributes" Inline = "Function Integration/Inlining" InsertGcovProfiling = "Insert instrumentation for GCOV profiling" GvnHoist = "Early GVN Hoisting of Expressions" Gvn = "Global Value Numbering" Globaldce = "Dead Global Elimination" Globalopt = "Global Variable Optimizer" Globalsplit = "Global splitter" GuardWidening = "Widen guards" Hotcoldsplit = "Hot Cold Splitting" Ipconstprop = "Interprocedural constant propagation" Ipsccp = "Interprocedural Sparse Conditional Constant Propagation" Indvars = "Induction Variable Simplification" Irce = "Inductive range check elimination" InferAddressSpaces = "Infer address spaces" Inferattrs = "Infer set function attributes" InjectTliMappings = "Inject TLI Mappings" Instsimplify = "Remove redundant instructions" Instcombine = "Combine redundant instructions" Instnamer = "Assign names to anonymous instructions" JumpThreading = "Jump Threading" Lcssa = "Loop-Closed SSA Form Pass" Licm = "Loop Invariant Code Motion" LibcallsShrinkwrap = "Conditionally eliminate dead library calls" LoadStoreVectorizer = "Vectorize load and Store instructions" LoopDataPrefetch = "Loop Data Prefetch" LoopDeletion = "Delete dead loops" LoopDistribute = "Loop Distribution" LoopFusion = "Loop Fusion" LoopGuardWidening = "Widen guards (within a single loop, as a loop pass)" LoopIdiom = "Recognize loop idioms" LoopInstsimplify = "Simplify instructions in loops" LoopInterchange = "Interchanges loops for cache reuse" LoopLoadElim = "Loop Load Elimination" LoopPredication = "Loop predication" LoopReroll = "Reroll loops" LoopRotate = "Rotate Loops" LoopSimplifycfg = "Simplify loop CFG" LoopSimplify = "Canonicalize natural loops" LoopSink = "Loop Sink" LoopReduce = "Loop Strength Reduction" LoopUnrollAndJam = "Unroll and Jam loops" LoopUnroll = "Unroll loops" LoopUnswitch = "Unswitch loops" LoopVectorize = "Loop Vectorization" LoopVersioningLicm = "Loop Versioning For LICM" LoopVersioning = "Loop Versioning" Loweratomic = "Lower atomic intrinsics to non-atomic form" LowerConstantIntrinsics = "Lower constant intrinsics" LowerExpect = "Lower 'expect' Intrinsics" LowerGuardIntrinsic = "Lower the guard intrinsic to normal control flow" Lowerinvoke = "Lower invoke and unwind, for unwindless code generators" LowerMatrixIntrinsics = "Lower the matrix intrinsics" Lowerswitch = "Lower SwitchInst's to branches" LowerWidenableCondition = "Lower the widenable condition to default true value" Memcpyopt = "MemCpy Optimization" Mergefunc = "Merge Functions" Mergeicmps = "Merge contiguous icmps into a memcmp" MldstMotion = "MergedLoadStoreMotion" Sancov = "Pass for instrumenting coverage on functions" NameAnonGlobals = "Provide a name to nameless globals" NaryReassociate = "Nary reassociation" Newgvn = "Global Value Numbering" PgoMemopOpt = "Optimize memory intrinsic using its size value profile" PartialInliner = "Partial Inliner" PartiallyInlineLibcalls = "Partially inline calls to library functions" PostInlineEeInstrument = ( "Instrument function entry/exit with calls to e.g. 
mcount()(post inlining)" ) Functionattrs = "Deduce function attributes" Mem2reg = "Promote Memory to Register" PruneEh = "Remove unused exception handling info" Reassociate = "Reassociate expressions" RedundantDbgInstElim = "Redundant Dbg Instruction Elimination" RpoFunctionattrs = "Deduce function attributes in RPO" RewriteStatepointsForGc = "Make relocations explicit at statepoints" Sccp = "Sparse Conditional Constant Propagation" SlpVectorizer = "SLP Vectorizer" Sroa = "Scalar Replacement Of Aggregates" Scalarizer = "Scalarize vector operations" SeparateConstOffsetFromGep = ( "Split GEPs to a variadic base and a constant offset for better CSE" ) SimpleLoopUnswitch = "Simple unswitch loops" Sink = "Code sinking" SpeculativeExecution = "Speculatively execute instructions" Slsr = "Straight line strength reduction" StripDeadPrototypes = "Strip Unused Function Prototypes" StripDebugDeclare = "Strip all llvm.dbg.declare intrinsics" StripNondebug = "Strip all symbols, except dbg symbols, from a module" Strip = "Strip all symbols from a module" Tailcallelim = "Tail Call Elimination" Mergereturn = "Unify function exit nodes"
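# A minimal usage sketch: the enums above are plain name -> string maps, so a
# pass's opt flag and description can be looked up directly. The specific
# entries chosen here are arbitrary examples.
if __name__ == "__main__":
    print(actions.Gvn.value)                          # "-gvn"
    print(action_descriptions.Gvn.value)              # "Global Value Numbering"
    print(observation_spaces.Autophase.value)         # "Autophase"
    print(reward_spaces.IrInstructionCountOz.value)   # "IrInstructionCountOz"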
CompilerGym-development
compiler_gym/envs/llvm/specs.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """This module defines reward spaces used by the LLVM environment.""" from typing import List, Optional from compiler_gym.datasets import Benchmark from compiler_gym.spaces.reward import Reward from compiler_gym.util.gym_type_hints import ActionType, ObservationType, RewardType from compiler_gym.views.observation import ObservationView class CostFunctionReward(Reward): """A reward function that uses a scalar observation space as a cost function. """ def __init__(self, cost_function: str, init_cost_function: str, **kwargs): """Constructor. :param cost_function: The ID of the observation space used to produce scalar costs. :param init_cost_function: The ID of an observation space that produces a scalar cost equivalent to cost_function before any actions are made. """ super().__init__(observation_spaces=[cost_function], **kwargs) self.cost_function: str = cost_function self.init_cost_function: str = init_cost_function self.previous_cost: Optional[ObservationType] = None def reset(self, benchmark: Benchmark, observation_view: ObservationView) -> None: """Called on env.reset(). Reset incremental progress.""" del benchmark # unused del observation_view # unused self.previous_cost = None def update( self, actions: List[ActionType], observations: List[ObservationType], observation_view: ObservationView, ) -> RewardType: """Called on env.step(). Compute and return new reward.""" del actions # unused cost: RewardType = observations[0] if self.previous_cost is None: self.previous_cost = observation_view[self.init_cost_function] reward = RewardType(self.previous_cost - cost) self.previous_cost = cost return reward class NormalizedReward(CostFunctionReward): """A cost function reward that is normalized to the initial value.""" def __init__(self, **kwargs): """Constructor.""" super().__init__(**kwargs) self.cost_norm: Optional[ObservationType] = None self.benchmark: Benchmark = None def reset(self, benchmark: str, observation_view: ObservationView) -> None: """Called on env.reset(). Reset incremental progress.""" super().reset(benchmark, observation_view) # The benchmark has changed so we must compute a new cost normalization # value. If the benchmark has not changed then the previously computed # value is still valid. if self.benchmark != benchmark: self.cost_norm = None self.benchmark = benchmark def update( self, actions: List[ActionType], observations: List[ObservationType], observation_view: ObservationView, ) -> RewardType: """Called on env.step(). Compute and return new reward.""" if self.cost_norm is None: self.cost_norm = self.get_cost_norm(observation_view) return super().update(actions, observations, observation_view) / self.cost_norm def get_cost_norm(self, observation_view: ObservationView) -> RewardType: """Return the value used to normalize costs.""" return observation_view[self.init_cost_function] class BaselineImprovementNormalizedReward(NormalizedReward): """A cost function reward that is normalized to improvement made by a baseline approach. 
""" def __init__(self, baseline_cost_function: str, **kwargs): super().__init__(**kwargs) self.baseline_cost_function: str = baseline_cost_function def get_cost_norm(self, observation_view: ObservationView) -> RewardType: """Return the value used to normalize costs.""" init_cost = observation_view[self.init_cost_function] baseline_cost = observation_view[self.baseline_cost_function] return max(init_cost - baseline_cost, 1)
CompilerGym-development
compiler_gym/envs/llvm/llvm_rewards.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import subprocess import tempfile import urllib.parse from compiler_gym.datasets import Benchmark, BenchmarkInitError from compiler_gym.service.proto import Benchmark as BenchmarkProto from compiler_gym.service.proto import File from compiler_gym.third_party.gccinvocation.gccinvocation import GccInvocation from compiler_gym.util.commands import Popen from compiler_gym.util.runfiles_path import transient_cache_path from compiler_gym.util.shell_format import join_cmd logger = logging.getLogger(__name__) class BenchmarkFromCommandLine(Benchmark): """A benchmark that has been constructed from a command line invocation. See :meth:`env.make_benchmark_from_command_line() <compiler_gym.envs.LlvmEnv.make_benchmark_from_command_line>`. """ def __init__(self, invocation: GccInvocation, bitcode: bytes, timeout: int): uri = f"benchmark://clang-v0/{urllib.parse.quote_plus(join_cmd(invocation.original_argv))}" super().__init__( proto=BenchmarkProto(uri=str(uri), program=File(contents=bitcode)) ) self.command_line = invocation.original_argv # Modify the commandline so that it takes the bitcode file as input. # # Strip the original sources from the build command, but leave any # object file inputs. sources = set(s for s in invocation.sources if not s.endswith(".o")) build_command = [arg for arg in invocation.original_argv if arg not in sources] # Convert any object file inputs to absolute paths since the backend # service will have a different working directory. # # TODO(github.com/facebookresearch/CompilerGym/issues/325): To support # distributed execution, we should embed the contents of these object # files in the benchmark proto. object_files = set(s for s in invocation.sources if s.endswith(".o")) build_command = [ os.path.abspath(arg) if arg in object_files else arg for arg in build_command ] # Append the new source to the build command and specify the absolute path # to the output. for i in range(len(build_command) - 2, -1, -1): if build_command[i] == "-o": del build_command[i + 1] del build_command[i] build_command += ["-xir", "$IN", "-o", str(invocation.output_path)] self.proto.dynamic_config.build_cmd.argument[:] = build_command self.proto.dynamic_config.build_cmd.outfile[:] = [str(invocation.output_path)] self.proto.dynamic_config.build_cmd.timeout_seconds = timeout def compile(self, env, timeout: int = 60) -> None: """This completes the compilation and linking of the final executable specified by the original command line. """ with tempfile.NamedTemporaryFile( dir=transient_cache_path("."), prefix="benchmark-", suffix=".bc" ) as f: bitcode_path = f.name env.write_bitcode(bitcode_path) # Set the placeholder for input path. cmd = list(self.proto.dynamic_config.build_cmd.argument).copy() cmd = [bitcode_path if c == "$IN" else c for c in cmd] logger.debug(f"$ {join_cmd(cmd)}") with Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, ) as lower: stdout, _ = lower.communicate(timeout=timeout) if lower.returncode: raise BenchmarkInitError( f"Failed to lower LLVM bitcode with error:\n" f"{stdout.decode('utf-8').rstrip()}\n" f"Running command: {join_cmd(cmd)}" )
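# A minimal usage sketch of the workflow this class supports. The command line
# and source file are placeholders, and the construction is assumed to go
# through LlvmEnv.make_benchmark_from_command_line() as referenced in the
# class docstring above; see that method's documentation for the exact options.
if __name__ == "__main__":
    import compiler_gym

    with compiler_gym.make("llvm-v0") as env:
        benchmark = env.make_benchmark_from_command_line("gcc -O2 app.c -o app")
        env.reset(benchmark=benchmark)
        env.step(env.action_space.sample())
        # Lower the optimized bitcode and re-run the original link step.
        benchmark.compile(env)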
CompilerGym-development
compiler_gym/envs/llvm/benchmark_from_command_line.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import List from compiler_gym.spaces import ActionSpace from compiler_gym.util.gym_type_hints import ActionType class LlvmCommandLine(ActionSpace): """An action space for LLVM that supports serializing / deserializing to opt command line. """ def to_string(self, actions: List[ActionType]) -> str: """Returns an LLVM :code:`opt` command line invocation for the given actions. :param actions: A list of actions to serialize. :returns: A command line string. """ return f"opt {self.wrapped.to_string(actions)} input.bc -o output.bc" def from_string(self, string: str) -> List[ActionType]: """Returns a list of actions from the given command line. :param string: A command line invocation. :return: A list of actions. :raises ValueError: In case the command line string is malformed. """ if string.startswith("opt "): string = string[len("opt ") :] if string.endswith(" input.bc -o output.bc"): string = string[: -len(" input.bc -o output.bc")] return self.wrapped.from_string(string)
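# A minimal round-trip sketch. In the LLVM environments the action space is an
# LlvmCommandLine instance (see compiler_gym/envs/llvm/llvm_env.py), so the
# episode's actions serialize to, and parse back from, an `opt` invocation.
if __name__ == "__main__":
    import compiler_gym

    with compiler_gym.make("llvm-v0") as env:
        env.reset()
        env.step(env.action_space.sample())
        cmd = env.action_space.to_string(env.actions)
        print(cmd)                                # "opt <flags> input.bc -o output.bc"
        print(env.action_space.from_string(cmd))  # recovers the action list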
CompilerGym-development
compiler_gym/envs/llvm/llvm_command_line.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Register the LLVM environments.""" import sys from itertools import product from compiler_gym.envs.llvm.benchmark_from_command_line import BenchmarkFromCommandLine from compiler_gym.envs.llvm.compute_observation import compute_observation from compiler_gym.envs.llvm.llvm_benchmark import ( ClangInvocation, get_system_library_flags, make_benchmark, make_benchmark_from_source, merge_benchmarks, split_benchmark_by_function, ) from compiler_gym.envs.llvm.llvm_command_line import LlvmCommandLine from compiler_gym.envs.llvm.llvm_env import LlvmEnv # TODO(github.com/facebookresearch/CompilerGym/issues/506): Tidy up. if "compiler_gym.envs.llvm.is_making_specs" not in sys.modules: from compiler_gym.envs.llvm.specs import observation_spaces, reward_spaces from compiler_gym.util.registration import register from compiler_gym.util.runfiles_path import runfiles_path __all__ = [ "BenchmarkFromCommandLine", "LlvmCommandLine", "ClangInvocation", "compute_observation", "get_system_library_flags", "LLVM_SERVICE_BINARY", "LlvmEnv", "make_benchmark", "make_benchmark_from_source", "merge_benchmarks", "observation_spaces", "reward_spaces", "split_benchmark_by_function", ] LLVM_SERVICE_BINARY = runfiles_path( "compiler_gym/envs/llvm/service/compiler_gym-llvm-service" ) def _register_llvm_gym_service(): """Register an environment for each combination of LLVM observation/reward/benchmark.""" observation_spaces = {"autophase": "Autophase", "ir": "Ir"} reward_spaces = {"ic": "IrInstructionCountOz", "codesize": "ObjectTextSizeOz"} register( id="llvm-v0", entry_point="compiler_gym.envs.llvm:LlvmEnv", kwargs={ "service": LLVM_SERVICE_BINARY, }, ) for reward_space in reward_spaces: register( id=f"llvm-{reward_space}-v0", entry_point="compiler_gym.envs.llvm:LlvmEnv", kwargs={ "service": LLVM_SERVICE_BINARY, "reward_space": reward_spaces[reward_space], }, ) for observation_space, reward_space in product(observation_spaces, reward_spaces): register( id=f"llvm-{observation_space}-{reward_space}-v0", entry_point="compiler_gym.envs.llvm:LlvmEnv", kwargs={ "service": LLVM_SERVICE_BINARY, "observation_space": observation_spaces[observation_space], "reward_space": reward_spaces[reward_space], }, ) _register_llvm_gym_service()
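# The registration loop above produces one environment ID per observation /
# reward combination, in addition to the plain "llvm-v0":
#   llvm-ic-v0, llvm-codesize-v0,
#   llvm-autophase-ic-v0, llvm-autophase-codesize-v0,
#   llvm-ir-ic-v0, llvm-ir-codesize-v0.
# A minimal usage sketch for one of them:
if __name__ == "__main__":
    import compiler_gym

    with compiler_gym.make("llvm-autophase-ic-v0") as env:
        env.reset()
        # Observation is the Autophase feature vector; reward comes from the
        # IrInstructionCountOz reward space.
        observation, reward, done, info = env.step(env.action_space.sample())
        print(observation, reward)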
CompilerGym-development
compiler_gym/envs/llvm/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """This module defines a utility function for computing LLVM observations.""" import subprocess from pathlib import Path from typing import List import google.protobuf.text_format from compiler_gym.service.proto import Event from compiler_gym.util.commands import Popen from compiler_gym.util.gym_type_hints import ObservationType from compiler_gym.util.runfiles_path import runfiles_path from compiler_gym.util.shell_format import plural from compiler_gym.views.observation_space_spec import ObservationSpaceSpec _COMPUTE_OBSERVATION_BIN = runfiles_path( "compiler_gym/envs/llvm/service/compute_observation" ) def pascal_case_to_enum(pascal_case: str) -> str: """Convert PascalCase to ENUM_CASE.""" word_arrays: List[List[str]] = [[]] for c in pascal_case: if c.isupper() and word_arrays[-1]: word_arrays.append([c]) else: word_arrays[-1].append(c.upper()) return "_".join(["".join(word) for word in word_arrays]) def compute_observation( observation_space: ObservationSpaceSpec, bitcode: Path, timeout: float = 300 ) -> ObservationType: """Compute an LLVM observation. This is a utility function that uses a standalone C++ binary to compute an observation from an LLVM bitcode file. It is intended for use cases where you want to compute an observation without the overhead of initializing a full environment. Example usage: >>> env = compiler_gym.make("llvm-v0") >>> space = env.observation.spaces["Ir"] >>> bitcode = Path("bitcode.bc") >>> observation = llvm.compute_observation(space, bitcode, timeout=30) .. warning:: This is not part of the core CompilerGym API and may change in a future release. :param observation_space: The observation that is to be computed. :param bitcode: The path of an LLVM bitcode file. :param timeout: The maximum number of seconds to allow the computation to run before timing out. :raises ValueError: If computing the observation fails. :raises TimeoutError: If computing the observation times out. :raises FileNotFoundError: If the given bitcode does not exist. """ if not Path(bitcode).is_file(): raise FileNotFoundError(bitcode) observation_space_name = pascal_case_to_enum(observation_space.id) try: with Popen( [str(_COMPUTE_OBSERVATION_BIN), observation_space_name, str(bitcode)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) as process: stdout, stderr = process.communicate(timeout=timeout) if process.returncode: try: stderr = stderr.decode("utf-8") raise ValueError( f"Failed to compute {observation_space.id} observation: {stderr}" ) except UnicodeDecodeError as e: raise ValueError( f"Failed to compute {observation_space.id} observation" ) from e except subprocess.TimeoutExpired as e: raise TimeoutError( f"Failed to compute {observation_space.id} observation in " f"{timeout:.1f} {plural(int(round(timeout)), 'second', 'seconds')}" ) from e try: stdout = stdout.decode("utf-8") except UnicodeDecodeError as e: raise ValueError( f"Failed to parse {observation_space.id} observation: {e}" ) from e observation = Event() try: google.protobuf.text_format.Parse(stdout, observation) except google.protobuf.text_format.ParseError as e: raise ValueError(f"Failed to parse {observation_space.id} observation") from e return observation_space.translate(observation)
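# Worked examples of pascal_case_to_enum(), which maps the Python-facing
# observation space names onto the enum names expected by the standalone
# binary:
#   pascal_case_to_enum("Ir")                  -> "IR"
#   pascal_case_to_enum("IrSha1")              -> "IR_SHA1"
#   pascal_case_to_enum("ObjectTextSizeBytes") -> "OBJECT_TEXT_SIZE_BYTES"
# A minimal end-to-end sketch, assuming "code.bc" is an existing bitcode file
# built with a compatible LLVM version:
if __name__ == "__main__":
    import compiler_gym

    with compiler_gym.make("llvm-v0") as env:
        space = env.observation.spaces["IrInstructionCount"]
        print(compute_observation(space, Path("code.bc"), timeout=60))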
CompilerGym-development
compiler_gym/envs/llvm/compute_observation.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """This module defines a utility function for constructing LLVM benchmarks.""" import logging import os import random import subprocess import sys import tempfile from concurrent.futures import as_completed from copy import deepcopy from datetime import datetime from functools import lru_cache from pathlib import Path from typing import Iterable, List, Optional, Union from compiler_gym.datasets import Benchmark from compiler_gym.errors import BenchmarkInitError from compiler_gym.third_party import llvm from compiler_gym.util.commands import Popen, communicate, run_command from compiler_gym.util.runfiles_path import transient_cache_path from compiler_gym.util.shell_format import join_cmd from compiler_gym.util.thread_pool import get_thread_pool_executor logger = logging.getLogger(__name__) class HostCompilerFailure(OSError): """Exception raised when the system compiler fails.""" class UnableToParseHostCompilerOutput(HostCompilerFailure): """Exception raised if unable to parse the verbose output of the host compiler.""" def _get_system_library_flags(compiler: str) -> Iterable[str]: """Private implementation function.""" # Create a temporary file to write the compiled binary to, since GNU # assembler does not support piping to stdout. transient_cache = transient_cache_path(".") transient_cache.mkdir(parents=True, exist_ok=True) with tempfile.NamedTemporaryFile(dir=transient_cache) as f: cmd = [compiler, "-xc++", "-v", "-", "-o", f.name] # On macOS we need to compile a binary to invoke the linker. if sys.platform != "darwin": cmd.append("-c") # Retry loop to permit timeouts, though unlikely, in case of a # heavily overloaded system (I have observed CI failures because # of this). for _ in range(3): try: with Popen( cmd, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True, ) as process: _, stderr = communicate( process=process, input="int main(){return 0;}", timeout=30 ) if process.returncode: raise HostCompilerFailure( f"Failed to invoke '{compiler}'. " f"Is there a working system compiler?\n" f"Error: {stderr.strip()}" ) break except subprocess.TimeoutExpired: continue except FileNotFoundError as e: raise HostCompilerFailure( f"Failed to invoke '{compiler}'. " f"Is there a working system compiler?\n" f"Error: {e}" ) from e else: raise HostCompilerFailure( f"Compiler invocation '{join_cmd(cmd)}' timed out after 3 attempts." ) # Parse the compiler output that matches the conventional output format # used by clang and GCC: # # #include <...> search starts here: # /path/1 # /path/2 # End of search list in_search_list = False lines = stderr.split("\n") for line in lines: if in_search_list and line.startswith("End of search list"): break elif in_search_list: # We have an include path to return. path = Path(line.strip()) yield "-isystem" yield str(path) # Compatibility fix for compiling benchmark sources which use the # '#include <endian.h>' header, which on macOS is located in a # 'machine/endian.h' directory. 
if (path / "machine").is_dir(): yield "-isystem" yield str(path / "machine") elif line.startswith("#include <...> search starts here:"): in_search_list = True else: msg = f"Failed to parse '#include <...>' search paths from '{compiler}'" stderr = stderr.strip() if stderr: msg += f":\n{stderr}" raise UnableToParseHostCompilerOutput(msg) if sys.platform == "darwin": yield "-L/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/lib" @lru_cache(maxsize=32) def _get_cached_system_library_flags(compiler: str) -> List[str]: """Private implementation detail.""" return list(_get_system_library_flags(compiler)) def get_system_library_flags(compiler: Optional[str] = None) -> List[str]: """Determine the set of compilation flags needed to use the host system libraries. This uses the system compiler to determine the search paths for C/C++ system headers, and on macOS, the location of libclang_rt.osx.a. By default, :code:`c++` is invoked. This can be overridden by setting :code:`os.environ["CXX"]` prior to calling this function. :return: A list of command line flags for a compiler. :raises HostCompilerFailure: If the host compiler cannot be determined, or fails to compile a trivial piece of code. :raises UnableToParseHostCompilerOutput: If the output of the compiler cannot be understood. """ compiler = compiler or (os.environ.get("CXX") or "c++") # We want to cache the results of this expensive query after resolving the # default value for the compiler argument, as it can changed based on # environment variables. return _get_cached_system_library_flags(compiler) class ClangInvocation: """Class to represent a single invocation of the clang compiler.""" def __init__( self, args: List[str], system_includes: bool = True, timeout: int = 600 ): """Create a clang invocation. :param args: The list of arguments to pass to clang. :param system_includes: Whether to include the system standard libraries during compilation jobs. This requires a system toolchain. See :func:`get_system_library_flags`. :param timeout: The maximum number of seconds to allow clang to run before terminating. """ self.args = args self.system_includes = system_includes self.timeout = timeout def command(self, outpath: Path) -> List[str]: cmd = [str(llvm.clang_path()), "-c", "-emit-llvm", "-o", str(outpath)] if self.system_includes: cmd += get_system_library_flags() cmd += [str(s) for s in self.args] return cmd # NOTE(cummins): There is some discussion about the best way to create a # bitcode that is unoptimized yet does not hinder downstream # optimization opportunities. Here we are using a configuration based on # -O1 in which we prevent the -O1 optimization passes from running. This # is because LLVM produces different function attributes dependening on # the optimization level. E.g. "-O0 -Xclang -disable-llvm-optzns -Xclang # -disable-O0-optnone" will generate code with "noinline" attributes set # on the functions, wheras "-Oz -Xclang -disable-llvm-optzns" will # generate functions with "minsize" and "optsize" attributes set. 
# # See also: # <https://lists.llvm.org/pipermail/llvm-dev/2018-August/thread.html#125365> # <https://github.com/facebookresearch/CompilerGym/issues/110> DEFAULT_COPT = [ "-O1", "-Xclang", "-disable-llvm-passes", "-Xclang", "-disable-llvm-optzns", ] @classmethod def from_c_file( cls, path: Path, copt: Optional[List[str]] = None, system_includes: bool = True, timeout: int = 600, ) -> "ClangInvocation": copt = copt or [] return cls( cls.DEFAULT_COPT + copt + [str(path)], system_includes=system_includes, timeout=timeout, ) def make_benchmark( inputs: Union[str, Path, ClangInvocation, List[Union[str, Path, ClangInvocation]]], copt: Optional[List[str]] = None, system_includes: bool = True, timeout: int = 600, ) -> Benchmark: """Create a benchmark for use by LLVM environments. This function takes one or more inputs and uses them to create an LLVM bitcode benchmark that can be passed to :meth:`compiler_gym.envs.LlvmEnv.reset`. The following input types are supported: +-----------------------------------------------------+---------------------+-------------------------------------------------------------+ | **File Suffix** | **Treated as** | **Converted using** | +-----------------------------------------------------+---------------------+-------------------------------------------------------------+ | :code:`.bc` | LLVM IR bitcode | No conversion required. | +-----------------------------------------------------+---------------------+-------------------------------------------------------------+ | :code:`.ll` | LLVM IR text format | Assembled to bitcode using llvm-as. | +-----------------------------------------------------+---------------------+-------------------------------------------------------------+ | :code:`.c`, :code:`.cc`, :code:`.cpp`, :code:`.cxx` | C / C++ source | Compiled to bitcode using clang and the given :code:`copt`. | +-----------------------------------------------------+---------------------+-------------------------------------------------------------+ .. note:: The LLVM IR format has no compatability guarantees between versions (see `LLVM docs <https://llvm.org/docs/DeveloperPolicy.html#ir-backwards-compatibility>`_). You must ensure that any :code:`.bc` and :code:`.ll` files are compatible with the LLVM version used by CompilerGym, which can be reported using :func:`env.compiler_version <compiler_gym.envs.CompilerEnv.compiler_version>`. E.g. for single-source C/C++ programs, you can pass the path of the source file: >>> benchmark = make_benchmark('my_app.c') >>> env = gym.make("llvm-v0") >>> env.reset(benchmark=benchmark) The clang invocation used is roughly equivalent to: .. code-block:: $ clang my_app.c -O0 -c -emit-llvm -o benchmark.bc Additional compile-time arguments to clang can be provided using the :code:`copt` argument: >>> benchmark = make_benchmark('/path/to/my_app.cpp', copt=['-O2']) If you need more fine-grained control over the options, you can directly construct a :class:`ClangInvocation <compiler_gym.envs.llvm.ClangInvocation>` to pass a list of arguments to clang: >>> benchmark = make_benchmark( ClangInvocation(['/path/to/my_app.c'], system_includes=False, timeout=10) ) For multi-file programs, pass a list of inputs that will be compiled separately and then linked to a single module: >>> benchmark = make_benchmark([ 'main.c', 'lib.cpp', 'lib2.bc', 'foo/input.bc' ]) :param inputs: An input, or list of inputs. :param copt: A list of command line options to pass to clang when compiling source files. 
:param system_includes: Whether to include the system standard libraries during compilation jobs. This requires a system toolchain. See :func:`get_system_library_flags`. :param timeout: The maximum number of seconds to allow clang to run before terminating. :return: A :code:`Benchmark` instance. :raises FileNotFoundError: If any input sources are not found. :raises TypeError: If the inputs are of unsupported types. :raises OSError: If a suitable compiler cannot be found. :raises BenchmarkInitError: If a compilation job fails. :raises TimeoutExpired: If a compilation job exceeds :code:`timeout` seconds. """ copt = copt or [] bitcodes: List[Path] = [] clang_jobs: List[ClangInvocation] = [] ll_paths: List[Path] = [] def _add_path(path: Path): if not path.is_file(): raise FileNotFoundError(path) if path.suffix == ".bc": bitcodes.append(path.absolute()) elif path.suffix in {".c", ".cc", ".cpp", ".cxx"}: clang_jobs.append( ClangInvocation.from_c_file( path, copt=copt, system_includes=system_includes, timeout=timeout ) ) elif path.suffix == ".ll": ll_paths.append(path) else: raise ValueError(f"Unrecognized file type: {path.name}") # Determine from inputs the list of pre-compiled bitcodes and the clang # invocations required to compile the bitcodes. if isinstance(inputs, str) or isinstance(inputs, Path): _add_path(Path(inputs)) elif isinstance(inputs, ClangInvocation): clang_jobs.append(inputs) else: for input in inputs: if isinstance(input, str) or isinstance(input, Path): _add_path(Path(input)) elif isinstance(input, ClangInvocation): clang_jobs.append(input) else: raise TypeError(f"Invalid input type: {type(input).__name__}") # Shortcut if we only have a single pre-compiled bitcode. if len(bitcodes) == 1 and not clang_jobs and not ll_paths: bitcode = bitcodes[0] return Benchmark.from_file(uri=f"benchmark://file-v0{bitcode}", path=bitcode) tmpdir_root = transient_cache_path(".") tmpdir_root.mkdir(exist_ok=True, parents=True) with tempfile.TemporaryDirectory( dir=tmpdir_root, prefix="llvm-make_benchmark-" ) as d: working_dir = Path(d) clang_outs = [ working_dir / f"clang-out-{i}.bc" for i in range(1, len(clang_jobs) + 1) ] llvm_as_outs = [ working_dir / f"llvm-as-out-{i}.bc" for i in range(1, len(ll_paths) + 1) ] # Run the clang and llvm-as invocations in parallel. Avoid running this # code path if possible as get_thread_pool_executor() requires locking. if clang_jobs or ll_paths: llvm_as_path = str(llvm.llvm_as_path()) executor = get_thread_pool_executor() llvm_as_commands = [ [llvm_as_path, str(ll_path), "-o", bc_path] for ll_path, bc_path in zip(ll_paths, llvm_as_outs) ] # Fire off the clang and llvm-as jobs. futures = [ executor.submit(run_command, job.command(out), job.timeout) for job, out in zip(clang_jobs, clang_outs) ] + [ executor.submit(run_command, command, timeout) for command in llvm_as_commands ] # Block until finished. list(future.result() for future in as_completed(futures)) # Check that the expected files were generated. for clang_job, bc_path in zip(clang_jobs, clang_outs): if not bc_path.is_file(): raise BenchmarkInitError( f"clang failed: {' '.join(clang_job.command(bc_path))}" ) for command, bc_path in zip(llvm_as_commands, llvm_as_outs): if not bc_path.is_file(): raise BenchmarkInitError(f"llvm-as failed: {command}") all_outs = bitcodes + clang_outs + llvm_as_outs if not all_outs: raise ValueError("No inputs") elif len(all_outs) == 1: # We only have a single bitcode so read it. 
with open(str(all_outs[0]), "rb") as f: bitcode = f.read() else: # Link all of the bitcodes into a single module. llvm_link_cmd = [str(llvm.llvm_link_path()), "-o", "-"] + [ str(path) for path in bitcodes + clang_outs ] with Popen( llvm_link_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) as llvm_link: bitcode, stderr = llvm_link.communicate(timeout=timeout) if llvm_link.returncode: raise BenchmarkInitError( f"Failed to link LLVM bitcodes with error: {stderr.decode('utf-8')}" ) timestamp = datetime.now().strftime("%Y%m%HT%H%M%S") uri = f"benchmark://user-v0/{timestamp}-{random.randrange(16**4):04x}" return Benchmark.from_file_contents(uri, bitcode) def make_benchmark_from_source( source: str, copt: Optional[List[str]] = None, lang: str = "c++", system_includes: bool = True, timeout: int = 600, ) -> Benchmark: """Create a benchmark from a string of source code. This function takes a string of source code and generates a benchmark that can be passed to :meth:`compiler_gym.envs.LlvmEnv.reset`. Example usage: >>> benchmark = make_benchmark_from_source("int A() {return 0;}") >>> env = gym.make("llvm-v0") >>> env.reset(benchmark=benchmark) The clang invocation used is roughly equivalent to: .. code-block:: $ clang - -O0 -c -emit-llvm -o benchmark.bc Additional compile-time arguments to clang can be provided using the :code:`copt` argument: >>> benchmark = make_benchmark_from_source("...", copt=['-O2']) :param source: A string of source code. :param copt: A list of command line options to pass to clang when compiling source files. :param lang: The source language, passed to clang via the :code:`-x` argument. Defaults to C++. :param system_includes: Whether to include the system standard libraries during compilation jobs. This requires a system toolchain. See :func:`get_system_library_flags`. :param timeout: The maximum number of seconds to allow clang to run before terminating. :return: A :code:`Benchmark` instance. :raises FileNotFoundError: If any input sources are not found. :raises TypeError: If the inputs are of unsupported types. :raises OSError: If a suitable compiler cannot be found. :raises BenchmarkInitError: If a compilation job fails. :raises TimeoutExpired: If a compilation job exceeds :code:`timeout` seconds. """ cmd = [ str(llvm.clang_path()), f"-x{lang}", "-", "-o", "-", "-c", "-emit-llvm", *ClangInvocation.DEFAULT_COPT, ] if system_includes: cmd += get_system_library_flags() cmd += copt or [] with Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE ) as clang: bitcode, stderr = clang.communicate(source.encode("utf-8"), timeout=timeout) if clang.returncode: raise BenchmarkInitError( f"Failed to make benchmark with compiler error: {stderr.decode('utf-8')}" ) timestamp = datetime.now().strftime("%Y%m%HT%H%M%S") uri = f"benchmark://user-v0/{timestamp}-{random.randrange(16**4):04x}" return Benchmark.from_file_contents(uri, bitcode) def split_benchmark_by_function( benchmark: Benchmark, maximum_function_count: int = 0, timeout: float = 300 ) -> List[Benchmark]: """Split a benchmark into single-function benchmarks. This function takes a benchmark as input and divides it into a set of independent benchmarks, where each benchmark contains a single function from the input. Under the hood, this uses an extension to `llvm-extract <https://llvm.org/docs/CommandGuide/llvm-extract.html>`__ to pull out individual parts of programs. In pseudo code, this is roughly equivalent to: .. 
code-block::py for i in number_of_functions_in_benchmark(benchmark): yield llvm_extract(benchmark, function_number=i) :param benchmark: A benchmark to split. :param maximum_function_count: If a positive integer, this specifies the maximum number of single-function benchmarks to extract from the input. If the input contains more than this number of functions, the remainder are ignored. :param timeout: The maximum number of seconds to allow llvm-extract to run before terminating. :return: A list of :code:`Benchmark` instances. :raises ValueError: If the input benchmark contains no functions, or if llvm-extract fails. :raises TimeoutExpired: If any llvm-extract job exceeds :code:`timeout` seconds. """ original_uri = deepcopy(benchmark.uri) original_bitcode = benchmark.proto.program.contents # Count the number of functions in the benchmark. with Popen( [str(llvm.llvm_extract_one_path()), "-", "-count-only", "-o", "-"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, ) as p: stdout, stderr = p.communicate(original_bitcode, timeout=timeout) if p.returncode: raise ValueError( "Failed to count number of functions in benchmark: " f"{stderr.decode('utf-8')}" ) number_of_functions = int(stdout.decode("utf-8")) if number_of_functions <= 0: raise ValueError("No functions found!") split_benchmarks: List[Benchmark] = [] # Extract all of the global initializers into a standalone benchmark. with Popen( [str(llvm.llvm_extract_one_path()), "-", "-const-inits", "-o", "-"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, ) as p: stdout, stderr = p.communicate(original_bitcode, timeout=timeout) if p.returncode: raise ValueError( "Failed to extract constant initializers: " f"{stderr.decode('utf-8')}" ) original_uri.params["function"] = "<constant initializers>" split_benchmarks.append( Benchmark.from_file_contents(uri=original_uri, data=stdout) ) logger.debug("Extracted %s", original_uri) # Iterate over the number of functions, extracting each one in turn. n = min(number_of_functions, maximum_function_count or number_of_functions) for i in range(n): with Popen( [str(llvm.llvm_extract_one_path()), "-", "-n", str(i), "-o", "-"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, ) as p: stdout, stderr = p.communicate(original_bitcode, timeout=timeout) if p.returncode: raise ValueError( "Failed to extract function {i}: " f"{stderr.decode('utf-8')}" ) original_uri.params["function"] = str(i) split_benchmarks.append( Benchmark.from_file_contents(uri=original_uri, data=stdout) ) logger.debug("Extracted %s", original_uri) return split_benchmarks def merge_benchmarks(benchmarks: List[Benchmark], timeout: float = 300) -> Benchmark: """Merge a list of benchmarks into a single benchmark. Under the hood, this `llvm-link <https://llvm.org/docs/CommandGuide/llvm-link.html>`__ to combine each of the bitcodes of the input benchmarks into a single bitcode. :param benchmarks: A list of benchmarks to merge. :param timeout: The maximum number of seconds to allow llvm-link to run before terminating. :return: A :code:`Benchmark` instance. :raises ValueError: If the input contains no benchmarks, or if llvm-link fails. :raises TimeoutExpired: If llvm-link exceeds :code:`timeout` seconds. 
""" if not benchmarks: raise ValueError("No benchmarks!") transient_cache = transient_cache_path(".") transient_cache.mkdir(parents=True, exist_ok=True) with tempfile.TemporaryDirectory(dir=transient_cache, prefix="llvm-link") as d: tmpdir = Path(d) # Write each of the benchmark bitcodes to a temporary file. cmd = [str(llvm.llvm_link_path()), "-o", "-", "-f"] for i, benchmark in enumerate(benchmarks): bitcode_path = tmpdir / f"{i}.bc" with open(bitcode_path, "wb") as f: f.write(benchmark.proto.program.contents) cmd.append(str(bitcode_path)) # Run llvm-link on the temporary files. with Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as p: stdout, stderr = p.communicate(timeout=timeout) if p.returncode: raise ValueError( f"Failed to merge benchmarks: {stderr.decode('utf-8')}" ) timestamp = datetime.now().strftime("%Y%m%HT%H%M%S") uri = f"benchmark://llvm-link-v0/{timestamp}-{random.randrange(16**4):04x}" return Benchmark.from_file_contents(uri=uri, data=stdout)
CompilerGym-development
compiler_gym/envs/llvm/llvm_benchmark.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Extensions to the ClientServiceCompilerEnv environment for LLVM.""" import logging import os import shlex import shutil import subprocess from pathlib import Path from tempfile import TemporaryDirectory from typing import Any, Callable, Iterable, List, Optional, Union import numpy as np from compiler_gym.datasets import Benchmark, Dataset from compiler_gym.envs.llvm.benchmark_from_command_line import BenchmarkFromCommandLine from compiler_gym.envs.llvm.datasets import get_llvm_datasets from compiler_gym.envs.llvm.lexed_ir import LexedToken from compiler_gym.envs.llvm.llvm_benchmark import ( ClangInvocation, get_system_library_flags, make_benchmark, ) from compiler_gym.envs.llvm.llvm_command_line import LlvmCommandLine from compiler_gym.envs.llvm.llvm_rewards import ( BaselineImprovementNormalizedReward, CostFunctionReward, NormalizedReward, ) from compiler_gym.errors import BenchmarkInitError, SessionNotFound from compiler_gym.service.client_service_compiler_env import ( ClientServiceCompilerEnv, ServiceMessageConverters, ) from compiler_gym.service.proto.py_converters import make_message_default_converter from compiler_gym.spaces import Box from compiler_gym.spaces import Dict as DictSpace from compiler_gym.spaces import Scalar, Sequence from compiler_gym.third_party.autophase import AUTOPHASE_FEATURE_NAMES from compiler_gym.third_party.gccinvocation.gccinvocation import GccInvocation from compiler_gym.third_party.inst2vec import Inst2vecEncoder from compiler_gym.third_party.llvm import ( clang_path, download_llvm_files, llvm_link_path, ) from compiler_gym.third_party.llvm.instcount import INST_COUNT_FEATURE_NAMES from compiler_gym.util.commands import Popen from compiler_gym.util.runfiles_path import transient_cache_path from compiler_gym.util.shell_format import join_cmd _INST2VEC_ENCODER = Inst2vecEncoder() _LLVM_DATASETS: Optional[List[Dataset]] = None logger = logging.getLogger(__name__) def _get_llvm_datasets(site_data_base: Optional[Path] = None) -> Iterable[Dataset]: """Get the LLVM datasets. Use a singleton value when site_data_base is the default value. """ global _LLVM_DATASETS if site_data_base is None: if _LLVM_DATASETS is None: _LLVM_DATASETS = list(get_llvm_datasets(site_data_base=site_data_base)) return _LLVM_DATASETS return get_llvm_datasets(site_data_base=site_data_base) def make_llvm_action_space_converter() -> Callable[[Any], LlvmCommandLine]: return lambda msg: LlvmCommandLine(space=make_message_default_converter()(msg)) class LlvmEnv(ClientServiceCompilerEnv): """A specialized ClientServiceCompilerEnv for LLVM. This extends the default :class:`ClientServiceCompilerEnv <compiler_gym.envs.ClientServiceCompilerEnv>` environment, adding extra LLVM functionality. Specifically, the actions use the :class:`CommandlineFlag <compiler_gym.spaces.CommandlineFlag>` space, which is a type of :code:`Discrete` space that provides additional documentation about each action, and the :meth:`env.action_space.to_string(...) <compiler_gym.envs.LlvmEnv.LlvmCommandLine.to_string>` method can be used to produce an equivalent LLVM opt invocation for the given actions. 
""" def __init__( self, *args, benchmark: Optional[Union[str, Benchmark]] = None, datasets_site_path: Optional[Path] = None, **kwargs, ): # First perform a one-time download of LLVM binaries that are needed by # the LLVM service and are not included by the pip-installed package. download_llvm_files() self.inst2vec = _INST2VEC_ENCODER super().__init__( *args, **kwargs, # Set a default benchmark for use. benchmark=benchmark or "cbench-v1/qsort", datasets=_get_llvm_datasets(site_data_base=datasets_site_path), service_message_converters=ServiceMessageConverters( action_space_converter=make_llvm_action_space_converter() ), rewards=[ CostFunctionReward( name="IrInstructionCount", cost_function="IrInstructionCount", init_cost_function="IrInstructionCountO0", default_negates_returns=True, deterministic=True, platform_dependent=False, ), NormalizedReward( name="IrInstructionCountNorm", cost_function="IrInstructionCount", init_cost_function="IrInstructionCountO0", max=1, default_negates_returns=True, deterministic=True, platform_dependent=False, ), BaselineImprovementNormalizedReward( name="IrInstructionCountO3", cost_function="IrInstructionCount", baseline_cost_function="IrInstructionCountO3", init_cost_function="IrInstructionCountO0", success_threshold=1, default_negates_returns=True, deterministic=True, platform_dependent=False, ), BaselineImprovementNormalizedReward( name="IrInstructionCountOz", cost_function="IrInstructionCount", baseline_cost_function="IrInstructionCountOz", init_cost_function="IrInstructionCountO0", success_threshold=1, default_negates_returns=True, deterministic=True, platform_dependent=False, ), CostFunctionReward( name="ObjectTextSizeBytes", cost_function="ObjectTextSizeBytes", init_cost_function="ObjectTextSizeO0", default_negates_returns=True, deterministic=True, platform_dependent=True, ), NormalizedReward( name="ObjectTextSizeNorm", cost_function="ObjectTextSizeBytes", init_cost_function="ObjectTextSizeO0", max=1, default_negates_returns=True, deterministic=True, platform_dependent=True, ), BaselineImprovementNormalizedReward( name="ObjectTextSizeO3", cost_function="ObjectTextSizeBytes", init_cost_function="ObjectTextSizeO0", baseline_cost_function="ObjectTextSizeO3", success_threshold=1, default_negates_returns=True, deterministic=True, platform_dependent=True, ), BaselineImprovementNormalizedReward( name="ObjectTextSizeOz", cost_function="ObjectTextSizeBytes", init_cost_function="ObjectTextSizeO0", baseline_cost_function="ObjectTextSizeOz", success_threshold=1, default_negates_returns=True, deterministic=True, platform_dependent=True, ), CostFunctionReward( name="TextSizeBytes", cost_function="TextSizeBytes", init_cost_function="TextSizeO0", default_negates_returns=True, deterministic=True, platform_dependent=True, ), NormalizedReward( name="TextSizeNorm", cost_function="TextSizeBytes", init_cost_function="TextSizeO0", max=1, default_negates_returns=True, deterministic=True, platform_dependent=True, ), BaselineImprovementNormalizedReward( name="TextSizeO3", cost_function="TextSizeBytes", init_cost_function="TextSizeO0", baseline_cost_function="TextSizeO3", success_threshold=1, default_negates_returns=True, deterministic=True, platform_dependent=True, ), BaselineImprovementNormalizedReward( name="TextSizeOz", cost_function="TextSizeBytes", init_cost_function="TextSizeO0", baseline_cost_function="TextSizeOz", success_threshold=1, default_negates_returns=True, deterministic=True, platform_dependent=True, ), ], derived_observation_spaces=[ { "id": 
"Inst2vecPreprocessedText", "base_id": "Ir", "space": Sequence( name="Inst2vecPreprocessedText", size_range=(0, None), dtype=str ), "translate": self.inst2vec.preprocess, "default_value": "", }, { "id": "Inst2vecEmbeddingIndices", "base_id": "Ir", "space": Sequence( name="Inst2vecEmbeddingIndices", size_range=(0, None), dtype=np.int32, ), "translate": lambda base_observation: self.inst2vec.encode( self.inst2vec.preprocess(base_observation) ), "default_value": np.array([self.inst2vec.vocab["!UNK"]]), }, { "id": "Inst2vec", "base_id": "Ir", "space": Sequence( name="Inst2vec", size_range=(0, None), dtype=np.ndarray ), "translate": lambda base_observation: self.inst2vec.embed( self.inst2vec.encode(self.inst2vec.preprocess(base_observation)) ), "default_value": np.vstack( [self.inst2vec.embeddings[self.inst2vec.vocab["!UNK"]]] ), }, { "id": "InstCountDict", "base_id": "InstCount", "space": DictSpace( { f"{name}Count": Scalar( name=f"{name}Count", min=0, max=None, dtype=int ) for name in INST_COUNT_FEATURE_NAMES }, name="InstCountDict", ), "translate": lambda base_observation: { f"{name}Count": val for name, val in zip(INST_COUNT_FEATURE_NAMES, base_observation) }, }, { "id": "InstCountNorm", "base_id": "InstCount", "space": Box( name="InstCountNorm", low=0, high=1, shape=(len(INST_COUNT_FEATURE_NAMES) - 1,), dtype=np.float32, ), "translate": lambda base_observation: ( base_observation[1:] / max(base_observation[0], 1) ).astype(np.float32), }, { "id": "InstCountNormDict", "base_id": "InstCountNorm", "space": DictSpace( { f"{name}Density": Scalar( name=f"{name}Density", min=0, max=None, dtype=int ) for name in INST_COUNT_FEATURE_NAMES[1:] }, name="InstCountNormDict", ), "translate": lambda base_observation: { f"{name}Density": val for name, val in zip( INST_COUNT_FEATURE_NAMES[1:], base_observation ) }, }, { "id": "AutophaseDict", "base_id": "Autophase", "space": DictSpace( { name: Scalar(name=name, min=0, max=None, dtype=int) for name in AUTOPHASE_FEATURE_NAMES }, name="AutophaseDict", ), "translate": lambda base_observation: { name: val for name, val in zip(AUTOPHASE_FEATURE_NAMES, base_observation) }, }, { "id": "LexedIrTuple", "base_id": "LexedIr", "space": Sequence( name="LexedToken", size_range=(0, None), dtype=LexedToken, ), "translate": lambda base_observation: [ LexedToken(tid, kind, cat, val) for tid, kind, cat, val in zip( base_observation["token_id"], base_observation["token_kind"], base_observation["token_category"], base_observation["token_value"], ) ], "default_value": { "token_id": [], "token_kind": [], "token_category": [], "token_value": [], }, }, ], ) # Mutable runtime configuration options that must be set on every call # to reset. 
self._runtimes_per_observation_count: Optional[int] = None self._runtimes_warmup_per_observation_count: Optional[int] = None cpu_info_spaces = [ Sequence(name="name", size_range=(0, None), dtype=str), Scalar(name="cores_count", min=None, max=None, dtype=int), Scalar(name="l1i_cache_size", min=None, max=None, dtype=int), Scalar(name="l1i_cache_count", min=None, max=None, dtype=int), Scalar(name="l1d_cache_size", min=None, max=None, dtype=int), Scalar(name="l1d_cache_count", min=None, max=None, dtype=int), Scalar(name="l2_cache_size", min=None, max=None, dtype=int), Scalar(name="l2_cache_count", min=None, max=None, dtype=int), Scalar(name="l3_cache_size", min=None, max=None, dtype=int), Scalar(name="l3_cache_count", min=None, max=None, dtype=int), Scalar(name="l4_cache_size", min=None, max=None, dtype=int), Scalar(name="l4_cache_count", min=None, max=None, dtype=int), ] self.observation.spaces["CpuInfo"].space = DictSpace( {space.name: space for space in cpu_info_spaces}, name="CpuInfo", ) def reset(self, *args, **kwargs): try: return super().reset(*args, **kwargs) except ValueError as e: # Catch and re-raise some known benchmark initialization errors with # a more informative error type. if "Failed to compute .text size cost" in str(e): raise BenchmarkInitError( f"Failed to initialize benchmark {self._benchmark_in_use.uri}: {e}" ) from e elif ( "File not found:" in str(e) or "File is empty:" in str(e) or "Error reading file:" in str(e) ): raise BenchmarkInitError(str(e)) from e elif "Failed to parse LLVM bitcode" in str(e): raise BenchmarkInitError(str(e)) from e raise def make_benchmark( self, inputs: Union[ str, Path, ClangInvocation, List[Union[str, Path, ClangInvocation]] ], copt: Optional[List[str]] = None, system_includes: bool = True, timeout: int = 600, ) -> Benchmark: """Create a benchmark for use with this environment. This function takes one or more inputs and uses them to create an LLVM bitcode benchmark that can be passed to :meth:`compiler_gym.envs.LlvmEnv.reset`. The following input types are supported: +-----------------------------------------------------+---------------------+-------------------------------------------------------------+ | **File Suffix** | **Treated as** | **Converted using** | +-----------------------------------------------------+---------------------+-------------------------------------------------------------+ | :code:`.bc` | LLVM IR bitcode | No conversion required. | +-----------------------------------------------------+---------------------+-------------------------------------------------------------+ | :code:`.ll` | LLVM IR text format | Assembled to bitcode using llvm-as. | +-----------------------------------------------------+---------------------+-------------------------------------------------------------+ | :code:`.c`, :code:`.cc`, :code:`.cpp`, :code:`.cxx` | C / C++ source | Compiled to bitcode using clang and the given :code:`copt`. | +-----------------------------------------------------+---------------------+-------------------------------------------------------------+ .. note:: The LLVM IR format has no compatability guarantees between versions (see `LLVM docs <https://llvm.org/docs/DeveloperPolicy.html#ir-backwards-compatibility>`_). You must ensure that any :code:`.bc` and :code:`.ll` files are compatible with the LLVM version used by CompilerGym, which can be reported using :func:`env.compiler_version <compiler_gym.envs.ClientServiceCompilerEnv.compiler_version>`. E.g. 
for single-source C/C++ programs, you can pass the path of the source file: >>> benchmark = env.make_benchmark('my_app.c') >>> env = gym.make("llvm-v0") >>> env.reset(benchmark=benchmark) The clang invocation used is roughly equivalent to: .. code-block:: $ clang my_app.c -O0 -c -emit-llvm -o benchmark.bc Additional compile-time arguments to clang can be provided using the :code:`copt` argument: >>> benchmark = env.make_benchmark('/path/to/my_app.cpp', copt=['-O2']) If you need more fine-grained control over the options, you can directly construct a :class:`ClangInvocation <compiler_gym.envs.llvm.ClangInvocation>` to pass a list of arguments to clang: >>> benchmark = env.make_benchmark( ClangInvocation(['/path/to/my_app.c'], system_includes=False, timeout=10) ) For multi-file programs, pass a list of inputs that will be compiled separately and then linked to a single module: >>> benchmark = env.make_benchmark([ 'main.c', 'lib.cpp', 'lib2.bc', 'foo/input.bc' ]) :param inputs: An input, or list of inputs. :param copt: A list of command line options to pass to clang when compiling source files. :param system_includes: Whether to include the system standard libraries during compilation jobs. This requires a system toolchain. See :func:`get_system_library_flags`. :param timeout: The maximum number of seconds to allow clang to run before terminating. :return: A :code:`Benchmark` instance. :raises FileNotFoundError: If any input sources are not found. :raises TypeError: If the inputs are of unsupported types. :raises OSError: If a suitable compiler cannot be found. :raises BenchmarkInitError: If a compilation job fails. :raises TimeoutExpired: If a compilation job exceeds :code:`timeout` seconds. """ return make_benchmark( inputs=inputs, copt=copt, system_includes=system_includes, timeout=timeout, ) @property def ir(self) -> str: """Print the LLVM-IR of the program in its current state. Alias for :code:`env.observation["Ir"]`. :return: A string of LLVM-IR. """ return self.observation["Ir"] @property def ir_sha1(self) -> str: """Return the 40-characeter hex sha1 checksum of the current IR. Equivalent to: :code:`hashlib.sha1(env.ir.encode("utf-8")).hexdigest()`. :return: A 40-character hexadecimal sha1 string. """ return self.observation["IrSha1"] def write_ir(self, path: Union[Path, str]) -> Path: """Write the current program state to a file. :param path: The path of the file to write. :return: The input :code:`path` argument. """ path = Path(path).expanduser() with open(path, "w") as f: f.write(self.ir) return path def write_bitcode(self, path: Union[Path, str]) -> Path: """Write the current program state to a bitcode file. :param path: The path of the file to write. :return: The input :code:`path` argument. """ path = Path(path).expanduser() tmp_path = self.observation["BitcodeFile"] try: shutil.copyfile(tmp_path, path) finally: os.unlink(tmp_path) return path def render( self, mode="human", ) -> Optional[str]: if mode == "human": print(self.ir) else: return super().render(mode) @property def runtime_observation_count(self) -> int: """The number of runtimes to return for the Runtime observation space. See the :ref:`Runtime observation space reference <llvm/index:Runtime>` for further details. Example usage: >>> env = compiler_gym.make("llvm-v0") >>> env.reset() >>> env.runtime_observation_count = 10 >>> len(env.observation.Runtime()) 10 :getter: Returns the number of runtimes that will be returned when a :code:`Runtime` observation is requested. 
:setter: Set the number of runtimes to compute when a :code:`Runtime` observation is requested. :type: int """ return self._runtimes_per_observation_count or int( self.send_param("llvm.get_runtimes_per_observation_count", "") ) @runtime_observation_count.setter def runtime_observation_count(self, n: int) -> None: try: self.send_param( "llvm.set_runtimes_per_observation_count", str(n), resend_on_reset=True ) except SessionNotFound: pass # Not in session yet, will be sent on reset(). self._runtimes_per_observation_count = n @property def runtime_warmup_runs_count(self) -> int: """The number of warmup runs of the binary to perform before measuring the Runtime observation space. See the :ref:`Runtime observation space reference <llvm/index:Runtime>` for further details. Example usage: >>> env = compiler_gym.make("llvm-v0") >>> env.reset() >>> env.runtime_observation_count = 10 >>> len(env.observation.Runtime()) 10 :getter: Returns the number of runs that be performed before measuring the :code:`Runtime` observation is requested. :setter: Set the number of warmup runs to perform when a :code:`Runtime` observation is requested. :type: int """ return self._runtimes_warmup_per_observation_count or int( self.send_param("llvm.get_warmup_runs_count_per_runtime_observation", "") ) @runtime_warmup_runs_count.setter def runtime_warmup_runs_count(self, n: int) -> None: try: self.send_param( "llvm.set_warmup_runs_count_per_runtime_observation", str(n), resend_on_reset=True, ) except SessionNotFound: pass # Not in session yet, will be sent on reset(). self._runtimes_warmup_per_observation_count = n def fork(self): fkd = super().fork() if self.runtime_observation_count is not None: fkd.runtime_observation_count = self.runtime_observation_count if self.runtime_warmup_runs_count is not None: fkd.runtime_warmup_runs_count = self.runtime_warmup_runs_count return fkd def make_benchmark_from_command_line( self, cmd: Union[str, List[str]], replace_driver: bool = True, system_includes: bool = True, timeout: int = 600, ) -> Benchmark: """Create a benchmark for use with this environment. This function takes a command line compiler invocation as input, modifies it to produce an unoptimized LLVM-IR bitcode, and then runs the modified command line to produce a bitcode benchmark. For example, the command line: >>> benchmark = env.make_benchmark_from_command_line( ... ["gcc", "-DNDEBUG", "a.c", "b.c", "-o", "foo", "-lm"] ... ) Will compile a.c and b.c to an unoptimized benchmark that can be then passed to :meth:`reset() <compiler_env.envs.CompilerEnv.reset>`. The way this works is to change the first argument of the command line invocation to the version of clang shipped with CompilerGym, and to then append command line flags that causes the compiler to produce LLVM-IR with optimizations disabled. For example the input command line: .. code-block:: gcc -DNDEBUG a.c b.c -o foo -lm Will be rewritten to be roughly equivalent to: .. code-block:: /path/to/compiler_gym/clang -DNDEG a.c b.c \\ -Xclang -disable-llvm-passes -Xclang -disable-llvm-optzns \\ -c -emit-llvm -o - The generated benchmark then has a method :meth:`compile() <compiler_env.envs.llvm.BenchmarkFromCommandLine.compile>` which completes the linking and compilatilion to executable. For the above example, this would be roughly equivalent to: .. code-block:: /path/to/compiler_gym/clang environment-bitcode.bc -o foo -lm :param cmd: A command line compiler invocation, either as a list of arguments (e.g. :code:`["clang", "in.c"]`) or as a single shell string (e.g. 
:code:`"clang in.c"`). :param replace_driver: Whether to replace the first argument of the command with the clang driver used by this environment. :param system_includes: Whether to include the system standard libraries during compilation jobs. This requires a system toolchain. See :func:`get_system_library_flags`. :param timeout: The maximum number of seconds to allow the compilation job to run before terminating. :return: A :class:`BenchmarkFromCommandLine <compiler_gym.envs.llvm.BenchmarkFromCommandLine>` instance. :raises ValueError: If no command line is provided. :raises BenchmarkInitError: If executing the command line fails. :raises TimeoutExpired: If a compilation job exceeds :code:`timeout` seconds. """ if not cmd: raise ValueError("Input command line is empty") # Split the command line if passed a single string. if isinstance(cmd, str): cmd = shlex.split(cmd) rewritten_cmd: List[str] = cmd.copy() if len(cmd) < 2: raise ValueError(f"Input command line '{join_cmd(cmd)}' is too short") # Append include flags for the system headers if requested. if system_includes: rewritten_cmd += get_system_library_flags() # Use the CompilerGym clang binary in place of the original driver. if replace_driver: rewritten_cmd[0] = str(clang_path()) # Strip the -S flag, if present, as that changes the output format. rewritten_cmd = [c for c in rewritten_cmd if c != "-S"] invocation = GccInvocation(rewritten_cmd) # Strip the output specifier(s). This is not strictly required since we # override it later, but makes the generated command easier to # understand. for i in range(len(rewritten_cmd) - 2, -1, -1): if rewritten_cmd[i] == "-o": del rewritten_cmd[i + 1] del rewritten_cmd[i] # Fail early. if "-" in invocation.sources: raise ValueError( "Input command line reads from stdin, " f"which is not supported: '{join_cmd(cmd)}'" ) # Convert all of the C/C++ sources to bitcodes which can then be linked # into a single bitcode. We must process them individually because the # '-c' flag does not support multiple sources when we are specifying the # output path using '-o'. sources = set(s for s in invocation.sources if not s.endswith(".o")) if not sources: raise ValueError( f"Input command line has no source file inputs: '{join_cmd(cmd)}'" ) bitcodes: List[bytes] = [] for source in sources: # Adapt and execute the command line so that it will generate an # unoptimized bitecode file. emit_bitcode_command = rewritten_cmd.copy() # Strip the name of other sources: if len(sources) > 1: emit_bitcode_command = [ c for c in emit_bitcode_command if c == source or c not in sources ] # Append the flags to emit the bitcode and disable the optimization # passes. emit_bitcode_command += [ "-c", "-emit-llvm", "-o", "-", "-Xclang", "-disable-llvm-passes", "-Xclang", "-disable-llvm-optzns", ] with Popen( emit_bitcode_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) as clang: logger.debug( f"Generating LLVM bitcode benchmark: {join_cmd(emit_bitcode_command)}" ) bitcode, stderr = clang.communicate(timeout=timeout) if clang.returncode: raise BenchmarkInitError( f"Failed to generate LLVM bitcode with error:\n" f"{stderr.decode('utf-8').rstrip()}\n" f"Running command: {join_cmd(emit_bitcode_command)}\n" f"From original commandline: {join_cmd(cmd)}" ) bitcodes.append(bitcode) # If there were multiple sources then link the bitcodes together. if len(bitcodes) > 1: with TemporaryDirectory( dir=transient_cache_path("."), prefix="llvm-benchmark-" ) as dir: # Write the bitcodes to files. 
for i, bitcode in enumerate(bitcodes): with open(os.path.join(dir, f"{i}.bc"), "wb") as f: f.write(bitcode) # Link the bitcode files. llvm_link_cmd = [str(llvm_link_path()), "-o", "-"] + [ os.path.join(dir, f"{i}.bc") for i in range(len(bitcodes)) ] with Popen( llvm_link_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) as llvm_link: bitcode, stderr = llvm_link.communicate(timeout=timeout) if llvm_link.returncode: raise BenchmarkInitError( f"Failed to link LLVM bitcodes with error: {stderr.decode('utf-8')}" ) return BenchmarkFromCommandLine(invocation, bitcode, timeout)
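make_benchmark_from_command_line() above rewrites a compiler invocation so that each source file is emitted as unoptimized bitcode and, when there are several sources, links the pieces with llvm-link. A short usage sketch, assuming a.c and b.c exist in the working directory and that the -mem2reg flag is present in the action space:

import compiler_gym

with compiler_gym.make("llvm-v0") as env:
    benchmark = env.make_benchmark_from_command_line(
        ["gcc", "-DNDEBUG", "a.c", "b.c", "-o", "foo", "-lm"]
    )
    env.reset(benchmark=benchmark)
    env.step(env.action_space.flags.index("-mem2reg"))
    # Finish linking the optimized module to the original output ("foo" -lm),
    # via the compile() method described in the docstring above.
    benchmark.compile(env)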
CompilerGym-development
compiler_gym/envs/llvm/llvm_env.py
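llvm_env.py above registers the IrInstructionCount*, ObjectTextSize*, and TextSize* reward spaces and derives observation spaces such as AutophaseDict and InstCountNorm from the base service observations. A small example exercising them (cbench-v1/qsort is the environment's default benchmark):

import compiler_gym

with compiler_gym.make("llvm-v0", reward_space="IrInstructionCountOz") as env:
    env.reset(benchmark="cbench-v1/qsort")
    observation, reward, done, info = env.step(env.action_space.sample())
    print(reward)                              # improvement relative to the -Oz baseline
    print(env.observation["AutophaseDict"])    # Autophase features keyed by name
    print(env.observation["InstCountNorm"])    # per-opcode densities in [0, 1]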
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Utilities for LexedIRTuple derived observation space.""" import subprocess from pathlib import Path from tempfile import NamedTemporaryFile from typing import Dict, List, NamedTuple import google.protobuf.text_format import numpy as np from compiler_gym.service.proto import Event from compiler_gym.service.proto.py_converters import make_message_default_converter from compiler_gym.util.commands import Popen from compiler_gym.util.runfiles_path import runfiles_path from compiler_gym.util.shell_format import plural _COMPUTE_OBSERVATION_BIN = runfiles_path( "compiler_gym/envs/llvm/service/compute_observation" ) _COMPUTE_UNLEX_BIN = runfiles_path("compiler_gym/third_party/Lexedir/compute_unlexed") class LexedToken(NamedTuple): ID: int kind: str category: str value: str def LexedIr(bitcode: Path, timeout: float = 300) -> Dict[str, np.array]: """ """ if not Path(bitcode).is_file(): raise FileNotFoundError(bitcode) observation_space_name = "LEXED_IR" translate = make_message_default_converter() try: with Popen( [str(_COMPUTE_OBSERVATION_BIN), observation_space_name, str(bitcode)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) as process: stdout, stderr = process.communicate(timeout=timeout) if process.returncode: try: stderr = stderr.decode("utf-8") raise ValueError(f"Failed to compute LexedIr observation: {stderr}") except UnicodeDecodeError as e: raise ValueError("Failed to compute LexedIr observation") from e except subprocess.TimeoutExpired as e: raise TimeoutError( "Failed to compute LexedIr observation in " f"{timeout:.1f} {plural(int(round(timeout)), 'second', 'seconds')}" ) from e try: stdout = stdout.decode("utf-8") except UnicodeDecodeError as e: raise ValueError(f"Failed to parse LexedIr observation: {e}") from e observation = Event() try: google.protobuf.text_format.Parse(stdout, observation) except google.protobuf.text_format.ParseError as e: raise ValueError("Failed to parse LexedIr observation") from e return translate(observation) def LexedIrTuple(bitcode: Path, timeout: float = 300) -> List[LexedToken]: """ Standalone IR Lexer. """ lexed_dict = LexedIr(bitcode, timeout=timeout) return [ LexedToken(tid, tval, tkind, tcat) for tid, tval, tkind, tcat in zip( lexed_dict["token_id"], lexed_dict["token_value"], lexed_dict["token_kind"], lexed_dict["token_category"], ) ] def UnLex(token_ids: List[int], token_values: List[str], timeout: float = 300) -> str: with NamedTemporaryFile("w", prefix="compiler_gym_unlex_") as f: f.write( "\n".join(["{},{}".format(i, v) for i, v in zip(token_ids, token_values)]) ) f.flush() try: with Popen( [str(_COMPUTE_UNLEX_BIN), str(f.name)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) as process: stdout, stderr = process.communicate(timeout=timeout) if process.returncode: try: stderr = stderr.decode("utf-8") raise ValueError( f"Failed to compute UnLex observation: {stderr}" ) except UnicodeDecodeError as e: raise ValueError("Failed to compute UnLex observation") from e except subprocess.TimeoutExpired as e: raise TimeoutError( f"Failed to compute UnLex observation in " f"{timeout:.1f} {plural(int(round(timeout)), 'second', 'seconds')}" ) from e try: stdout = stdout.decode("utf-8") except UnicodeDecodeError as e: raise ValueError(f"Failed to parse UnLex observation: {e}") from e return stdout
CompilerGym-development
compiler_gym/envs/llvm/lexed_ir.py
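lexed_ir.py above shells out to the compute_observation and compute_unlexed binaries to lex a bitcode file into token_id/kind/category/value arrays and to reconstruct text from a token stream. A minimal sketch of using the helpers standalone, assuming both binaries ship with the installed package:

import compiler_gym
from compiler_gym.envs.llvm.lexed_ir import LexedIr, UnLex

with compiler_gym.make("llvm-v0") as env:
    env.reset(benchmark="cbench-v1/crc32")
    bitcode = env.write_bitcode("/tmp/benchmark.bc")
    lexed = LexedIr(bitcode)            # dict of token_id/kind/category/value arrays
    print(len(lexed["token_id"]), "tokens")
    # Round-trip the token stream back to (approximate) textual IR.
    print(UnLex(lexed["token_id"], lexed["token_value"])[:200])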
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import enum import io import logging import os import re import shutil import subprocess import sys import tarfile import tempfile from collections import defaultdict from pathlib import Path from threading import Lock from typing import Callable, Dict, Iterable, List, NamedTuple, Optional import fasteners from compiler_gym.datasets import Benchmark, TarDatasetWithManifest from compiler_gym.datasets.benchmark import ValidationCallback from compiler_gym.datasets.uri import BenchmarkUri from compiler_gym.envs.llvm import llvm_benchmark from compiler_gym.errors import ValidationError from compiler_gym.service.proto import BenchmarkDynamicConfig, Command from compiler_gym.third_party import llvm from compiler_gym.util.commands import Popen from compiler_gym.util.download import download from compiler_gym.util.filesystem import extract_tar from compiler_gym.util.runfiles_path import cache_path, site_data_path from compiler_gym.util.timer import Timer logger = logging.getLogger(__name__) _CBENCH_TARS = { "macos": ( "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-cBench-v1-macos.tar.bz2", "90b312b40317d9ee9ed09b4b57d378879f05e8970bb6de80dc8581ad0e36c84f", ), "linux": ( "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-cBench-v1-linux.tar.bz2", "601fff3944c866f6617e653b6eb5c1521382c935f56ca1f36a9f5cf1a49f3de5", ), } _CBENCH_RUNTOME_DATA = ( "https://dl.fbaipublicfiles.com/compiler_gym/cBench-v0-runtime-data.tar.bz2", "a1b5b5d6b115e5809ccaefc2134434494271d184da67e2ee43d7f84d07329055", ) if sys.platform == "darwin": _COMPILE_ARGS = [ "-L", "/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/lib", ] else: _COMPILE_ARGS = [] class LlvmSanitizer(enum.IntEnum): """The LLVM sanitizers.""" ASAN = 1 TSAN = 2 MSAN = 3 UBSAN = 4 # Compiler flags that are enabled by sanitizers. _SANITIZER_FLAGS = { LlvmSanitizer.ASAN: ["-O1", "-g", "-fsanitize=address", "-fno-omit-frame-pointer"], LlvmSanitizer.TSAN: ["-O1", "-g", "-fsanitize=thread"], LlvmSanitizer.MSAN: ["-O1", "-g", "-fsanitize=memory"], LlvmSanitizer.UBSAN: ["-fsanitize=undefined"], } class BenchmarkExecutionResult(NamedTuple): """The result of running a benchmark.""" walltime_seconds: float """The execution time in seconds.""" error: Optional[ValidationError] = None """An error.""" output: Optional[str] = None """The output generated by the benchmark.""" def json(self): return self._asdict() # pylint: disable=no-member def _compile_and_run_bitcode_file( bitcode_file: Path, cmd: str, cwd: Path, linkopts: List[str], env: Dict[str, str], num_runs: int, sanitizer: Optional[LlvmSanitizer] = None, timeout_seconds: float = 300, compilation_timeout_seconds: float = 60, ) -> BenchmarkExecutionResult: """Run the given cBench benchmark.""" # cBench benchmarks expect that a file _finfo_dataset exists in the # current working directory and contains the number of benchmark # iterations in it. with open(cwd / "_finfo_dataset", "w") as f: print(num_runs, file=f) # Create a barebones execution environment for the benchmark. run_env = { "TMPDIR": os.environ.get("TMPDIR", ""), "HOME": os.environ.get("HOME", ""), "USER": os.environ.get("USER", ""), # Disable all logging from GRPC. In the past I have had false-positive # "Wrong output" errors caused by GRPC error messages being logged to # stderr. 
"GRPC_VERBOSITY": "NONE", } run_env.update(env) error_data = {} if sanitizer: clang_path = llvm.clang_path() binary = cwd / "a.out" error_data["run_cmd"] = cmd.replace("$BIN", "./a.out") # Generate the a.out binary file. compile_cmd = ( [clang_path.name, str(bitcode_file), "-o", str(binary)] + _COMPILE_ARGS + list(linkopts) + _SANITIZER_FLAGS.get(sanitizer, []) ) error_data["compile_cmd"] = compile_cmd logger.debug("compile: %s", compile_cmd) assert not binary.is_file() try: with Popen( compile_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, env={"PATH": f"{clang_path.parent}:{os.environ.get('PATH', '')}"}, ) as clang: output, _ = clang.communicate(timeout=compilation_timeout_seconds) if clang.returncode: error_data["output"] = output return BenchmarkExecutionResult( walltime_seconds=timeout_seconds, error=ValidationError( type="Compilation failed", data=error_data, ), ) except subprocess.TimeoutExpired: error_data["timeout"] = compilation_timeout_seconds return BenchmarkExecutionResult( walltime_seconds=timeout_seconds, error=ValidationError( type="Compilation timeout", data=error_data, ), ) assert binary.is_file() else: lli_path = llvm.lli_path() error_data["run_cmd"] = cmd.replace("$BIN", f"{lli_path.name} benchmark.bc") run_env["PATH"] = str(lli_path.parent) logger.debug("exec: %s", error_data["run_cmd"]) try: with Timer() as timer, Popen( error_data["run_cmd"], shell=True, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, env=run_env, cwd=cwd, ) as process: stdout, _ = process.communicate(timeout=timeout_seconds) except subprocess.TimeoutExpired: error_data["timeout_seconds"] = timeout_seconds return BenchmarkExecutionResult( walltime_seconds=timeout_seconds, error=ValidationError( type="Execution timeout", data=error_data, ), ) finally: if sanitizer: binary.unlink() try: output = stdout.decode("utf-8") except UnicodeDecodeError: output = "<binary>" if process.returncode: # Runtime error. if sanitizer == LlvmSanitizer.ASAN and "LeakSanitizer" in output: error_type = "Memory leak" elif sanitizer == LlvmSanitizer.ASAN and "AddressSanitizer" in output: error_type = "Memory error" elif sanitizer == LlvmSanitizer.MSAN and "MemorySanitizer" in output: error_type = "Memory error" elif "Segmentation fault" in output: error_type = "Segmentation fault" elif "Illegal Instruction" in output: error_type = "Illegal Instruction" else: error_type = f"Runtime error ({process.returncode})" error_data["return_code"] = process.returncode error_data["output"] = output return BenchmarkExecutionResult( walltime_seconds=timer.time, error=ValidationError( type=error_type, data=error_data, ), ) return BenchmarkExecutionResult(walltime_seconds=timer.time, output=output) def download_cBench_runtime_data() -> bool: """Download and unpack the cBench runtime dataset.""" cbench_data = site_data_path("llvm-v0/cbench-v1-runtime-data/runtime_data") if (cbench_data / "unpacked").is_file(): return False else: # Clean up any partially-extracted data directory. if cbench_data.is_dir(): shutil.rmtree(cbench_data) url, sha256 = _CBENCH_RUNTOME_DATA tar_contents = io.BytesIO(download(url, sha256)) with tarfile.open(fileobj=tar_contents, mode="r:bz2") as tar: cbench_data.parent.mkdir(parents=True, exist_ok=True) extract_tar(tar, cbench_data.parent) assert cbench_data.is_dir() # Create the marker file to indicate that the directory is unpacked # and ready to go. (cbench_data / "unpacked").touch() return True # Thread lock to prevent race on download_cBench_runtime_data() from # multi-threading. 
This works in tandem with the inter-process file lock - both # are required. _CBENCH_DOWNLOAD_THREAD_LOCK = Lock() def _make_cBench_validator( cmd: str, linkopts: List[str], os_env: Dict[str, str], num_runs: int = 1, compare_output: bool = True, input_files: Optional[List[Path]] = None, output_files: Optional[List[Path]] = None, validate_result: Optional[ Callable[[BenchmarkExecutionResult], Optional[str]] ] = None, pre_execution_callback: Optional[Callable[[Path], None]] = None, sanitizer: Optional[LlvmSanitizer] = None, flakiness: int = 5, ) -> ValidationCallback: """Construct a validation callback for a cBench benchmark. See validator() for usage.""" input_files = input_files or [] output_files = output_files or [] def validator_cb(env: "LlvmEnv") -> Optional[ValidationError]: # noqa: F821 """The validation callback.""" with _CBENCH_DOWNLOAD_THREAD_LOCK: with fasteners.InterProcessLock(cache_path(".cbench-v1-runtime-data.LOCK")): download_cBench_runtime_data() cbench_data = site_data_path("llvm-v0/cbench-v1-runtime-data/runtime_data") for input_file_name in input_files: path = cbench_data / input_file_name if not path.is_file(): raise FileNotFoundError(f"Required benchmark input not found: {path}") # Create a temporary working directory to execute the benchmark in. with tempfile.TemporaryDirectory(dir=env.service.connection.cache.path) as d: cwd = Path(d) # Expand shell variable substitutions in the benchmark command. expanded_command = cmd.replace("$D", str(cbench_data)) # Translate the output file names into paths inside the working # directory. output_paths = [cwd / o for o in output_files] if pre_execution_callback: pre_execution_callback(cwd) # Produce a gold-standard output using a reference version of # the benchmark. if compare_output or output_files: gs_env = env.fork() try: # Reset to the original benchmark state and compile it. gs_env.reset(benchmark=env.benchmark) gs_env.write_bitcode(cwd / "benchmark.bc") gold_standard = _compile_and_run_bitcode_file( bitcode_file=cwd / "benchmark.bc", cmd=expanded_command, cwd=cwd, num_runs=1, # Use default optimizations for gold standard. linkopts=linkopts + ["-O2"], # Always assume safe. sanitizer=None, env=os_env, ) if gold_standard.error: return ValidationError( type=f"Gold standard: {gold_standard.error.type}", data=gold_standard.error.data, ) finally: gs_env.close() # Check that the reference run produced the expected output # files. for path in output_paths: if not path.is_file(): try: output = gold_standard.output except UnicodeDecodeError: output = "<binary>" raise FileNotFoundError( f"Expected file '{path.name}' not generated\n" f"Benchmark: {env.benchmark}\n" f"Command: {cmd}\n" f"Output: {output}" ) path.rename(f"{path}.gold_standard") # Serialize the benchmark to a bitcode file that will then be # compiled to a binary. env.write_bitcode(cwd / "benchmark.bc") outcome = _compile_and_run_bitcode_file( bitcode_file=cwd / "benchmark.bc", cmd=expanded_command, cwd=cwd, num_runs=num_runs, linkopts=linkopts, sanitizer=sanitizer, env=os_env, ) if outcome.error: return outcome.error # Run a user-specified validation hook. if validate_result: validate_result(outcome) # Difftest the console output. if compare_output and gold_standard.output != outcome.output: return ValidationError( type="Wrong output", data={"expected": gold_standard.output, "actual": outcome.output}, ) # Difftest the output files. 
for path in output_paths: if not path.is_file(): return ValidationError( type="Output not generated", data={"path": path.name, "command": cmd}, ) with Popen( ["diff", str(path), f"{path}.gold_standard"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, ) as diff: stdout, _ = diff.communicate(timeout=300) if diff.returncode: try: stdout = stdout.decode("utf-8") return ValidationError( type="Wrong output (file)", data={"path": path.name, "diff": stdout}, ) except UnicodeDecodeError: return ValidationError( type="Wrong output (file)", data={"path": path.name, "diff": "<binary>"}, ) def flaky_wrapped_cb(env: "LlvmEnv") -> Optional[ValidationError]: # noqa: F821 """Wrap the validation callback in a flakiness retry loop.""" for j in range(1, max(flakiness, 1) + 1): try: error = validator_cb(env) if not error: return except TimeoutError: # Timeout errors can be raised by the environment in case of a # slow step / observation, and should be retried. pass # No point in repeating compilation failures as they are not flaky. if error.type == "Compilation failed": return error logger.warning( "Validation callback failed (%s), attempt=%d/%d", error.type, j, flakiness, ) return error # The flaky_wrapped_cb() function takes an environment and produces a single # error. We need the validator to produce an iterable of errors. def adapt_validator_return_type( env: "LlvmEnv", # noqa: F821 ) -> Iterable[ValidationError]: error = flaky_wrapped_cb(env) if error: yield error return adapt_validator_return_type def validator( benchmark: str, cmd: str, data: Optional[List[str]] = None, outs: Optional[List[str]] = None, platforms: Optional[List[str]] = None, compare_output: bool = True, validate_result: Optional[ Callable[[BenchmarkExecutionResult], Optional[str]] ] = None, linkopts: Optional[List[str]] = None, env: Optional[Dict[str, str]] = None, pre_execution_callback: Optional[Callable[[], None]] = None, sanitizers: Optional[List[LlvmSanitizer]] = None, ) -> bool: """Declare a new benchmark validator. TODO(cummins): Pull this out into a public API. :param benchmark: The name of the benchmark that this validator supports. :cmd: The shell command to run the validation. Variable substitution is applied to this value as follows: :code:`$BIN` is replaced by the path of the compiled binary and :code:`$D` is replaced with the path to the benchmark's runtime data directory. :data: A list of paths to input files. :outs: A list of paths to output files. :return: :code:`True` if the new validator was registered, else :code:`False`. """ platforms = platforms or ["linux", "macos"] if {"darwin": "macos"}.get(sys.platform, sys.platform) not in platforms: return False infiles = data or [] outfiles = [Path(p) for p in outs or []] linkopts = linkopts or [] env = env or {} if sanitizers is None: sanitizers = LlvmSanitizer VALIDATORS[benchmark].append( _make_cBench_validator( cmd=cmd, input_files=infiles, output_files=outfiles, compare_output=compare_output, validate_result=validate_result, linkopts=linkopts, os_env=env, pre_execution_callback=pre_execution_callback, ) ) # Register additional validators using the sanitizers. if sys.platform.startswith("linux"): for sanitizer in sanitizers: VALIDATORS[benchmark].append( _make_cBench_validator( cmd=cmd, input_files=infiles, output_files=outfiles, compare_output=compare_output, validate_result=validate_result, linkopts=linkopts, os_env=env, pre_execution_callback=pre_execution_callback, sanitizer=sanitizer, ) ) # Create the BenchmarkDynamicConfig object. 
cbench_data = site_data_path("llvm-v0/cbench-v1-runtime-data/runtime_data") uri = BenchmarkUri.from_string(benchmark) DYNAMIC_CONFIGS[uri.path].append( BenchmarkDynamicConfig( build_cmd=Command( argument=["$CC", "$IN"] + llvm_benchmark.get_system_library_flags() + linkopts, timeout_seconds=60, outfile=["a.out"], ), run_cmd=Command( argument=cmd.replace("$BIN", "./a.out") .replace("$D", str(cbench_data)) .split(), timeout_seconds=300, infile=["a.out", "_finfo_dataset"], outfile=[str(s) for s in outfiles], ), pre_run_cmd=[ Command(argument=["echo", "1", ">_finfo_dataset"], timeout_seconds=30), ], ) ) return True class CBenchDataset(TarDatasetWithManifest): def __init__(self, site_data_base: Path): platform = {"darwin": "macos"}.get(sys.platform, sys.platform) url, sha256 = _CBENCH_TARS[platform] super().__init__( name="benchmark://cbench-v1", description="Runnable C benchmarks", license="BSD 3-Clause", references={ "Paper": "https://arxiv.org/pdf/1407.3487.pdf", "Homepage": "https://ctuning.org/wiki/index.php/CTools:CBench", }, tar_urls=[url], tar_sha256=sha256, manifest_urls=[ "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-cbench-v1-manifest.bz2" ], manifest_sha256="eeffd7593aeb696a160fd22e6b0c382198a65d0918b8440253ea458cfe927741", strip_prefix="cBench-v1", benchmark_file_suffix=".bc", benchmark_class=Benchmark, site_data_base=site_data_base, sort_order=-1, validatable="Partially", ) def install(self): super().install() with _CBENCH_DOWNLOAD_THREAD_LOCK: with fasteners.InterProcessLock(cache_path(".cbench-v1-runtime-data.LOCK")): download_cBench_runtime_data() def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark: benchmark = super().benchmark_from_parsed_uri(uri) for val in VALIDATORS.get(str(uri), []): benchmark.add_validation_callback(val) # Parse the "dataset" parameter to determine the correct dynamic # configuration to use. if DYNAMIC_CONFIGS[uri.path]: cfgs = DYNAMIC_CONFIGS[uri.path] dataset = uri.params.get("dataset", ["0"]) try: dataset_index = int(dataset[-1]) except (ValueError, TypeError) as e: raise ValueError(f"Invalid dataset: {dataset[-1]}") from e if dataset_index < 0 or dataset_index >= len(cfgs): raise ValueError(f"Invalid dataset: {dataset_index}") benchmark.proto.dynamic_config.MergeFrom(cfgs[dataset_index]) return benchmark class CBenchLegacyDataset2(TarDatasetWithManifest): def __init__( self, site_data_base: Path, sort_order: int = 0, name="benchmark://cbench-v1", manifest_url="https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-cbench-v1-manifest.bz2", manifest_sha256="eeffd7593aeb696a160fd22e6b0c382198a65d0918b8440253ea458cfe927741", deprecated=None, ): platform = {"darwin": "macos"}.get(sys.platform, sys.platform) url, sha256 = _CBENCH_TARS[platform] super().__init__( name=name, description="Runnable C benchmarks", license="BSD 3-Clause", references={ "Paper": "https://arxiv.org/pdf/1407.3487.pdf", "Homepage": "https://ctuning.org/wiki/index.php/CTools:CBench", }, tar_urls=[url], tar_sha256=sha256, manifest_urls=[manifest_url], manifest_sha256=manifest_sha256, strip_prefix="cBench-v1", benchmark_file_suffix=".bc", site_data_base=site_data_base, sort_order=sort_order, deprecated=deprecated, validatable="Partially", ) # URLs of the deprecated cBench datasets. 
_CBENCH_LEGACY_TARS = { "macos": ( "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-cBench-v0-macos.tar.bz2", "072a730c86144a07bba948c49afe543e4f06351f1cb17f7de77f91d5c1a1b120", ), "linux": ( "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-cBench-v0-linux.tar.bz2", "9b5838a90895579aab3b9375e8eeb3ed2ae58e0ad354fec7eb4f8b31ecb4a360", ), } class CBenchLegacyDataset(TarDatasetWithManifest): # The difference between cbench-v0 and cbench-v1 is the arguments passed to # clang when preparing the LLVM bitcodes: # # - v0: `-O0 -Xclang -disable-O0-optnone`. # - v1: `-O1 -Xclang -Xclang -disable-llvm-passes`. # # The key difference with is that in v0, the generated IR functions were # annotated with a `noinline` attribute that prevented inline. In v1 that is # no longer the case. def __init__(self, site_data_base: Path): platform = {"darwin": "macos"}.get(sys.platform, sys.platform) url, sha256 = _CBENCH_LEGACY_TARS[platform] super().__init__( name="benchmark://cBench-v0", description="Runnable C benchmarks", license="BSD 3-Clause", references={ "Paper": "https://arxiv.org/pdf/1407.3487.pdf", "Homepage": "https://ctuning.org/wiki/index.php/CTools:CBench", }, tar_urls=[url], tar_sha256=sha256, manifest_urls=[ "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-cBench-v0-manifest.bz2" ], manifest_sha256="635b94eeb2784dfedb3b53fd8f84517c3b4b95d851ddb662d4c1058c72dc81e0", strip_prefix="cBench-v0", benchmark_file_suffix=".bc", site_data_base=site_data_base, deprecated="Please use 'benchmark://cbench-v1'", ) # =============================== # Definition of cBench validators # =============================== # A map from benchmark name to validation callbacks. VALIDATORS: Dict[str, List[ValidationCallback]] = defaultdict(list) # A map from cBench benchmark path to a list of BenchmarkDynamicConfig messages, # one per dataset. DYNAMIC_CONFIGS: Dict[str, List[BenchmarkDynamicConfig]] = defaultdict(list) def validate_sha_output(result: BenchmarkExecutionResult) -> Optional[str]: """SHA benchmark prints 5 random hex strings. Normally these hex strings are 16 characters but occasionally they are less (presumably because of a leading zero being omitted). """ try: if not re.match( r"[0-9a-f]{0,16} [0-9a-f]{0,16} [0-9a-f]{0,16} [0-9a-f]{0,16} [0-9a-f]{0,16}", result.output.rstrip(), ): return "Failed to parse hex output" except UnicodeDecodeError: return "Failed to parse unicode output" def setup_ghostscript_library_files(dataset_id: int) -> Callable[[Path], None]: """Make a pre-execution setup hook for ghostscript.""" def setup(cwd: Path): cbench_data = site_data_path("llvm-v0/cbench-v1-runtime-data/runtime_data") # Copy the input data file into the current directory since ghostscript # doesn't like long input paths. shutil.copyfile( cbench_data / "office_data" / f"{dataset_id}.ps", cwd / "input.ps" ) # Ghostscript doesn't like the library files being symlinks so copy them # into the working directory as regular files. for path in (cbench_data / "ghostscript").iterdir(): if path.name.endswith(".ps"): shutil.copyfile(path, cwd / path.name) return setup validator( benchmark="benchmark://cbench-v1/bitcount", cmd="$BIN 1125000", ) validator( benchmark="benchmark://cbench-v1/bitcount", cmd="$BIN 512", ) # The cBench benchmarks contain 20 runtime datasets. When use all 20 datasets # when validating the correctness of one of these benchmarks, we use all 20 # datasets. 
However, this takes a long time, so for CI jobs (determined by the # presence of the $CI environment variable), we use only 2 datasets for testing. NUM_DATASETS = 2 if os.environ.get("CI", "") == "1" else 20 for i in range(1, NUM_DATASETS + 1): # NOTE(cummins): Disabled due to timeout errors, further investigation # needed. # # validator( # benchmark="benchmark://cbench-v1/adpcm", # cmd=f"$BIN $D/telecom_data/{i}.adpcm", # data=[f"telecom_data/{i}.adpcm"], # ) # # validator( # benchmark="benchmark://cbench-v1/adpcm", # cmd=f"$BIN $D/telecom_data/{i}.pcm", # data=[f"telecom_data/{i}.pcm"], # ) validator( benchmark="benchmark://cbench-v1/blowfish", cmd=f"$BIN d $D/office_data/{i}.benc output.txt 1234567890abcdeffedcba0987654321", data=[f"office_data/{i}.benc"], outs=["output.txt"], ) validator( benchmark="benchmark://cbench-v1/bzip2", cmd=f"$BIN -d -k -f -c $D/bzip2_data/{i}.bz2", data=[f"bzip2_data/{i}.bz2"], ) validator( benchmark="benchmark://cbench-v1/crc32", cmd=f"$BIN $D/telecom_data/{i}.pcm", data=[f"telecom_data/{i}.pcm"], ) validator( benchmark="benchmark://cbench-v1/dijkstra", cmd=f"$BIN $D/network_dijkstra_data/{i}.dat", data=[f"network_dijkstra_data/{i}.dat"], ) validator( benchmark="benchmark://cbench-v1/gsm", cmd=f"$BIN -fps -c $D/telecom_gsm_data/{i}.au", data=[f"telecom_gsm_data/{i}.au"], ) # NOTE(cummins): ispell fails with returncode 1 and no output when run # under safe optimizations. # # validator( # benchmark="benchmark://cbench-v1/ispell", # cmd=f"$BIN -a -d americanmed+ $D/office_data/{i}.txt", # data = [f"office_data/{i}.txt"], # ) validator( benchmark="benchmark://cbench-v1/jpeg-c", cmd=f"$BIN -dct int -progressive -outfile output.jpeg $D/consumer_jpeg_data/{i}.ppm", data=[f"consumer_jpeg_data/{i}.ppm"], outs=["output.jpeg"], # NOTE(cummins): AddressSanitizer disabled because of # global-buffer-overflow in regular build. sanitizers=[LlvmSanitizer.TSAN, LlvmSanitizer.UBSAN], ) validator( benchmark="benchmark://cbench-v1/jpeg-d", cmd=f"$BIN -dct int -outfile output.ppm $D/consumer_jpeg_data/{i}.jpg", data=[f"consumer_jpeg_data/{i}.jpg"], outs=["output.ppm"], ) validator( benchmark="benchmark://cbench-v1/patricia", cmd=f"$BIN $D/network_patricia_data/{i}.udp", data=[f"network_patricia_data/{i}.udp"], env={ # NOTE(cummins): Benchmark leaks when executed with safe optimizations. "ASAN_OPTIONS": "detect_leaks=0", }, ) validator( benchmark="benchmark://cbench-v1/qsort", cmd=f"$BIN $D/automotive_qsort_data/{i}.dat", data=[f"automotive_qsort_data/{i}.dat"], outs=["sorted_output.dat"], linkopts=["-lm"], ) # NOTE(cummins): Rijndael benchmark disabled due to memory errors under # basic optimizations. 
# # validator(benchmark="benchmark://cbench-v1/rijndael", cmd=f"$BIN # $D/office_data/{i}.enc output.dec d # 1234567890abcdeffedcba09876543211234567890abcdeffedcba0987654321", # data=[f"office_data/{i}.enc"], outs=["output.dec"], # ) # # validator(benchmark="benchmark://cbench-v1/rijndael", cmd=f"$BIN # $D/office_data/{i}.txt output.enc e # 1234567890abcdeffedcba09876543211234567890abcdeffedcba0987654321", # data=[f"office_data/{i}.txt"], outs=["output.enc"], # ) validator( benchmark="benchmark://cbench-v1/sha", cmd=f"$BIN $D/office_data/{i}.txt", data=[f"office_data/{i}.txt"], compare_output=False, validate_result=validate_sha_output, ) validator( benchmark="benchmark://cbench-v1/stringsearch", cmd=f"$BIN $D/office_data/{i}.txt $D/office_data/{i}.s.txt output.txt", data=[f"office_data/{i}.txt"], outs=["output.txt"], env={ # NOTE(cummins): Benchmark leaks when executed with safe optimizations. "ASAN_OPTIONS": "detect_leaks=0", }, linkopts=["-lm"], ) # NOTE(cummins): The stringsearch2 benchmark has a very long execution time. # Use only a single input to keep the validation time reasonable. I have # also observed Segmentation fault on gold standard using 4.txt and 6.txt. if i == 1: validator( benchmark="benchmark://cbench-v1/stringsearch2", cmd=f"$BIN $D/office_data/{i}.txt $D/office_data/{i}.s.txt output.txt", data=[f"office_data/{i}.txt"], outs=["output.txt"], env={ # NOTE(cummins): Benchmark leaks when executed with safe optimizations. "ASAN_OPTIONS": "detect_leaks=0", }, # TSAN disabled because of extremely long execution leading to # timeouts. sanitizers=[LlvmSanitizer.ASAN, LlvmSanitizer.MSAN, LlvmSanitizer.UBSAN], ) validator( benchmark="benchmark://cbench-v1/susan", cmd=f"$BIN $D/automotive_susan_data/{i}.pgm output_large.corners.pgm -c", data=[f"automotive_susan_data/{i}.pgm"], outs=["output_large.corners.pgm"], linkopts=["-lm"], ) validator( benchmark="benchmark://cbench-v1/tiff2bw", cmd=f"$BIN $D/consumer_tiff_data/{i}.tif output.tif", data=[f"consumer_tiff_data/{i}.tif"], outs=["output.tif"], linkopts=["-lm"], env={ # NOTE(cummins): Benchmark leaks when executed with safe optimizations. "ASAN_OPTIONS": "detect_leaks=0", }, ) validator( benchmark="benchmark://cbench-v1/tiff2rgba", cmd=f"$BIN $D/consumer_tiff_data/{i}.tif output.tif", data=[f"consumer_tiff_data/{i}.tif"], outs=["output.tif"], linkopts=["-lm"], ) validator( benchmark="benchmark://cbench-v1/tiffdither", cmd=f"$BIN $D/consumer_tiff_data/{i}.bw.tif out.tif", data=[f"consumer_tiff_data/{i}.bw.tif"], outs=["out.tif"], linkopts=["-lm"], ) validator( benchmark="benchmark://cbench-v1/tiffmedian", cmd=f"$BIN $D/consumer_tiff_data/{i}.nocomp.tif output.tif", data=[f"consumer_tiff_data/{i}.nocomp.tif"], outs=["output.tif"], linkopts=["-lm"], ) # NOTE(cummins): On macOS the following benchmarks abort with an illegal # hardware instruction error. # if sys.platform != "darwin": # validator( # benchmark="benchmark://cbench-v1/lame", # cmd=f"$BIN $D/consumer_data/{i}.wav output.mp3", # data=[f"consumer_data/{i}.wav"], # outs=["output.mp3"], # compare_output=False, # linkopts=["-lm"], # ) # NOTE(cummins): Segfault on gold standard. # # validator( # benchmark="benchmark://cbench-v1/ghostscript", # cmd="$BIN -sDEVICE=ppm -dNOPAUSE -dQUIET -sOutputFile=output.ppm -- input.ps", # data=[f"office_data/{i}.ps"], # outs=["output.ppm"], # linkopts=["-lm", "-lz"], # pre_execution_callback=setup_ghostscript_library_files(i), # )
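The validator() helper above registers differential-testing callbacks keyed by benchmark URI; $BIN is replaced with the compiled binary and $D with the runtime-data directory, and a matching BenchmarkDynamicConfig is recorded for each dataset. For reference, each iteration of the dataset loop expands to a declaration like the following (shown here for qsort with dataset index 20):

validator(
    benchmark="benchmark://cbench-v1/qsort",
    cmd="$BIN $D/automotive_qsort_data/20.dat",
    data=["automotive_qsort_data/20.dat"],
    outs=["sorted_output.dat"],
    linkopts=["-lm"],
)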
CompilerGym-development
compiler_gym/envs/llvm/datasets/cbench.py
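From the environment side, the callbacks registered by cbench.py are what power benchmark validation: the program is compiled twice (a gold standard with default -O2 optimizations and the current environment state), both are executed on the cBench runtime data, and the outputs are diffed. A hedged sketch, assuming the env.validate() entry point of the CompilerGym validation API is available:

import compiler_gym

with compiler_gym.make("llvm-v0") as env:
    env.reset(benchmark="cbench-v1/crc32")
    env.step(env.action_space.sample())
    result = env.validate()    # runs the registered validation callbacks
    if not result.okay():
        print(result)          # e.g. a "Wrong output" error with expected/actual data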
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import subprocess from concurrent.futures import as_completed from pathlib import Path from typing import Iterable from compiler_gym.datasets import Benchmark, TarDatasetWithManifest from compiler_gym.datasets.benchmark import BenchmarkWithSource from compiler_gym.datasets.uri import BenchmarkUri from compiler_gym.envs.llvm.llvm_benchmark import ClangInvocation from compiler_gym.util import thread_pool from compiler_gym.util.filesystem import atomic_file_write URIS = [ "benchmark://chstone-v0/adpcm", "benchmark://chstone-v0/aes", "benchmark://chstone-v0/blowfish", "benchmark://chstone-v0/dfadd", "benchmark://chstone-v0/dfdiv", "benchmark://chstone-v0/dfmul", "benchmark://chstone-v0/dfsin", "benchmark://chstone-v0/gsm", "benchmark://chstone-v0/jpeg", "benchmark://chstone-v0/mips", "benchmark://chstone-v0/motion", "benchmark://chstone-v0/sha", ] class CHStoneDataset(TarDatasetWithManifest): """A dataset of C programs curated from GitHub source code. The dataset is from: Hara, Yuko, Hiroyuki Tomiyama, Shinya Honda, Hiroaki Takada, and Katsuya Ishii. "Chstone: A benchmark program suite for practical c-based high-level synthesis." In 2008 IEEE International Symposium on Circuits and Systems, pp. 1192-1195. IEEE, 2008. And is available at: http://www.ertl.jp/chstone/ """ def __init__( self, site_data_base: Path, sort_order: int = 0, ): super().__init__( name="benchmark://chstone-v0", description="Benchmarks for C-based High-Level Synthesis", references={ "Paper": "http://www.yxi.com/applications/iscas2008-300_1027.pdf", "Homepage": "http://www.ertl.jp/chstone/", }, license="Mixture of open source and public domain licenses", site_data_base=site_data_base, tar_urls=[ "https://github.com/ChrisCummins/patmos_HLS/archive/e62d878ceb91e5a18007ca2e0a9602ee44ff7d59.tar.gz" ], tar_sha256="f7acab9d3c3dc7b971e62c8454bc909d84bddb6d0a96378e41beb94231739acb", strip_prefix="patmos_HLS-e62d878ceb91e5a18007ca2e0a9602ee44ff7d59/benchmarks/CHStone", tar_compression="gz", benchmark_file_suffix=".bc", sort_order=sort_order, # We provide our own manifest. manifest_urls=[], manifest_sha256="", ) def benchmark_uris(self) -> Iterable[str]: yield from URIS def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark: self.install() benchmark_name = uri.path[1:] if not benchmark_name: raise LookupError(f"No benchmark specified: {uri}") bitcode_abspath = self.dataset_root / f"{benchmark_name}.bc" # Most of the source files are named after the parent directory, but not # all. c_file_name = { "blowfish": "bf.c", "motion": "mpeg2.c", "sha": "sha_driver.c", "jpeg": "main.c", }.get(benchmark_name, f"{benchmark_name}.c") c_file_abspath = self.dataset_root / benchmark_name / c_file_name # If the file does not exist, compile it on-demand. if not bitcode_abspath.is_file(): if not c_file_abspath.is_file(): raise LookupError( f"Benchmark not found: {uri} (file not found: {c_file_abspath})" ) with atomic_file_write(bitcode_abspath) as tmp_path: compile_cmd = ClangInvocation.from_c_file( c_file_abspath, copt=[ "-ferror-limit=1", # Stop on first error. "-w", # No warnings. 
], ).command(outpath=tmp_path) subprocess.check_call(compile_cmd, timeout=300) return BenchmarkWithSource.create( uri, bitcode_abspath, "function.c", c_file_abspath ) @property def size(self) -> int: return len(URIS) def compile_all(self): n = self.size executor = thread_pool.get_thread_pool_executor() # Since the dataset is lazily compiled, simply iterating over the full # set of URIs will compile everything. Do this in parallel. futures = ( executor.submit(self.benchmark, uri) for uri in self.benchmark_uris() ) for i, future in enumerate(as_completed(futures), start=1): future.result() print( f"\r\033[KCompiled {i} of {n} programs ({i/n:.1%} complete)", flush=True, end="", )
CompilerGym-development
compiler_gym/envs/llvm/datasets/chstone.py
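The chstone-v0 dataset above compiles each C source to bitcode the first time it is requested and caches the result. A small example that touches every benchmark in the dataset (the first pass pays the one-off clang compilation cost):

import compiler_gym

with compiler_gym.make("llvm-v0") as env:
    dataset = env.datasets["benchmark://chstone-v0"]
    print(dataset.size)        # 12 benchmarks
    for uri in dataset.benchmark_uris():
        env.reset(benchmark=uri)
        print(uri, env.observation["IrInstructionCount"])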
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import subprocess from concurrent.futures import as_completed from pathlib import Path from compiler_gym.datasets import Benchmark, TarDataset, TarDatasetWithManifest from compiler_gym.datasets.benchmark import BenchmarkWithSource from compiler_gym.datasets.uri import BenchmarkUri from compiler_gym.envs.llvm.llvm_benchmark import ( ClangInvocation, get_system_library_flags, ) from compiler_gym.service.proto import BenchmarkDynamicConfig, Command from compiler_gym.util import thread_pool from compiler_gym.util.filesystem import atomic_file_write class JotaiBenchDataset(TarDatasetWithManifest): """A dataset of C programs curated from GitHub source code. The dataset is from: da Silva, Anderson Faustino, Bruno Conde Kind, José Wesley de Souza Magalhaes, Jerônimo Nunes Rocha, Breno Campos Ferreira Guimaraes, and Fernando Magno Quinão Pereira. "ANGHABENCH: A Suite with One Million Compilable C Benchmarks for Code-Size Reduction." In 2021 IEEE/ACM International Symposium on Code Generation and Optimization (CGO), pp. 378-390. IEEE, 2021. And is available at: http://cuda.dcc.ufmg.br/Jotai/src/ Installation ------------ The JotaiBench dataset consists of C functions that are compiled to LLVM-IR on-demand and cached. The first time each benchmark is used there is an overhead of compiling it from C to bitcode. This is a one-off cost. """ def __init__( self, site_data_base: Path, ): super().__init__( name="benchmark://jotaibench-v0", description="Compile-only C/C++ functions extracted from GitHub", references={ "Paper": "https://homepages.dcc.ufmg.br/~fernando/publications/papers/FaustinoCGO21.pdf", "Homepage": "http://cuda.dcc.ufmg.br/angha/", }, license="GNU General Public License v3.0 (GPLv3)", site_data_base=site_data_base, manifest_urls=[ "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-jotaibench-v0.bz2" ], manifest_sha256="ac4ee456e52073964d472d3e3969058b2f3052f8a4b402719013a3c603eb4b62", tar_urls=[ "https://github.com/ChrisCummins/jotai-benchmarks/raw/ca26ccd27afecf38919c1e101c64e3cc17e39631/benchmarks/jotaibench.bz2" ], tar_sha256="b5a51af3d4e2f77a66001635ec64ed321e0ece19873c4a888040859af7556401", strip_prefix="jotaibench/jotaibench-v0", tar_compression="bz2", benchmark_file_suffix=".c", sort_order=0, ) def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark: self.install() benchmark_name = uri.path[1:] if not benchmark_name: raise LookupError(f"No benchmark specified: {uri}") # The absolute path of the file, without an extension. path_stem = self.dataset_root / benchmark_name bitcode_abspath = Path(f"{path_stem}.bc") c_file_abspath = Path(f"{path_stem}.c") # If the file does not exist, compile it on-demand. if not bitcode_abspath.is_file(): if not c_file_abspath.is_file(): raise LookupError( f"Benchmark not found: {uri} (file not found: {c_file_abspath})" ) with atomic_file_write(bitcode_abspath) as tmp_path: compile_cmd = ClangInvocation.from_c_file( c_file_abspath, copt=[ "-ferror-limit=1", # Stop on first error. "-w", # No warnings. 
], ).command(outpath=tmp_path) subprocess.check_call(compile_cmd, timeout=300) return BenchmarkWithSource.create( uri, bitcode_abspath, "function.c", c_file_abspath ) def compile_all(self): n = self.size executor = thread_pool.get_thread_pool_executor() # Since the dataset is lazily compiled, simply iterating over the full # set of URIs will compile everything. Do this in parallel. futures = ( executor.submit(self.benchmark, uri) for uri in self.benchmark_uris() ) for i, future in enumerate(as_completed(futures), start=1): future.result() print( f"\r\033[KCompiled {i} of {n} programs ({i/n:.1%} complete)", flush=True, end="", ) class JotaiBenchRunnableDataset(TarDataset): def __init__( self, site_data_base: Path, ): super().__init__( name="benchmark://jotai-runnable-v0", description="Runnable C/C++ functions extracted from GitHub", references={ "Paper": "https://homepages.dcc.ufmg.br/~fernando/publications/papers/FaustinoCGO21.pdf", "Homepage": "http://cuda.dcc.ufmg.br/angha/", }, license="GNU General Public License v3.0 (GPLv3)", site_data_base=site_data_base, tar_urls=[ "https://github.com/lac-dcc/jotai-benchmarks/blob/main/benchmarks/jotaibench.bz2?raw=true" ], tar_sha256="b5a51af3d4e2f77a66001635ec64ed321e0ece19873c4a888040859af7556401", strip_prefix="jotaibench-v0", tar_compression="bz2", benchmark_file_suffix=".c", ) def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark: self.install() benchmark_name = uri.path[1:] if not benchmark_name: raise LookupError(f"No benchmark specified: {uri}") # The absolute path of the file, without an extension. path_stem = self.dataset_root / benchmark_name bitcode_abspath = Path(f"{path_stem}.bc") c_file_abspath = Path(f"{path_stem}.c") # If the file does not exist, compile it to a bitcode file on-demand. if not bitcode_abspath.is_file(): if not c_file_abspath.is_file(): raise LookupError( f"Benchmark not found: {uri} (file not found: {c_file_abspath})" ) with atomic_file_write(bitcode_abspath) as tmp_path: compile_cmd = ClangInvocation.from_c_file( c_file_abspath, copt=[ "-ferror-limit=1", # Stop on first error. "-w", # No warnings. ], ).command(outpath=tmp_path) subprocess.check_call(compile_cmd, timeout=300) benchmark = BenchmarkWithSource.create( uri, bitcode_abspath, "function.c", c_file_abspath ) # This is what makes a benchmark "runnable". benchmark.proto.dynamic_config.MergeFrom( BenchmarkDynamicConfig( build_cmd=Command( argument=["$CC", "$IN"] + get_system_library_flags(), timeout_seconds=30, outfile=["a.out"], ), run_cmd=Command( argument=["./a.out 0"], timeout_seconds=30, infile=[], outfile=[], ), ) ) return benchmark def compile_all(self): n = self.size executor = thread_pool.get_thread_pool_executor() # Since the dataset is lazily compiled, simply iterating over the full # set of URIs will compile everything. Do this in parallel. futures = ( executor.submit(self.benchmark, uri) for uri in self.benchmark_uris() ) for i, future in enumerate(as_completed(futures), start=1): future.result() print( f"\r\033[KCompiled {i} of {n} programs ({i/n:.1%} complete)", flush=True, end="", )
CompilerGym-development
compiler_gym/envs/llvm/datasets/jotaibench.py
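# Editor's sketch of the "runnable benchmark" pattern used by JotaiBenchRunnableDataset
# above: a BenchmarkDynamicConfig proto tells the backend service how to build and run
# the program. The URI and bitcode contents below are placeholders for illustration.
from compiler_gym.datasets import Benchmark
from compiler_gym.service.proto import BenchmarkDynamicConfig, Command

benchmark = Benchmark.from_file_contents(
    "benchmark://example-v0/foo", b"..."  # Placeholder bitcode contents.
)
benchmark.proto.dynamic_config.MergeFrom(
    BenchmarkDynamicConfig(
        # $CC and $IN are substituted by the service with the compiler and input file.
        build_cmd=Command(argument=["$CC", "$IN"], outfile=["a.out"], timeout_seconds=30),
        run_cmd=Command(argument=["./a.out"], timeout_seconds=30),
    )
)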
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import subprocess import sys from concurrent.futures import as_completed from pathlib import Path from compiler_gym.datasets import Benchmark, TarDatasetWithManifest from compiler_gym.datasets.benchmark import BenchmarkWithSource from compiler_gym.datasets.uri import BenchmarkUri from compiler_gym.envs.llvm.llvm_benchmark import ClangInvocation from compiler_gym.errors import BenchmarkInitError from compiler_gym.util import thread_pool from compiler_gym.util.commands import Popen from compiler_gym.util.download import download from compiler_gym.util.filesystem import atomic_file_write from compiler_gym.util.truncate import truncate logger = logging.getLogger(__name__) class POJ104Dataset(TarDatasetWithManifest): """The POJ-104 dataset contains 52000 C++ programs implementing 104 different algorithms with 500 examples of each. The dataset is from: Lili Mou, Ge Li, Lu Zhang, Tao Wang, Zhi Jin. "Convolutional neural networks over tree structures for programming language processing." To appear in Proceedings of 30th AAAI Conference on Artificial Intelligence, 2016. And is available at: https://sites.google.com/site/treebasedcnn/ """ def __init__(self, site_data_base: Path, sort_order: int = 0): manifest_url, manifest_sha256 = { "darwin": ( "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-poj104-v1-macos-manifest.bz2", "74db443f225478933dd0adf3f821fd4e615089eeaa90596c19d9d1af7006a801", ), "linux": ( "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-poj104-v1-linux-manifest.bz2", "ee6253ee826e171816105e76fa78c0d3cbd319ef66e10da4bcf9cf8a78e12ab9", ), }[sys.platform] super().__init__( name="benchmark://poj104-v1", tar_urls=[ "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-poj104-v1.tar.gz", "https://drive.google.com/u/0/uc?id=0B2i-vWnOu7MxVlJwQXN6eVNONUU&export=download", ], tar_sha256="c0b8ef3ee9c9159c882dc9337cb46da0e612a28e24852a83f8a1cd68c838f390", tar_compression="gz", manifest_urls=[manifest_url], manifest_sha256=manifest_sha256, references={ "Paper": "https://ojs.aaai.org/index.php/AAAI/article/download/10139/9998", "Homepage": "https://sites.google.com/site/treebasedcnn/", }, license="BSD 3-Clause", strip_prefix="ProgramData", description="Solutions to programming programs", benchmark_file_suffix=".txt", site_data_base=site_data_base, sort_order=sort_order, ) def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark: self.install() # The absolute path of the file, without an extension. path_stem = os.path.normpath(f"{self.dataset_root}/{uri.path}") # If the file does not exist, compile it on-demand. bitcode_path = Path(f"{path_stem}.bc") cc_file_path = Path(f"{path_stem}.txt") if not bitcode_path.is_file(): if not cc_file_path.is_file(): raise LookupError( f"Benchmark not found: {uri} (file not found: {cc_file_path})" ) # Load the C++ source into memory and pre-process it. with open(cc_file_path) as f: src = self.preprocess_poj104_source(f.read()) # Compile the C++ source into a bitcode file. with atomic_file_write(bitcode_path) as tmp_bitcode_path: compile_cmd = ClangInvocation.from_c_file( "-", copt=[ "-xc++", "-ferror-limit=1", # Stop on first error. "-w", # No warnings. # Some of the programs use the gets() function that was # deprecated in C++11 and removed in C++14. 
"-std=c++11", ], ).command(outpath=tmp_bitcode_path) logger.debug("Exec %s", compile_cmd) try: with Popen( compile_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) as clang: _, stderr = clang.communicate( input=src.encode("utf-8"), timeout=300 ) except subprocess.TimeoutExpired: raise BenchmarkInitError(f"Benchmark compilation timed out: {uri}") if clang.returncode: compile_cmd = " ".join(compile_cmd) error = truncate(stderr.decode("utf-8"), max_lines=20, max_line_len=100) if tmp_bitcode_path.is_file(): tmp_bitcode_path.unlink() raise BenchmarkInitError( f"Compilation job failed!\n" f"Command: {compile_cmd}\n" f"Error: {error}" ) if not bitcode_path.is_file(): raise BenchmarkInitError( f"Compilation job failed to produce output file!\nCommand: {compile_cmd}" ) return BenchmarkWithSource.create(uri, bitcode_path, "source.cc", cc_file_path) @staticmethod def preprocess_poj104_source(src: str) -> str: """Pre-process a POJ-104 C++ source file for compilation.""" # Clean up declaration of main function. Many are missing a return type # declaration, or use an incorrect void return type. src = src.replace("void main", "int main") src = src.replace("\nmain", "int main") if src.startswith("main"): src = f"int {src}" # Pull in the standard library. if sys.platform == "linux": header = "#include <bits/stdc++.h>\n" "using namespace std;\n" else: # Download a bits/stdc++ implementation for macOS. header = download( "https://raw.githubusercontent.com/tekfyl/bits-stdc-.h-for-mac/e1193f4470514d82ea19c3cc1357116fadaa2a4e/stdc%2B%2B.h", sha256="b4d9b031d56d89a2b58b5ed80fa9943aa92420d6aed0835747c9a5584469afeb", ).decode("utf-8") # These defines provide values for commonly undefined symbols. Defining # these macros increases the number of POJ-104 programs that compile # from 49,302 to 49,821 (+519) on linux. defines = "#define LEN 128\n" "#define MAX_LENGTH 1024\n" "#define MAX 1024\n" return header + defines + src def compile_all(self): n = self.size executor = thread_pool.get_thread_pool_executor() # Since the dataset is lazily compiled, simply iterating over the full # set of URIs will compile everything. Do this in parallel. futures = ( executor.submit(self.benchmark, uri) for uri in self.benchmark_uris() ) for i, future in enumerate(as_completed(futures), start=1): future.result() print( f"\r\033[KCompiled {i} of {n} programs ({i/n:.2%} complete)", flush=True, end="", ) class POJ104LegacyDataset(TarDatasetWithManifest): def __init__(self, site_data_base: Path, sort_order: int = 0): super().__init__( name="benchmark://poj104-v0", tar_urls="https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-poj104-v0.tar.bz2", tar_sha256="6254d629887f6b51efc1177788b0ce37339d5f3456fb8784415ed3b8c25cce27", manifest_urls=[ "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-poj104-v0-manifest.bz2" ], manifest_sha256="ac3eaaad7d2878d871ed2b5c72a3f39c058ea6694989af5c86cd162414db750b", references={ "Paper": "https://ojs.aaai.org/index.php/AAAI/article/download/10139/9998", "Homepage": "https://sites.google.com/site/treebasedcnn/", }, license="BSD 3-Clause", strip_prefix="poj104-v0", description="Solutions to programming programs", benchmark_file_suffix=".bc", site_data_base=site_data_base, sort_order=sort_order, deprecated="Please update to benchmark://poj104-v1.", )
CompilerGym-development
compiler_gym/envs/llvm/datasets/poj104.py
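# Editor's illustration of the source pre-processing performed by
# POJ104Dataset.preprocess_poj104_source() above. The snippet only reproduces the
# main()-declaration fix-up on a toy input; it is a sketch, not the dataset code itself.
src = "void main()\n{\n    int a;\n    cin >> a;\n    cout << a;\n}\n"
src = src.replace("void main", "int main")  # Incorrect void return type.
src = src.replace("\nmain", "int main")     # Missing return type on a new line.
if src.startswith("main"):                  # Missing return type at start of file.
    src = f"int {src}"
print(src.splitlines()[0])  # -> "int main()"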
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import subprocess from pathlib import Path from typing import Iterable, List, Optional import numpy as np from compiler_gym.datasets import Benchmark, BenchmarkSource, Dataset from compiler_gym.datasets.benchmark import BenchmarkWithSource from compiler_gym.datasets.uri import BenchmarkUri from compiler_gym.envs.llvm import llvm_benchmark from compiler_gym.envs.llvm.llvm_benchmark import ClangInvocation from compiler_gym.errors import BenchmarkInitError from compiler_gym.service.proto import BenchmarkDynamicConfig, Command from compiler_gym.util.commands import Popen, communicate from compiler_gym.util.decorators import memoized_property from compiler_gym.util.runfiles_path import runfiles_path from compiler_gym.util.shell_format import plural from compiler_gym.util.truncate import truncate logger = logging.getLogger(__name__) # The maximum value for the --seed argument to csmith. UINT_MAX = (2**32) - 1 _CSMITH_BIN = runfiles_path("compiler_gym/third_party/csmith/csmith/bin/csmith") _CSMITH_INCLUDES = runfiles_path( "compiler_gym/third_party/csmith/csmith/include/csmith-2.3.0" ) class CsmithBenchmark(BenchmarkWithSource): """A CSmith benchmark.""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._src = None self.proto.dynamic_config.MergeFrom( BenchmarkDynamicConfig( build_cmd=Command( argument=["$CC", "$IN"] + llvm_benchmark.get_system_library_flags(), outfile=["a.out"], timeout_seconds=60, ), run_cmd=Command( argument=["./a.out"], timeout_seconds=300, ), ) ) @classmethod def create(cls, uri: str, bitcode: bytes, src: bytes) -> Benchmark: """Create a benchmark from paths.""" benchmark = cls.from_file_contents(uri, bitcode) benchmark._src = src # pylint: disable=protected-access return benchmark @memoized_property def sources(self) -> Iterable[BenchmarkSource]: return [ BenchmarkSource(filename="source.c", contents=self._src), ] @property def source(self) -> str: """Return the single source file contents as a string.""" return self._src.decode("utf-8") class CsmithDataset(Dataset): """A dataset which uses Csmith to generate programs. Csmith is a tool that can generate random conformant C99 programs. It is described in the publication: Yang, Xuejun, Yang Chen, Eric Eide, and John Regehr. "Finding and understanding bugs in C compilers." In Proceedings of the 32nd ACM SIGPLAN conference on Programming Language Design and Implementation (PLDI), pp. 283-294. 2011. For up-to-date information about Csmith, see: https://embed.cs.utah.edu/csmith/ Note that Csmith is a tool that is used to find errors in compilers. As such, there is a higher likelihood that the benchmark cannot be used for an environment and that :meth:`env.reset() <compiler_gym.envs.CompilerEnv.reset>` will raise :class:`BenchmarkInitError <compiler_gym.datasets.BenchmarkInitError>`. """ def __init__( self, site_data_base: Path, sort_order: int = 0, csmith_bin: Optional[Path] = None, csmith_includes: Optional[Path] = None, ): """Constructor. :param site_data_base: The base path of a directory that will be used to store installed files. :param sort_order: An optional numeric value that should be used to order this dataset relative to others. Lowest value sorts first. :param csmith_bin: The path of the Csmith binary to use. If not provided, the version of Csmith shipped with CompilerGym is used. 
:param csmith_includes: The path of the Csmith includes directory. If not provided, the includes of the Csmith shipped with CompilerGym is used. """ super().__init__( name="generator://csmith-v0", description="Random conformant C99 programs", references={ "Paper": "http://web.cse.ohio-state.edu/~rountev.1/5343/pdf/pldi11.pdf", "Homepage": "https://embed.cs.utah.edu/csmith/", }, license="BSD", site_data_base=site_data_base, sort_order=sort_order, benchmark_class=CsmithBenchmark, ) self.csmith_bin_path = csmith_bin or _CSMITH_BIN self.csmith_includes_path = csmith_includes or _CSMITH_INCLUDES # The command that is used to compile an LLVM-IR bitcode file from a # Csmith input. Reads from stdin, writes to stdout. self.clang_compile_command: List[str] = ClangInvocation.from_c_file( "-", # Read from stdin. copt=[ "-xc", # The C programming language. "-ferror-limit=1", # Stop on first error. "-w", # No warnings. f"-I{self.csmith_includes_path}", # Include the Csmith headers. ], ).command( outpath="-" # Write to stdout. ) @property def size(self) -> int: # Actually 2^32 - 1, but practically infinite for all intents and # purposes. return 0 def benchmark_uris(self) -> Iterable[str]: return (f"{self.name}/{i}" for i in range(UINT_MAX)) def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> CsmithBenchmark: seed = int(uri.path[1:]) return self.benchmark_from_seed(seed) def _random_benchmark(self, random_state: np.random.Generator) -> Benchmark: seed = random_state.integers(UINT_MAX) return self.benchmark_from_seed(seed) def benchmark_from_seed( self, seed: int, max_retries: int = 3, retry_count: int = 0 ) -> CsmithBenchmark: """Get a benchmark from a uint32 seed. :param seed: A number in the range 0 <= n < 2^32. :return: A benchmark instance. :raises OSError: If Csmith fails. :raises BenchmarkInitError: If the C program generated by Csmith cannot be lowered to LLVM-IR. """ if retry_count >= max_retries: raise OSError( f"Csmith failed after {retry_count} {plural(retry_count, 'attempt', 'attempts')} " f"with seed {seed}" ) self.install() # Run csmith with the given seed and pipe the output to clang to # assemble a bitcode. logger.debug("Exec csmith --seed %d", seed) try: with Popen( [str(self.csmith_bin_path), "--seed", str(seed)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) as csmith: # Generate the C source. src, stderr = communicate(csmith, timeout=300) if csmith.returncode: try: stderr = "\n".join( truncate( stderr.decode("utf-8"), max_line_len=200, max_lines=20 ) ) logger.warning("Csmith failed with seed %d: %s", seed, stderr) except UnicodeDecodeError: # Failed to interpret the stderr output, generate a generic # error message. logger.warning("Csmith failed with seed %d", seed) return self.benchmark_from_seed( seed, max_retries=max_retries, retry_count=retry_count + 1 ) # Compile to IR. with Popen( self.clang_compile_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, ) as clang: stdout, _ = communicate(clang, input=src, timeout=300) if clang.returncode: compile_cmd = " ".join(self.clang_compile_command) raise BenchmarkInitError( f"Compilation job failed!\n" f"Csmith seed: {seed}\n" f"Command: {compile_cmd}\n" ) except subprocess.TimeoutExpired: raise BenchmarkInitError( f"Benchmark generation using seed {seed} timed out" ) return self.benchmark_class.create(f"{self.name}/{seed}", stdout, src)
CompilerGym-development
compiler_gym/envs/llvm/datasets/csmith.py
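# Editor's usage sketch for the Csmith generator dataset above. It assumes the
# top-level compiler_gym.make() API; the seed (2020) is arbitrary. As the class
# docstring notes, generated programs occasionally cannot be used, so
# BenchmarkInitError should be handled.
import compiler_gym
from compiler_gym.errors import BenchmarkInitError

with compiler_gym.make("llvm-v0") as env:
    try:
        # The path component of the URI is the uint32 seed passed to csmith.
        env.reset(benchmark="generator://csmith-v0/2020")
    except BenchmarkInitError as e:
        print(f"Benchmark could not be used: {e}")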
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import sys from pathlib import Path from typing import Iterable, Optional from compiler_gym.datasets import Dataset, TarDatasetWithManifest from compiler_gym.envs.llvm.datasets.anghabench import AnghaBenchDataset from compiler_gym.envs.llvm.datasets.cbench import ( CBenchDataset, CBenchLegacyDataset, CBenchLegacyDataset2, ) from compiler_gym.envs.llvm.datasets.chstone import CHStoneDataset from compiler_gym.envs.llvm.datasets.clgen import CLgenDataset from compiler_gym.envs.llvm.datasets.csmith import CsmithBenchmark, CsmithDataset from compiler_gym.envs.llvm.datasets.jotaibench import JotaiBenchDataset from compiler_gym.envs.llvm.datasets.llvm_stress import LlvmStressDataset from compiler_gym.envs.llvm.datasets.poj104 import POJ104Dataset, POJ104LegacyDataset from compiler_gym.util.runfiles_path import site_data_path class BlasDataset(TarDatasetWithManifest): def __init__(self, site_data_base: Path, sort_order: int = 0): super().__init__( name="benchmark://blas-v0", tar_urls=[ "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-blas-v0.tar.bz2" ], tar_sha256="e724a8114709f8480adeb9873d48e426e8d9444b00cddce48e342b9f0f2b096d", manifest_urls=[ "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-blas-v0-manifest.bz2" ], manifest_sha256="6946437dcb0da5fad3ed8a7fd83eb4294964198391d5537b1310e22d7ceebff4", references={ "Paper": "https://strum355.netsoc.co/books/PDF/Basic%20Linear%20Algebra%20Subprograms%20for%20Fortran%20Usage%20-%20BLAS%20(1979).pdf", "Homepage": "http://www.netlib.org/blas/", }, license="BSD 3-Clause", strip_prefix="blas-v0", description="Basic linear algebra kernels", benchmark_file_suffix=".bc", site_data_base=site_data_base, sort_order=sort_order, ) class GitHubDataset(TarDatasetWithManifest): def __init__(self, site_data_base: Path, sort_order: int = 0): manifest_url, manifest_sha256 = { "darwin": ( "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-github-v0-macos-manifest.bz2", "10d933a7d608248be286d756b27813794789f7b87d8561c241d0897fb3238503", ), "linux": ( "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-github-v0-linux-manifest.bz2", "aede9ca78657b4694ada9a4592d93f0bbeb3b3bd0fff3b537209850228480d3b", ), }[sys.platform] super().__init__( name="benchmark://github-v0", tar_urls=[ "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-github-v0.tar.bz2" ], tar_sha256="880269dd7a5c2508ea222a2e54c318c38c8090eb105c0a87c595e9dd31720764", manifest_urls=[manifest_url], manifest_sha256=manifest_sha256, license="CC BY 4.0", references={ "Paper": "https://arxiv.org/pdf/2012.01470.pdf", }, strip_prefix="github-v0", description="Compile-only C/C++ objects from GitHub", benchmark_file_suffix=".bc", site_data_base=site_data_base, sort_order=sort_order, ) class LinuxDataset(TarDatasetWithManifest): def __init__(self, site_data_base: Path, sort_order: int = 0): manifest_url, manifest_sha256 = { "darwin": ( "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-linux-v0-macos-manifest.bz2", "dfc87b94c7a43e899e76507398a5af22178aebaebcb5d7e24e82088aeecb0690", ), "linux": ( "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-linux-v0-linux-manifest.bz2", "32ceb8576f683798010816ac605ee496f386ddbbe64be9e0796015d247a73f92", ), }[sys.platform] super().__init__( name="benchmark://linux-v0", tar_urls=[ 
"https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-linux-v0.tar.bz2" ], tar_sha256="a1ae5c376af30ab042c9e54dc432f89ce75f9ebaee953bc19c08aff070f12566", manifest_urls=[manifest_url], manifest_sha256=manifest_sha256, references={"Homepage": "https://www.linux.org/"}, license="GPL-2.0", strip_prefix="linux-v0", description="Compile-only object files from C Linux kernel", benchmark_file_suffix=".bc", site_data_base=site_data_base, sort_order=sort_order, ) class MibenchDataset(TarDatasetWithManifest): def __init__(self, site_data_base: Path, sort_order: int = 0): super().__init__( name="benchmark://mibench-v1", tar_urls=[ "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-mibench-v1.tar.bz2" ], tar_sha256="795b80d3198bc96e394823a4cb294d256845beffccce52fea0e3446395212bb5", manifest_urls=[ "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-mibench-v0-manifest.bz2" ], manifest_sha256="8ed985d685b48f444a3312cd84ccc5debda4a839850e442a3cdc93910ba0dc5f", references={ "Paper": "http://vhosts.eecs.umich.edu/mibench/Publications/MiBench.pdf" }, license="BSD 3-Clause", strip_prefix="mibench-v1", description="C benchmarks", benchmark_file_suffix=".bc", site_data_base=site_data_base, sort_order=sort_order, ) class MibenchV0Dataset(TarDatasetWithManifest): def __init__(self, site_data_base: Path, sort_order: int = 0): super().__init__( name="benchmark://mibench-v0", tar_urls=[ "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-mibench-v0.tar.bz2" ], tar_sha256="128c090c40b955b99fdf766da167a5f642018fb35c16a1d082f63be2e977eb13", manifest_urls=[ "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-mibench-v0-manifest.bz2" ], manifest_sha256="8ed985d685b48f444a3312cd84ccc5debda4a839850e442a3cdc93910ba0dc5f", references={ "Paper": "http://vhosts.eecs.umich.edu/mibench/Publications/MiBench.pdf" }, license="BSD 3-Clause", strip_prefix="mibench-v0", description="C benchmarks", benchmark_file_suffix=".bc", site_data_base=site_data_base, sort_order=sort_order, deprecated="Please use mibench-v1", ) class NPBDataset(TarDatasetWithManifest): def __init__(self, site_data_base: Path, sort_order: int = 0): super().__init__( name="benchmark://npb-v0", tar_urls=[ "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-npb-v0.tar.bz2" ], tar_sha256="793ac2e7a4f4ed83709e8a270371e65b724da09eaa0095c52e7f4209f63bb1f2", manifest_urls=[ "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-npb-v0-manifest.bz2" ], manifest_sha256="89eccb7f1b0b9e1f82b9b900b9f686ff5b189a2a67a4f8969a15901cd315dba2", references={ "Paper": "http://optout.csc.ncsu.edu/~mueller/codeopt/codeopt05/projects/www4.ncsu.edu/~pgauria/csc791a/papers/NAS-95-020.pdf" }, license="NASA Open Source Agreement v1.3", strip_prefix="npb-v0", description="NASA Parallel Benchmarks", benchmark_file_suffix=".bc", site_data_base=site_data_base, sort_order=sort_order, ) class OpenCVDataset(TarDatasetWithManifest): def __init__(self, site_data_base: Path, sort_order: int = 0): super().__init__( name="benchmark://opencv-v0", tar_urls=[ "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-opencv-v0.tar.bz2" ], tar_sha256="003df853bd58df93572862ca2f934c7b129db2a3573bcae69a2e59431037205c", manifest_urls=[ "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-opencv-v0-manifest.bz2" ], manifest_sha256="8de96f722fab18f3a2a74db74b4038c7947fe8b3da867c9260206fdf5338cd81", references={ "Paper": 
"https://mipro-proceedings.com/sites/mipro-proceedings.com/files/upload/sp/sp_008.pdf", "Homepage": "https://opencv.org/", }, license="Apache 2.0", strip_prefix="opencv-v0", description="Compile-only object files from C++ OpenCV library", benchmark_file_suffix=".bc", site_data_base=site_data_base, sort_order=sort_order, ) class TensorFlowDataset(TarDatasetWithManifest): def __init__(self, site_data_base: Path, sort_order: int = 0): super().__init__( name="benchmark://tensorflow-v0", tar_urls=[ "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-tensorflow-v0.tar.bz2" ], tar_sha256="f77dd1988c772e8359e1303cc9aba0d73d5eb27e0c98415ac3348076ab94efd1", manifest_urls=[ "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-tensorflow-v0-manifest.bz2" ], manifest_sha256="cffc45cd10250d483cb093dec913c8a7da64026686284cccf404623bd1da6da8", references={ "Paper": "https://www.usenix.org/system/files/conference/osdi16/osdi16-abadi.pdf", "Homepage": "https://www.tensorflow.org/", }, license="Apache 2.0", strip_prefix="tensorflow-v0", description="Compile-only object files from C++ TensorFlow library", benchmark_file_suffix=".bc", site_data_base=site_data_base, sort_order=sort_order, ) def get_llvm_datasets(site_data_base: Optional[Path] = None) -> Iterable[Dataset]: """Instantiate the builtin LLVM datasets. :param site_data_base: The root of the site data path. :return: An iterable sequence of :class:`Dataset <compiler_gym.datasets.Dataset>` instances. """ site_data_base = site_data_base or site_data_path("llvm-v0") yield AnghaBenchDataset(site_data_base=site_data_base, sort_order=0) # Add legacy version of Anghabench using an old manifest. anghabench_v0_manifest_url, anghabench_v0_manifest_sha256 = { "darwin": ( "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-anghabench-v0-macos-manifest.bz2", "39464256405aacefdb7550a7f990c9c578264c132804eec3daac091fa3c21bd1", ), "linux": ( "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-anghabench-v0-linux-manifest.bz2", "a038d25d39ee9472662a9704dfff19c9e3512ff6a70f1067af85c5cb3784b477", ), }[sys.platform] yield AnghaBenchDataset( name="benchmark://anghabench-v0", site_data_base=site_data_base, sort_order=0, manifest_url=anghabench_v0_manifest_url, manifest_sha256=anghabench_v0_manifest_sha256, deprecated="Please use anghabench-v1", ) yield JotaiBenchDataset(site_data_base=site_data_base) yield BlasDataset(site_data_base=site_data_base, sort_order=0) yield CLgenDataset(site_data_base=site_data_base, sort_order=0) yield CBenchDataset(site_data_base=site_data_base) # Add legacy version of cbench-v1 in which the 'b' was capitalized. This # is deprecated and will be removed no earlier than v0.1.10. yield CBenchLegacyDataset2( site_data_base=site_data_base, name="benchmark://cBench-v1", deprecated=( "Please use 'benchmark://cbench-v1' (note the lowercase name). 
" "The dataset is the same, only the name has changed" ), manifest_url="https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-cBench-v1-manifest.bz2", manifest_sha256="635b94eeb2784dfedb3b53fd8f84517c3b4b95d851ddb662d4c1058c72dc81e0", sort_order=100, ) yield CBenchLegacyDataset(site_data_base=site_data_base) yield CHStoneDataset(site_data_base=site_data_base) yield CsmithDataset(site_data_base=site_data_base, sort_order=0) yield GitHubDataset(site_data_base=site_data_base, sort_order=0) yield LinuxDataset(site_data_base=site_data_base, sort_order=0) yield LlvmStressDataset(site_data_base=site_data_base, sort_order=0) yield MibenchDataset(site_data_base=site_data_base, sort_order=0) yield MibenchV0Dataset(site_data_base=site_data_base, sort_order=100) yield NPBDataset(site_data_base=site_data_base, sort_order=0) yield OpenCVDataset(site_data_base=site_data_base, sort_order=0) yield POJ104Dataset(site_data_base=site_data_base, sort_order=0) yield POJ104LegacyDataset(site_data_base=site_data_base, sort_order=100) yield TensorFlowDataset(site_data_base=site_data_base, sort_order=0) __all__ = [ "AnghaBenchDataset", "BlasDataset", "CBenchDataset", "CBenchLegacyDataset", "CLgenDataset", "CsmithBenchmark", "CsmithDataset", "get_llvm_datasets", "GitHubDataset", "JotaiBenchDataset", "LinuxDataset", "LlvmStressDataset", "MibenchDataset", "NPBDataset", "OpenCVDataset", "POJ104Dataset", "POJ104LegacyDataset", "TensorFlowDataset", ]
CompilerGym-development
compiler_gym/envs/llvm/datasets/__init__.py
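# Editor's sketch showing how the get_llvm_datasets() helper defined above might be
# used to enumerate the builtin LLVM datasets on Linux or macOS. The temporary
# directory is an arbitrary choice for site_data_base.
from pathlib import Path
from tempfile import TemporaryDirectory

from compiler_gym.envs.llvm.datasets import get_llvm_datasets

with TemporaryDirectory() as d:
    for dataset in get_llvm_datasets(site_data_base=Path(d)):
        print(dataset.name, "-", dataset.description)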
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import subprocess from pathlib import Path from typing import Iterable import numpy as np from compiler_gym.datasets import Benchmark, Dataset from compiler_gym.datasets.uri import BenchmarkUri from compiler_gym.errors import BenchmarkInitError from compiler_gym.third_party import llvm from compiler_gym.util.commands import Popen # The maximum value for the --seed argument to llvm-stress. UINT_MAX = (2**32) - 1 class LlvmStressDataset(Dataset): """A dataset which uses llvm-stress to generate programs. `llvm-stress <https://llvm.org/docs/CommandGuide/llvm-stress.html>`_ is a tool for generating random LLVM-IR files. This dataset forces reproducible results by setting the input seed to the generator. The benchmark's URI is the seed, e.g. "generator://llvm-stress-v0/10" is the benchmark generated by llvm-stress using seed 10. The total number of unique seeds is 2^32 - 1. Note that llvm-stress is a tool that is used to find errors in LLVM. As such, there is a higher likelihood that the benchmark cannot be used for an environment and that :meth:`env.reset() <compiler_gym.envs.CompilerEnv.reset>` will raise :class:`BenchmarkInitError <compiler_gym.datasets.BenchmarkInitError>`. """ def __init__(self, site_data_base: Path, sort_order: int = 0): super().__init__( name="generator://llvm-stress-v0", description="Randomly generated LLVM-IR", references={ "Documentation": "https://llvm.org/docs/CommandGuide/llvm-stress.html" }, license="Apache License v2.0 with LLVM Exceptions", site_data_base=site_data_base, sort_order=sort_order, ) @property def size(self) -> int: # Actually 2^32 - 1, but practically infinite for all intents and # purposes. return 0 def benchmark_uris(self) -> Iterable[str]: return (f"{self.name}/{i}" for i in range(UINT_MAX)) def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark: seed = int(uri.path[1:]) return self.benchmark_from_seed(seed) def _random_benchmark(self, random_state: np.random.Generator) -> Benchmark: seed = random_state.integers(UINT_MAX) return self.benchmark_from_seed(seed) def benchmark_from_seed(self, seed: int) -> Benchmark: """Get a benchmark from a uint32 seed. :param seed: A number in the range 0 <= n < 2^32. :return: A benchmark instance. """ self.install() # Run llvm-stress with the given seed and pipe the output to llvm-as to # assemble a bitcode. try: with Popen( [str(llvm.llvm_stress_path()), f"--seed={seed}"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) as llvm_stress: with Popen( [str(llvm.llvm_as_path()), "-"], stdin=llvm_stress.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) as llvm_as: stdout, _ = llvm_as.communicate(timeout=60) llvm_stress.communicate(timeout=60) if llvm_stress.returncode or llvm_as.returncode: raise BenchmarkInitError("Failed to generate benchmark") except subprocess.TimeoutExpired: raise BenchmarkInitError("Benchmark generation timed out") return Benchmark.from_file_contents(f"{self.name}/{seed}", stdout)
CompilerGym-development
compiler_gym/envs/llvm/datasets/llvm_stress.py
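# Editor's usage sketch for the llvm-stress generator dataset above: the benchmark URI
# encodes the generator seed, e.g. "generator://llvm-stress-v0/10" is the module built
# from seed 10, as described in the class docstring. Assumes the top-level
# compiler_gym.make() API.
import compiler_gym
from compiler_gym.errors import BenchmarkInitError

with compiler_gym.make("llvm-v0") as env:
    try:
        env.reset(benchmark="generator://llvm-stress-v0/10")
    except BenchmarkInitError:
        print("llvm-stress produced an unusable module for this seed")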
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import io import logging import os import shutil import subprocess import tarfile from pathlib import Path from threading import Lock from typing import List from fasteners import InterProcessLock from compiler_gym.datasets import Benchmark, TarDatasetWithManifest from compiler_gym.datasets.benchmark import BenchmarkWithSource from compiler_gym.datasets.uri import BenchmarkUri from compiler_gym.envs.llvm.llvm_benchmark import ClangInvocation from compiler_gym.errors import BenchmarkInitError from compiler_gym.util.commands import Popen, communicate from compiler_gym.util.download import download from compiler_gym.util.filesystem import atomic_file_write from compiler_gym.util.truncate import truncate logger = logging.getLogger(__name__) _CLGEN_INSTALL_LOCK = Lock() class CLgenDataset(TarDatasetWithManifest): """The CLgen dataset contains 1000 synthetically generated OpenCL kernels. The dataset is from: Cummins, Chris, Pavlos Petoumenos, Zheng Wang, and Hugh Leather. "Synthesizing benchmarks for predictive modeling." In 2017 IEEE/ACM International Symposium on Code Generation and Optimization (CGO), pp. 86-99. IEEE, 2017. And is available at: https://github.com/ChrisCummins/paper-synthesizing-benchmarks Installation ------------ The CLgen dataset consists of OpenCL kernels that are compiled to LLVM-IR on-demand and cached. The first time each benchmark is used there is an overhead of compiling it from OpenCL to bitcode. This is a one-off cost. Compiling OpenCL to bitcode requires third party headers that are downloaded on the first call to :code:`install()`. """ def __init__(self, site_data_base: Path, sort_order: int = 0): super().__init__( name="benchmark://clgen-v0", description="Synthetically generated OpenCL kernels", references={ "Paper": "https://chriscummins.cc/pub/2017-cgo.pdf", "Homepage": "https://github.com/ChrisCummins/clgen", }, license="GNU General Public License v3.0", site_data_base=site_data_base, manifest_urls=[ "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-clgen-v0-manifest.bz2" ], manifest_sha256="d2bbc1da5a24a8cb03b604d1d8e59227b33bdfcd964ebe741ca8339f1c8d65cc", tar_urls=[ "https://github.com/ChrisCummins/paper-synthesizing-benchmarks/raw/e45b6dffe9998f612624f05a6c4878ab4bcc84ec/data/clgen-1000.tar.bz2" ], tar_sha256="0bbd1b737f2537305e4db09b2971a5fa848b7c3a978bff6b570f45d1a488a72c", strip_prefix="clgen-1000/kernels", tar_compression="bz2", benchmark_file_suffix=".bc", sort_order=sort_order, ) self._opencl_installed = False self._opencl_headers_installed_marker = ( self._site_data_path / ".opencl-installed" ) self.libclc_dir = self.site_data_path / "libclc" self.opencl_h_path = self.site_data_path / "opencl.h" def install(self): super().install() if not self._opencl_installed: self._opencl_installed = self._opencl_headers_installed_marker.is_file() if self._opencl_installed: return with _CLGEN_INSTALL_LOCK, InterProcessLock(self._tar_lockfile): # Repeat install check now that we are in the locked region. if self._opencl_headers_installed_marker.is_file(): return # Download the libclc headers. 
shutil.rmtree(self.libclc_dir, ignore_errors=True) logger.info("Downloading OpenCL headers ...") tar_data = io.BytesIO( download( "https://dl.fbaipublicfiles.com/compiler_gym/libclc-v0.tar.bz2", sha256="f1c511f2ac12adf98dcc0fbfc4e09d0f755fa403c18f1fb1ffa5547e1fa1a499", ) ) with tarfile.open(fileobj=tar_data, mode="r:bz2") as arc: arc.extractall(str(self.site_data_path / "libclc")) # Download the OpenCL header. with open(self.opencl_h_path, "wb") as f: f.write( download( "https://github.com/ChrisCummins/clgen/raw/463c0adcd8abcf2432b24df0aca594b77a69e9d3/deeplearning/clgen/data/include/opencl.h", sha256="f95b9f4c8b1d09114e491846d0d41425d24930ac167e024f45dab8071d19f3f7", ) ) self._opencl_headers_installed_marker.touch() def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark: self.install() benchmark_name = uri.path[1:] if not benchmark_name: raise LookupError(f"No benchmark specified: {uri}") # The absolute path of the file, without an extension. path_stem = os.path.normpath(f"{self.dataset_root}/{uri.path}") bc_path, cl_path = Path(f"{path_stem}.bc"), Path(f"{path_stem}.cl") # If the file does not exist, compile it on-demand. if not bc_path.is_file(): if not cl_path.is_file(): raise LookupError( f"Benchmark not found: {uri} (file not found: {cl_path}, path_stem {path_stem})" ) # Compile the OpenCL kernel into a bitcode file. with atomic_file_write(bc_path) as tmp_bc_path: compile_command: List[str] = ClangInvocation.from_c_file( cl_path, copt=[ "-isystem", str(self.libclc_dir), "-include", str(self.opencl_h_path), "-target", "nvptx64-nvidia-nvcl", "-ferror-limit=1", # Stop on first error. "-w", # No warnings. ], ).command(outpath=tmp_bc_path) logger.debug("Exec %s", compile_command) try: with Popen( compile_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) as clang: _, stderr = communicate(clang, timeout=300) except subprocess.TimeoutExpired: raise BenchmarkInitError(f"Benchmark compilation timed out: {uri}") if clang.returncode: compile_command = " ".join(compile_command) error = truncate( stderr.decode("utf-8"), max_lines=20, max_line_len=20000 ) raise BenchmarkInitError( f"Compilation job failed!\n" f"Command: {compile_command}\n" f"Error: {error}" ) return BenchmarkWithSource.create(uri, bc_path, "kernel.cl", cl_path)
CompilerGym-development
compiler_gym/envs/llvm/datasets/clgen.py
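# Editor's generic sketch of the install pattern used by CLgenDataset.install() above:
# a fast path that checks a marker file, then a thread lock plus an inter-process file
# lock around the one-off download. The paths and the body of the locked region are
# hypothetical placeholders, not CompilerGym API.
from pathlib import Path
from threading import Lock

from fasteners import InterProcessLock

_INSTALL_LOCK = Lock()


def install_once(site_data_path: Path, lockfile: Path) -> None:
    marker = site_data_path / ".installed"
    if marker.is_file():  # Fast path: already installed.
        return
    with _INSTALL_LOCK, InterProcessLock(lockfile):
        if marker.is_file():  # Re-check now that we hold the locks.
            return
        # ... perform the one-off download / extraction here ...
        marker.touch()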
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import subprocess import sys from concurrent.futures import as_completed from pathlib import Path from typing import Optional from compiler_gym.datasets import Benchmark, TarDatasetWithManifest from compiler_gym.datasets.benchmark import BenchmarkWithSource from compiler_gym.datasets.uri import BenchmarkUri from compiler_gym.envs.llvm.llvm_benchmark import ClangInvocation from compiler_gym.util import thread_pool from compiler_gym.util.filesystem import atomic_file_write class AnghaBenchDataset(TarDatasetWithManifest): """A dataset of C programs curated from GitHub source code. The dataset is from: da Silva, Anderson Faustino, Bruno Conde Kind, José Wesley de Souza Magalhaes, Jerônimo Nunes Rocha, Breno Campos Ferreira Guimaraes, and Fernando Magno Quinão Pereira. "ANGHABENCH: A Suite with One Million Compilable C Benchmarks for Code-Size Reduction." In 2021 IEEE/ACM International Symposium on Code Generation and Optimization (CGO), pp. 378-390. IEEE, 2021. And is available at: http://cuda.dcc.ufmg.br/angha/home Installation ------------ The AnghaBench dataset consists of C functions that are compiled to LLVM-IR on-demand and cached. The first time each benchmark is used there is an overhead of compiling it from C to bitcode. This is a one-off cost. """ def __init__( self, site_data_base: Path, sort_order: int = 0, manifest_url: Optional[str] = None, manifest_sha256: Optional[str] = None, deprecated: Optional[str] = None, name: Optional[str] = None, ): manifest_url_, manifest_sha256_ = { "darwin": ( "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-anghabench-v1-macos-manifest.bz2", "96ead63da5f8efa07fd0370f0c6e452b59bed840828b8b19402102b1ce3ee109", ), "linux": ( "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-anghabench-v1-linux-manifest.bz2", "14df85f650199498cf769715e9f0d7841d09f9fa62a95b8ecc242bdaf227f33a", ), }[sys.platform] super().__init__( name=name or "benchmark://anghabench-v1", description="Compile-only C/C++ functions extracted from GitHub", references={ "Paper": "https://homepages.dcc.ufmg.br/~fernando/publications/papers/FaustinoCGO21.pdf", "Homepage": "http://cuda.dcc.ufmg.br/angha/", }, license="Unknown. See: https://github.com/brenocfg/AnghaBench/issues/1", site_data_base=site_data_base, manifest_urls=[manifest_url or manifest_url_], manifest_sha256=manifest_sha256 or manifest_sha256_, tar_urls=[ "https://github.com/brenocfg/AnghaBench/archive/d8034ac8562b8c978376008f4b33df01b8887b19.tar.gz" ], tar_sha256="85d068e4ce44f2581e3355ee7a8f3ccb92568e9f5bd338bc3a918566f3aff42f", strip_prefix="AnghaBench-d8034ac8562b8c978376008f4b33df01b8887b19", tar_compression="gz", benchmark_file_suffix=".bc", sort_order=sort_order, deprecated=deprecated, ) def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark: self.install() benchmark_name = uri.path[1:] if not benchmark_name: raise LookupError(f"No benchmark specified: {uri}") # The absolute path of the file, without an extension. path_stem = self.dataset_root / benchmark_name bitcode_abspath = Path(f"{path_stem}.bc") c_file_abspath = Path(f"{path_stem}.c") # If the file does not exist, compile it on-demand. 
if not bitcode_abspath.is_file(): if not c_file_abspath.is_file(): raise LookupError( f"Benchmark not found: {uri} (file not found: {c_file_abspath})" ) with atomic_file_write(bitcode_abspath) as tmp_path: compile_cmd = ClangInvocation.from_c_file( c_file_abspath, copt=[ "-ferror-limit=1", # Stop on first error. "-w", # No warnings. ], ).command(outpath=tmp_path) subprocess.check_call(compile_cmd, timeout=300) return BenchmarkWithSource.create( uri, bitcode_abspath, "function.c", c_file_abspath ) def compile_all(self): n = self.size executor = thread_pool.get_thread_pool_executor() # Since the dataset is lazily compiled, simply iterating over the full # set of URIs will compile everything. Do this in parallel. futures = ( executor.submit(self.benchmark, uri) for uri in self.benchmark_uris() ) for i, future in enumerate(as_completed(futures), start=1): future.result() print( f"\r\033[KCompiled {i} of {n} programs ({i/n:.1%} complete)", flush=True, end="", )
CompilerGym-development
compiler_gym/envs/llvm/datasets/anghabench.py
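# Editor's usage sketch for the AnghaBench dataset above: benchmarks are compiled from
# C to bitcode the first time they are used, and compile_all() can warm the cache
# eagerly. Assumes the top-level compiler_gym.make() API; the choice of benchmark
# below is illustrative only.
import compiler_gym

with compiler_gym.make("llvm-v0") as env:
    dataset = env.datasets["benchmark://anghabench-v1"]
    dataset.install()                        # Download the tarball and manifest.
    uri = next(iter(dataset.benchmark_uris()))  # Pick an arbitrary benchmark.
    env.reset(benchmark=uri)                 # First use triggers the clang invocation.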
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Register the GCC CompilerGym environment and expose its public API."""
from pathlib import Path

from compiler_gym.envs.gcc.gcc import Gcc, GccSpec, Option
from compiler_gym.envs.gcc.gcc_env import DEFAULT_GCC, GccEnv
from compiler_gym.util.registration import register
from compiler_gym.util.runfiles_path import runfiles_path

GCC_SERVICE_BINARY: Path = runfiles_path(
    "compiler_gym/envs/gcc/service/compiler_gym-gcc-service"
)

register(
    id="gcc-v0",
    entry_point="compiler_gym.envs.gcc:GccEnv",
    kwargs={"service": GCC_SERVICE_BINARY},
)

__all__ = ["GccEnv", "GccSpec", "Gcc", "Option", "DEFAULT_GCC"]
CompilerGym-development
compiler_gym/envs/gcc/__init__.py
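# Editor's sketch showing the "gcc-v0" environment registered above being created.
# This assumes a working docker installation (the default gcc_bin is a docker image)
# and the standard compiler_gym.make() entry point; it is not part of the module.
import compiler_gym

with compiler_gym.make("gcc-v0") as env:
    env.reset()
    print(env.benchmark)  # The default benchmark, e.g. benchmark://chstone-v0/adpcm.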
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """A CompilerGym environment for GCC.""" import codecs import json import pickle from pathlib import Path from typing import Any, Dict, List, Optional, Union from deprecated.sphinx import deprecated from compiler_gym.datasets import Benchmark from compiler_gym.envs.gcc.datasets import get_gcc_datasets from compiler_gym.envs.gcc.gcc import Gcc, GccSpec from compiler_gym.envs.gcc.gcc_rewards import AsmSizeReward, ObjSizeReward from compiler_gym.service import ConnectionOpts from compiler_gym.service.client_service_compiler_env import ClientServiceCompilerEnv from compiler_gym.spaces import Reward from compiler_gym.util.decorators import memoized_property from compiler_gym.util.gym_type_hints import ObservationType, OptionalArgumentValue from compiler_gym.views import ObservationSpaceSpec # The default gcc_bin argument. DEFAULT_GCC: str = "docker:gcc:11.2.0" class GccEnv(ClientServiceCompilerEnv): """A specialized ClientServiceCompilerEnv for GCC. This class exposes the optimization space of GCC's command line flags as an environment for reinforcement learning. For further details, see the :ref:`GCC Environment Reference <envs/gcc:Installation>`. """ def __init__( self, *args, gcc_bin: Union[str, Path] = DEFAULT_GCC, benchmark: Union[str, Benchmark] = "benchmark://chstone-v0/adpcm", datasets_site_path: Optional[Path] = None, connection_settings: Optional[ConnectionOpts] = None, **kwargs, ): """Create an environment. :param gcc_bin: The path to the GCC executable, or the name of a docker image to use if prefixed with :code:`docker:`. Only used if the environment is attached to a local service. If attached remotely, the service will have already been created. :param benchmark: The benchmark to use for this environment. Either a URI string, or a :class:`Benchmark <compiler_gym.datasets.Benchmark>` instance. If not provided, a default benchmark is used. :param connection_settings: The connection settings to use. :raises EnvironmentNotSupported: If the runtime requirements for the GCC environment have not been met. :raises ServiceInitError: If the requested GCC version cannot be used. """ connection_settings = connection_settings or ConnectionOpts() # Pass the executable path via an environment variable connection_settings.script_env = {"CC": gcc_bin} # Eagerly create a GCC compiler instance now because: # # 1. We want to catch an invalid gcc_bin argument early. # # 2. We want to perform the expensive one-off `docker pull` before we # start the backend service, as otherwise the backend service # initialization may time out. 
Gcc(bin=gcc_bin) super().__init__( *args, **kwargs, benchmark=benchmark, datasets=get_gcc_datasets( gcc_bin=gcc_bin, site_data_base=datasets_site_path ), rewards=[AsmSizeReward(), ObjSizeReward()], connection_settings=connection_settings, ) def reset( self, benchmark: Optional[Union[str, Benchmark]] = None, action_space: Optional[str] = None, observation_space: Union[ OptionalArgumentValue, str, ObservationSpaceSpec ] = OptionalArgumentValue.UNCHANGED, reward_space: Union[ OptionalArgumentValue, str, Reward ] = OptionalArgumentValue.UNCHANGED, ) -> Optional[ObservationType]: observation = super().reset( benchmark=benchmark, action_space=action_space, observation_space=observation_space, reward_space=reward_space, ) return observation @deprecated( version="0.2.1", reason="Use `env.observation.command_line()` instead", ) def commandline(self) -> str: """Return a string representing the command line options. :return: A string. """ return self.observation["command_line"] @memoized_property def gcc_spec(self) -> GccSpec: """A :class:`GccSpec <compiler_gym.envs.gcc.gcc.GccSpec>` description of the compiler specification. """ pickled = self.send_param("gcc_spec", "") return pickle.loads(codecs.decode(pickled.encode(), "base64")) @property def source(self) -> str: """Get the source code.""" return self.observation["source"] @property def rtl(self) -> str: """Get the final rtl of the program.""" return self.observation["rtl"] @property def asm(self) -> str: """Get the assembly code.""" return self.observation["asm"] @property def asm_size(self) -> int: """Get the assembly code size in bytes.""" return self.observation["asm_size"] @property def asm_hash(self) -> str: """Get a hash of the assembly code.""" return self.observation["asm_hash"] @property def instruction_counts(self) -> Dict[str, int]: """Get a count of the instruction types in the assembly code. Note, that it will also count fields beginning with a :code:`.`, like :code:`.bss` and :code:`.align`. Make sure to remove those if not needed. """ return json.loads(self.observation["instruction_counts"]) @property def obj(self) -> bytes: """Get the object code.""" return self.observation["obj"] @property def obj_size(self) -> int: """Get the object code size in bytes.""" return self.observation["obj_size"] @property def obj_hash(self) -> str: """Get a hash of the object code.""" return self.observation["obj_hash"] @property def choices(self) -> List[int]: """Get the current choices""" return self.observation["choices"] @choices.setter def choices(self, choices: List[int]): """Set the current choices. This must be a list of ints with one element for each option the gcc_spec. Each element must be in range for the corresponding option. I.e. it must be between -1 and len(option) inclusive. """ # TODO(github.com/facebookresearch/CompilerGym/issues/52): This can be # exposed directly through the action space once #369 is merged. assert len(self.gcc_spec.options) == len(choices) assert all( -1 <= c < len(self.gcc_spec.options[i]) for i, c in enumerate(choices) ) self.send_param("choices", ",".join(map(str, choices))) def _init_kwargs(self) -> Dict[str, Any]: """Return the arguments required to initialize a GccEnv.""" return { # GCC has an additional gcc_bin argument. "gcc_bin": self.gcc_spec.gcc.bin, **super()._init_kwargs(), }
CompilerGym-development
compiler_gym/envs/gcc/gcc_env.py
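# Editor's usage sketch for GccEnv above: the per-flag configuration is exposed through
# the `choices` list (one entry per option in gcc_spec, with -1 meaning "not set"), and
# derived observations such as asm_size and obj_size are available as properties. This
# is a sketch that assumes the gcc-v0 environment and a working GCC/docker backend.
import compiler_gym

with compiler_gym.make("gcc-v0") as env:
    env.reset()
    print(len(env.gcc_spec.options), "options in the search space")
    env.choices = [-1] * len(env.gcc_spec.options)  # Baseline: no flags set.
    print("asm size:", env.asm_size, "obj size:", env.obj_size)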
#! /usr/bin/env python3 # # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Query a GCC binary for version, optimization and param spaces. The goal of this file is to query the available settings in a GCC compiler so that they don't have to be hard coded. The main entry point to this file is the 'get_spec' function which returns a GccSpec object. That object describes the version, options and parameters. Querying these settings is time consuming, so this file tries to cache the values in a cache directory. Running this file will print the gcc spec to stdout. """ import logging import math import os import pickle import re import subprocess import warnings from functools import lru_cache from pathlib import Path from typing import Dict, List, NamedTuple, Optional, Union import docker from compiler_gym.errors import EnvironmentNotSupported, ServiceError, ServiceInitError from compiler_gym.util.filesystem import atomic_file_write from compiler_gym.util.runfiles_path import site_data_path logger = logging.getLogger(__name__) class Option: """An Option is either a command line optimization setting or a parameter. It is essentially a list of the possible values that can be taken. Each item is command line parameter. In GCC, all of these are single settings, so only need one string to describe them, rather than a list. """ def __len__(self): """Number of available settings. Note that the absence of a value is not included in this, it is implicit. """ raise NotImplementedError() def __getitem__(self, key: int) -> str: """Get the command line argument associated with an index (key).""" raise NotImplementedError() def __str__(self) -> str: """Get the name of this option.""" raise NotImplementedError() class GccOOption(Option): """This class represents the :code:`-O0`, :code:`-O1`, :code:`-O2`, :code:`-O3`, :code:`-Os`, and :code:`-Ofast` options. This class starts with no values, we fill them in with :code:`_gcc_parse_optimize()`. The suffixes to append to :code:`-O` are stored in self.values. """ def __init__(self): self.values = [] def __len__(self): return len(self.values) def __getitem__(self, key: int) -> str: return "-O" + self.values[key] def __str__(self) -> str: return "-O" def __repr__(self) -> str: return f"<GccOOption values=[{','.join(self.values)}]>" class GccFlagOption(Option): """An ordinary :code:`-f` flag. These have two possible settings. For a given flag name there are :code:`'-f<name>' and :code:`'-fno-<name>. If :code:`no_fno` is true, then there is only the :code:`-f<name>` form. """ def __init__(self, name: str, no_fno: bool = False): self.name = name self.no_fno = no_fno def __len__(self): return 1 if self.no_fno else 2 def __getitem__(self, key: int) -> str: return f"-f{'' if key == 0 else 'no-'}{self.name}" def __str__(self) -> str: return f"-f{self.name}" def __repr__(self) -> str: return f"<GccFlagOption name={self.name}>" class GccFlagEnumOption(Option): """A flag of style :code:`-f<name>=[val1, val2, ...]`. :code:`self.name` holds the name. :code:`self.values` holds the values. 
""" def __init__(self, name: str, values: List[str]): self.name = name self.values = values def __len__(self): return len(self.values) def __getitem__(self, key: int) -> str: return f"-f{self.name}={self.values[key]}" def __str__(self) -> str: return f"-f{self.name}" def __repr__(self) -> str: return f"<GccFlagEnumOption name={self.name}, values=[{','.join(self.values)}]>" class GccFlagIntOption(Option): """A flag of style :code:`-f<name>=<integer>` where the integer is between min and max. """ def __init__(self, name: str, min: int, max: int): self.name = name self.min = min self.max = max def __len__(self): return self.max - self.min + 1 def __getitem__(self, key: int) -> str: return f"-f{self.name}={self.min + key}" def __str__(self) -> str: return f"-f{self.name}" def __repr__(self) -> str: return f"<GccFlagIntOption name={self.name}, min={self.min}, max={self.max}>" class GccFlagAlignOption(Option): """Alignment flags. These take several forms. See the GCC documentation.""" def __init__(self, name: str): logger.warning("Alignment options not properly handled %s", name) self.name = name def __len__(self): return 1 def __getitem__(self, key: int) -> str: return f"-f{self.name}" def __str__(self) -> str: return f"-f{self.name}" def __repr__(self) -> str: return f"<GccFlagAlignOption name={self.name}>" class GccParamEnumOption(Option): """A parameter :code:`--param=<name>=[val1, val2, val3]`.""" def __init__(self, name: str, values: List[str]): self.name = name self.values = values def __len__(self): return len(self.values) def __getitem__(self, key: int) -> str: return f"--param={self.name}={self.values[key]}" def __str__(self) -> str: return f"--param={self.name}" def __repr__(self) -> str: return ( f"<GccParamEnumOption name={self.name}, values=[{','.join(self.values)}]>" ) class GccParamIntOption(Option): """A parameter :code:`--param=<name>=<integer>`, where the integer is between min and max. """ def __init__(self, name: str, min: int, max: int): self.name = name self.min = min self.max = max def __len__(self): return self.max - self.min + 1 def __getitem__(self, key: int) -> str: return f"--param={self.name}={self.min + key}" def __str__(self) -> str: return f"--param={self.name}" def __repr__(self) -> str: return f"<GccParamIntOption name={self.name}, min={self.min}, max={self.max}>" @lru_cache(maxsize=2) def get_docker_client(): """Fetch the docker client singleton.""" # Ignore deprecation warnings from docker.from_env(). with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) try: return docker.from_env() except docker.errors.DockerException as e: raise EnvironmentNotSupported( f"Failed to initialize docker client needed by GCC environment: {e}.\n" "Have you installed the runtime dependencies?\n See " "<https://facebookresearch.github.io/CompilerGym/envs/gcc.html#installation> " "for details." ) from e # We only need to run this function once per image. @lru_cache(maxsize=64) def pull_docker_image(image: str) -> str: """Pull the requested docker image. :param image: The name of the docker image to pull. :raises ServiceInitError: If pulling the docker image fails. 
""" try: client = get_docker_client() client.images.pull(image) return image except docker.errors.DockerException as e: raise ServiceInitError(f"Failed to fetch docker image '{image}': {e}") def join_docker_container(container, timeout_seconds: int) -> str: """Block until the container terminates, returning its output.""" try: status = container.wait(timeout=timeout_seconds) except docker.exceptions.ReadTimeout as e: # Catch and re-raise the timeout. raise TimeoutError(f"GCC timed out after {timeout_seconds:,d} seconds") from e if status["StatusCode"]: logs = "" try: logs = container.logs(stdout=True, stderr=False).decode() except (UnicodeDecodeError, docker.errors.NotFound): pass raise ServiceError(f"GCC failed with returncode {status['StatusCode']}: {logs}") return container.logs(stdout=True, stderr=False).decode() class Gcc: """This class represents an instance of the GCC compiler, either as a binary or a docker image. :ivar bin: A string version of the constructor argument. :vartype bin: str :ivar spec: A :class:`GccSpec <compiler_gym.envs.gcc.gcc.GccSpec>` instance. :vartype spec: GccSpec """ def __init__(self, bin: Union[str, Path]): self.bin = str(bin) self.image = self.bin[len("docker:") :] if self.bin.startswith("docker:"): pull_docker_image(self.image) self.call = self._docker_run else: self.call = self._subprocess_run self.spec = _get_spec(self, cache_dir=site_data_path("gcc-v0")) def __call__( self, *args: str, timeout: int, cwd: Optional[Path] = None, volumes: Optional[Dict[str, Dict[str, str]]] = None, ) -> str: """Run GCC with the given args. :param args: The command line arguments to append. :param timeout: A timeout in seconds. :param cwd: The working directory. :param volumes: A dictionary of volume bindings for docker. :raises TimeoutError: If GCC fails to complete within timeout. :raises ServiceError: In case GCC fails. """ return self.call(args, timeout, cwd=Path(cwd or "."), volumes=volumes) def _docker_run( self, args: List[str], timeout: int, cwd: Path, volumes: Optional[Dict[str, Dict[str, str]]] = None, ): cwd = cwd.absolute().as_posix() cmd_line = ["gcc"] + list(map(str, args)) if timeout: cmd_line = ["timeout", str(timeout)] + cmd_line volumes_ = {cwd: {"bind": cwd, "mode": "rw"}} volumes_.update(volumes or {}) client = get_docker_client() container = client.containers.create( self.image, cmd_line, working_dir=cwd, volumes=volumes_, ) container.start() try: return join_docker_container(container, timeout_seconds=timeout) finally: container.remove() def _subprocess_run(self, args, timeout, cwd, volumes): del volumes # Unused cmd_line = [self.bin] + list(map(str, args)) try: result = subprocess.check_output( cmd_line, cwd=cwd, universal_newlines=True, timeout=timeout ) except subprocess.CalledProcessError as e: raise ServiceError(f"Failed to run {self.bin}: {e}") from e except FileNotFoundError: raise ServiceInitError(f"GCC binary not found: {self.bin}") return result class GccSpec(NamedTuple): """This class combines all of the information about the version and options for a GCC instance. """ gcc: Gcc """A compiler instance.""" version: str """The GCC version string.""" options: List[Option] """A list of options exposed by the compiler.""" @property def size(self) -> int: """Calculate the size of the option space. This is the product of the cardinalities of all the options. """ sz = 1 for option in self.options: # Each option can be applied or not. 
sz *= len(option) + 1 return sz def _gcc_parse_optimize(gcc: Gcc) -> List[Option]: """Parse the optimization help string from the GCC binary to find options.""" logger.debug("Parsing GCC optimization space") # Call 'gcc --help=optimize -Q' result = gcc("--help=optimize", "-Q", timeout=60) # Split into lines. Ignore the first line. out = result.split("\n")[1:] # Regex patterns to match the different options O_num_pat = re.compile("-O<number>") O_pat = re.compile("-O([a-z]+)") flag_align_eq_pat = re.compile("-f(align-[-a-z]+)=") flag_pat = re.compile("-f([-a-z0-9]+)") flag_enum_pat = re.compile("-f([-a-z0-9]+)=\\[([-A-Za-z_\\|]+)\\]") flag_interval_pat = re.compile("-f([-a-z0-9]+)=<([0-9]+),([0-9]+)>") flag_number_pat = re.compile("-f([-a-z0-9]+)=<number>") # The list of options as it gets built up. options = {} # Add a -O value def add_gcc_o(value: str): # -O flag name = "O" # There are multiple -O flags. We add one value at a time. opt = options[name] = options.get(name, GccOOption()) # There shouldn't be any way to overwrite this with the wrong type. assert type(opt) == GccOOption opt.values.append(value) # Add a flag def add_gcc_flag(name: str): # Straight flag. # If there is something else in its place already (like a flag enum), # then we don't overwrite it. Straight flags always have the lowest # priority options[name] = options.get(name, GccFlagOption(name)) # Add an enum flag def add_gcc_flag_enum(name: str, values: List[str]): # Enum flag. opt = options.get(name) if opt: # We should only ever be overwriting a straight flag assert type(opt) == GccFlagOption # Always overwrite options[name] = GccFlagEnumOption(name, values) # Add an integer flag def add_gcc_flag_int(name: str, min: int, max: int): # Int flag. opt = options.get(name) if opt: # We should only ever be overwriting a straight flag assert type(opt) == GccFlagOption # Always overwrite options[name] = GccFlagIntOption(name, min, max) # Add an align flag def add_gcc_flag_align(name: str): # Align flag. 
opt = options.get(name) if opt: # We should only ever be overwriting a straight flag assert type(opt) == GccFlagOption # Always overwrite options[name] = GccFlagAlignOption(name) # Parse a line from the help output def parse_line(line: str): # The first bit of the line is the specification bits = line.split() if not bits: return spec = bits[0] # -O<number> m = O_num_pat.fullmatch(spec) if m: for i in range(4): add_gcc_o(str(i)) return # -Ostr m = O_pat.fullmatch(spec) if m: add_gcc_o(m.group(1)) return # -falign-str= # These have quite complicated semantics m = flag_align_eq_pat.fullmatch(spec) if m: name = m.group(1) add_gcc_flag_align(name) return # -fflag m = flag_pat.fullmatch(spec) if m: name = m.group(1) add_gcc_flag(name) return # -fflag=[a|b] m = flag_enum_pat.fullmatch(spec) if m: name = m.group(1) values = m.group(2).split("|") add_gcc_flag_enum(name, values) return # -fflag=<min,max> m = flag_interval_pat.fullmatch(spec) if m: name = m.group(1) min = int(m.group(2)) max = int(m.group(3)) add_gcc_flag_int(name, min, max) return # -fflag=<number> m = flag_number_pat.fullmatch(spec) if m: name = m.group(1) min = 0 max = 2 << 31 - 1 add_gcc_flag_int(name, min, max) return logger.warning("Unknown GCC optimization flag spec, '%s'", line) # Parse all the lines for line in out: parse_line(line.strip()) # Sort and return return list(map(lambda x: x[1], sorted(list(options.items())))) def _gcc_parse_params(gcc: Gcc) -> List[Option]: """Parse the param help string from the GCC binary to find options.""" # Pretty much identical to _gcc_parse_optimize logger.debug("Parsing GCC param space") result = gcc("--help=param", "-Q", timeout=60) out = result.split("\n")[1:] param_enum_pat = re.compile("--param=([-a-zA-Z0-9]+)=\\[([-A-Za-z_\\|]+)\\]") param_interval_pat = re.compile("--param=([-a-zA-Z0-9]+)=<(-?[0-9]+),([0-9]+)>") param_number_pat = re.compile("--param=([-a-zA-Z0-9]+)=") param_old_interval_pat = re.compile( "([-a-zA-Z0-9]+)\\s+default\\s+(-?\\d+)\\s+minimum\\s+(-?\\d+)\\s+maximum\\s+(-?\\d+)" ) params = {} def add_gcc_param_enum(name: str, values: List[str]): # Enum param. opt = params.get(name) assert not opt params[name] = GccParamEnumOption(name, values) def add_gcc_param_int(name: str, min: int, max: int): # Int flag. opt = params.get(name) assert not opt params[name] = GccParamIntOption(name, min, max) def is_int(s: str) -> bool: try: int(s) return True except ValueError: return False def parse_line(line: str): bits = line.split() if not bits: return # TODO(hugh): Not sure what the correct behavior is there. 
if len(bits) <= 1: return spec = bits[0] default = bits[1] # --param=name=[a|b] m = param_enum_pat.fullmatch(spec) if m: name = m.group(1) values = m.group(2).split("|") assert not default or default in values add_gcc_param_enum(name, values) return # --param=name=<min,max> m = param_interval_pat.fullmatch(spec) if m: name = m.group(1) min = int(m.group(2)) max = int(m.group(3)) if is_int(default): assert not default or min <= int(default) <= max add_gcc_param_int(name, min, max) return # --param=name= m = param_number_pat.fullmatch(spec) if m: name = m.group(1) min = 0 max = 2 << 31 - 1 if is_int(default): dflt = int(default) min = min if dflt >= min else dflt add_gcc_param_int(name, min, max) return # name default num minimum num maximum num m = param_old_interval_pat.fullmatch(line) if m: name = m.group(1) default = int(m.group(2)) min = int(m.group(3)) max = int(m.group(4)) if min <= default <= max: # For now we will only consider fully described params add_gcc_param_int(name, min, max) return logger.warning("Unknown GCC param flag spec, '%s'", line) # breakpoint() for line in out: parse_line(line.strip()) return list(map(lambda x: x[1], sorted(list(params.items())))) def _fix_options(options: List[Option]) -> List[Option]: """Fixes for things that seem not to be true in the help.""" def keep(option: Option) -> bool: # Ignore -flive-patching if isinstance(option, GccFlagEnumOption): if option.name == "live-patching": return False return True options = [opt for opt in options if keep(opt)] for i, option in enumerate(options): if isinstance(option, GccParamIntOption): # Some things say they can have -1, but can't if option.name in [ "logical-op-non-short-circuit", "prefetch-minimum-stride", "sched-autopref-queue-depth", "vect-max-peeling-for-alignment", ]: option.min = 0 elif isinstance(option, GccFlagOption): # -fhandle-exceptions renamed to -fexceptions if option.name == "handle-exceptions": option.name = "exceptions" # Some flags have no -fno- version if option.name in [ "stack-protector-all", "stack-protector-explicit", "stack-protector-strong", ]: option.no_fno = True # -fno-threadsafe-statics should have the no- removed if option.name == "no-threadsafe-statics": option.name = "threadsafe-statics" elif isinstance(option, GccFlagIntOption): # -fpack-struct has to be a small positive power of two if option.name == "pack-struct": values = [str(1 << j) for j in range(5)] options[i] = GccFlagEnumOption("pack-struct", values) return options def _gcc_get_version(gcc: Gcc) -> str: """Get the version string""" logger.debug("Getting GCC version for %s", gcc.bin) try: result = gcc("--version", timeout=60) except ServiceError as e: raise EnvironmentNotSupported(f"Failed to run GCC binary: {gcc.bin}") from e version = result.split("\n")[0] logger.debug("GCC version is %s", version) if "gcc" not in version: raise ServiceInitError(f"Invalid GCC version string: {version}") return version def _version_hash(version: str) -> str: """Hash the version so we can cache the spec at that name.""" h = 0 for c in version: h = ord(c) + 31 * h return str(h % (2 << 64)) def _get_spec(gcc: Gcc, cache_dir: Path) -> Optional[GccSpec]: """Get the specification for a GCC executable. :param gcc: The executable. :param cache_dir: An optional directory to search for cached versions of the spec. """ # Get the version version = _gcc_get_version(gcc) spec = None # See if there is a pickled spec in the cache_dir. First we use a hash to # name the file. 
spec_path = cache_dir / _version_hash(version) / "spec.pkl" # Try to get the pickled version if os.path.isfile(spec_path): try: with open(spec_path, "rb") as f: spec = pickle.load(f) spec = GccSpec(gcc=gcc, version=spec.version, options=spec.options) logger.debug("GccSpec for version '%s' read from %s", version, spec_path) except (pickle.UnpicklingError, EOFError) as e: logger.warning("Unable to read spec from '%s': %s", spec_path, e) if spec is None: # Pickle doesn't exist, parse optim_opts = _gcc_parse_optimize(gcc) param_opts = _gcc_parse_params(gcc) options = _fix_options(optim_opts + param_opts) spec = GccSpec(gcc, version, options) if not spec.options: return None # Cache the spec file for future. spec_path.parent.mkdir(exist_ok=True, parents=True) with atomic_file_write(spec_path, fileobj=True) as f: pickle.dump(spec, f) logger.debug("GccSpec for %s written to %s", version, spec_path) logger.debug("GccSpec size is approximately 10^%.0f", round(math.log(spec.size))) return spec
CompilerGym-development
compiler_gym/envs/gcc/gcc.py
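A minimal usage sketch of the Gcc wrapper and GccSpec defined in the file above. It assumes a local gcc binary on PATH; a docker image name such as "docker:gcc:11.2.0" (as mentioned in the service docstring) would also work if the docker runtime is installed.

from compiler_gym.envs.gcc import Gcc

gcc = Gcc(bin="gcc")              # probes the compiler and builds (or loads a cached) GccSpec
print(gcc.spec.version)           # the GCC version string
print(len(gcc.spec.options))      # number of tunable options discovered from --help output
print(gcc.spec.size)              # product over options of (cardinality + 1)

# The wrapper is callable; extra arguments are forwarded to the compiler.
print(gcc("--version", timeout=60).split("\n")[0])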
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Reward spaces for use in the GCC environments.""" from compiler_gym.spaces import Reward from compiler_gym.views.observation import ObservationView class AsmSizeReward(Reward): """Reward for the size in bytes of the assembly code""" def __init__(self): super().__init__( name="asm_size", observation_spaces=["asm_size"], default_value=0, default_negates_returns=True, deterministic=False, platform_dependent=True, ) self.previous = None def reset(self, benchmark: str, observation_view: ObservationView): super().reset(benchmark, observation_view) del benchmark # unused self.previous = None def update(self, action, observations, observation_view): del action # unused del observation_view # unused if self.previous is None: self.previous = observations[0] reward = float(self.previous - observations[0]) self.previous = observations[0] return reward class ObjSizeReward(Reward): """Reward for the size in bytes of the object code""" def __init__(self): super().__init__( name="obj_size", observation_spaces=["obj_size"], default_value=0, default_negates_returns=True, deterministic=False, platform_dependent=True, ) self.previous = None def reset(self, benchmark: str, observation_view: ObservationView): super().reset(benchmark, observation_view) del benchmark # unused self.previous = None def update(self, action, observations, observation_view): del action del observation_view if self.previous is None: self.previous = observations[0] reward = float(self.previous - observations[0]) self.previous = observations[0] return reward
CompilerGym-development
compiler_gym/envs/gcc/gcc_rewards.py
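A small sketch of the incremental reward computed by AsmSizeReward.update() above. The observation values are stand-in numbers for the "asm_size" observation; no environment is needed to see the delta behaviour.

from compiler_gym.envs.gcc.gcc_rewards import AsmSizeReward

reward = AsmSizeReward()
# The first update establishes the baseline, so it returns 0; later updates
# return (previous size - current size), i.e. positive when the code shrinks.
print(reward.update(action=None, observations=[1000], observation_view=None))  # 0.0
print(reward.update(action=None, observations=[900], observation_view=None))   # 100.0
print(reward.update(action=None, observations=[950], observation_view=None))   # -50.0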
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os from pathlib import Path from typing import Iterable from compiler_gym.datasets import Benchmark, TarDatasetWithManifest from compiler_gym.datasets.uri import BenchmarkUri from compiler_gym.envs.gcc.gcc import Gcc from compiler_gym.util.decorators import memoized_property from compiler_gym.util.filesystem import atomic_file_write URIS = [ "benchmark://chstone-v0/adpcm", "benchmark://chstone-v0/aes", "benchmark://chstone-v0/blowfish", "benchmark://chstone-v0/dfadd", "benchmark://chstone-v0/dfdiv", "benchmark://chstone-v0/dfmul", "benchmark://chstone-v0/dfsin", "benchmark://chstone-v0/gsm", "benchmark://chstone-v0/jpeg", "benchmark://chstone-v0/mips", "benchmark://chstone-v0/motion", "benchmark://chstone-v0/sha", ] # TODO(github.com/facebookresearch/CompilerGym/issues/325): This can be merged # with the LLVM implementation. class CHStoneDataset(TarDatasetWithManifest): """A dataset of C programs curated from GitHub source code. The dataset is from: Hara, Yuko, Hiroyuki Tomiyama, Shinya Honda, Hiroaki Takada, and Katsuya Ishii. "Chstone: A benchmark program suite for practical c-based high-level synthesis." In 2008 IEEE International Symposium on Circuits and Systems, pp. 1192-1195. IEEE, 2008. And is available at: http://www.ertl.jp/chstone/ """ def __init__( self, gcc_bin: Path, site_data_base: Path, sort_order: int = 0, ): super().__init__( name="benchmark://chstone-v0", description="Benchmarks for C-based High-Level Synthesis", references={ "Paper": "http://www.yxi.com/applications/iscas2008-300_1027.pdf", "Homepage": "http://www.ertl.jp/chstone/", }, license="Mixture of open source and public domain licenses", site_data_base=site_data_base, tar_urls=[ "https://github.com/ChrisCummins/patmos_HLS/archive/e62d878ceb91e5a18007ca2e0a9602ee44ff7d59.tar.gz" ], tar_sha256="f7acab9d3c3dc7b971e62c8454bc909d84bddb6d0a96378e41beb94231739acb", strip_prefix="patmos_HLS-e62d878ceb91e5a18007ca2e0a9602ee44ff7d59/benchmarks/CHStone", tar_compression="gz", benchmark_file_suffix=".c", sort_order=sort_order, # We provide our own manifest. manifest_urls=[], manifest_sha256="", ) self.gcc_bin = gcc_bin def benchmark_uris(self) -> Iterable[str]: yield from URIS @memoized_property def gcc(self): # Defer instantiation of Gcc from the constructor as it will fail if the # given Gcc is not available. Memoize the result as initialization is # expensive. return Gcc(bin=self.gcc_bin) def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark: self.install() # Most of the source files are named after the parent directory, but not # all. c_file_name = { "blowfish": "bf.c", "motion": "mpeg2.c", "sha": "sha_driver.c", "jpeg": "main.c", }.get(uri.path[1:], f"{uri.path[1:]}.c") source_dir_path = Path(os.path.normpath(f"{self.dataset_root}/{uri.path}")) source_path = source_dir_path / c_file_name preprocessed_path = source_dir_path / "src.c" # If the file does not exist, preprocess it on-demand. if not preprocessed_path.is_file(): if not source_path.is_file(): raise LookupError( f"Benchmark not found: {uri} (file not found: {source_path})" ) with atomic_file_write(preprocessed_path) as tmp_path: # TODO(github.com/facebookresearch/CompilerGym/issues/325): Send # over the unprocessed code to the service, have the service # preprocess. Until then, we do it client side with GCC having # to be fixed by an environment variable. 
self.gcc( "-E", "-o", tmp_path.name, c_file_name, cwd=source_dir_path, timeout=300, ) return Benchmark.from_file(uri, preprocessed_path) @property def size(self) -> int: return len(URIS)
CompilerGym-development
compiler_gym/envs/gcc/datasets/chstone.py
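A sketch of the URI to source-file mapping that benchmark_from_parsed_uri() applies above: most CHStone benchmarks live in "<name>/<name>.c", and a handful use a different file name.

special_names = {
    "blowfish": "bf.c",
    "motion": "mpeg2.c",
    "sha": "sha_driver.c",
    "jpeg": "main.c",
}

for uri in (
    "benchmark://chstone-v0/adpcm",
    "benchmark://chstone-v0/blowfish",
    "benchmark://chstone-v0/jpeg",
):
    name = uri.split("/")[-1]
    print(uri, "->", special_names.get(name, f"{name}.c"))
# benchmark://chstone-v0/adpcm -> adpcm.c
# benchmark://chstone-v0/blowfish -> bf.c
# benchmark://chstone-v0/jpeg -> main.c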
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import shutil import subprocess import tempfile from pathlib import Path from threading import Lock from typing import Iterable, Optional, Union import numpy as np from fasteners import InterProcessLock from compiler_gym.datasets import Benchmark, BenchmarkSource, Dataset from compiler_gym.datasets.benchmark import BenchmarkWithSource from compiler_gym.datasets.uri import BenchmarkUri from compiler_gym.envs.gcc.gcc import Gcc from compiler_gym.util.commands import Popen from compiler_gym.util.decorators import memoized_property from compiler_gym.util.runfiles_path import runfiles_path from compiler_gym.util.shell_format import plural from compiler_gym.util.truncate import truncate logger = logging.getLogger(__name__) # The maximum value for the --seed argument to csmith. UINT_MAX = (2**32) - 1 _CSMITH_BIN = runfiles_path("compiler_gym/third_party/csmith/csmith/bin/csmith") _CSMITH_INCLUDES = runfiles_path( "compiler_gym/third_party/csmith/csmith/include/csmith-2.3.0" ) _CSMITH_INSTALL_LOCK = Lock() # TODO(github.com/facebookresearch/CompilerGym/issues/325): This can be merged # with the LLVM implementation. class CsmithBenchmark(BenchmarkWithSource): """A CSmith benchmark.""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._src = None @classmethod def create(cls, uri: str, bitcode: bytes, src: bytes) -> Benchmark: """Create a benchmark from paths.""" benchmark = cls.from_file_contents(uri, bitcode) benchmark._src = src # pylint: disable=protected-access return benchmark @memoized_property def sources(self) -> Iterable[BenchmarkSource]: return [ BenchmarkSource(filename="source.c", contents=self._src), ] @property def source(self) -> str: """Return the single source file contents as a string.""" return self._src.decode("utf-8") class CsmithDataset(Dataset): """A dataset which uses Csmith to generate programs. Csmith is a tool that can generate random conformant C99 programs. It is described in the publication: Yang, Xuejun, Yang Chen, Eric Eide, and John Regehr. "Finding and understanding bugs in C compilers." In Proceedings of the 32nd ACM SIGPLAN conference on Programming Language Design and Implementation (PLDI), pp. 283-294. 2011. For up-to-date information about Csmith, see: https://embed.cs.utah.edu/csmith/ Note that Csmith is a tool that is used to find errors in compilers. As such, there is a higher likelihood that the benchmark cannot be used for an environment and that :meth:`env.reset() <compiler_gym.envs.CompilerEnv.reset>` will raise :class:`BenchmarkInitError <compiler_gym.datasets.BenchmarkInitError>`. """ def __init__( self, gcc_bin: Union[Path, str], site_data_base: Path, sort_order: int = 0, csmith_bin: Optional[Path] = None, csmith_includes: Optional[Path] = None, ): """Constructor. :param site_data_base: The base path of a directory that will be used to store installed files. :param sort_order: An optional numeric value that should be used to order this dataset relative to others. Lowest value sorts first. :param csmith_bin: The path of the Csmith binary to use. If not provided, the version of Csmith shipped with CompilerGym is used. :param csmith_includes: The path of the Csmith includes directory. If not provided, the includes of the Csmith shipped with CompilerGym is used. 
""" super().__init__( name="generator://csmith-v0", description="Random conformant C99 programs", references={ "Paper": "http://web.cse.ohio-state.edu/~rountev.1/5343/pdf/pldi11.pdf", "Homepage": "https://embed.cs.utah.edu/csmith/", }, license="BSD", site_data_base=site_data_base, sort_order=sort_order, benchmark_class=CsmithBenchmark, ) self.gcc_bin = gcc_bin self.csmith_bin_path = csmith_bin or _CSMITH_BIN self.csmith_includes_path = csmith_includes or _CSMITH_INCLUDES self._install_lockfile = self.site_data_path / ".install.LOCK" @property def size(self) -> int: # Actually 2^32 - 1, but practically infinite for all intents and # purposes. return 0 @memoized_property def gcc(self): # Defer instantiation of Gcc from the constructor as it will fail if the # given Gcc is not available. Memoize the result as initialization is # expensive. return Gcc(bin=self.gcc_bin) def benchmark_uris(self) -> Iterable[str]: return (f"{self.name}/{i}" for i in range(UINT_MAX)) def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> CsmithBenchmark: return self.benchmark_from_seed(int(uri.path[1:])) def _random_benchmark(self, random_state: np.random.Generator) -> Benchmark: seed = random_state.integers(UINT_MAX) return self.benchmark_from_seed(seed) @property def installed(self) -> bool: return super().installed and (self.site_data_path / "includes").is_dir() def install(self) -> None: super().install() if self.installed: return with _CSMITH_INSTALL_LOCK, InterProcessLock(self._install_lockfile): if (self.site_data_path / "includes").is_dir(): return # Copy the Csmith headers into the dataset's site directory path because # in bazel builds this includes directory is a symlink, and we need # actual files that we can use in a docker volume. shutil.copytree( self.csmith_includes_path, self.site_data_path / "includes.tmp", ) # Atomic directory rename to prevent race on install(). (self.site_data_path / "includes.tmp").rename( self.site_data_path / "includes" ) def benchmark_from_seed( self, seed: int, max_retries: int = 3, retry_count: int = 0 ) -> CsmithBenchmark: """Get a benchmark from a uint32 seed. :param seed: A number in the range 0 <= n < 2^32. :return: A benchmark instance. :raises OSError: If Csmith fails. :raises BenchmarkInitError: If the C program generated by Csmith cannot be lowered to LLVM-IR. """ if retry_count >= max_retries: raise OSError( f"Csmith failed after {retry_count} {plural(retry_count, 'attempt', 'attempts')} " f"with seed {seed}" ) self.install() # Run csmith with the given seed and pipe the output to clang to # assemble a bitcode. logger.debug("Exec csmith --seed %d", seed) with Popen( [str(self.csmith_bin_path), "--seed", str(seed)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) as csmith: # Generate the C source. src, stderr = csmith.communicate(timeout=300) if csmith.returncode: try: stderr = "\n".join( truncate(stderr.decode("utf-8"), max_line_len=200, max_lines=20) ) logger.warning("Csmith failed with seed %d: %s", seed, stderr) except UnicodeDecodeError: # Failed to interpret the stderr output, generate a generic # error message. logger.warning("Csmith failed with seed %d", seed) return self.benchmark_from_seed( seed, max_retries=max_retries, retry_count=retry_count + 1 ) # Pre-process the source. 
with tempfile.TemporaryDirectory() as tmpdir: src_file = f"{tmpdir}/src.c" with open(src_file, "wb") as f: f.write(src) preprocessed_src = self.gcc( "-E", "-I", str(self.site_data_path / "includes"), "-o", "-", src_file, cwd=tmpdir, timeout=60, volumes={ str(self.site_data_path / "includes"): { "bind": str(self.site_data_path / "includes"), "mode": "ro", } }, ) return self.benchmark_class.create( f"{self.name}/{seed}", preprocessed_src.encode("utf-8"), src )
CompilerGym-development
compiler_gym/envs/gcc/datasets/csmith.py
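A sketch of generating a benchmark from a seed with CsmithDataset. It assumes the bundled csmith binary and a working GCC (binary or docker image); the site data directory is a hypothetical scratch path.

from pathlib import Path

from compiler_gym.envs.gcc.datasets import CsmithDataset

dataset = CsmithDataset(gcc_bin="gcc", site_data_base=Path("/tmp/csmith-site"))
benchmark = dataset.benchmark_from_seed(0)   # the same seed yields the same program
print(benchmark.source[:120])                # preprocessed C source text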
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from functools import lru_cache from pathlib import Path from typing import Iterable, List, Optional, Union from compiler_gym.datasets import Dataset from compiler_gym.envs.gcc.datasets.anghabench import AnghaBenchDataset from compiler_gym.envs.gcc.datasets.chstone import CHStoneDataset from compiler_gym.envs.gcc.datasets.csmith import CsmithBenchmark, CsmithDataset from compiler_gym.util.runfiles_path import site_data_path def _get_gcc_datasets( gcc_bin: Union[str, Path], site_data_base: Optional[Path] = None ) -> Iterable[Dataset]: site_data_base = site_data_base or site_data_path("gcc-v0") yield CHStoneDataset(gcc_bin=gcc_bin, site_data_base=site_data_base) yield AnghaBenchDataset(site_data_base=site_data_base) yield CsmithDataset(gcc_bin=gcc_bin, site_data_base=site_data_base) @lru_cache(maxsize=16) def get_gcc_datasets( gcc_bin: Union[str, Path], site_data_base: Optional[Path] = None ) -> List[Dataset]: """Instantiate the builtin GCC datasets. :param gcc_bin: The GCC binary to use. :param site_data_base: The root of the site data path. :return: An iterable sequence of :class:`Dataset <compiler_gym.datasets.Dataset>` instances. """ return list(_get_gcc_datasets(gcc_bin, site_data_base)) __all__ = [ "AnghaBenchDataset", "CHStoneDataset", "CsmithBenchmark", "CsmithDataset", "get_gcc_datasets", ]
CompilerGym-development
compiler_gym/envs/gcc/datasets/__init__.py
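A sketch of enumerating the builtin GCC datasets through the helper above. The gcc binary name is an assumption; a "docker:" image name would also be accepted.

from compiler_gym.envs.gcc.datasets import get_gcc_datasets

for dataset in get_gcc_datasets(gcc_bin="gcc"):
    print(dataset.name)
# benchmark://chstone-v0
# benchmark://anghabench-v1
# generator://csmith-v0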
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import sys from pathlib import Path from typing import Optional from compiler_gym.datasets import TarDatasetWithManifest # TODO(github.com/facebookresearch/CompilerGym/issues/325): This can be merged # with the LLVM implementation. class AnghaBenchDataset(TarDatasetWithManifest): """A dataset of C programs curated from GitHub source code. The dataset is from: da Silva, Anderson Faustino, Bruno Conde Kind, José Wesley de Souza Magalhaes, Jerônimo Nunes Rocha, Breno Campos Ferreira Guimaraes, and Fernando Magno Quinão Pereira. "ANGHABENCH: A Suite with One Million Compilable C Benchmarks for Code-Size Reduction." In 2021 IEEE/ACM International Symposium on Code Generation and Optimization (CGO), pp. 378-390. IEEE, 2021. And is available at: http://cuda.dcc.ufmg.br/angha/home """ def __init__( self, site_data_base: Path, sort_order: int = 0, manifest_url: Optional[str] = None, manifest_sha256: Optional[str] = None, deprecated: Optional[str] = None, name: Optional[str] = None, ): manifest_url_, manifest_sha256_ = { "darwin": ( "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-anghabench-v1-macos-manifest.bz2", "96ead63da5f8efa07fd0370f0c6e452b59bed840828b8b19402102b1ce3ee109", ), "linux": ( "https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-anghabench-v1-linux-manifest.bz2", "14df85f650199498cf769715e9f0d7841d09f9fa62a95b8ecc242bdaf227f33a", ), }[sys.platform] super().__init__( name=name or "benchmark://anghabench-v1", description="Compile-only C/C++ functions extracted from GitHub", references={ "Paper": "https://homepages.dcc.ufmg.br/~fernando/publications/papers/FaustinoCGO21.pdf", "Homepage": "http://cuda.dcc.ufmg.br/angha/", }, license="Unknown. See: https://github.com/brenocfg/AnghaBench/issues/1", site_data_base=site_data_base, manifest_urls=[manifest_url or manifest_url_], manifest_sha256=manifest_sha256 or manifest_sha256_, tar_urls=[ "https://github.com/brenocfg/AnghaBench/archive/d8034ac8562b8c978376008f4b33df01b8887b19.tar.gz" ], tar_sha256="85d068e4ce44f2581e3355ee7a8f3ccb92568e9f5bd338bc3a918566f3aff42f", strip_prefix="AnghaBench-d8034ac8562b8c978376008f4b33df01b8887b19", tar_compression="gz", benchmark_file_suffix=".c", sort_order=sort_order, deprecated=deprecated, )
CompilerGym-development
compiler_gym/envs/gcc/datasets/anghabench.py
#! /usr/bin/env python3 # # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """A CompilerGym service for GCC.""" import codecs import hashlib import json import logging import os import pickle import re from collections import Counter from pathlib import Path from typing import List, Optional, Tuple from urllib.request import urlopen from compiler_gym.envs.gcc import Gcc, Option from compiler_gym.service import CompilationSession from compiler_gym.service.proto import ( ActionSpace, Benchmark, ByteSequenceSpace, ByteTensor, Event, Int64Range, Int64Tensor, ListSpace, NamedDiscreteSpace, ObservationSpace, Space, StringSpace, ) logger = logging.getLogger(__name__) def make_gcc_compilation_session(gcc_bin: str): """Create a class to represent a GCC compilation service. :param gcc_bin: Path to the gcc executable. This can a command name, like "gcc", or it can be path to the executable. Finally, if prefixed with "docker:" it can be the name of a docker image, e.g. "docker:gcc:11.2.0" """ gcc = Gcc(gcc_bin) # The available actions actions = [] # Actions that are small will have all their various choices made as # explicit actions. # Actions that are not small will have the abbility to increment the choice # by different amounts. for i, option in enumerate(gcc.spec.options): if len(option) < 10: for j in range(len(option)): actions.append(SimpleAction(option, i, j)) if len(option) >= 10: actions.append(IncrAction(option, i, 1)) actions.append(IncrAction(option, i, -1)) if len(option) >= 50: actions.append(IncrAction(option, i, 10)) actions.append(IncrAction(option, i, -10)) if len(option) >= 500: actions.append(IncrAction(option, i, 100)) actions.append(IncrAction(option, i, -100)) if len(option) >= 5000: actions.append(IncrAction(option, i, 1000)) actions.append(IncrAction(option, i, -1000)) action_spaces_ = [ ActionSpace( name="default", space=Space( named_discrete=NamedDiscreteSpace(name=[str(a) for a in actions]), ), ), ] observation_spaces_ = [ # A string of the source code ObservationSpace( name="source", space=Space(string_value=StringSpace(length_range=Int64Range(min=0))), deterministic=True, platform_dependent=False, default_observation=Event(string_value=""), ), # A string of the rtl code ObservationSpace( name="rtl", space=Space(string_value=StringSpace(length_range=Int64Range(min=0))), deterministic=True, platform_dependent=True, default_observation=Event(string_value=""), ), # A string of the assembled code ObservationSpace( name="asm", space=Space(string_value=StringSpace(length_range=Int64Range(min=0))), deterministic=True, platform_dependent=True, default_observation=Event(string_value=""), ), # The size of the assembled code ObservationSpace( name="asm_size", space=Space(int64_value=Int64Range(min=-1)), deterministic=True, platform_dependent=True, default_observation=Event( int64_value=-1, ), ), # The hash of the assembled code ObservationSpace( name="asm_hash", space=Space( string_value=StringSpace(length_range=Int64Range(min=0, max=200)), ), deterministic=True, platform_dependent=True, default_observation=Event(string_value=""), ), # Asm instruction counts - Counter as a JSON string ObservationSpace( name="instruction_counts", space=Space( string_value=StringSpace(length_range=Int64Range(min=0)), ), deterministic=True, platform_dependent=True, default_observation=Event(string_value=""), ), # A bytes of the object code ObservationSpace( name="obj", 
space=Space( byte_sequence=ByteSequenceSpace(length_range=Int64Range(min=0)), ), deterministic=True, platform_dependent=False, default_observation=Event(byte_tensor=ByteTensor(shape=[0], value=b"")), ), # The size of the object code ObservationSpace( name="obj_size", space=Space(int64_value=Int64Range(min=-1)), deterministic=True, platform_dependent=True, default_observation=Event( int64_value=-1, ), ), # The hash of the object code ObservationSpace( name="obj_hash", space=Space( string_value=StringSpace(length_range=Int64Range(min=0, max=200)), ), deterministic=True, platform_dependent=True, default_observation=Event(string_value=""), ), # A list of the choices. Each element corresponds to an option in the spec. # '-1' indicates that this is empty on the command line (e.g. if the choice # corresponding to the '-O' option is -1, then no -O flag will be emitted.) # If a nonnegative number if given then that particular choice is used # (e.g. for the -O flag, 5 means use '-Ofast' on the command line.) ObservationSpace( name="choices", space=Space( space_list=ListSpace( space=[ Space(int64_value=Int64Range(min=0, max=len(option) - 1)) for option in gcc.spec.options ] ), ), ), # The command line for compiling the object file as a string ObservationSpace( name="command_line", space=Space( string_value=StringSpace(length_range=Int64Range(min=0, max=200)), ), deterministic=True, platform_dependent=True, default_observation=Event(string_value=""), ), ] class GccCompilationSession(CompilationSession): """A GCC interactive compilation session.""" compiler_version: str = gcc.spec.version action_spaces = action_spaces_ observation_spaces = observation_spaces_ def __init__( self, working_directory: Path, action_space: ActionSpace, benchmark: Benchmark, ): super().__init__(working_directory, action_space, benchmark) # The benchmark being used self.benchmark = benchmark # Timeout value for compilation (in seconds) self._timeout = None # The source code self._source = None # The rtl code self._rtl = None # The assembled code self._asm = None # Size of the assembled code self._asm_size = None # Hash of the assembled code self._asm_hash = None # The object binary self._obj = None # size of the object binary self._obj_size = None # Hash of the object binary self._obj_hash = None # Set the path to the GCC executable self._gcc_bin = "gcc" # Initially the choices and the spec, etc are empty. 
They will be # initialised lazily self._choices = None @property def num_actions(self) -> int: return len(self.action_spaces[0].space.named_discrete.name) @property def choices(self) -> List[int]: if self._choices is None: self._choices = [-1] * len(gcc.spec.options) return self._choices @choices.setter def choices(self, value: List[int]): self._choices = value @property def source(self) -> str: """Get the benchmark source""" self.prepare_files() return self._source @property def rtl(self) -> bytes: """Get the RTL code""" self.dump_rtl() return self._rtl @property def asm(self) -> bytes: """Get the assembled code""" self.assemble() return self._asm @property def asm_size(self) -> int: """Get the assembled code size""" self.assemble() return self._asm_size @property def asm_hash(self) -> str: """Get the assembled code hash""" self.assemble() return self._asm_hash @property def instruction_counts(self) -> str: """Get the instuction counts as a JSON string""" self.assemble() insn_pat = re.compile("\t([a-zA-Z-0-9.-]+).*") insn_cnts = Counter() lines = self._asm.split("\n") for line in lines: m = insn_pat.fullmatch(line) if m: insn_cnts[m.group(1)] += 1 return json.dumps(insn_cnts) @property def obj(self) -> bytes: """Get the compiled code""" self.compile() return self._obj @property def obj_size(self) -> int: """Get the compiled code size""" self.compile() return self._obj_size @property def obj_hash(self) -> str: """Get the compiled code hash""" self.compile() return self._obj_hash @property def src_path(self) -> Path: """Get the path to the source file""" return self.working_dir / "src.c" @property def obj_path(self) -> Path: """Get the path to object file""" return self.working_dir / "obj.o" @property def asm_path(self) -> Path: """Get the path to the assembly""" return self.working_dir / "asm.s" @property def rtl_path(self) -> Path: """Get the path to the rtl""" return self.working_dir / "rtl.lsp" def obj_command_line( self, src_path: Path = None, obj_path: Path = None ) -> List[str]: """Get the command line to create the object file. The 'src_path' and 'obj_path' give the input and output paths. If not set, then they are taken from 'self.src_path' and 'self.obj_path'. This is useful for printing where the actual paths are not important.""" src_path = src_path or self.src_path obj_path = obj_path or self.obj_path # Gather the choices as strings opts = [ option[choice] for option, choice in zip(gcc.spec.options, self.choices) if choice >= 0 ] cmd_line = opts + ["-w", "-c", src_path, "-o", obj_path] return cmd_line def asm_command_line( self, src_path: Path = None, asm_path: Path = None ) -> List[str]: """Get the command line to create the assembly file. The 'src_path' and 'asm_path' give the input and output paths. If not set, then they are taken from 'self.src_path' and 'self.obj_path'. This is useful for printing where the actual paths are not important.""" src_path = src_path or self.src_path asm_path = asm_path or self.asm_path opts = [ option[choice] for option, choice in zip(gcc.spec.options, self.choices) if choice >= 0 ] cmd_line = opts + ["-w", "-S", src_path, "-o", asm_path] return cmd_line def rtl_command_line( self, src_path: Path = None, rtl_path: Path = None, asm_path: Path = None ) -> List[str]: """Get the command line to create the rtl file - might as well do the asm at the same time. The 'src_path', 'rtl_path', 'asm_path' give the input and output paths. If not set, then they are taken from 'self.src_path' and 'self.obj_path'. 
This is useful for printing where the actual paths are not important.""" src_path = src_path or self.src_path rtl_path = rtl_path or self.rtl_path asm_path = asm_path or self.asm_path opts = [ option[choice] for option, choice in zip(gcc.spec.options, self.choices) if choice >= 0 ] cmd_line = opts + [ "-w", "-S", src_path, f"-fdump-rtl-dfinish={rtl_path}", "-o", asm_path, ] return cmd_line def prepare_files(self): """Copy the source to the working directory.""" if not self._source: if self.benchmark.program.contents: self._source = self.benchmark.program.contents.decode() else: with urlopen(self.benchmark.program.uri) as r: self._source = r.read().decode() with open(self.src_path, "w") as f: print(self._source, file=f) def compile(self) -> Optional[str]: """Compile the benchmark""" if not self._obj: self.prepare_files() logger.debug( "Compiling: %s", " ".join(map(str, self.obj_command_line())) ) gcc( *self.obj_command_line(), cwd=self.working_dir, timeout=self._timeout, ) with open(self.obj_path, "rb") as f: # Set the internal variables self._obj = f.read() self._obj_size = os.path.getsize(self.obj_path) self._obj_hash = hashlib.md5(self._obj).hexdigest() def assemble(self) -> Optional[str]: """Assemble the benchmark""" if not self._asm: self.prepare_files() logger.debug( "Assembling: %s", " ".join(map(str, self.asm_command_line())) ) gcc( *self.asm_command_line(), cwd=self.working_dir, timeout=self._timeout, ) with open(self.asm_path, "rb") as f: # Set the internal variables asm_bytes = f.read() self._asm = asm_bytes.decode() self._asm_size = os.path.getsize(self.asm_path) self._asm_hash = hashlib.md5(asm_bytes).hexdigest() def dump_rtl(self) -> Optional[str]: """Dump the RTL (and assemble the benchmark)""" if not self._rtl: self.prepare_files() logger.debug( "Dumping RTL: %s", " ".join(map(str, self.rtl_command_line())) ) gcc( *self.rtl_command_line(), cwd=self.working_dir, timeout=self._timeout, ) with open(self.asm_path, "rb") as f: # Set the internal variables asm_bytes = f.read() self._asm = asm_bytes.decode() self._asm_size = os.path.getsize(self.asm_path) self._asm_hash = hashlib.md5(asm_bytes).hexdigest() with open(self.rtl_path, "rb") as f: # Set the internal variables rtl_bytes = f.read() self._rtl = rtl_bytes.decode() def reset_cached(self): """Reset the cached values""" self._obj = None self._obj_size = None self._obj_hash = None self._rtl = None self._asm = None self._asm_size = None self._asm_hash = None def apply_action( self, action_proto: Event ) -> Tuple[bool, Optional[ActionSpace], bool]: """Apply an action.""" if not action_proto.HasField("int64_value"): raise ValueError("Invalid action, int64_value expected.") choice_index = action_proto.int64_value if choice_index < 0 or choice_index >= self.num_actions: raise ValueError("Out-of-range") # Get the action action = actions[choice_index] # Apply the action to this session and check if we changed anything old_choices = self.choices.copy() action(self) logger.debug("Applied action %s", action) # Reset the internal variables if this action has caused a change in the # choices if old_choices != self.choices: self.reset_cached() # The action has not changed anything yet. 
That waits until an # observation is taken return False, None, False def get_observation(self, observation_space: ObservationSpace) -> Event: """Get one of the observations""" if observation_space.name == "source": return Event(string_value=self.source or "") elif observation_space.name == "rtl": return Event(string_value=self.rtl or "") elif observation_space.name == "asm": return Event(string_value=self.asm or "") elif observation_space.name == "asm_size": return Event(int64_value=self.asm_size or -1) elif observation_space.name == "asm_hash": return Event(string_value=self.asm_hash or "") elif observation_space.name == "instruction_counts": return Event(string_value=self.instruction_counts or "{}") elif observation_space.name == "obj": value = self.obj or b"" return Event(byte_tensor=ByteTensor(shape=[len(value)], value=value)) elif observation_space.name == "obj_size": return Event(int64_value=self.obj_size or -1) elif observation_space.name == "obj_hash": return Event(string_value=self.obj_hash or "") elif observation_space.name == "choices": observation = Event( int64_tensor=Int64Tensor( shape=[len(self.choices)], value=self.choices ) ) return observation elif observation_space.name == "command_line": return Event( string_value=gcc.bin + " " + " ".join(map(str, self.obj_command_line("src.c", "obj.o"))) ) else: raise KeyError(observation_space.name) def handle_session_parameter(self, key: str, value: str) -> Optional[str]: if key == "gcc_spec": return codecs.encode(pickle.dumps(gcc.spec), "base64").decode() elif key == "choices": choices = list(map(int, value.split(","))) assert len(choices) == len(gcc.spec.options) assert all( -1 <= p <= len(gcc.spec.options[i]) for i, p in enumerate(choices) ) if choices != self.choices: self.choices = choices self.reset_cached() return "" elif key == "timeout": self._timeout = None if value == "" else int(value) return "" return None return GccCompilationSession class Action: """An action is applying a choice to an option""" def __init__(self, option: Option, option_index: int): """The option and its index in the option list. We need the index to match it with the corresponding choice later during the application of the action.""" self.option = option self.option_index = option_index def __call__(self, session: "GccCompilationSession"): # noqa """Apply the action to the session.""" raise NotImplementedError() def __str__(self) -> str: raise NotImplementedError() class SimpleAction(Action): """A simple action just sets the choice directly. The choice_index describes which choice to apply.""" def __init__(self, option: Option, option_index: int, choice_index: int): super().__init__(option, option_index) self.choice_index = choice_index def __call__(self, session: "GccCompilationSession"): # noqa session.choices[self.option_index] = self.choice_index def __str__(self) -> str: return self.option[self.choice_index] class IncrAction(Action): """An action that increments a choice by an amount.""" def __init__(self, option: Option, option_index: int, choice_incr: int): super().__init__(option, option_index) self.choice_incr = choice_incr def __call__(self, session: "GccCompilationSession"): # noqag choice = session.choices[self.option_index] choice += self.choice_incr if choice < -1: choice = -1 if choice >= len(self.option): choice = len(self.option) - 1 session.choices[self.option_index] = choice def __str__(self) -> str: return f"{self.option}[{self.choice_incr:+}]"
CompilerGym-development
compiler_gym/envs/gcc/service/gcc_service.py
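A sketch of how IncrAction (defined above) nudges and clamps an option's choice. FakeSession is a hypothetical stand-in exposing only the `choices` list that actions mutate, and the import path for IncrAction is assumed from the file layout above.

from compiler_gym.envs.gcc.gcc import GccFlagIntOption
from compiler_gym.envs.gcc.service.gcc_service import IncrAction

class FakeSession:
    """Hypothetical stand-in exposing only the `choices` list that actions mutate."""
    def __init__(self, n_options: int):
        self.choices = [-1] * n_options

# "inline-limit" is an illustrative flag name, not taken from a real GCC spec.
option = GccFlagIntOption("inline-limit", min=0, max=99)   # 100 possible choices
session = FakeSession(n_options=1)

up = IncrAction(option, option_index=0, choice_incr=10)
down = IncrAction(option, option_index=0, choice_incr=-10)

up(session);   print(session.choices)    # [9]   (-1 + 10)
up(session);   print(session.choices)    # [19]
down(session); down(session); down(session)
print(session.choices)                   # [-1]  clamped; -1 means the flag is not emitted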
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Register the MLIR environments.""" from compiler_gym.envs.mlir.mlir_env import MlirEnv from compiler_gym.util.registration import register from compiler_gym.util.runfiles_path import runfiles_path __all__ = [ "MLIR_SERVICE_BINARY", "MlirEnv", ] MLIR_SERVICE_BINARY = runfiles_path( "compiler_gym/envs/mlir/service/compiler_gym-mlir-service" ) def _register_mlir_gym_service(): """Register an environment for each combination of MLIR observation/reward/benchmark.""" register( id="mlir-v0", entry_point="compiler_gym.envs.mlir:MlirEnv", kwargs={ "service": MLIR_SERVICE_BINARY, }, ) _register_mlir_gym_service()
CompilerGym-development
compiler_gym/envs/mlir/__init__.py
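A sketch of creating the environment registered above. Importing the module runs the register() call; it assumes the MLIR service binary has been built and is present at the runfiles path.

import gym

import compiler_gym.envs.mlir  # noqa: importing the module registers "mlir-v0"

env = gym.make("mlir-v0")
env.reset()
env.close()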
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from pathlib import Path from typing import Iterable, List, Optional, Union import numpy as np from compiler_gym.datasets import Dataset from compiler_gym.envs.mlir.datasets import MatmulBenchmark, get_mlir_datasets from compiler_gym.service.client_service_compiler_env import ClientServiceCompilerEnv from compiler_gym.spaces import Reward, RuntimeReward from compiler_gym.util.gym_type_hints import ActionType from compiler_gym.views import ObservationSpaceSpec _MLIR_DATASETS: Optional[List[Dataset]] = None def _get_mlir_datasets(site_data_base: Optional[Path] = None) -> Iterable[Dataset]: """Get the MLIR datasets. Use a singleton value when site_data_base is the default value. """ global _MLIR_DATASETS if site_data_base is None: if _MLIR_DATASETS is None: _MLIR_DATASETS = list(get_mlir_datasets(site_data_base=site_data_base)) return _MLIR_DATASETS return get_mlir_datasets(site_data_base=site_data_base) class MlirEnv(ClientServiceCompilerEnv): """Environment that exposes optimization parameters for matrix multiplication in MLIR. It optimizes the operation C = A * B, where A is an MxK matrix and B is an KxN. This environment is still under construction. The observation space is the running time of the operation. All optimization parameters are supplied in a single step. In other words the episode length is 1 step. It supports x86_64 processors.""" def __init__( self, *args, benchmark: Optional[Union[str, MatmulBenchmark]] = None, datasets_site_path: Optional[Path] = None, rewards: Optional[List[Reward]] = None, **kwargs ): """ :param args: Positional arguments passed to the base class `compiler_gym.service.ClientServiceCompilerEnv`. :param benchmark: Specifies what MLIR code is to be optimize. Currently the default benchmark and only benchmark is M=N=K=64. :param rewards: The reward spaces that this environment supports. Defaults to (-running time). :param datasets_site_path: The base path of a directory that will be used to store installed files. :param kwargs: keyworded arguments passed to the base class `compiler_gym.service.ClientServiceCompilerEnv`. """ super().__init__( *args, **kwargs, rewards=[ RuntimeReward( runtime_count=1, warmup_count=0, estimator=np.median, default_value=-1, ) ] if rewards is None else rewards, datasets=_get_mlir_datasets(site_data_base=datasets_site_path), benchmark=benchmark ) self._runtimes_per_observation_count: Optional[int] = None self.reset() if rewards is None: self.reward.spaces["runtime"].runtime_count = self.runtime_observation_count @property def runtime_observation_count(self) -> int: return self._runtimes_per_observation_count or int( self.send_param("mlir.get_runtimes_per_observation_count", "") ) @runtime_observation_count.setter def runtime_observation_count(self, n: int) -> None: if self.in_episode: self.send_param("mlir.set_runtimes_per_observation_count", str(n)) self._runtimes_per_observation_count = n def reset(self, *args, **kwargs): observation = super().reset(*args, **kwargs) # Resend the runtimes-per-observation session parameter, if it is a # non-default value. 
if self._runtimes_per_observation_count is not None: self.runtime_observation_count = self._runtimes_per_observation_count return observation def fork(self): fkd = super().fork() if self.runtime_observation_count is not None: fkd.runtime_observation_count = self.runtime_observation_count return fkd def step( # pylint: disable=arguments-differ self, action: ActionType, observation_spaces: Optional[Iterable[Union[str, ObservationSpaceSpec]]] = None, reward_spaces: Optional[Iterable[Union[str, Reward]]] = None, ): return self.multistep( actions=[action], observation_spaces=observation_spaces, reward_spaces=reward_spaces, )
CompilerGym-development
compiler_gym/envs/mlir/mlir_env.py
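A sketch of a single-step MlirEnv episode, assuming the MLIR service is available on an x86_64 machine. The sampled action is a placeholder; per the docstring above, all optimization parameters are supplied in one step.

import gym

import compiler_gym.envs.mlir  # noqa: ensure "mlir-v0" is registered

env = gym.make("mlir-v0")
env.reset()                            # default (and only) benchmark: the 64x64x64 matmul
env.runtime_observation_count = 5      # take the median over 5 runtime measurements
action = env.action_space.sample()     # placeholder choice of all parameters at once
observation, reward, done, info = env.step(action)
env.close()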
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from pathlib import Path from typing import Iterable, Optional from compiler_gym.datasets import Benchmark, BenchmarkSource, Dataset from compiler_gym.datasets.benchmark import BenchmarkWithSource from compiler_gym.service.proto import BenchmarkDynamicConfig, Command from compiler_gym.util.decorators import memoized_property from compiler_gym.util.runfiles_path import runfiles_path, site_data_path from compiler_gym.util.shell_format import plural logger = logging.getLogger(__name__) # The maximum value for the --seed argument to matmul. UINT_MAX = (2**32) - 1 _matmul_bin = runfiles_path("compiler_gym/third_party/matmul/matmul/bin/matmul") _matmul_includes = runfiles_path( "compiler_gym/third_party/matmul/matmul/include/matmul-2.3.0" ) matmul_sizes = [(64, 64, 64)] class MatmulBenchmark(BenchmarkWithSource): """A matmul benchmark.""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._src = None self.proto.dynamic_config.MergeFrom( BenchmarkDynamicConfig( build_cmd=Command( argument=["$CC", "$IN"], outfile=["benchmark_main"], timeout_seconds=120, ), run_cmd=Command( argument=["./benchmark_main", "--benchmark_format=json"], timeout_seconds=300, ), ) ) @classmethod def create(cls, uri: str, bitcode: bytes, src: bytes) -> Benchmark: """Create a benchmark from paths.""" benchmark = cls.from_file_contents(uri, bitcode) benchmark._src = src # pylint: disable=protected-access return benchmark @memoized_property def sources(self) -> Iterable[BenchmarkSource]: return [ BenchmarkSource(filename="source.mlir", contents=self._src), ] @property def source(self) -> str: """Return the single source file contents as a string.""" return self._src.decode("utf-8") class MatmulDataset(Dataset): """A dataset which generates matmul programs.""" def __init__( self, site_data_base: Path, sort_order: int = 0, matmul_bin: Optional[Path] = None, matmul_includes: Optional[Path] = None, ): """Constructor. :param site_data_base: The base path of a directory that will be used to store installed files. :param sort_order: An optional numeric value that should be used to order this dataset relative to others. Lowest value sorts first. :param matmul_bin: The path of the matmul binary to use. If not provided, the version of matmul shipped with CompilerGym is used. :param matmul_includes: The path of the matmul includes directory. If not provided, the includes of the matmul shipped with CompilerGym is used. 
""" super().__init__( name="generator://matmul-v0", description="Targeted size matmul programs", references={}, license="MIT", site_data_base=site_data_base, sort_order=sort_order, benchmark_class=MatmulBenchmark, ) self.matmul_bin_path = matmul_bin or _matmul_bin self.matmul_includes_path = matmul_includes or _matmul_includes @property def size(self) -> int: return len(matmul_sizes) def name_from_size(self, mnk): return f"{self.name}/{mnk[0]}_{mnk[1]}_{mnk[2]}" # TODO(kyleherndon): Benchmarks are actually dynamically generated for any # provided parameters, figure out a better way to represent this in the list of # available benchmarks def benchmark_uris(self) -> Iterable[str]: return (self.name_from_size(mnk) for mnk in matmul_sizes) def benchmark(self, uri: str) -> MatmulBenchmark: sizestr = uri.split("/")[-1] sizetuple = (int(i) for i in sizestr.split("_")) return self.benchmark_from_size(sizetuple) def benchmark_from_size( self, mnk, max_retries: int = 3, retry_count: int = 0 ) -> MatmulBenchmark: """Get a benchmark from a uint32 seed. :param mnk: 3-tuple containing m, n, k sizes of the matmul :return: A benchmark instance. :raises OSError: If matmul fails. :raises BenchmarkInitError: If the C program generated by matmul cannot be lowered to mlir-IR. """ if retry_count >= max_retries: raise OSError( f"matmul failed after {retry_count} {plural(retry_count, 'attempt', 'attempts')} " f"with size {mnk}" ) self.install() mnk = list(mnk) # Run matmul with the given size and regex to produce the correct mlir logger.debug("Exec matmul --mnk %d", mnk) # TODO(kyleherndon): refactor these to another location src_content = """ func @matmul(%a: tensor<${M}x${K}xf32> {linalg.buffer_layout = affine_map<(i, j)[s0, s1] -> (i, j)>}, %b: tensor<${K}x${N}xf32> {linalg.buffer_layout = affine_map<(i, j)[s0, s1] -> (i, j)>}, %c: tensor<${M}x${N}xf32> {linalg.buffer_layout = affine_map<(i, j)[s0, s1] -> (i, j)>}) -> tensor<${M}x${N}xf32> attributes { passthrough = [["target-cpu", "haswell"], ["prefer-vector-width", "256"]]} { %f0 = arith.constant 0.0 : f32 %f1 = linalg.fill(%f0, %c) : f32, tensor<${M}x${N}xf32> -> tensor<${M}x${N}xf32> %d = linalg.matmul ins(%a, %b : tensor<${M}x${K}xf32>, tensor<${K}x${N}xf32>) outs(%f1: tensor<${M}x${N}xf32>) -> tensor<${M}x${N}xf32> return %d : tensor<${M}x${N}xf32> }""" cc_src = """ #include <benchmark/benchmark.h> #include <mlir/ExecutionEngine/RunnerUtils.h> #include <cstdio> #include <vector> void naive_matmul(const float* a, const float* b, float* c, size_t m, size_t k, size_t n) { // correctness check for (size_t i = 0; i < m; i++) { for (size_t j = 0; j < n; j++) { #ifdef COLUMN_MAJOR size_t ci = i + j * m; #else size_t ci = i * n + j; #endif c[ci] = 0.0f; for (size_t p = 0; p < k; p++) { #ifdef COLUMN_MAJOR c[ci] += a[i + p * m] * b[p + j * k]; #else c[ci] += a[i * k + p] * b[p * n + j]; #endif } } } } void init_matrix(float* a, int nrows, int ncols) { for (int j = 0; j < ncols; j++) { for (int i = 0; i < nrows; i++) { a[i + j * nrows] = ((float)rand() / (float)RAND_MAX); } } } extern "C" { void matmul(float* aligned_a, float* allocated_a, int64_t offset_a, int64_t size_a0, int64_t size_a1, int64_t strides_a0, int64_t strides_a1, float* aligned_b, float* allocated_b, int64_t offset_b, int64_t size_b0, int64_t size_b1, int64_t strides_b0, int64_t strides_b1, float* aligned_c, float* allocated_c, int64_t offset_c, int64_t size_c0, int64_t size_c1, int64_t strides_c0, int64_t strides_c1); } size_t g_errors = 0; static void BenchmarkFunction(benchmark::State& state) { 
// TODO(boian): pass these as command line arguments int MDIM = ${M}; int NDIM = ${N}; int KDIM = ${K}; std::vector<float> a(MDIM * KDIM); std::vector<float> b(KDIM * NDIM); std::vector<float> c(MDIM * NDIM); float *A = a.data(), *B = b.data(), *C = c.data(); // a[0] = 1; b[0] = 2; init_matrix(A, MDIM, KDIM); init_matrix(B, KDIM, NDIM); init_matrix(C, MDIM, NDIM); int LDA = KDIM; int LDB = NDIM; int LDC = NDIM; for (auto _ : state) { matmul(A, A, 0, MDIM, KDIM, LDA, 1, B, B, 0, KDIM, NDIM, LDB, 1, C, C, 0, MDIM, NDIM, LDC, 1); } std::vector<float> c2(MDIM * NDIM); float* C2 = c2.data(); size_t errors = 0; naive_matmul(A, B, C2, MDIM, KDIM, NDIM); for (size_t i = 0; i < MDIM; i++) { for (size_t j = 0; j < NDIM; j++) { size_t ci = i + j * MDIM; if (std::abs(C[ci] - C2[ci]) > 0.01f) { if (errors == 0) { fprintf(stderr, "Incorrect result at index %ld,%ld: C=%0.2f C2=%0.2f\\n", i, j, C[ci], C2[ci]); } errors++; } } } fprintf(stderr, "Detected %ld errors.\\n", errors); g_errors = errors; } int main(int argc, char** argv) { benchmark::Initialize(&argc, argv); benchmark::RegisterBenchmark("BM_Matmul", BenchmarkFunction) ->MeasureProcessCPUTime() ->UseRealTime(); benchmark::RunSpecifiedBenchmarks(); benchmark::Shutdown(); return g_errors != 0; } """ mlir_site_dir = site_data_path("mlir-v0") mlir_site_dir.mkdir(parents=True, exist_ok=True) mlir_file_path = site_data_path("mlir-v0") / "matmul.mlir.template" with open(mlir_file_path, "w+") as mlir_file: mlir_file.write(src_content) mlir_file.close() cc_file_path = site_data_path("mlir-v0") / "benchmark_main.cc.template" with open(cc_file_path, "w+") as cc_file: cc_file.write(cc_src) cc_file.close() new_content = src_content.replace("${M}", str(mnk[0])) new_content = new_content.replace("${N}", str(mnk[1])) content = new_content.replace("${K}", str(mnk[2])) return self.benchmark_class.create( self.name_from_size(mnk), bytes(content, "utf-8"), bytes(src_content, "utf-8"), )
CompilerGym-development
compiler_gym/envs/mlir/datasets/matmul.py
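A sketch of the URI naming and ${M}/${N}/${K} template substitution used by MatmulDataset above, written with plain strings rather than the dataset class.

matmul_sizes = [(64, 64, 64)]

def name_from_size(mnk, name="generator://matmul-v0"):
    return f"{name}/{mnk[0]}_{mnk[1]}_{mnk[2]}"

template = "tensor<${M}x${K}xf32>"
m, n, k = matmul_sizes[0]
print(name_from_size((m, n, k)))                                  # generator://matmul-v0/64_64_64
print(template.replace("${M}", str(m)).replace("${K}", str(k)))   # tensor<64x64xf32>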
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from pathlib import Path from typing import Iterable, Optional from compiler_gym.datasets import Dataset from compiler_gym.envs.mlir.datasets.matmul import MatmulBenchmark, MatmulDataset from compiler_gym.util.runfiles_path import site_data_path def get_mlir_datasets(site_data_base: Optional[Path] = None) -> Iterable[Dataset]: """Instantiate the builtin datasets. :param site_data_base: The root of the site data path. :return: An iterable sequence of :class:`Dataset <compiler_gym.datasets.Dataset>` instances. """ site_data_base = site_data_base or site_data_path("mlir-v0") yield MatmulDataset(site_data_base=site_data_base) __all__ = ["MatmulDataset", "MatmulBenchmark"]
CompilerGym-development
compiler_gym/envs/mlir/datasets/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Callable, Dict, List from compiler_gym.errors import ServiceError from compiler_gym.service.proto import ObservationSpace from compiler_gym.util.gym_type_hints import ( ActionType, ObservationType, RewardType, StepType, ) from compiler_gym.views.observation_space_spec import ObservationSpaceSpec class ObservationView: """A view into the available observation spaces of a service. Example usage: >>> env = gym.make("llvm-v0") >>> env.reset() >>> env.observation.spaces.keys() ["Autophase", "Ir"] >>> env.observation.spaces["Autophase"].space Box(56,) >>> env.observation["Autophase"] [0, 1, ..., 2] >>> observation["Ir"] int main() {...} """ def __init__( self, raw_step: Callable[ [List[ActionType], List[ObservationType], List[RewardType]], StepType ], spaces: List[ObservationSpace], ): if not spaces: raise ValueError("No observation spaces") self.spaces: Dict[str, ObservationSpaceSpec] = {} self._raw_step = raw_step for i, s in enumerate(spaces): self._add_space(ObservationSpaceSpec.from_proto(i, s)) def __getitem__(self, observation_space: str) -> ObservationType: """Request an observation from the given space. :param observation_space: The observation space to query. :return: An observation. :raises KeyError: If the requested observation space does not exist. :raises SessionNotFound: If :meth:`env.reset() <compiler_gym.envs.CompilerEnv.reset>` has not been called. :raises ServiceError: If the backend service fails to compute the observation, or reports that a terminal state has been reached. """ observation_space: ObservationSpaceSpec = self.spaces[observation_space] observations, _, done, info = self._raw_step( actions=[], observation_spaces=[observation_space], reward_spaces=[] ) if done: # Computing an observation should never cause a terminal state since # no action has been applied. msg = f"Failed to compute observation '{observation_space.id}'" if info.get("error_details"): msg += f": {info['error_details']}" raise ServiceError(msg) if len(observations) != 1: raise ServiceError( f"Expected 1 '{observation_space.id}' observation " f"but the service returned {len(observations)}" ) return observations[0] def _add_space(self, space: ObservationSpaceSpec): """Register a new space.""" self.spaces[space.id] = space # Bind a new method to this class that is a callback to compute the # given observation space. E.g. if a new space is added with ID # `FooBar`, this observation can be computed using # env.observation.FooBar(). setattr(self, space.id, lambda: self[space.id]) def add_derived_space( self, id: str, base_id: str, **kwargs, ) -> None: """Internal API for adding a new observation space.""" base_space = self.spaces[base_id] self._add_space(base_space.make_derived_space(id=id, **kwargs)) def __repr__(self): return f"ObservationView[{', '.join(sorted(self.spaces.keys()))}]"
CompilerGym-development
compiler_gym/views/observation.py
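The class docstring above translates into a small end-to-end sketch. It assumes the LLVM environment ("llvm-v0") is installed; observation space names vary by backend.

import gym

import compiler_gym  # noqa: F401 -- registers the CompilerGym environments

env = gym.make("llvm-v0")
env.reset()
print(sorted(env.observation.spaces.keys()))  # available observation space IDs
print(env.observation["Autophase"])           # query a space by name
print(env.observation.Autophase())            # equivalent bound-method form added by _add_space()
env.close()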
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from compiler_gym.views.observation import ObservationView
from compiler_gym.views.observation_space_spec import ObservationSpaceSpec
from compiler_gym.views.reward import RewardView

__all__ = ["ObservationView", "ObservationSpaceSpec", "RewardView"]
CompilerGym-development
compiler_gym/views/__init__.py
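Because the package re-exports the three view classes, downstream code can import them directly from the package root:

from compiler_gym.views import ObservationSpaceSpec, ObservationView, RewardView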
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from typing import Dict, List

from compiler_gym.datasets import Benchmark
from compiler_gym.spaces.reward import Reward
from compiler_gym.views.observation import ObservationView


class RewardView:
    """A view into a set of reward spaces.

    Example usage:

        >>> env = gym.make("llvm-v0")
        >>> env.reset()
        >>> env.reward.spaces["codesize"].range
        (-np.inf, 0)
        >>> env.reward["codesize"]
        -1243

    :ivar spaces: Specifications of available reward spaces.

    :vartype spaces: Dict[str, Reward]
    """

    def __init__(
        self,
        spaces: List[Reward],
        observation_view: ObservationView,
    ):
        self.spaces: Dict[str, Reward] = {}

        self.previous_action = None
        self._observation_view = observation_view

        for space in spaces:
            self._add_space(space)

    def __getitem__(self, reward_space: str) -> float:
        """Request a reward from the given space.

        :param reward_space: The reward space to query.

        :return: A reward.

        :raises KeyError: If the requested reward space does not exist.

        :raises SessionNotFound: If :meth:`env.reset()
            <compiler_gym.envs.CompilerEnv.reset>` has not been called.
        """
        # TODO(cummins): Since reward is a function from (state, action) -> r
        # it would be better to make the list of rewards to evaluate an argument
        # to env.step() rather than using this lazy view.
        if not self.spaces:
            raise ValueError("No reward spaces")
        space = self.spaces[reward_space]
        observations = [self._observation_view[obs] for obs in space.observation_spaces]
        return space.update(self.previous_action, observations, self._observation_view)

    def reset(self, benchmark: Benchmark, observation_view: ObservationView) -> None:
        """Reset the rewards space view. This is called on
        :meth:`env.reset() <compiler_gym.envs.CompilerEnv.reset>`.

        :param benchmark: The benchmark that is used for this episode.

        :param observation_view: The observation view used to compute rewards.
        """
        self.previous_action = None
        for space in self.spaces.values():
            space.reset(benchmark=benchmark, observation_view=observation_view)

    def add_space(self, space: Reward) -> None:
        """Register a new :class:`Reward <compiler_gym.spaces.Reward>` space.

        :param space: The reward space to be added.
        """
        if space.name in self.spaces:
            warnings.warn(f"Replacing existing reward space '{space.name}'")
        self._add_space(space)

    def _add_space(self, space: Reward):
        """Register a new space."""
        self.spaces[space.name] = space
        # Bind a new method to this class that is a callback to compute the
        # given reward space. E.g. if a new space is added with name `FooBar`,
        # this reward can be computed using env.reward.FooBar().
        setattr(self, space.name, lambda: self[space.name])
CompilerGym-development
compiler_gym/views/reward.py
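A sketch of the lazy reward evaluation described above. It assumes an installed "llvm-v0" environment; the "codesize" space name is taken from the class docstring and may differ across backends.

import gym

import compiler_gym  # noqa: F401

env = gym.make("llvm-v0")
env.reset()
print(env.reward.spaces.keys())  # available Reward spaces
print(env.reward["codesize"])    # evaluates the reward for the current state
env.close()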
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Any, Callable, ClassVar, Optional, Union from gym.spaces import Space from compiler_gym.service.proto import Event, ObservationSpace, py_converters from compiler_gym.util.gym_type_hints import ObservationType from compiler_gym.util.shell_format import indent class ObservationSpaceSpec: """Specification of an observation space. :ivar id: The name of the observation space. :vartype id: str :ivar index: The index into the list of observation spaces that the service supports. :vartype index: int :ivar space: The space. :vartype space: Space :ivar deterministic: Whether the observation space is deterministic. :vartype deterministic: bool :ivar platform_dependent: Whether the observation values depend on the execution environment of the service. :vartype platform_dependent: bool :ivar default_value: A default observation. This value will be returned by :func:`CompilerEnv.step() <compiler_gym.envs.CompilerEnv.step>` if :func:`CompilerEnv.observation_space <compiler_gym.envs.CompilerEnv.observation_space>` is set and the service terminates. """ message_converter: ClassVar[ Callable[[Any], Any] ] = py_converters.make_message_default_converter() def __init__( self, id: str, index: int, space: Space, translate: Callable[[Union[ObservationType, Event]], ObservationType], to_string: Callable[[ObservationType], str], deterministic: bool, platform_dependent: bool, default_value: ObservationType, ): """Constructor. Don't call directly, use make_derived_space().""" self.id: str = id self.index: int = index self.space = space self.deterministic = deterministic self.platform_dependent = platform_dependent self.default_value = default_value self.translate = translate self.to_string = to_string def __hash__(self) -> int: # Quickly hash observation spaces by comparing the index into the list # of spaces returned by the environment. This means that you should not # hash between observation spaces from different environments as this # will cause collisions, e.g. # # # not okay: # >>> obs = set(env.observation.spaces).union( # other_env.observation.spaces # ) # # If you want to hash between environments, consider using the string id # to identify the observation spaces. return self.index def __repr__(self) -> str: return f"ObservationSpaceSpec({self.id})" def __eq__(self, rhs) -> bool: """Equality check.""" if isinstance(rhs, str): return self.id == rhs elif isinstance(rhs, ObservationSpaceSpec): return ( self.id == rhs.id and self.index == rhs.index and self.space == rhs.space and self.platform_dependent == rhs.platform_dependent and self.deterministic == rhs.deterministic ) return False @classmethod def from_proto(cls, index: int, proto: ObservationSpace): """Create an observation space from a ObservationSpace protocol buffer. :param index: The index of this observation space into the list of observation spaces that the compiler service supports. :param proto: An ObservationSpace protocol buffer. :raises ValueError: If protocol buffer is invalid. 
""" try: spec = ObservationSpaceSpec.message_converter(proto) except ValueError as e: raise ValueError( f"Error interpreting description of observation space '{proto.name}'.\n" f"Error: {e}\n" f"ObservationSpace message:\n" f"{indent(proto.space, n=2)}" ) from e # TODO(cummins): Additional validation of the observation space # specification would be useful here, such as making sure that the size # of {low, high} tensors for box shapes match. At present, these errors # tend not to show up until later, making it more difficult to debug. return cls( id=proto.name, index=index, space=spec, translate=ObservationSpaceSpec.message_converter, to_string=str, deterministic=proto.deterministic, platform_dependent=proto.platform_dependent, default_value=ObservationSpaceSpec.message_converter( proto.default_observation ), ) def make_derived_space( self, id: str, translate: Callable[[ObservationType], ObservationType], space: Optional[Space] = None, deterministic: Optional[bool] = None, default_value: Optional[ObservationType] = None, platform_dependent: Optional[bool] = None, to_string: Callable[[ObservationType], str] = None, ) -> "ObservationSpaceSpec": """Create a derived observation space. :param id: The name of the derived observation space. :param translate: A callback function to compute a derived observation from the base observation. :param space: The :code:`gym.Space` describing the observation space. :param deterministic: Whether the observation space is deterministic. If not provided, the value is inherited from the base observation space. :param default_value: The default value for the observation space. If not provided, the value is derived from the default value of the base observation space. :param platform_dependent: Whether the derived observation space is platform-dependent. If not provided, the value is inherited from the base observation space. :param to_string: A callback to convert and observation to a string representation. If not provided, the callback is inherited from the base observation space. :return: A new ObservationSpaceSpec. """ return ObservationSpaceSpec( id=id, index=self.index, space=space or self.space, translate=lambda observation: translate(self.translate(observation)), to_string=to_string or self.to_string, default_value=( translate(self.default_value) if default_value is None else default_value ), deterministic=( self.deterministic if deterministic is None else deterministic ), platform_dependent=( self.platform_dependent if platform_dependent is None else platform_dependent ), )
CompilerGym-development
compiler_gym/views/observation_space_spec.py
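A hypothetical example of make_derived_space(), driven through ObservationView.add_derived_space(), which forwards keyword arguments to the base space. The derived space name and translation function are made up for illustration; it assumes an environment exposing an "Autophase" observation space.

import gym

import compiler_gym  # noqa: F401

env = gym.make("llvm-v0")
env.reset()
# Register a derived space whose value is computed from the base "Autophase" observation.
env.observation.add_derived_space(
    id="AutophaseDim",
    base_id="Autophase",
    translate=lambda autophase: len(autophase),
)
print(env.observation["AutophaseDim"])
env.close()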
# Copyright 2021 The TF-Coder Authors. # Copyright (c) Facebook, Inc. and its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Defines the FilterGroup enum. Every function used in TF-Coder is associated with one such FilterGroup. """ import enum # LINT.IfChange(FilterGroup) @enum.unique class FilterGroup(enum.Enum): """A group of similar operations that should have the same filters. The number of arguments is crucially important when adding filters, so by convention the enum names have the number of arguments at the end. """ # No filters. Even if some filtering might be reasonable, it could be faster # to just try all values to avoid the filtering overhead. NONE = "NONE" ############################# # Operations with 1 argument. # The argument is a shape. SHAPE_1 = "SHAPE_1" # The argument is a tensor. TENSOR_1 = "TENSOR_1" # The argument is a sequence of tensors. TENSORSEQUENCE_1 = "TENSORSEQUENCE_1" # The argument is a float tensor. FLOATTENSOR_1 = "FLOATTENSOR_1" # The argument is an int or float tensor. NUMERICTENSOR_1 = "NUMERICTENSOR_1" # The argument is a primitive or tensor. PRIMITIVE_OR_TENSOR_1 = "PRIMITIVE_OR_TENSOR_1" ################################ # Operations with 2 arguments. # The first argument is a tensor, and the second argument is an int # representing an axis, i.e., an int in the range [1, rank_of_tensor). TENSOR_AXIS_2 = "TENSOR_AXIS_2" # The first argument is an int or float tensor, the second is an axis. NUMERICTENSOR_AXIS_2 = "NUMERICTENSOR_AXIS_2" # The first argument is a sequence of tensors, the second is an axis. TENSORSEQUENCE_AXIS_2 = "TENSORSEQUENCE_AXIS_2" # The first argument is a tensor, the second is a boolean tensor. TENSOR_BOOLTENSOR_2 = "TENSOR_BOOLTENSOR_2" # The two arguments are numeric (int or float) tensors with the same shape. SAME_SHAPES_NUMERICTENSOR_2 = "SAME_SHAPES_NUMERICTENSOR_2" # The two arguments are numeric (int or float) tensors with the same dtype, # and the two tensors are broadcastable. SAME_DTYPE_NUMERIC_BROADCASTABLE_2 = "SAME_DTYPE_NUMERIC_BROADCASTABLE_2" # The first argument is a numeric tensor, and the second is either a scalar # or a tensor. The two arguments are broadcastable. ELEMENTWISE_COMPARISON_2 = "ELEMENTWISE_COMPARISON_2" # The first argument is a numeric tensor, and the second is either a scalar # or a tensor. The two arguments are broadcastable, but the must be different. NE_BROADCASTABLE_2 = "NE_BROADCASTABLE_2" ######################################### # Operations with other special handling. # The argument contains nonnegative ints with a small maximum. BINCOUNT_1 = "BINCOUNT_1" # The argument results in a small tensor. EYE_1 = "EYE_1" # The argument results in a small tensor. RANGE_1 = "RANGE_1" # The argument is either a primitive or a sequence of primitives TENSORIZABLE_1 = "TENSORIZABLE_1" # The arguments should be 3-D tensors, and the first argument's # third dimension size should be equal to the second argument's # second dimension size. 
BMM_2 = "BMM_2" # The first argument is a sequence of tensors, # the second is an axis in the range [-1, rank_of_tensor-1]. CAT_TENSORSEQUENCE_AXIS_2 = "CAT_TENSORSEQUENCE_AXIS_2" # Both arguments are tensors and have the same shape. # The dimensions should be greater than 1. CDIST_2 = "CDIST_2" # The first argument is a tensor, the second is an axis in the range # [-1, rank_of_tensor-1]. Note that this range is slightly different from the # TENSOR_AXIS_2 filter. EXPAND_DIMS_2 = "EXPAND_DIMS_2" # The first argument is a tensor, the second is an axis in the range # [-1, rank_of_tensor]. EXPAND_DIMS_ADDITIONAL_2 = "EXPAND_DIMS_ADDITIONAL_2" # The arguments result in a small tensor. EYE_ROWS_COLS_2 = "EYE_ROWS_COLS_2" # Ensures the tensors are both numeric and have the same dtype and rank. MATMUL_2 = "MATMUL_2" # Ensures the tensors are both numeric and have the same dtype and rank. MM_2 = "MM_2" # The first argument is a tensor, the second is an axis in the range # [-1, rank_of_tensor-1]. The first argument must be float or int. NORMALIZE_2 = "NORMALIZE_2" # Ensures that torch.nn.functional.one_hot(indices, num_classes) produces a small result. ONE_HOT_2 = "ONE_HOT_2" # The first argument must be a tensor, and the second must be a nested int # list or int32 tensor of shape [rank_of_arg_1, 2]. PAD_2 = "PAD_2" # The first argument is a tensor, and the second is a tuple. RESHAPE_2 = "RESHAPE_2" # Ensures that torch.tile(input, multiples) produces a small result. TILE_2 = "TILE_2" # The first argument is sorted in the last dimension, the second argument is # the same dtype and rank, and all dimension lengths match except the last. SEARCHSORTED_2 = "SEARCHSORTED_2" # The first argument is a tensor with more than 1 squeezable dimension, and # the second argument is an int specifying a squeezable dimension. SQUEEZE_2 = "SQUEEZE_2" # The first argument is a non-scalar tensor, the second is a dimension, and # the third is a tensor containing ints suitable for indexing into the first # tensor. GATHER_3 = "GATHER_3" # The first argument is a tensor, the second is a tensor containing ints # suitable for indexing into the first tensor on multiple dimensions, and the # third is a number of batch dimensions. INDEX_SELECT_3 = "INDEX_SELECT_3" # The arguments result in a small tensor. RANGE_3 = "RANGE_3" # The first argument is a tensor, the second argument is either a numeric tensor # or an integer, and the third argument is an int specifying the dimension. REPEAT_3 = "REPEAT_3" # The second and third arguments must be int primitives, lists of ints, or 1D # int tensors, and they must have the same shape. ROLL_3 = "ROLL_3" # The first two arguments are tensors with the same dtype, and the third # contains ints of the appropriate shape. TENSORDOT_3 = "TENSORDOT_3" # The first argument is a tensor, and the second and the third are dimensions # to transpose. TRANSPOSE_3 = "TRANSPOSE_3" # Ensures that the shapes and dtypes for torch.where(condition, tensor, tensor/number) match. WHERE_TENSOR_3 = "WHERE_TENSOR_3" # Ensures that the shapes and dtypes for torch.where(condition, number, tensor/number) match. WHERE_NUMERIC_3 = "WHERE_NUMERIC_3" # LINT.ThenChange(value_search/operation_filtering.py:add_filters_to_function_operation)
APIsynth-master
Synthesis_incorporation/filter_group.py
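FilterGroup is a plain enum.Enum, so operations can be tagged with a group and looked up by name or value. A minimal sketch; the import path mirrors the one used by torch_functions.py below.

from tf_coder import filter_group

FilterGroup = filter_group.FilterGroup

group = FilterGroup.TENSOR_AXIS_2
print(group.name)                             # "TENSOR_AXIS_2"
print(FilterGroup("EXPAND_DIMS_2"))           # lookup by value
print(group is FilterGroup["TENSOR_AXIS_2"])  # True: enum members are singletons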
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # Lint as: python3 """Functions and arguments used in the PyCoder project.""" import ast import collections import torch from tf_coder import filter_group FilterGroup = filter_group.FilterGroup FunctionInfo = collections.namedtuple( 'FunctionInfo', ['name', 'filter_group', 'weight']) # Weights for leaf nodes in the AST. # Constants given by the user. PROVIDED_CONSTANT_WEIGHT = 7 # Ubiquitous constants: 0, 1, -1. COMMON_CONSTANT_WEIGHT = 8 # A torch.constant() wrapper around an input primitive. PRIMITIVE_INPUT_AS_TENSOR_WEIGHT = 5 # Int constants meant to be axis values, chosen based on input tensor ranks. AXIS_CONSTANT_WEIGHT = 14 # Int constants obtained from input/output tensor shapes. SHAPE_CONSTANT_WEIGHT = 24 # Weight of constructing a tuple with the output shape. OUTPUT_SHAPE_TUPLE_WEIGHT = 32 # Input variable nodes (in1, in2, etc.). INPUT_VARIABLE_WEIGHT = 4 # DTypes with weights to add to the pool of constants. CONSTANT_DTYPES_AND_WEIGHTS = collections.OrderedDict([ (torch.int32, 8), (torch.float32, 8), (torch.bool, 8), (torch.int64, 16), ]) # Used in value search to convert primitive inputs (e.g., 3) into scalar tensors # (e.g., torch.tensor(3)). CONSTANT_OPERATION_NAME = 'torch.tensor(data)' INT_OPERATION_NAME = 'IntOperation' FLOAT_OPERATION_NAME = 'FloatOperation' BOOL_OPERATION_NAME = 'BoolOperation' # A list of FunctionInfo namedtuples, each describing one function usable by a # program synthesizer. Each FunctionInfo's name contains the function name along # with the names of the arguments for that function, in the order given in the # function's signature. A function may appear multiple times with different # lists of usable arguments. This list is ordered, so value search will try # earlier functions before later ones. # FunctionInfo name format: "torch.module.function(arg_1, arg_2, arg_3='value')" # means call the function `torch.module.function` with varying inputs `arg_1` and # `arg_2`, where `arg_3` is fixed and set to the literal constant `'value'`. 
TORCH_FUNCTIONS = [ # FunctionInfo(name='torch.abs(input)', # filter_group=FilterGroup.NUMERICTENSOR_1, # weight=40), FunctionInfo(name='torch.add(input, other)', filter_group=FilterGroup.ELEMENTWISE_COMPARISON_2, weight=28), # # FunctionInfo(name='torch.all(input)', # # filter_group=FilterGroup.TENSOR_1, # # weight=40), # # FunctionInfo(name='torch.all(input, dim)', # # filter_group=FilterGroup.EXPAND_DIMS_2, # # weight=40), FunctionInfo(name='torch.any(input)', filter_group=FilterGroup.TENSOR_1, weight=40), FunctionInfo(name='torch.any(input, dim)', filter_group=FilterGroup.EXPAND_DIMS_2, weight=40), FunctionInfo(name='torch.arange(end)', filter_group=FilterGroup.RANGE_1, weight=28), # FunctionInfo(name='torch.arange(start, end, step)', # filter_group=FilterGroup.RANGE_3, # weight=56), FunctionInfo(name='torch.argmax(input)', filter_group=FilterGroup.NUMERICTENSOR_1, weight=32), FunctionInfo(name='torch.argmax(input, dim)', filter_group=FilterGroup.NUMERICTENSOR_AXIS_2, weight=32), # # FunctionInfo(name='torch.argsort(input, dim, descending=True)', # # filter_group=FilterGroup.NUMERICTENSOR_AXIS_2, # # weight=48), # # FunctionInfo(name='torch.argsort(input, dim, descending=False)', # # filter_group=FilterGroup.NUMERICTENSOR_AXIS_2, # # weight=48), FunctionInfo(name='torch.bincount(input)', filter_group=FilterGroup.BINCOUNT_1, weight=40), # # FunctionInfo(name='torch.bmm(input, mat2)', # # filter_group=FilterGroup.BMM_2, # # weight=40), # # FunctionInfo(name='torch.cat(tensors, dim)', # # filter_group=FilterGroup.CAT_TENSORSEQUENCE_AXIS_2, # # weight=36), FunctionInfo(name='torch.cdist(x1, x2)', filter_group=FilterGroup.CDIST_2, weight=48), # # FunctionInfo(name='torch.cumsum(input, dim)', # # filter_group=FilterGroup.NUMERICTENSOR_AXIS_2, # # weight=44), FunctionInfo(name='torch.div(input, other)', filter_group=FilterGroup.NE_BROADCASTABLE_2, weight=28), FunctionInfo(name='torch.eq(input, other)', filter_group=FilterGroup.ELEMENTWISE_COMPARISON_2, weight=24), FunctionInfo(name='torch.eye(n)', filter_group=FilterGroup.EYE_1, weight=40), # FunctionInfo(name='torch.eye(n, m)', # filter_group=FilterGroup.EYE_ROWS_COLS_2, # weight=60), # # FunctionInfo(name='torch.flatten(input)', # # filter_group=FilterGroup.TENSOR_1, # # weight=23), # # FunctionInfo(name='torch.flatten(input, start_dim)', # # filter_group=FilterGroup.EXPAND_DIMS_2, # # weight=23), # # FunctionInfo(name='torch.flatten(input, start_dim, end_dim)', # # filter_group=FilterGroup.TRANSPOSE_3, # # weight=23), FunctionInfo(name='torch.gather(input, dim, index)', filter_group=FilterGroup.GATHER_3, weight=48), # # FunctionInfo(name='torch.ge(input, other)', # # filter_group=FilterGroup.ELEMENTWISE_COMPARISON_2, # # weight=32), FunctionInfo(name='torch.gt(input, other)', filter_group=FilterGroup.ELEMENTWISE_COMPARISON_2, weight=24), # # FunctionInfo(name='torch.index_select(input, dim, index)', # # filter_group=FilterGroup.INDEX_SELECT_3, # # weight=24), # # FunctionInfo(name='torch.le(input, other)', # # filter_group=FilterGroup.ELEMENTWISE_COMPARISON_2, # # weight=32), FunctionInfo(name='torch.lt(input, other)', filter_group=FilterGroup.ELEMENTWISE_COMPARISON_2, weight=24), # # FunctionInfo(name='torch.logical_and(input, other)', # # filter_group=FilterGroup.SAME_DTYPE_NUMERIC_BROADCASTABLE_2, # # weight=24), FunctionInfo(name='torch.masked_select(input, mask)', filter_group=FilterGroup.TENSOR_BOOLTENSOR_2, weight=28), FunctionInfo(name='torch.matmul(input, other)', filter_group=FilterGroup.MATMUL_2, weight=24), 
FunctionInfo(name='torch.max(input)', filter_group=FilterGroup.NUMERICTENSOR_1, weight=24), # FunctionInfo(name='torch.max(input, dim)', # filter_group=FilterGroup.NUMERICTENSOR_AXIS_2, # weight=24), # # FunctionInfo(name='torch.maximum(input, other)', # # filter_group=FilterGroup.SAME_SHAPES_NUMERICTENSOR_2, # # weight=24), # # FunctionInfo(name='torch.mean(input)', # # filter_group=FilterGroup.NUMERICTENSOR_1, # # weight=40), # # FunctionInfo(name='torch.mean(input, dim)', # # filter_group=FilterGroup.NUMERICTENSOR_AXIS_2, # # weight=40), # # FunctionInfo(name='torch.min(input)', # # filter_group=FilterGroup.NUMERICTENSOR_1, # # weight=24), FunctionInfo(name='torch.minimum(input, other)', filter_group=FilterGroup.SAME_SHAPES_NUMERICTENSOR_2, weight=32), # # FunctionInfo(name='torch.mm(input, mat2)', # # filter_group=FilterGroup.MM_2, # # weight=32), FunctionInfo(name='torch.mul(input, other)', filter_group=FilterGroup.ELEMENTWISE_COMPARISON_2, weight=24), FunctionInfo(name='torch.ne(input, other)', filter_group=FilterGroup.ELEMENTWISE_COMPARISON_2, weight=24), # # FunctionInfo(name='torch.nonzero(input)', # # filter_group=FilterGroup.TENSOR_1, # # weight=24), # # FunctionInfo(name='torch.nn.functional.normalize(input, dim)', # # filter_group=FilterGroup.NORMALIZE_2, # # weight=48), FunctionInfo(name='torch.nn.functional.one_hot(input, num_classes)', filter_group=FilterGroup.ONE_HOT_2, weight=28), # # FunctionInfo(name='torch.nn.functional.pad(input, pad, mode="constant")', # # filter_group=FilterGroup.PAD_2, # # weight=40), # # FunctionInfo(name='torch.prod(input, dim)', # # filter_group=FilterGroup.NUMERICTENSOR_AXIS_2, # # weight=52), FunctionInfo(name='torch.repeat_interleave(input, repeats, dim)', filter_group=FilterGroup.REPEAT_3, weight=48), FunctionInfo(name='torch.reshape(input, shape)', filter_group=FilterGroup.RESHAPE_2, weight=28), FunctionInfo(name='torch.roll(input, shifts, dims)', filter_group=FilterGroup.ROLL_3, weight=48), FunctionInfo(name='torch.searchsorted(sorted_sequence, input)', filter_group=FilterGroup.SEARCHSORTED_2, weight=56), # # FunctionInfo(name='torch.sort(input, dim)', # # filter_group=FilterGroup.NUMERICTENSOR_AXIS_2, # # weight=52), # # FunctionInfo(name='torch.sort(input, dim, descending=True)', # # filter_group=FilterGroup.NUMERICTENSOR_AXIS_2, # # weight=60), FunctionInfo(name='torch.squeeze(input)', filter_group=FilterGroup.TENSOR_1, weight=23), FunctionInfo(name='torch.squeeze(input, dim)', filter_group=FilterGroup.SQUEEZE_2, weight=23), # # FunctionInfo(name='torch.sqrt(input)', # # filter_group=FilterGroup.NUMERICTENSOR_1, # # weight=56), FunctionInfo(name='torch.square(input)', filter_group=FilterGroup.NUMERICTENSOR_1, weight=28), FunctionInfo(name='torch.stack(tensors)', filter_group=FilterGroup.TENSORSEQUENCE_1, weight=36), FunctionInfo(name='torch.stack(tensors, dim)', filter_group=FilterGroup.TENSORSEQUENCE_AXIS_2, weight=36), # # FunctionInfo(name='torch.std(input, dim)', # # filter_group=FilterGroup.NUMERICTENSOR_AXIS_2, # # weight=40), # # FunctionInfo(name='torch.sub(input, other)', # # filter_group=FilterGroup.NE_BROADCASTABLE_2, # # weight=28), FunctionInfo(name='torch.sum(input)', filter_group=FilterGroup.NUMERICTENSOR_1, weight=24), FunctionInfo(name='torch.sum(input, dim)', filter_group=FilterGroup.NUMERICTENSOR_AXIS_2, weight=24), # FunctionInfo(name=CONSTANT_OPERATION_NAME, # filter_group=FilterGroup.TENSORIZABLE_1, # weight=24), FunctionInfo(name='torch.tensordot(a, b, dims)', filter_group=FilterGroup.TENSORDOT_3, weight=24), 
FunctionInfo(name='torch.tile(input, dims)', filter_group=FilterGroup.TILE_2, weight=28), FunctionInfo(name='torch.transpose(input, dim0, dim1)', filter_group=FilterGroup.TRANSPOSE_3, weight=24), FunctionInfo(name='torch.where(condition, input, other)', filter_group=FilterGroup.WHERE_TENSOR_3, weight=24), FunctionInfo(name='torch.where(condition, self, other)', filter_group=FilterGroup.WHERE_NUMERIC_3, weight=24), # # FunctionInfo(name='torch.unique(input)', # # filter_group=FilterGroup.TENSOR_1, # # weight=48), FunctionInfo(name='torch.unsqueeze(input, dim)', filter_group=FilterGroup.EXPAND_DIMS_ADDITIONAL_2, weight=22), # # FunctionInfo(name='torch.zeros(size)', # # filter_group=FilterGroup.SHAPE_1, # # weight=40), ] SPARSE_FUNCTIONS = [ ] def parse_function_info_name(function_info): """Takes a FunctionInfo and returns (function_name, list_of_args). Args: function_info: A FunctionInfo namedtuple. Returns: A tuple (function_name, list_of_args, constant_kwargs), where function_name is a string, list_of_args is a list of strings, and constant_kwargs is a dict mapping argument names to their constant literal values. For example, if the FunctionInfo's name is 'torch.foo.bar(x, axis, baz=True)', then this function would return ('torch.foo.bar', ['x', 'axis'], {'baz': True}). Raises: ValueError: If the FunctionInfo's name is not properly formatted. """ name = function_info.name if name.count('(') != 1: raise ValueError("The FunctionInfo's name must have exactly one open " "parenthesis.") if name.count(')') != 1 or name[-1] != ')': raise ValueError("The FunctionInfo's name must have exactly one close " "parenthesis, at the end of the name.") open_paren = name.index('(') close_paren = name.index(')') function_name = name[ : open_paren] arg_list = name[open_paren + 1 : close_paren] split_by_comma = [arg.strip() for arg in arg_list.split(',')] list_of_args = [] constant_kwargs = collections.OrderedDict() for part in split_by_comma: if '=' in part: kwarg_name, literal_as_string = [x.strip() for x in part.split('=')] constant_kwargs[kwarg_name] = ast.literal_eval(literal_as_string) else: list_of_args.append(part) return function_name, list_of_args, constant_kwargs
APIsynth-master
Synthesis_incorporation/torch_functions.py
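A worked example of parse_function_info_name(), following its docstring. The import path is assumed from the sibling modules in this repo.

from tf_coder import torch_functions

info = torch_functions.FunctionInfo(
    name="torch.nn.functional.pad(input, pad, mode='constant')",
    filter_group=torch_functions.FilterGroup.PAD_2,
    weight=40,
)
name, args, kwargs = torch_functions.parse_function_info_name(info)
print(name)    # torch.nn.functional.pad
print(args)    # ['input', 'pad']
print(kwargs)  # OrderedDict([('mode', 'constant')])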
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import torch import torch as T from torch import nn EMBEDDING_SIZE = 150 SHAPE_EMBEDDING_SIZE = 6 class pycoder_parameters: ''' Core Fuzzing Parameters ''' NUM_FUZZ_PER_API= 100000 #000 NUM_TEST_FUZZ = 2 FLOAT_TENSOR = False #We either generate float or integer tensors UNIT_TEST = False COMPOSITE = True ''' Fuzzing Detailed Parameters ''' MAX_TENSOR_DIMENSIONS = 3 #how many rows, columns, etc. MIN_VAL_PER_DIMENSION = 1 # e.g., min number of rows, columns, etc. MAX_VAL_PER_DIMENSION = 5 # e.g., max number of rows, columns, etc. #So far limiting to integers MIN_TENSOR_VALUE = 1 MAX_TENSOR_VALUE = 15 ''' Embedding Parameters ''' EMBEDDING_NOISE_LEVEL = 0 #0 noise by default EMBEDDING_SIZE = 150 SHAPE_EMBEDDING_SIZE = 6 data_type = 'float' if FLOAT_TENSOR is True else 'integer' model_type = 'Composite_' if COMPOSITE is True else 'Single_' file_name = str(model_type) + str(NUM_FUZZ_PER_API) + '_' + data_type fuzzing = file_name + '.pt' embedding = file_name + '.embedding' + '.pt', classification = file_name + '.model_result' + '.pt' train_valid_test = file_name + 'train_valid_test.pt' def setNoiseLevel(self, noise): self.EMBEDDING_NOISE_LEVEL = noise self.embedding = self.file_name + '.embedding' + '_' + str(self.EMBEDDING_NOISE_LEVEL) + '.pt' def getEmbeddingFile(self): return(self.file_name + '.embedding' + '_' + str(self.EMBEDDING_NOISE_LEVEL) + '.pt') def getVisulizationFile(self): return(self.file_name + '.embedding' + '_' + str(self.EMBEDDING_NOISE_LEVEL) + '_' + 'tSNE.pt') class Net(torch.nn.Module): def __init__(self, settings, len_api): super(Net, self).__init__() first_layer_size = settings.model.embedding_size if settings.model.use_shape_encoding: first_layer_size += settings.model.shape_embedding_size if settings.model.use_type_encoding: first_layer_size += 2 self.hid1 = torch.nn.Linear(4*(first_layer_size+1), 500) self.hid2 = torch.nn.Linear(500, 250) self.hid3 = torch.nn.Linear(250, 100) self.oupt = torch.nn.Linear(100, len_api) torch.nn.init.xavier_uniform_(self.hid1.weight) torch.nn.init.zeros_(self.hid1.bias) torch.nn.init.xavier_uniform_(self.hid2.weight) torch.nn.init.zeros_(self.hid2.bias) torch.nn.init.xavier_uniform_(self.oupt.weight) torch.nn.init.zeros_(self.oupt.bias) torch.nn.Dropout(p=0.2) def forward(self, x): z1 = torch.tanh(self.hid1(x)) z2 = torch.tanh(self.hid2(z1)) z3 = torch.tanh(self.hid3(z2)) z = self.oupt(z3) # no softmax: CrossEntropyLoss() return (z, z3, z2, z1) class FFNet(T.nn.Module): def __init__(self): super(FFNet, self).__init__() NOISE = 0 f = pycoder_parameters() f.setNoiseLevel(NOISE) f.embedding = f.getEmbeddingFile() print(f.embedding) print(f.SHAPE_EMBEDDING_SIZE) self.hid1 = T.nn.Linear(4*(f.EMBEDDING_SIZE+f.SHAPE_EMBEDDING_SIZE+1+2), 500) self.hid2 = T.nn.Linear(500, 250) self.hid3 = T.nn.Linear(250, 100) # self.oupt = T.nn.Linear(100, len(api2indx)) self.oupt = T.nn.Linear(100, 33) T.nn.init.xavier_uniform_(self.hid1.weight) T.nn.init.zeros_(self.hid1.bias) T.nn.init.xavier_uniform_(self.hid2.weight) T.nn.init.zeros_(self.hid2.bias) T.nn.init.xavier_uniform_(self.oupt.weight) T.nn.init.zeros_(self.oupt.bias) T.nn.Dropout(p=0.2) def forward(self, x): z1 = T.tanh(self.hid1(x)) z2 = T.tanh(self.hid2(z1)) z3 = T.tanh(self.hid3(z2)) z = self.oupt(z3) # no softmax: CrossEntropyLoss() return (z, z3, z2, z1) class RNNModel(nn.Module): def __init__(self, 
input_size, output_size, hidden_dim, n_layers): super(RNNModel, self).__init__() # Defining some parameters self.hidden_dim = hidden_dim self.n_layers = n_layers #Defining the layers # RNN Layer self.rnn = nn.RNN(input_size, hidden_dim, n_layers, batch_first=True, bidirectional=True) # Fully connected layer self.fc = nn.Linear(hidden_dim*2, output_size) def forward(self, x): batch_size = x.size(0) #Initializing hidden state for first input using method defined below hidden = self.init_hidden(batch_size) # Passing in the input and hidden state into the model and obtaining outputs out, hidden = self.rnn(x, hidden) out1 = out.contiguous().view(-1, self.hidden_dim*2) out1 = self.fc(out1) return out1, hidden, out def init_hidden(self, batch_size): device = T.device("cpu") # This method generates the first hidden state of zeros which we'll use in the forward pass hidden = torch.zeros(self.n_layers*2, batch_size, self.hidden_dim).to(device) # We'll send the tensor holding the hidden state to the device we specified earlier as well return hidden
APIsynth-master
Synthesis_incorporation/models/models.py
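A quick shape check for the FFNet classifier above: the 636-dimensional input (4 padded tensor embeddings of size 150 + 6 + 1 + 2) and the 33-way output head come straight from the constructor. Sketch only; the import root is the one used by prediction_model.py.

import torch

from tf_coder.models.models import FFNet

net = FFNet()  # prints the embedding file name as a side effect
x = torch.zeros(1, 4 * (150 + 6 + 1 + 2))  # one flattened (3 inputs + 1 output) example embedding
logits, z3, z2, z1 = net(x)
print(logits.shape)  # torch.Size([1, 33])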
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # Lint as: python3 """An interface for predicting operations given input and output.""" import abc import torch import torch.nn.functional as F from typing import Dict, List, Optional, Text from itertools import product import six from tf_coder.benchmarks import benchmark as benchmark_module from tf_coder.value_search import all_operations from tf_coder.value_search import operation_base from tf_coder.value_search import value_search_settings as settings_module from tf_coder.value_search import value as value_module from tf_coder.models.models import Net from iopath.common.file_io import PathManager from iopath.fb.manifold import ManifoldPathHandler def load_checkpoint(checkpoint_path, map_location=None): pm = PathManager() pm.register_handler(ManifoldPathHandler()) with pm.open(checkpoint_path, "rb") as f: if map_location is not None: checkpoint = torch.load(f, map_location=map_location) else: checkpoint = torch.load(f, map_location=lambda storage, loc: storage) return checkpoint @six.add_metaclass(abc.ABCMeta) class PredictionModel(object): """Apply prediction model's results in PyCoder. Attributes: operations: A list of operations that the handler knows about. all_names: A list of operation names, in the same order as the `operations` list. """ def __init__(self, operations: Optional[List[operation_base.Operation]] = None): """Initializes the handler. Args: operations: A list of operations that the scorer should handle. Exposed for testing. Raises: ValueError: If there are duplicate operation names. """ self.operations = ( operations if operations else all_operations.get_operations(include_sparse_operations=True) ) self.all_names = [operation.name for operation in self.operations] if len(set(self.all_names)) != len(self.operations): raise ValueError("Duplicate operation name.") @abc.abstractmethod def get_operation_multipliers( self, benchmark: benchmark_module.Benchmark, settings: settings_module.Settings ) -> Dict[Text, float]: """Returns a map from operation names to their weight multiplier. The weight multiplier should be between 0 and 1 if the operation should be prioritized, or greater than 1 if it should be deprioritized. Args: benchmark: Benchmark object corresponding to the TF-Coder task. settings: A Settings object storing settings for this search. Returns: A map from operation name to weight multiplier, such that the operation with that name should have its weight modified by that multiplier. If the dict does not contain a key, it means the weight should not be modified (equivalent to a multiplier of 1). 
""" def __repr__(self) -> Text: """Returns a string containing details about this handler and parameters.""" return self.__class__.__name__ class ClassificationModel(PredictionModel): def __init__( self, settings: settings_module.Settings, operations: Optional[List[operation_base.Operation]] = None, ): super(ClassificationModel, self).__init__(operations) self.checkpoint_path = settings.model.checkpoint_path self.api_map_path = settings.model.api_map_path self.multi_ffn_path = settings.model.multi_ffn_path self.multi_rnn_path = settings.model.multi_rnn_path self.multi_api_map_path = settings.model.multi_api_map_path self.api2indx = load_checkpoint(self.api_map_path) self.multi_api2indx = load_checkpoint(self.multi_api_map_path) self.embedding_size = settings.model.embedding_size self.shape_embedding_size = settings.model.shape_embedding_size self.use_shape_encoding = settings.model.use_shape_encoding self.use_type_encoding = settings.model.use_type_encoding self.use_value_encoding = settings.model.use_value_encoding self.rnn_hidden_dims = settings.model.rnn_hidden_dims self.rnn_num_layers = settings.model.rnn_num_layers self.settings = settings # self.load_model(settings.model.use_multi_model) self.load_model(settings) def load_model(self, settings): device = torch.device("cpu") if settings.model.use_multi_model or settings.model.do_first_in_seq: self.multi_ffn_model = load_checkpoint(self.multi_ffn_path).to(device) self.multi_model = load_checkpoint(self.multi_rnn_path).to(device) self.indx2api = {v: k for k, v in self.multi_api2indx.items()} if self.multi_api2indx.get('<eol>', -1) == -1: max_key = max(self.indx2api.keys()) self.indx2api[max_key+1] = '<eol>' self.multi_api2indx['<eol>'] = max_key+1 else: self.model = Net(self.settings, len(self.api2indx)).to(device) checkpoint = load_checkpoint(self.checkpoint_path) self.model.load_state_dict(checkpoint) self.model.eval() # check input tensor type and adjust model def embed_benchmark_example(self, example): it_pad = [] input_list = example.inputs for input_tensor in input_list: input_tensor = torch.tensor(input_tensor) it_pad.append(self.tensor_flatten_pad(input_tensor)) for _ in range(len(it_pad),3): t = torch.zeros(self.embedding_size + self.shape_embedding_size + 2 + 1) t[-1] = -1 it_pad.append(t) ot_pad = self.tensor_flatten_pad(example.output, isNoise=False) domain_embedding = torch.flatten(torch.stack((it_pad[0], it_pad[1], it_pad[2], ot_pad))) return domain_embedding def embed_benchmark_value(self, example): it_pad = [] input_list = example['inputs'] for input_tensor in input_list: if input_tensor == 0: embedding_size = self.embedding_size if self.use_shape_encoding: embedding_size += self.shape_embedding_size if self.use_type_encoding: embedding_size += 2 it_pad.append(torch.zeros(embedding_size + 1)) else: if input_tensor.is_tensor: input_tensor = input_tensor.value elif input_tensor.is_sequence and not input_tensor.elem_type_is_tensor: input_tensor = torch.tensor(input_tensor.value) else: input_tensor = torch.tensor(input_tensor.value) it_pad.append(self.tensor_flatten_pad(input_tensor)) for _ in range(len(it_pad),3): embedding_size = self.embedding_size if self.use_shape_encoding: embedding_size += self.shape_embedding_size if self.use_type_encoding: embedding_size += 2 t = torch.zeros(embedding_size + 1) t[-1] = -1 it_pad.append(t) output_tensor = example['output'].value if not isinstance(output_tensor, torch.Tensor): output_tensor = torch.tensor(output_tensor.value) ot_pad = self.tensor_flatten_pad(output_tensor) 
domain_embedding = torch.flatten(torch.stack((it_pad[0], it_pad[1], it_pad[2], ot_pad))) return domain_embedding.float() def encode_values_to_code(self, tensor): tensor = tensor.clone() tensor[(tensor>=100) & (tensor<1000)] = 100 tensor[(tensor>=1000)] = 101 tensor[(tensor<=-20) & (tensor>-100)] = -20 tensor[(tensor<=-100) & (tensor>-1000)] = -21 tensor[(tensor<=-1000)] = -22 return tensor def tensor_flatten_pad( self, tensor, embed_size = None, shape_embed_size = None, isNoise = False ): if embed_size is None: embed_size = self.embedding_size if shape_embed_size is None: shape_embed_size = self.shape_embedding_size if not isinstance(tensor, torch.Tensor): tensor = torch.tensor(tensor) t_flatten = torch.flatten(tensor) if self.use_value_encoding: t_flatten = self.encode_values_to_code(t_flatten) padding_length = embed_size - list(t_flatten.shape)[-1] p1d = (0,padding_length) #just padding the last dimension t_pad = F.pad(input=t_flatten, pad=p1d, mode='constant', value=0) if self.use_type_encoding: type_padding = 0 if tensor.dtype == torch.bool: type_padding = 1 if tensor.dtype == torch.float: type_padding = 2 '''size embedding''' if self.use_shape_encoding: if not isinstance(tensor, torch.Tensor): t_shape = [] else: t_shape = list(tensor.shape) padding_length = shape_embed_size -1 - len(t_shape) p1d = (0,padding_length) #just padding the last dimension s_pad = F.pad(input=torch.tensor(t_shape), pad=p1d, mode='constant', value=0) t_pad_list = t_pad.tolist() s_pad_list = s_pad.tolist() if self.use_type_encoding: tensor_embedding = torch.tensor([type_padding] + [-1] + t_pad_list + [-1] + s_pad_list + [-1]) else: tensor_embedding = torch.tensor(t_pad_list + [-1] + s_pad_list + [-1]) else: t_pad_list = t_pad.tolist() if self.use_type_encoding: tensor_embedding = torch.tensor([type_padding] + [-1] + t_pad_list + [-1]) else: tensor_embedding = torch.tensor(t_pad_list + [-1]) return tensor_embedding.float() def predict_operation(self, example, top_n, threshold, is_example, settings): if is_example: domain_embedding = self.embed_benchmark_example(example) else: domain_embedding = self.embed_benchmark_value(example) with torch.no_grad(): predicts, _, _, _ = self.model(domain_embedding) confidence = predicts num_gt_threshold = sum(c >= threshold for c in confidence) predicted_api_list = (torch.argsort(predicts, descending=True)).numpy() topn_list = predicted_api_list[:min(top_n, num_gt_threshold)] topn_operations = [list(self.api2indx.keys())[list(self.api2indx.values()).index(api)] for api in topn_list] topn_confidences = [confidence[api].item() for api in topn_list] return topn_operations, topn_confidences def predict_sequence(self, example_sequence, top_n, beam_n, threshold, is_example, settings): if is_example: domain_embedding = self.embed_benchmark_example(example_sequence) else: embeddings = [] for example in example_sequence: embeddings.append(self.embed_benchmark_value(example)) for _ in range(len(example_sequence), 3): embeddings.append(torch.zeros(embeddings[0].shape)) domain_embedding = torch.stack((embeddings[0], embeddings[1], embeddings[2])) with torch.no_grad(): predicts, z3, z2, z1 = self.multi_ffn_model(domain_embedding) temp_z3 = torch.unsqueeze(z3,0) model_output, hidden, int_output = self.multi_model(temp_z3) topn_list = [] topn_prob_list = [] for i, m in enumerate(model_output): topn = [] topn_prob = [] prob = torch.nn.functional.softmax(m, dim=0).data # Taking the class with the highest probability score from the output topn_ops = torch.topk(prob,beam_n,dim=0)[1] if 
settings.printing.predicted_operations: print(i, topn_ops) for op in topn_ops.cpu().numpy(): if settings.printing.predicted_operations: print(self.indx2api[op]) topn.append(self.indx2api[op]) topn_prob.append(prob[op].item()) topn_list.append(topn) topn_prob_list.append(topn_prob) if settings.printing.predicted_operations: print('====') topn_operations = list(product(topn_list[0], topn_list[1], topn_list[2])) topn_confidences = list(product(topn_prob_list[0], topn_prob_list[1], topn_prob_list[2])) topn_confidences = [c[0]*c[1]*c[2] for c in topn_confidences] num_gt_threshold = min(sum(c > threshold for c in topn_confidences), top_n) topn_operations = [operation for _, operation in sorted(zip(topn_confidences, topn_operations), reverse=True, key=lambda pair: pair[0])] topn_confidences = sorted(topn_confidences, reverse=True) return topn_operations[:num_gt_threshold], topn_confidences[:num_gt_threshold] def get_operation_multipliers( self, benchmark: benchmark_module.Benchmark, settings: settings_module.Settings ) -> Dict[Text, float]: """See base class.""" if settings.model.use_multi_model: predicted_operations, confidence = self.predict_sequence(benchmark.examples[0], settings.model.multiplier_top_n, settings.model.threshold, True, settings) else: predicted_operations, confidence = self.predict_operation(benchmark.examples[0], settings.model.multiplier_top_n, settings.model.threshold, True, settings) if settings.printing.prioritized_operations: if settings.model.use_multi_model: print("Predicted operations: {}".format(", ".join(["{} ({:.2f})".format(op, c) for op, c in zip(predicted_operations, confidence)]))) predicted_operations = [{item for seq in predicted_operations for item in seq}] else: print("Predicted operations: {}".format(", ".join(["{} ({:.2f})".format(op, c) for op, c in zip(predicted_operations, confidence)]))) multipliers = {} for name in self.all_names: if name.startswith("torch.") and "(" in name: function_name = name[len("torch.") : name.index("(")].lower() if function_name in predicted_operations: if settings.printing.prioritized_operations: print( "Classification Model prioritized {}".format(name) ) multipliers[name] = settings.model.multiplier return multipliers def get_predicted_sequence( self, example_sequence, settings: settings_module.Settings ) -> List[operation_base.Operation]: """See base class.""" predicted_operations, confidence = self.predict_sequence(example_sequence, settings.model.iterative_top_n, settings.model.beam_n, settings.model.threshold, False, settings) if settings.printing.predicted_operations: print() for example in example_sequence: print("With example, inputs: [{}],".format(", ".join([i.reconstruct_expression() if isinstance(i, value_module.Value) else str(i) for i in example['inputs']]))) # print("Predicted operations: {}".format(", ".join(["{} ({:.2f})".format(op, c) for op, c in zip(predicted_operations, confidence)]))) print(predicted_operations) print(confidence) operation_list = [] for sequence in predicted_operations: sequence_list = [] for op in sequence: # sequence_list: [[op1_1, op1_2, ...], [op2_1, op2_2, ...], ] if op == '<eol>': break sequence_list.append(all_operations.find_operation_with_partial_name(op)) operation_list.extend(product(*sequence_list)) return operation_list def get_predicted_operations( self, example, settings: settings_module.Settings ) -> List[operation_base.Operation]: """See base class.""" predicted_operations, confidence = self.predict_operation(example, settings.model.iterative_top_n, 
settings.model.threshold, False, settings) if settings.printing.predicted_operations: print() print("With example, inputs: ({}),".format(", ".join([i.reconstruct_expression() for i in example['inputs']]))) print("Predicted operations: {}".format(", ".join(["{} ({:.2f})".format(op, c) for op, c in zip(predicted_operations, confidence)]))) operation_list = [] for op in predicted_operations: operation_list.extend(all_operations.find_operation_with_partial_name(op)) return operation_list def __repr__(self) -> Text: """See base class.""" return "{}".format(self.__class__.__name__) def predict_first_in_sequence(self, example_sequence, top_n, threshold, is_example, settings): if is_example: domain_embedding = self.embed_benchmark_example(example_sequence) else: embeddings = [] embeddings.append(self.embed_benchmark_value(example_sequence)) # for _ in range(1, 3): for _ in range(len(embeddings), 3): embeddings.append(torch.zeros(embeddings[0].shape)) domain_embedding = torch.stack((embeddings[0], embeddings[1], embeddings[2])) with torch.no_grad(): predicts, z3, z2, z1 = self.multi_ffn_model(domain_embedding) temp_z3 = torch.unsqueeze(z3, 0) model_output, hidden, int_output = self.multi_model(temp_z3) topn_operations = [] topn_confidences = [] topn = [] topn_prob = [] prob = torch.nn.functional.softmax(model_output[0], dim=0).data topn_ops = torch.topk(prob, top_n, dim=0)[1] for op in topn_ops.cpu().numpy(): if settings.printing.predicted_operations: print(self.indx2api[op]) topn.append(self.indx2api[op]) topn_prob.append(prob[op].item()) # topn_operations.append(topn) topn_operations = topn # topn_confidences.append(topn_prob) topn_confidences = topn_prob num_gt_threshold = sum(c > threshold for c in topn_confidences) topn_operations = [operation for _, operation in sorted(zip(topn_confidences, topn_operations), reverse=True, key=lambda pair: pair[0])] topn_confidences = sorted(topn_confidences, reverse=True) return topn_operations[:num_gt_threshold], topn_confidences[:num_gt_threshold] def get_first_in_sequence( self, example, settings: settings_module.Settings ) -> List[operation_base.Operation]: if settings.printing.predicted_operations: print() print("With example, inputs: ({}),".format(", ".join([i.reconstruct_expression() for i in example['inputs']]))) predicted_opeations,confidence = self.predict_first_in_sequence(example, settings.model.iterative_top_n, settings.model.threshold, False, settings) if settings.printing.predicted_operations: print("Predicted operations: {}".format(", ".join(["{} ({:.2f})".format(op, c) for op, c in zip(predicted_opeations, confidence)]))) operation_list = [] for op in predicted_opeations: if op != '<eol>': operation_list.extend(all_operations.find_operation_with_partial_name(op)) return operation_list PREDICTION_TO_NAME_MAP = { 'abs': "torch.abs", 'add': "torch.add", 'all': "torch.all", 'any': "torch.any", 'arange': "torch.arange", 'argmax': "torch.argmax", 'argsort': "torch.argsort", 'bincount': "torch.bincount", 'cat': "torch.cat", 'cdist': "torch.cdist", 'cumsum': "torch.cumsum", 'div': "torch.div", 'eq': "torch.eq", 'expand': "ExpandOperation", 'eye': "torch.eye", 'flatten': "torch.flatten", 'gather': "torch.gather", 'ge': "torch.ge", 'gt': "torch.gt", 'index_select': "torch.index_select", 'le': "torch.le", 'lt': "torch.lt", 'logical_and': "torch.logical_and", 'masked_select': "torch.masked_select", 'matmul': "torch.matmul", 'max': "torch.max", 'maximum': "torch.maximum", 'mean': "torch.mean", 'min': "torch.min", 'minimum': "torch.minimum", 'mul': 
"torch.mul", 'ne': "torch.ne", 'nonzero': "torch.nonzero", 'normalize': "torch.nn.functional.normalize", 'one_hot': "torch.nn.functional.one_hot", 'pad': "torch.nn.functional.pad", 'prod': "torch.prod", 'repeat_interleave': "torch.repeat_interleave", 'reshape': "torch.reshape", 'roll': "torch.roll", 'searchsorted': "torch.searchsorted", 'sort': "torch.sort", 'squeeze': "torch.squeeze", 'sqrt': "torch.sqrt", 'square': "torch.square", 'stack': "torch.stack", 'sub': "torch.sub", 'sum': "torch.sum", 'tensordot': "torch.tensordot", 'tile': "torch.tile", 'transpose': "torch.transpose", 'where': "torch.where", 'unique': "torch.unique", 'unsqueeze': "torch.unsqueeze", 'zeros': "torch.zeros", "masked": "torch.masked_select", "index": "torch.index_select", "logical": "torch.logical_and", "onehot": "nn.functional.one_hot", "float": "FloatOperation", "bool": "BoolOperation", "int": "IntOperation" }
APIsynth-master
Synthesis_incorporation/models/prediction_model.py
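The PREDICTION_TO_NAME_MAP table at the end of the module translates raw prediction tokens into canonical operation names. A small sketch; importing the module requires its own dependencies (torch, tf_coder, and the internal iopath handlers) to be available.

from tf_coder.models import prediction_model

for token in ("masked", "onehot", "where"):
    print(token, "->", prediction_model.PREDICTION_TO_NAME_MAP[token])
# masked -> torch.masked_select
# onehot -> nn.functional.one_hot
# where -> torch.where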
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

# Lint as: python3
"""Creates prediction models from handler strings."""

import collections
from typing import Callable, Dict, List, Text

from tf_coder.models import prediction_model
from tf_coder.value_search import value_search_settings as settings_module

# Map handler strings to model classes so that models are not instantiated
# until they are actually requested.
PREDICTION_MODEL_FNS = collections.OrderedDict(
    [
        ("classification", prediction_model.ClassificationModel),
    ]
)  # type: Dict[Text, Callable[[], prediction_model.PredictionModel]]


def handler_string_list() -> List[Text]:
    """Returns a list of available handler strings."""
    return list(PREDICTION_MODEL_FNS.keys())


def load_model(
    handler_string: Text, settings: settings_module.Settings
) -> prediction_model.PredictionModel:
    """Returns a PredictionModel corresponding to the given handler string."""
    if handler_string not in PREDICTION_MODEL_FNS:
        raise ValueError("Unknown snippet handler: {}".format(handler_string))
    # Instantiate the model class registered for this handler string.
    return PREDICTION_MODEL_FNS[handler_string](settings)
APIsynth-master
Synthesis_incorporation/models/prediction_model_factory.py
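Sketch of the factory lookup. Constructing a real model needs a Settings object pointing at valid checkpoints, so only the registry side is exercised here.

from tf_coder.models import prediction_model_factory

print(prediction_model_factory.handler_string_list())  # ['classification']
# model = prediction_model_factory.load_model("classification", settings)  # needs real checkpoint paths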
# Copyright 2021 The TF-Coder Authors. # Copyright (c) Facebook, Inc. and its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Benchmarks collected/inspired from StackOverflow.""" # Avoid wrapping URLs and target programs to ease clicking and copying. # pylint: disable=line-too-long # Every function in this module takes no arguments and creates a benchmark. # pylint: disable=missing-docstring import math import torch from tf_coder.benchmarks import benchmark def stackoverflow_01(): examples = [ benchmark.Example( inputs=[ # [[5.0, 2.0], [1.0, 3.0], [0.0, -1.0]], [[5, 2], [1, 3], [0, 2]] ], output=[ # [[5.0, 5.0], [1.0, 1.0], [0.0, 0.0]], # [[2.0, 2.0], [3.0, 3.0], [-1.0, -1.0]], [[5, 5], [1, 1], [0, 0]], [[2, 2], [3, 3], [2, 2]] ], ), ] constants = [] description = "reshape by separating and duplicating columns" target_program = "torch.transpose(torch.stack((in1, in1)), 0, 2)" source = "https://stackoverflow.com/questions/40441503/tensorflow-tensor-reshape" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_01", ) def stackoverflow_02(): examples = [ benchmark.Example( inputs=[ # [5, 1, 0, 3, 0, -1, 2, -10, 2], [5, 1, 0, 3, 0, 0, 2, 0, 2], 1, ], output=[1, 1, 0, 1, 0, 0, 1, 0, 1] # [1, 1, 0, 1, 0, -1, 1, -10, 1], ), ] constants = [1] description = "clip values that are greater than 1" target_program = "torch.minimum(in1, torch.tensor(1))" source = ( "https://stackoverflow.com/questions/46408839/tensorflow-trim-values-in-tensor" ) return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_02", ) @benchmark.ignore('Out of scope') def stackoverflow_03(): examples = [ benchmark.Example( inputs=[ [[11, 22, 33, 44, 55, 66, 77], [70, 60, 50, 40, 30, 20, 10]], [[-9, -8, -7, -6, -5, -4, -3], [11, 12, 13, 14, 15, 16, 17]], ], output=[[11, 22, 33, -6, -5, 66, 77], [70, 60, 50, 14, 15, 20, 10]], ), ] constants = [3, 4, 5] description = "replace certain columns with columns from the other tensor" target_program = """ mask = torch.sum(torch.nn.functional.one_hot(torch.tensor(range(3,5)), in1.size(1)), 0) solution = torch.add(torch.mul(mask, in2), torch.mul(torch.sub(torch.ones(mask.size(), dtype=torch.int), mask), in1)) """ source = "https://stackoverflow.com/questions/44657388/how-to-replace-certain-values-in-tensorflow-tensor-with-the-values-of-the-other" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_03", ) @benchmark.ignore('Out of scope') def stackoverflow_04(): examples = [ benchmark.Example( inputs=[ [[12, 23, 34, 45], [66, 77, 88, 99]], [[0, 1], [0, 1], [1, 0], [0, 0]], [[2, 1], [1, 2], [0, 2], [0, 0]], ], output=[[34, 77], [23, 88], [66, 34], [12, 12]], ), ] constants = [] description = "index into the tensor" target_program = """ idxs = torch.stack((in2, in3), 
dim=1) solution = in1[idxs[:, 0], idxs[:, 1]] """ source = "https://stackoverflow.com/questions/33736795/tensorflow-numpy-like-tensor-indexing" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_04", ) def stackoverflow_05(): examples = [ benchmark.Example( inputs=[ [[4, 3, 1], [6, 5, 2]], [[[5, 5]], [[1, 5]], [[6, 0]]], ], output=[[[29, 35]], [[47, 55]]], ), ] constants = [] description = "tensor multiplication like np.tensordot" target_program = "torch.tensordot(in1, in2, dims=1)" source = "https://stackoverflow.com/questions/43067338/tensor-multiplication-in-tensorflow" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_05", ) def stackoverflow_06(): examples = [ benchmark.Example( inputs=[ [3, 5, 0, 2, 3, 3, 0], ], output=[ [1, 0, 0, 0, 1, 1, 0], [0, 1, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 1], [0, 0, 0, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1, 0], [1, 0, 0, 0, 1, 1, 0], [0, 0, 1, 0, 0, 0, 1], ], # [ # [1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0], # [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0], # [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0], # [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], # [1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0], # [1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0], # [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0], # ], ), ] constants = [] description = "binary tensor from vector indicating if elements are equal" target_program = "torch.eq(in1, torch.unsqueeze(in1, dim=1)).float()" source = "https://stackoverflow.com/questions/47816231/create-binary-tensor-from-vector-in-tensorflow" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_06", ) def stackoverflow_07(): examples = [ benchmark.Example( inputs=[ [ [[8, 4, 6], [2, 12, 3]], [[11, 12, 5], [9, 12, 12]], [[9, 2, 13], [7, 0, 7]], [[2, 10, 5], [7, 1, 2]], ], ], output=[ [[8, 4, 6], [11, 12, 5], [9, 2, 13], [2, 10, 5]], [[2, 12, 3], [9, 12, 12], [7, 0, 7], [7, 1, 2]], ], ), ] constants = [] description = "swap the first two dimensions of the tensor" target_program = "torch.transpose(in1, 0, 1)" source = ( "https://stackoverflow.com/questions/38212205/swap-tensor-axes-in-tensorflow" ) return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_07", ) def stackoverflow_08(): examples = [ benchmark.Example( inputs=[ # [-1, 0, -3, 2, 1, 3, 5, -1, -9, 2, 10], [1, 0, 0, 2, 1, 3, 5, 0, 1, 2, 10], [12, 3, 45, 6, 7, 8, 9, 87, 65, 4, 32], 1 ], output=[6, 8, 9, 4, 32], ), ] constants = [1] description = ( "select the values in the second tensor where the first " "tensor is greater than 1" ) target_program = "torch.masked_select(in2, torch.gt(in1, 1))" source = "https://stackoverflow.com/questions/33769041/tensorflow-indexing-with-boolean-tensor" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_08", ) @benchmark.ignore('Out of scope') def stackoverflow_09(): examples = [ benchmark.Example( inputs=[ [37, 42, 42, 37, 28, 15, 42, 15], ], output=[0, 1, 1, 0, 2, 3, 1, 3], ), ] constants = [] description = "group items by value and get the group indices" target_program = """ original_unique = torch.masked_select(values, torch.tensor([values[i] not in values[:i] for i in 
range(values.size(0))])) solution = torch.argsort(original_unique)[torch.unique(values, return_inverse=True)[1]] """ source = "https://stackoverflow.com/questions/53054668/assign-values-between-0-and-n-1-for-a-vector-of-length-l-with-n-different-eleme" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_09", ) @benchmark.ignore('Out of scope - api3(api1, api2)') def stackoverflow_10(): examples = [ benchmark.Example( inputs=[ [[15, 10], [20, -5]], [[2, 3, 1], [-2, 5, 0]], ], output=[[[30, 45, 15], [20, 30, 10]], [[-40, 100, 0], [10, -25, 0]]], ), ] constants = [] description = "perform matrix multiplication" target_program = "torch.matmul(torch.unsqueeze(in1, -1), torch.unsqueeze(in2, 1))" source = "https://stackoverflow.com/questions/53094212/tensorflow-sxn-matrix-multiply-with-sxd-matrix-to-output-sxnxd-array" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_10", ) def stackoverflow_11(): examples = [ benchmark.Example( inputs=[ [4, 0, 1, 1, 0, 4, 0, 0, 3, 4, 1], ], output=[4, 3, 0, 1, 3], ), ] constants = [] description = "count the number of occurences of each distinct number" target_program = "torch.bincount(in1)" source = "https://stackoverflow.com/questions/45194672/how-to-count-elements-in-tensorflow-tensor" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_11", ) @benchmark.ignore('Out of scope - api3(api1, api2)') def stackoverflow_12(): examples = [ benchmark.Example( inputs=[[[12, 34, 56], [33, 22, 11]]], output=[[12, 56], [33, 11]] ), ] constants = [0, 1, 2] description = "remove a column from the tensor" target_program = "torch.stack((in1[:,0], in1[:,2]), dim=1)" source = "https://stackoverflow.com/questions/47447183/remove-a-set-of-tensors-from-a-tensor-in-tensorflow" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_12", ) def stackoverflow_13(): examples = [ benchmark.Example( inputs=[ [[3, 5], [10, 2]], # [[[1, 0], [5, 4]], [[3, 10], [2, -2]]], [[[1, 0], [5, 4]], [[3, 10], [2, 0]]] ], output=[[[28, 20], [19, 30]], [[20, 8], [34, 100]]] # [[[28, 20], [19, 20]], [[20, 8], [34, 96]]], ), ] constants = [] description = "multiply vectors by tensor" target_program = "torch.transpose(torch.matmul(in1, in2), 0, 1)" source = "https://stackoverflow.com/questions/50777704/n-d-tensor-matrix-multiplication-with-tensorflow" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_13", ) def stackoverflow_14(): examples = [ benchmark.Example( inputs=[ [ [ [0, 0, 1], [0, 0, 0], [1, 0, 1], [0, 1, 0], [0, 0, 0], [1, 1, 1], [1, 1, 0], # [False, False, True], # [False, False, False], # [True, False, True], # [False, True, False], # [False, False, False], # [True, True, True], # [True, True, False], ] ], ], output=[[1, 0, 1, 1, 0, 1, 1]] # [[True, False, True, True, False, True, True]], ), ] constants = [] target_program = "torch.sum(in1, -1).bool()" description = "choose True if any value in a row is True, False otherwise" source = "https://stackoverflow.com/questions/35657003/aggregate-each-element-of-tensor-in-tensorflow" return benchmark.Benchmark( 
examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_14", ) def stackoverflow_15(): examples = [ benchmark.Example( inputs=[ # [3, 1, 2, 0, 1, -1, 10, 1, -10], [3, 1, 2, 0, 1, 0, 10, 1, 0], 1, ], output=[3, 0, 2, 0, 0, 0, 10, 0, 0] # [3, 0, 2, 0, 0, -1, 10, 0, -10], ), ] constants = [0, 1] description = "set all instances of 1 to 0" target_program = "torch.where(torch.ne(in1,1), in1, 0)" source = "https://stackoverflow.com/questions/39045797/conditional-assignment-of-tensor-values-in-tensorflow" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_15", ) def stackoverflow_16(): examples = [ benchmark.Example( inputs=[ # [[2, 5], [3, 0], [8, -7]], [[2, 5], [3, 0], [8, 7]], # [4, 10, -6], [4, 10, 6] ], output=[[8, 20], [30, 0], [48, 42]] # [[8, 20], [30, 0], [-48, 42]], ), ] constants = [] description = "multiply tensors across the first axis" target_program = "torch.mul(in1, torch.unsqueeze(in2, 1))" source = "https://stackoverflow.com/questions/46240646/tensor-multiply-along-axis-in-tensorflow" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_16", ) def stackoverflow_17(): examples = [ benchmark.Example( inputs=[ # [17, -32, 99], [17, 32, 99] ], output=[[17, 17], [32, 32], [99, 99]] # [[17, 17], [-32, -32], [99, 99]], ), ] constants = [] description = "duplicate each element of a tensor" # StackOverflow answer doesn't work. target_program = "torch.stack((in1, in1),1)" source = "https://stackoverflow.com/questions/51761353/about-tensor-of-tensorflow" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_17", ) def stackoverflow_18(): examples = [ benchmark.Example( inputs=[ # shape=[2, 2, 3]. [[[1, 1, 1], [1, 0, 1]], [[1, 2, 3], [4, 5, 6]]], # shape=[3, 4]. [[1, 1, 1, 1], [1, 2, 3, 4], [5, 6, 7, 8]], # shape=[4]. [100, 200, 300, 400], ], # Shape=[sequence_length, batch_size, 4]=[2, 2, 4]. 
output=[ [[107, 209, 311, 413], [106, 207, 308, 409]], [[118, 223, 328, 433], [139, 250, 361, 472]], ], ), ] constants = [] description = "multiply 3D tensor and 2D tensor and add another tensor" target_program = "torch.add(in3, torch.matmul(in1, in2))" source = "https://stackoverflow.com/questions/38222126/tensorflow-efficient-way-for-tensor-multiplication" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_18", ) @benchmark.ignore('Out of scope') def stackoverflow_19(): examples = [ benchmark.Example( inputs=[ [ [3, 1, 2], [1, 0, 4], [1, 2, 3], [0, 5, 1], [1, 1, 2], [2, 3, 1], [2, 1, 0], ], ], output=[ [0, 5, 1], [1, 0, 4], [1, 1, 2], [1, 2, 3], [2, 1, 0], [2, 3, 1], [3, 1, 2], ], ), ] constants = [] description = ( "sort a tensor considering the first column, breaking ties " "using the second column" ) target_program = """ second_sorted = in1[torch.sort(in1[:, 1])[1]] solution = second_sorted[torch.sort(second_sorted[:, 0])[1]] """ source = "https://stackoverflow.com/questions/49399198/sort-a-tensor-based-on-two-columns-in-tensorflow" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_19", ) def stackoverflow_20(): examples = [ benchmark.Example( inputs=[ [ [7, 2, 1], [4, 5, 1], [4, 4, 2], [3, 4, 3], [0, 0, 1], # [0.7, 0.2, 0.1], # [0.4, 0.5, 0.1], # [0.4, 0.4, 0.2], # [0.3, 0.4, 0.3], # [0.0, 0.0, 1.0], ], ], output=[[1, 0, 0], [0, 1, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]], ), ] constants = [] description = "compute argmax in each tensor and set it to 1" target_program = "torch.nn.functional.one_hot(torch.argmax(in1, 1), in1.size(1))" source = "https://stackoverflow.com/questions/44834739/argmax-on-a-tensor-and-ceiling-in-tensorflow" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_20", ) def stackoverflow_21(): examples = [ benchmark.Example( inputs=[ [[2], [0], [1], [0]], # [[2], [0], [1], [0]], [[2, 5, 3], [1, 3, 6], [1, 6, 3], [7, 0, 3]] # [[0.2, 0.5, 0.3], [0.1, 0.3, 0.6], [0.1, 0.6, 0.3], [0.7, 0.0, 0.3]], ], output=[[3], [1], [6], [7]], # [[0.3], [0.1], [0.6], [0.7]], ), ] constants = [] description = "gather elements in a tensor along axis 1" target_program = "torch.gather(in2, 1, in1)" source = "https://stackoverflow.com/questions/51690095/how-to-gather-element-with-index-in-tensorflow" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_21", ) def stackoverflow_22(): examples = [ benchmark.Example( inputs=[ [3, 1, 10], [[6, 4], [5, 1], [3, 4]] # [[0.6, 0.4], [0.5, 1.0], [3.0, 4.0]], ], output=[53, 53] # [32.3, 42.2], ), ] constants = [] description = "multiply a vector with a matrix without reshaping the vector" target_program = "torch.squeeze(torch.matmul(torch.unsqueeze(in1, 0).float(), in2))" source = "https://stackoverflow.com/questions/43284897/how-can-i-multiply-a-vector-and-a-matrix-in-tensorflow-without-reshaping" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_22", ) def stackoverflow_23(): # Simplified slightly because the user already knows how to do the mod part. 
examples = [ benchmark.Example( inputs=[ [[0, 5, 2], [3, 1, 4], [5, 1, 5]], ], output=[ [1, 0, 1, 0, 0, 1, 0, 0, 0], [0, 1, 0, 1, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1, 0, 0, 0], ], ), ] constants = [] description = "place 1 at the indices in the input tensor" target_program = ( "torch.max(torch.nn.functional.one_hot(in1, 9), 1, keepdim=False, out=None)[0]" ) source = ( "https://stackoverflow.com/questions/53414433/tensorflow-tensor-binarization" ) return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_23", ) def stackoverflow_24(): examples = [ benchmark.Example( inputs=[ # [3.0, 1.0, 4.0, 5.0, 2.0, 8.0, -6.0, -7.0], [3, 1, 4, 5, 2, 8, 6, 7], # [0.5, 0.0, -2.0, 0.0, 1.0, -1.0, 0.0, 2.0], [1, 0, 2, 0, 1, 1, 0, 2], 0 ], output=[3, 1, 2, 5, 2, 8, 6, 3.5] # [6.0, 1.0, -2.0, 5.0, 2.0, -8.0, -6.0, -3.5], ), ] constants = [0] description = "like tf.divide(), but when dividing by 0, return the " "numerator" target_program = "torch.where(torch.ne(in2, 0), torch.divide(in1, in2), in1)" source = "https://stackoverflow.com/questions/53643339/tensorflow-overriding-tf-divide-to-return-the-numerator-when-dividing-by-0" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_24", ) def stackoverflow_25(): examples = [ benchmark.Example( inputs=[ 3, 4, ], output=[ [1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0], [0, 1, 0], [0, 0, 1], # [1.0, 0.0, 0.0], # [0.0, 1.0, 0.0], # [0.0, 0.0, 1.0], # [1.0, 0.0, 0.0], # [0.0, 1.0, 0.0], # [0.0, 0.0, 1.0], # [1.0, 0.0, 0.0], # [0.0, 1.0, 0.0], # [0.0, 0.0, 1.0], # [1.0, 0.0, 0.0], # [0.0, 1.0, 0.0], # [0.0, 0.0, 1.0], ], ), ] constants = [] description = "copy the tensor torch.eye(3), 4 times" target_program = "torch.tile(torch.eye(in1), (in2, 1))" source = "https://stackoverflow.com/questions/53602691/duplicate-a-tensor-n-times" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_25", ) def stackoverflow_26(): examples = [ benchmark.Example( inputs=[[[[3, 4], [1, 2]], [[5, 2], [10, 3]], [[10, 20], [4, 7]]]], # [[[3, 4], [1, 2]], [[5, -2], [-10, 3]], [[10, 20], [-4, 7]]]], output=[10, 20, 41] # [10, -4, 33], ), ] constants = [] description = "reduction operation for multiple dimensions simultaneously" target_program = "torch.sum(torch.sum(in1, 1), 1)" source = "https://stackoverflow.com/questions/54294780/how-to-perform-reduce-op-on-multiple-dimensions-at-once" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_26", ) def stackoverflow_27(): examples = [ benchmark.Example( inputs=[ [0, 3, 5, 6], 8, ], output=[1, 0, 0, 1, 0, 1, 1, 0], ), ] constants = [] description = "boolean tensor with 1 at the indices in the input tensor" target_program = "torch.sum(torch.nn.functional.one_hot(in1, in2), 0)" source = "https://stackoverflow.com/questions/54225704/how-do-i-get-a-tensor-representing-the-on-positions-in-the-original-tensor" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_27", ) @benchmark.ignore('Out of scope') def stackoverflow_28(): examples = [ benchmark.Example( inputs=[ [ [[5, 3], 
[0, 2]], [[7, 4], [5, 1]], [[10, 20], [15, 30]], [[11, 16], [14, 12]], [[-2, -7], [-4, 6]], ], [1, 0, 1, 1, 0], ], output=[[3, 2], [7, 5], [20, 30], [16, 12], [-2, -4]], ), ] constants = [] description = "extract columns from a 3D tensor given column indices" target_program = "torch.transpose(in1, 1, 2)[torch.arange(in1.size(0)), in2, :]" source = "https://stackoverflow.com/questions/54274074/selecting-columns-from-3d-tensor-according-to-a-1d-tensor-of-indices-tensorflow" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_28", ) def stackoverflow_29(): examples = [ benchmark.Example( inputs=[ [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21], # [-1.0, -0.8, -0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6, 0.8, 1.0], [12, 0, 10, 23, 16], # [0.1, -10, -0.1, 1.1, 0.41], ], output=[6, 0, 5, 11, 8], ), ] constants = [] description = "place continuous values into buckets given bucket boundaries" target_program = "torch.searchsorted(in1, in2)" source = "https://stackoverflow.com/questions/54155085/bucketing-continous-value-tensors-in-tensorflow" # lint: NOTYPO return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_29", ) def stackoverflow_30(): examples = [ benchmark.Example( inputs=[ [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], [[9.0, 4.0], [8.0, 5.0], [7.0, 6.0]], ], output=[ [math.sqrt(68), math.sqrt(58), math.sqrt(52)], [math.sqrt(36), math.sqrt(26), math.sqrt(20)], [math.sqrt(20), math.sqrt(10), math.sqrt(4)], ], ), ] constants = [] description = "compute Euclidean distance between two tensors" # StackOverflow answer is incorrect. target_program = "torch.cdist(in1, in2)" source = "https://stackoverflow.com/questions/54147780/tensorflow-how-to-calculate-the-euclidean-distance-between-two-tensor" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_30", ) @benchmark.ignore('Input contains sparse tensor') def stackoverflow_31(): examples = [ benchmark.Example( inputs=[ torch.sparse_coo_tensor( indices=torch.tensor([[0, 0, 1], [0, 1, 1]]), values=[1.0, 1.5, -2.0], size=[2, 2], ), [[3.0, 1.0], [0.2, -1.0]], ], output=5.29, ), ] constants = [] description = "squared error between two tensors, one being a sparse tensor" target_program = "torch.sum(torch.square(torch.sub(in1.to_dense(), in2)))" source = "https://stackoverflow.com/questions/45032668/tensorflow-how-to-compute-the-square-error-between-a-tensor-and-a-sparse-tensor" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_31", ) def stackoverflow_32(): examples = [ benchmark.Example( inputs=[ [[1, 6, 2, 1], [3, 1, 4, 2], [2, 1, 2, 5]] # [[0.1, 0.6, 0.2, 0.1], [0.3, 0.1, 0.4, 0.2], [0.2, 0.1, 0.2, 0.5]], ], output=[13, 15, 20] # [1.3, 1.5, 2.0], ), ] constants = [] description = "weighted sum across rows, where the column index is the weight" target_program = "torch.sum(torch.mul(in1, torch.unsqueeze(torch.arange(in1.size(1)),0).expand(in1.size(0),-1)), 1)" source = "https://stackoverflow.com/questions/48659449/how-to-compute-the-weighted-sum-of-a-tensor-in-tensorflow" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_32", ) @benchmark.ignore('Out 
of scope') def stackoverflow_33(): examples = [ benchmark.Example( inputs=[ [ [0.3, 0.1, 0.4], [0.1, 0.5, 0.9], [0.2, 0.6, 0.5], [0.3, 0.5, 0.8], [0.9, 0.7, 0.9], ], [[0.3, 0.2, 0.3], [0.8, 0.4, 0.6], [0.2, 0.6, 0.4], [0.3, 0.3, 0.8]], ], output=[0.02, 0.19, 0.01, 0.04], ), ] constants = [] description = "find the minimum distance between two sets of points" target_program = "torch.min(torch.sum(torch.square(torch.sub(torch.unsqueeze(in1, 0), torch.unsqueeze(in2, 1))), 2), 1)[0]" source = "https://stackoverflow.com/questions/40558251/computing-minimum-distance-for-each-element-in-a-tensor-relative-to-another-tens" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_33", ) def stackoverflow_34(): examples = [ benchmark.Example( inputs=[ [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[10, 20], [30, 40]]], [3, 5, 10], ], output=[[128, 236], [344, 452]], ), ] constants = [] description = "compute a weighted sum of tensors" target_program = "torch.tensordot(in2, in1, 1)" source = "https://stackoverflow.com/questions/49532371/compute-a-linear-combination-of-tensors-in-tensorflow" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_34", ) @benchmark.ignore('Out of scope') def stackoverflow_35(): examples = [ benchmark.Example( inputs=[ [ [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], [[10.0, 20.0], [30.0, 40.0], [50.0, 60.0]], ], [ [[9.0, 8.0], [7.0, 6.0], [5.0, 4.0]], [[90.0, 80.0], [70.0, 60.0], [50.0, 40.0]], ], [0.1, 0.4, 0.8], ], output=[ [[8.2, 7.4], [5.4, 5.2], [5.0, 5.6]], [[82.0, 74.0], [54.0, 52.0], [50.0, 56.0]], ], ), ] constants = [] description = "linear interpolation between two tensors" target_program = ( "torch.add(in2, torch.mul(torch.unsqueeze(in3, 1), torch.sub(in1, in2)))" ) source = "https://stackoverflow.com/questions/49643371/keras-compute-convex-combination-of-two-tensors" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_35", ) def stackoverflow_36(): examples = [ benchmark.Example( inputs=[ [1, 0, 1, 1, 0, 1, 0, 1], ], output=[1.0, 0.0, 0.333333, 0.25, 0.0, 0.166667, 0.0, 0.125], ), ] constants = [] description = "divide each element by the column index" target_program = "torch.div(in1, torch.arange(1,in1.size(0)+1))" source = "https://stackoverflow.com/questions/43306788/divide-elements-of-1-d-tensor-by-the-corrispondent-index" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_36", ) def stackoverflow_37(): examples = [ benchmark.Example( inputs=[ [ [ [[10, 20, 30], [40, 50, 60]], [[12, 34, 56], [78, 98, 76]], # [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], # [[1.2, 3.4, 5.6], [7.8, 9.8, 7.6]], ] ], # [0.5, 1.0, 2.0], [5, 10, 20] ], output=[[[850, 1900], [1520, 2890]]] # [[[8.5, 19.0], [15.2, 28.9]]], ), ] constants = [] description = "dot product a vector with last dimension of a tensor" target_program = "torch.tensordot(in1, in2, 1)" source = "https://stackoverflow.com/questions/49206051/multiply-4-d-tensor-with-1-d-tensor" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_37", ) @benchmark.ignore('Out of scope') def stackoverflow_38(): # To simplify the problem, 
and to get more than one number as output, this # doesn't include the final reduce_sum step. examples = [ benchmark.Example( inputs=[ [9, 2, 5, 3, 7, 4], [[0, 0, 1, 0, 1, 0], [1, 0, 0, 0, 0, 0], [0, 1, 1, 1, 0, 1]], ], output=[35, 9, 120], ), ] constants = [] description = "compute the product of marked elements" target_program = "torch.prod(torch.maximum(torch.max(in2), torch.mul(in1, in2)), 1)" source = "https://stackoverflow.com/questions/49511529/tensorflow-compute-multiplication-by-binary-matrix" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_38", ) def stackoverflow_39(): examples = [ benchmark.Example( inputs=[ # [[-1.5, 1.0, 0.9, 2.0], [1.1, 0.0, -0.1, -0.9], [-1.0, 0.1, -1.1, 2.5]], [[15, 10, 9, 20], [11, 0, 1, 9], [10, 1, 11, 25]] ], output=[ [225, 100, 81, 400], [121, 0, 1, 81], [100, 1, 121, 625], # [2.25, 1.0, 0.0, 4.0], # [1.21, 0.0, 0.0, 0.0], # [1.0, 0.0, 1.21, 6.25], ], ), ] constants = [] description = ( "set to 0 the elements with absolute value less than 1, and " "square the other elements" ) target_program = ( "torch.square(torch.where(torch.lt(torch.abs(in1), 1), torch.tensor(0.), in1))" ) source = "https://stackoverflow.com/questions/37912161/how-can-i-compute-element-wise-conditionals-on-batches-in-tensorflow" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_39", ) @benchmark.ignore('Target program contains sparse tensor.') def stackoverflow_40(): examples = [ benchmark.Example( inputs=[ [4, 5, 2, 7, 8, 6], [[0, 2], [0, 4], [1, 1], [1, 3], [2, 0], [2, 3]], ], output=[[0, 0, 4, 0, 5], [0, 2, 0, 7, 0], [8, 0, 0, 6, 0]], ), ] constants = [] description = "use the output of tf.nn.top_k to make a sparse tensor" target_program = ( "torch.sparse_coo_tensor(torch.transpose(in2, 0, 1), in1, (3,5)).to_dense()" ) source = "https://stackoverflow.com/questions/43996831/make-a-sparse-tensor-based-on-the-output-of-tf-nn-top-k" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_40", ) def stackoverflow_41(): examples = [ benchmark.Example( inputs=[ [5, 2, 8, 2, 4, 1, 1, 0, 2, 1], 3, ], output=[5, 2, 8, 4, 1, 1, 0, 2, 1], ), ] constants = [] description = "copy all elements except at the given index" target_program = "torch.masked_select(in1, torch.ne(torch.arange(in1.size(0)), 3))" source = "https://stackoverflow.com/questions/54499051/elegant-way-to-access-python-list-and-tensor-in-tensorflow" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_41", ) def stackoverflow_42(): examples = [ benchmark.Example( inputs=[ # [4, 6, 2, 6, 7, 3, -3], [4, 6, 2, 6, 7, 3, 3], 7 ], output=[0, 0, 0, 0, 1, 0, 0], ), ] constants = [] description = "create a binary vector where the max element is 1" target_program = "torch.where(torch.eq(torch.max(in1),in1), 1, 0)" source = "https://stackoverflow.com/questions/54493814/binary-vector-of-max" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_42", ) @benchmark.ignore('Out of scope') def stackoverflow_43(): examples = [ benchmark.Example( inputs=[ [ [12, 34, 56, 78, 90, 10], [99, 88, 77, 55, 44, 33], [-1, -2, -3, -4, 
-5, -6], ], [0, 1, 1, 0, 2, 0], ], output=[12, 88, 77, 78, -5, 10], ), ] constants = [] description = "extract elements of a tensor given row indices" target_program = "torch.squeeze(torch.gather(torch.transpose(in1, 0, 1), 1, torch.unsqueeze(in2, 1)))" source = "https://stackoverflow.com/questions/54455169/better-way-to-access-individual-elements-in-a-tensor" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_43", ) def stackoverflow_44(): examples = [ benchmark.Example( inputs=[ [ # [3, 5, 2], # [6, 2, 3], # [8, 7, 1], # [0, -3, 5], # [-4, 7, 3], # [2, 1, 6], # [10, 20, 30], # [4, 5, 6], [3, 5, 2], [6, 2, 3], [8, 7, 1], [0, 3, 5], [4, 7, 3], [2, 1, 6], [10, 20, 30], [4, 5, 6], ], ], output=[[9, 7, 5], [8, 19, 6], [6, 8, 9], [14, 25, 36]] # [[9, 7, 5], [8, 4, 6], [-2, 8, 9], [14, 25, 36]], ), ] constants = [2] description = "sum across columns for pairs of consecutive rows" target_program = "torch.sum(torch.reshape(in1, (-1, 2, in1.size(1))), 1)" source = "https://stackoverflow.com/questions/54402389/sum-the-columns-for-each-two-consecutive-rows-of-a-tensor-of-3-dimensions" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_44", ) def stackoverflow_45(): examples = [ benchmark.Example( inputs=[ [1, 0, 1, 0, 1], [[[12, 34], [56, 78], [23, 54], [76, 78], [42, 24]]], ], output=[[[34, 12], [56, 78], [54, 23], [76, 78], [24, 42]]], ), ] constants = [] description = "reverse the order in the marked rows" target_program = ( "torch.where(torch.unsqueeze(in2,1).bool(), torch.roll(in1, 1, -1), in1)" ) source = "https://stackoverflow.com/questions/54337925/reverse-order-of-some-elements-in-tensorflow" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_45", ) def stackoverflow_46(): examples = [ benchmark.Example( inputs=[ [3, 4, 1], ], output=[0, 0, 0, 1, 1, 1, 1, 2], ), ] constants = [] description = "convert segment lengths to segment ids" target_program = """ mask = torch.arange(torch.max(in1)).expand(in1.size(0), torch.max(in1)) < torch.unsqueeze(in1, dim=1) solution = torch.mul(torch.unsqueeze(torch.arange(mask.size(0)), 1), mask) solution = torch.masked_select(solution, mask) """ source = "https://stackoverflow.com/questions/58652161/how-to-convert-2-3-4-to-0-0-1-1-1-2-2-2-2-to-utilize-tf-math-segment-sum" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_46", ) @benchmark.ignore('Out of scope') def stackoverflow_47(): examples = [ benchmark.Example( inputs=[ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], [ [True, True, True, False, False], [True, True, False, False, False], [True, True, True, True, True], [True, True, True, True, False], [True, False, False, False, False], [True, True, False, False, False], ], ], output=[ [0, 1, 2, 0, 0], [3, 4, 0, 0, 0], [5, 6, 7, 8, 9], [10, 11, 12, 13, 0], [14, 0, 0, 0, 0], [15, 16, 0, 0, 0], ], ), ] constants = [] description = "put given values into a sequence mask" target_program = """ mask = torch.reshape(in2, [-1]) solution = torch.reshape(torch.where(mask, torch.sub(torch.cumsum(mask, 0), 1), torch.tensor(0)), in2.size()) """ source = 
"https://stackoverflow.com/questions/58641546/how-can-i-put-the-sequential-values-to-the-sequence-mask" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_47", ) def stackoverflow_48(): examples = [ benchmark.Example( inputs=[ [32, 53, 45, 38, 29, 89, 64, 23], [38, 53, 89, 38, 32, 64], ], output=[3, 1, 5, 3, 0, 6], ), ] constants = [] description = "find the indices of all elements" target_program = "torch.argmax(torch.eq(in1, torch.unsqueeze(in2, 1)).int(), 1)" source = "https://stackoverflow.com/questions/58481332/getting-the-indices-of-several-elements-in-a-tensorflow-at-once" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_48", ) def stackoverflow_49(): examples = [ benchmark.Example( inputs=[ # Shape = [3, 1, 2, 3]. # [ # [[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]], # [[[0.8, 1.0, 0.0], [0.6, 0.4, 0.2]]], # [[[0.9, 0.8, 0.7], [0.1, 0.2, 0.3]]], # ], [ [[[1, 2, 3], [4, 5, 6]]], [[[8, 10, 0], [6, 4, 2]]], [[[9, 8, 7], [1, 2, 3]]], ], # [2.0, 0.5, 1.0], [20, 5, 10] ], output=[ [[[20, 40, 60], [80, 100, 120]]], [[[40, 50, 0], [30, 20, 10]]], [[[90, 80, 70], [10, 20, 30]]], # [[[0.2, 0.4, 0.6], [0.8, 1.0, 1.2]]], # [[[0.4, 0.5, 0.0], [0.3, 0.2, 0.1]]], # [[[0.9, 0.8, 0.7], [0.1, 0.2, 0.3]]], ], ), ] constants = [] description = "multiply tensors by scalars in a batched way" target_program = "torch.transpose(torch.mul(in2, torch.transpose(in1, 0, 3)), 0, 3)" source = "https://stackoverflow.com/questions/58466562/given-a-batch-of-n-images-how-to-scalar-multiply-each-image-by-a-different-scal" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_49", ) def stackoverflow_50(): examples = [ benchmark.Example( inputs=[ # 5, # Rows. # 6, # Columns. 3, # Index of nonzero column. ], output=[ [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0], ], ), ] constants = [] description = "create a binary matrix where a specified column is set to one" target_program = "torch.zeros((in1, in2), dtype=torch.int)" source = "https://stackoverflow.com/questions/58537495/tensorflow-initialize-a-sparse-tensor-with-only-one-line-column-not-zero" return benchmark.Benchmark( examples=examples, constants=constants, description=description, target_program=target_program, source=source, name="stackoverflow_50", ) # # A template for easy copy/pasting. Copying an existing benchmark and replacing # # parts of it will lead to a state where the benchmark is half-correct, but not # # obviously so. Copy this template instead when creating new benchmarks. # """ # def stackoverflow_NUMBER(): # examples = [ # benchmark.Example( # inputs=[ # INPUT_1, # INPUT_2, # ], # output=OUTPUT # ), # ] # constants = [CONSTANTS] # description = 'DESCRIPTION' # target_program = 'SOLUTION_PROGRAM' # source = 'PROBLEM_SOURCE' # return benchmark.Benchmark(examples=examples, # constants=constants, # description=description, # target_program=target_program, # source=source, # name='stackoverflow_NUMBER') # """ # pylint: disable=pointless-string-statement
APIsynth-master
Synthesis_incorporation/benchmarks/stackoverflow_benchmarks.py
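# Illustrative sketch, not taken from either of the surrounding files: the
# module that follows performs exhaustive value search, enumerating candidate
# expressions in order of increasing weight. A heavily stripped-down version of
# that idea is shown below; the operation set, the weight accounting, and the
# function name `enumerate_by_weight` are assumptions made for illustration.
import torch

def enumerate_by_weight(inputs, target, max_weight=3):
    # values_by_weight[w] maps an expression string to the value it produces,
    # for every value reachable with total weight w.
    values_by_weight = {1: {"in" + str(i + 1): t for i, t in enumerate(inputs)}}
    unary_ops = [("torch.abs", torch.abs), ("torch.sign", torch.sign)]
    for w in range(2, max_weight + 1):
        values_by_weight[w] = {}
        for expr, val in values_by_weight[w - 1].items():
            for name, op in unary_ops:  # here, applying an op adds one unit of weight
                new_expr, new_val = "{}({})".format(name, expr), op(val)
                if torch.equal(new_val, target):
                    return new_expr  # a lowest-weight solution is found first
                values_by_weight[w][new_expr] = new_val
    return None

# Expected to print "torch.abs(in1)".
print(enumerate_by_weight([torch.tensor([-1, 2, -3])], torch.tensor([1, 2, 3])))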
# Copyright 2021 The TF-Coder Authors. # Copyright (c) Facebook, Inc. and its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Exhaustive value search (enumerating by weight of expression).""" import collections import keyword import re import sys import timeit import tokenize from typing import Any, Dict, List, NamedTuple, Optional, Set, Text, Tuple, Union import random from itertools import product import numpy as np import six import torch from absl import logging from tf_coder import torch_functions from tf_coder.benchmarks import benchmark as benchmark_module from tf_coder.natural_language import description_handler as description_handler_module from tf_coder.models import prediction_model from tf_coder.repair import snippet_handler as snippet_handler_module from tf_coder.value_search import all_operations from tf_coder.value_search import filtered_values_cache from tf_coder.value_search import operation_base from tf_coder.value_search import operation_filtering from tf_coder.value_search import operation_statistics from tf_coder.value_search import value as value_module from tf_coder.value_search import value_search_settings as settings_module from tf_coder.natural_language import description_handler_factory from tf_coder.models import prediction_model_factory from tf_coder.repair import snippet_handler_factory ValuesByWeight = operation_base.ValuesByWeightDict DescriptionHandler = description_handler_module.DescriptionHandler PredictionModel = prediction_model.ClassificationModel SnippetHandler = snippet_handler_module.SnippetHandler Solution = NamedTuple( "Solution", [ ("value", value_module.Value), ("expression", Text), ("weight", int), ("time", float), ], ) ValueSearchResults = NamedTuple( "ValueSearchResults", [ ("solutions", List[Solution]), ("total_time", float), ("value_set", Set[value_module.Value]), ("values_by_weight", ValuesByWeight), ("benchmark", benchmark_module.Benchmark), ("settings", settings_module.Settings), ("statistics", Optional[operation_statistics.OperationStatistics]), ], ) def _suppress_warnings() -> None: """Suppress TensorFlow and Numpy warnings.""" # TensorFlow will produce tons of error logging because we often apply # TensorFlow operations with bad arguments. Suppressing logging noticeably # improves performance. logging.set_verbosity(logging.ERROR) # Numpy sometimes produces warnings for overflow, etc., which can be # distracting. 
np.seterr(all="ignore") def _user_inputs(inputs: Union[Dict[Text, Any], List[Any]]) -> List[Any]: """Takes the inputs dict or list and extracts the input tensors.""" if isinstance(inputs, list): return inputs elif isinstance(inputs, dict): return list(inputs.values()) elif isinstance(inputs, tuple): return list(inputs) else: raise ValueError( "inputs must be a list or dict, but is {}".format(type(inputs)) ) def _contains_sparse(benchmark: benchmark_module.Benchmark) -> bool: """Returns whether the benchmark involves SparseTensors.""" # TODO(kshi): These heuristics are okay, but we should let the user choose if # they want to. for example in benchmark.examples: if isinstance(example.output, torch.Tensor): if example.output.is_sparse: return True for input_object in _user_inputs(example.inputs): if isinstance(input_object, torch.Tensor): if input_object.is_sparse: return True return "sparse" in benchmark.description.lower() def _add_value_by_weight( values_by_weight: ValuesByWeight, value: value_module.Value, weight: int ) -> None: """Adds a value of a given weight to values_by_weight.""" if weight < len(values_by_weight): values_by_weight[weight][value] = value def _constant_exists(constant: Any, constants_so_far: Set[Any]) -> bool: """Checks whether a constant exists already.""" # We can't use the `in` keyword because `True in [1, 2, 3]` evaluates to True! # (`True == 1` evaluates to True.) return any( constant == existing and type(constant) is type(existing) for existing in constants_so_far ) def _is_valid_name(name: Text) -> bool: """Returns whether name is an acceptable Python identifier.""" # Behavior is slightly different between Python versions, e.g., `await` is a # keyword only in PY3, and `print` is keyword only in PY2. if name in ["torch", "np"] or keyword.iskeyword(name): return False if six.PY3: return name.isidentifier() else: return bool(re.match(tokenize.Name + "$", name)) and name not in [ "True", "False", "None", ] def _input_names_to_objects( inputs_collection: Union[List[Any], Dict[Text, Any]] ) -> Dict[Text, Any]: """Returns a mapping from input names to objects, also validating names.""" if isinstance(inputs_collection, (list, tuple)): input_names_to_objects = collections.OrderedDict( ("in" + str(i + 1), input_object) for i, input_object in enumerate(inputs_collection) ) elif isinstance(inputs_collection, dict): for name in inputs_collection: if not isinstance(name, six.string_types): raise ValueError("The input name {!r} must be a string.".format(name)) if not _is_valid_name(name): raise ValueError( "The input name {!r} is not a valid Python identifier.".format(name) ) input_names_to_objects = inputs_collection else: raise ValueError( "The collection of inputs has the wrong format. It can be " "a list of input objects, or a dict mapping string names " "to input objects." ) return input_names_to_objects def _add_constants_and_inputs_and_print( values_by_weight: ValuesByWeight, benchmark: benchmark_module.Benchmark, output_value: value_module.OutputValue, constant_operation: operation_base.Operation, settings: settings_module.Settings, multipliers: Optional[Dict[Text, float]] = None ) -> None: """Adds constant/input Values to values_by_weight, and prints to stdout.""" # Conceptually this is a set, but it's actually a list so that constants can # be printed in the same order they are chosen by the heuristics. The reduced # efficiency of membership-checking is not a big deal because we have few # constants. 
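    # Membership checks below use the `constants_so_far` set; `constants_to_print`
    # is a separate list that records the order in which constants are chosen, so
    # they can be printed in that order later.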
constants_so_far = set() constants_to_print = [] # User-provided constants. for c in benchmark.constants: if not _constant_exists(c, constants_so_far): constant_value = value_module.ConstantValue(c) weight = torch_functions.PROVIDED_CONSTANT_WEIGHT if multipliers: weight = max(1, int(round(weight * multipliers.get(str(constant_value.value), 1)))) _add_value_by_weight(values_by_weight, constant_value, weight) constants_so_far.add(c) constants_to_print.append(c) # Add inputs, while computing some info for extra constants later. max_input_tensor_rank = 0 dimension_lengths = set() input_names_to_objects = _input_names_to_objects(benchmark.examples[0].inputs) for name, input_object in input_names_to_objects.items(): input_value = value_module.InputValue(input_object, name) if input_value.is_tensor: max_input_tensor_rank = max(max_input_tensor_rank, len(input_value.shape)) dimension_lengths.update(input_value.shape) if input_value.is_primitive and constant_operation is not None: scalar_tensor_value = constant_operation.apply([input_value], settings) weight = torch_functions.PRIMITIVE_INPUT_AS_TENSOR_WEIGHT if multipliers: weight = max(1, int(round(weight * multipliers.get(name, 1)))) _add_value_by_weight( values_by_weight, scalar_tensor_value, weight, ) weight = torch_functions.INPUT_VARIABLE_WEIGHT if multipliers: weight = max(1, int(round(weight * multipliers.get(name, 1)))) _add_value_by_weight( values_by_weight, input_value, weight ) if input_value.is_primitive: constants_so_far.add(input_value.value) constants_to_print.append(input_value.value) if settings.printing.print_examples: print( "Input '{}'-{}:\n{!s}\n".format(name, input_value.type, input_value.value) ) if output_value.shape is not None: dimension_lengths.update(output_value.shape) # Always include these as constants. common_constants = [0, 1, -1] # common_constants = [0, 1, -1, True, False] # Also include 2, 3, ..., max_example_input_tensor_rank - 1 when applicable. axis_constants = list(range(2, max_input_tensor_rank)) # Also include dimension lengths of input and output tensors. shape_constants = sorted(dimension_lengths) constant_weight_pairs = ( [(c, torch_functions.COMMON_CONSTANT_WEIGHT) for c in common_constants] + [(c, torch_functions.AXIS_CONSTANT_WEIGHT) for c in axis_constants] + [(c, torch_functions.SHAPE_CONSTANT_WEIGHT) for c in shape_constants] ) for constant, weight in constant_weight_pairs: if not _constant_exists(constant, constants_so_far): constant_value = value_module.ConstantValue(constant) if multipliers: weight = max(1, int(round(weight * multipliers.get(str(constant_value.value), 1)))) _add_value_by_weight(values_by_weight, constant_value, weight) constants_so_far.add(constant) constants_to_print.append(constant) if output_value.shape: # Add the output shape as a constant. shape_tuple = tuple(output_value.shape) shape_tuple_value = value_module.ConstantValue(shape_tuple) weight = torch_functions.OUTPUT_SHAPE_TUPLE_WEIGHT if multipliers: weight = max(1, int(round(weight * multipliers.get(str(shape_tuple_value.value), 1)))) _add_value_by_weight(values_by_weight, shape_tuple_value, weight) # Don't add shape_tuple to constants_to_print, because printing it out could # be confusing to users. # Only for experiments in the PLDI paper. if settings.paper_experiments.uniform_weights: # Count the number of values. num_values = sum( len(values_with_weight) for values_with_weight in values_by_weight ) # Take all values and put them in the collection for weight 1. 
for weight in range(2, len(values_by_weight)): for heavy_value in values_by_weight[weight]: values_by_weight[1][heavy_value] = heavy_value values_by_weight[weight].clear() # Make sure we did it right. for weight, values_with_weight in enumerate(values_by_weight): assert len(values_with_weight) == (num_values if weight == 1 else 0) if settings.printing.print_examples: print("Output-{}:\n{!s}\n".format(output_value.type, output_value.value)) print("Constants: {!r}\n".format(constants_to_print)) if benchmark.snippet: print("Original snippet: {!r}\n".format(benchmark.snippet)) if benchmark.target_program: print("Target snippet: {!r}\n".format(benchmark.target_program)) if benchmark.description: print("Description: {}\n".format(benchmark.description)) print("Searching...\n") sys.stdout.flush() # Flush so the inputs/output appear in Colab immediately. def _check_solution( expression: Text, used_input_names: Set[Text], benchmark: benchmark_module.Benchmark, settings: settings_module.Settings, ) -> bool: """Checks that the solution is good.""" del expression # Unused for now. if settings.require_all_inputs_used: if len(used_input_names) < len(benchmark.examples[0].inputs): return False elif settings.require_one_input_used: if not used_input_names: return False # TODO(kshi): Check that the solution works (floating-point errors may # accumulate beyond an acceptable threshold). return True def _record_solutions( value: value_module.Value, weight: int, start_time: float, solutions: List[Solution], solution_expression_set: Set[Text], benchmark: benchmark_module.Benchmark, settings: settings_module.Settings, ) -> None: """Records new solutions in the `solutions` list.""" reconstructions = value.reconstruct_all_expressions_with_input_names() this_solution_time = timeit.default_timer() - start_time for expression, used_input_names in reconstructions: if expression in solution_expression_set: continue if not _check_solution(expression, used_input_names, benchmark, settings): if settings.printing.bad_solutions: print("Bad solution: {}".format(expression)) continue solution_expression_set.add(expression) solutions.append( Solution( value=value, expression=expression, weight=weight, time=this_solution_time, ) ) if settings.printing.print_solutions: print("Found solution: {}".format(expression)) # Flush so the solutions appear in Colab immediately. sys.stdout.flush() if len(solutions) >= settings.max_solutions: break def _check_solution_found(value, output_value, benchmark, weight, start_time, end_time, solutions, solution_expression_set, settings, is_prediction=False): possible_first_solution = not solutions if settings.printing.print_solutions: if is_prediction: print("Found Solution using prediction") else: print("Found Solution from enumerative search") # Found solution(s), but some may be bad. 
_record_solutions( value, weight, start_time, solutions, solution_expression_set, benchmark, settings, ) if possible_first_solution and solutions: end_time = min( end_time, timeit.default_timer() + settings.max_extra_solutions_time, ) return end_time def _find_solutions_multi_model( benchmark: benchmark_module.Benchmark, operations: List[operation_base.Operation], start_time: float, settings: settings_module.Settings, prediction_model: Optional[PredictionModel] = None, snippet_constant_multipliers: Optional[Dict[Text, float]] = None ) -> Tuple[ List[Solution], Set[value_module.Value], ValuesByWeight, Optional[operation_statistics.OperationStatistics], ]: """Helper, returning (solutions, value_set, values_by_weight, statistics).""" timeout_reached = False end_time = start_time + settings.timeout only_minimal_solutions = settings.only_minimal_solutions if settings.max_solutions == 1: # If we only want one solution, it will be minimal. only_minimal_solutions = True # An object to track statistics, if requested. statistics = ( operation_statistics.OperationStatistics() if settings.printing.statistics else None ) # A list of Solution namedtuples. solutions = [] # A set of string solution expressions (don't return duplicate solutions). solution_expression_set = set() # The output value to search for. output_value = value_module.OutputValue(benchmark.examples[0].output) # A list of OrderedDicts mapping Value objects to themselves. The i-th # OrderedDict contains all Value objects of weight i. values_by_weight = [ collections.OrderedDict() for _ in range(settings.max_weight + 1) ] # Find and cache the constant and casting operations for use later. constant_operation = None int_operation = None float_operation = None bool_operation = None for operation in operations: if operation.name == torch_functions.CONSTANT_OPERATION_NAME: constant_operation = operation elif operation.name == torch_functions.INT_OPERATION_NAME: int_operation = operation elif operation.name == torch_functions.FLOAT_OPERATION_NAME: float_operation = operation elif operation.name == torch_functions.BOOL_OPERATION_NAME: bool_operation = operation # Create the output dtype value for use later. dtype_value = value_module.ConstantValue(output_value.dtype) # Populate values_by_weight with inputs and constants. This also prints # inputs/output/constants to stdout. _add_constants_and_inputs_and_print( values_by_weight, benchmark, output_value, constant_operation, settings, snippet_constant_multipliers ) # A set storing all values found so far. value_set = set().union(*values_by_weight) constants_values = [value for value in value_set if not value.is_tensor] input_values = [value for value in value_set if isinstance(value, value_module.InputValue)] value_trial_list = [] value_trial_list.extend([[value] for value in input_values]) double_products = product(list(input_values), list(input_values)) double_products = [list(p) for p in double_products] value_trial_list.extend(double_products) # TODO(daye): update this to cover every combination, with smarter prioritization # Current version covers all the benchmark cases. 
# It might be better to ignore some combinations, give up some examples that will # take long time either way (i.e., complicated ones) example_trial_list = [] # single input tensor, 1 api call - [in1] single_1 = [[{"inputs": [value], "output": output_value}] for value in input_values] example_trial_list.extend(single_1) # double input tensor, 1 api call - [in1, in2] double_products = product(list(input_values), list(input_values)) double_products = [list(p) for p in double_products] double_1 = [[{"inputs": values, "output": output_value}] for values in double_products] example_trial_list.extend(double_1) # double input tensor, 2 api calls - [in1], [in2, 0] double_2 = [[{"inputs": [values[0]], "output": output_value},{"inputs": [values[1], 0], "output": output_value}] for values in double_products] example_trial_list.extend(double_2) # double input tensor, 2 api calls, output1 being the only input to api2. - [in1, in2], [0] double_2_1 = [[{"inputs": [values[0], values[1]], "output": output_value},{"inputs": [0], "output": output_value}] for values in double_products] example_trial_list.extend(double_2_1) triple_products = product(list(input_values), list(input_values), list(input_values)) triple_products = [list(p) for p in triple_products] # [in1, in2], [in3, 0] triple_2 = [[{"inputs": [values[0], values[1]], "output": output_value},{"inputs": [values[2], 0], "output": output_value}] for values in triple_products] example_trial_list.extend(triple_2) # single input tensor, 2 api calls - [in1], [0] single_2 = [[{"inputs": [value], "output": output_value},{"inputs": [0], "output": output_value}] for value in input_values] example_trial_list.extend(single_2) # # double input tensor, 2 api calls, output1 being the first input to api2. - [in1], [0, in1] # double_2_1 = [[{"inputs": [values[0]], "output": output_value},{"inputs": [0, values[1]], "output": output_value}] for values in double_products] # example_trial_list.extend(double_2_1) # # double input tensor, 2 api calls - [in1], [in2], [0, 0, in2] double_2 = [[{"inputs": [values[0]], "output": output_value},{"inputs": [values[1]], "output": output_value},{"inputs": [0, 0, values[2]], "output": output_value}] for values in triple_products] example_trial_list.extend(double_2) # # single input tensor, 2 api calls - [], [in1] # single_2 = [[{"inputs": [], "output": output_value},{"inputs": [value], "output": output_value}] for value in input_values] # example_trial_list.extend(single_2) # # double input tensor, 3 api calls, first api input to be none. - [], [in1], [0, in2] # double_3 = [[{"inputs": [], "output": output_value},{"inputs": [values[0]], "output": output_value},{"inputs": [0, values[1]], "output": output_value}] for values in double_products] # example_trial_list.extend(double_3) # for values in value_trial_list: for example_sequence in example_trial_list: result_values = set() predicted_sequences = prediction_model.get_predicted_sequence(example_sequence=example_sequence, settings=settings) # predicted_sequences: [sequence, sequence, ...] # : [[operation, operation, ...], [operation, operation ...]] for sequence in predicted_sequences: if settings.printing.predicted_operations: print("sequence: {}".format([op.name for op in sequence])) # intermediate_values = set(value_set) intermediate_values = [] prev_intermediate_values = [] # sequence: [operation, operation, ...] 
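            # For each operation in the predicted sequence, assemble its candidate
            # argument lists: concrete arguments come from the trial example, and
            # every 0 placeholder is filled with an intermediate value produced by
            # an earlier operation in the sequence.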
for i_op, operation in enumerate(sequence): intermediate_values = [] new_intermediate_values = set() if i_op < len(example_sequence): cur_api_inputs = example_sequence[i_op]["inputs"] # print("Cur API Input") # print("With example, inputs: [{}],".format(", ".join([i.reconstruct_expression() if isinstance(i, value_module.Value) else str(i) for i in example_sequence[i_op]['inputs']]))) # 0 is a placeholder for the previous api's output. if 0 not in cur_api_inputs: intermediate_values.append(cur_api_inputs) # cur_api_inputs = [cur_api_inputs+[i_value] for i_value in intermediate_values] # intermediate_values.extend(cur_api_inputs) elif cur_api_inputs.count(0) == 1: # for this version, there will be at most one 0 in each api input cur_intermediate_values = [] for in_value in prev_intermediate_values: intermediate_value = [] for iv in cur_api_inputs: if iv == 0: intermediate_value.append(in_value) else: intermediate_value.append(iv) cur_intermediate_values.append(intermediate_value) # intermediate_values.append([[in_value] if cur_api_inputs[iv] == 0 else cur_api_inputs[iv] for iv in range(len(cur_api_inputs))]) # print(intermediate_value) # intermediate_values.append(intermediate_value) # cur_api_inputs = [[i_value]+cur_api_inputs[1:] for i_value in intermediate_values] intermediate_values.extend(cur_intermediate_values) elif cur_api_inputs.count(0) == 2: # for this version, there will be at most one 0 in each api input cur_intermediate_values = [] for in_values in product(prev_intermediate_values, prev_intermediate_values): intermediate_value = [] in_value_idx = 0 for iv in cur_api_inputs: if iv == 0: intermediate_value.append(in_values[in_value_idx]) in_value_idx += 1 else: intermediate_value.append(iv) cur_intermediate_values.append(intermediate_value) # intermediate_values.append([[in_value] if cur_api_inputs[iv] == 0 else cur_api_inputs[iv] for iv in range(len(cur_api_inputs))]) # print(intermediate_value) # intermediate_values.append(intermediate_value) # cur_api_inputs = [[i_value]+cur_api_inputs[1:] for i_value in intermediate_values] intermediate_values.extend(cur_intermediate_values) if settings.printing.verbose: print("availalbe input for API-{}".format(i_op)) print([i.reconstruct_expression() if isinstance(i, value_module.Value) else i for i in intermediate_values]) for intermediate_value in intermediate_values: if len(intermediate_value) == 2 and operation.name in ['torch.mul(input, other)']: new_values = [] for value in intermediate_value: if value.is_tensor and value.value.dtype == torch.bool: new_values.append(all_operations.find_operation_with_name('IntOperation').apply([value], settings)) else: new_values.append(value) intermediate_value = new_values elif len(intermediate_value) == 3 and operation.name in ['torch.where(condition, input, other)', 'torch.where(condition, self, other)']: if intermediate_value[0].is_tensor and intermediate_value[0].value.dtype != torch.bool: intermediate_value[0] = all_operations.find_operation_with_name('BoolOperation').apply([intermediate_value[0]], settings) if not isinstance(intermediate_value, list): intermediate_value = [[intermediate_value]] else: intermediate_value = [[v] for v in intermediate_value] predicted_values = operation.enumerate_values_with_values( given_values=intermediate_value, potential_value_list=constants_values, end_time=end_time, settings=settings, statistics=statistics ) for predicted_value in predicted_values: if predicted_value not in value_set: if settings.printing.verbose: expression = 
predicted_value.reconstruct_expression() print("{} produces:\n{}".format(expression, predicted_value)) if predicted_value == output_value: end_time = _check_solution_found(predicted_value, output_value, benchmark, 0, start_time, end_time, solutions, solution_expression_set, settings, True) if len(solutions) >= settings.max_solutions: return ( solutions, value_set, values_by_weight, statistics, ) elif all_operations.find_operation_with_name('IntOperation').apply([predicted_value], settings) == output_value: end_time = _check_solution_found(predicted_value, output_value, benchmark, 0, start_time, end_time, solutions, solution_expression_set, settings, True) if len(solutions) >= settings.max_solutions: return ( solutions, value_set, values_by_weight, statistics ) else: new_intermediate_values.add(predicted_value) # do casting to new values if i_op == len(sequence)-1: result_values.add(predicted_value) prev_intermediate_values += list(new_intermediate_values) if timeit.default_timer() > end_time: timeout_reached = True # Don't return immediately; still try to cast new values because this is # relatively quick. break # Try casting new values to the output dtype if this has a chance of being # a correct solution. for new_value in result_values: if (new_value.shape == output_value.shape and new_value.dtype != output_value.dtype and operation_filtering.is_castable(new_value, dtype_value) ): casted_value = None if output_value.dtype == torch.int: casted_value = int_operation.apply([new_value], settings) elif output_value.dtype == torch.bool: casted_value = bool_operation.apply([new_value], settings) elif output_value.dtype == torch.float: casted_value = float_operation.apply([new_value], settings) if casted_value == output_value: possible_first_solution = not solutions # Found solution(s), but some may be bad. 
_record_solutions( casted_value, 0, start_time, solutions, solution_expression_set, benchmark, settings, ) if possible_first_solution and solutions: end_time = min( end_time, timeit.default_timer() + settings.max_extra_solutions_time, ) if len(solutions) >= settings.max_solutions: return solutions, value_set, values_by_weight, statistics if settings.printing.progress: print( "Found {} distinct values of weight {}, or {} total.".format( len(result_values), 0, len(value_set) ) ) if only_minimal_solutions and solutions: return solutions, value_set, values_by_weight, statistics if timeout_reached: break return solutions, value_set, values_by_weight, statistics def _get_predicted_values(values, predicted_operation, constants_values, end_time, settings, statistics): if len(values) > 1 and predicted_operation.name in ['torch.cat(tensors, dim)', 'torch.stack(tensors)', 'torch.stack(tensors, dim)']: stacked_value = all_operations.find_operation_with_name('PairCreationOperation').apply(values, settings) if stacked_value is None: predicted_values = [] else: predicted_values = predicted_operation.enumerate_values_with_values( given_values=[[stacked_value]], potential_value_list=constants_values, end_time=end_time, settings=settings, statistics=statistics ) if len(values) == 2 and predicted_operation.name in ['torch.mul(input, other)']: new_values = [] for value in values: if value.is_tensor and value.value.dtype == torch.bool: new_values.append(all_operations.find_operation_with_name('IntOperation').apply([value], settings)) else: new_values.append(value) # values = [all_operations.find_operation_with_name('IntOperation').apply(value, settings) if value.is value.value.dtype == torch.bool and value is not None else value for value in values] predicted_values = predicted_operation.enumerate_values_with_values( given_values=[[value] for value in new_values], potential_value_list=constants_values, end_time=end_time, settings=settings, statistics=statistics ) elif len(values) == 3 and predicted_operation.name in ['torch.where(condition, input, other)', 'torch.where(condition, self, other)']: if values[0].value.dtype != torch.bool: values[0] = all_operations.find_operation_with_name('BoolOperation').apply([values[0]], settings) predicted_values = predicted_operation.enumerate_values_with_values( given_values= [[value] for valule in values], potential_value_list=constants_values, end_time=end_time, settings=settings, statistics=statistics ) elif len(values) == 1 and predicted_operation.name in ['torch.argmax(input)', 'torch.argmax(input, dim)']: if values[0].value.dtype != torch.int: values[0] = all_operations.find_operation_with_name('IntOperation').apply([values[0]], settings) predicted_values = predicted_operation.enumerate_values_with_values( given_values=[[values[0]]], potential_value_list=constants_values, end_time=end_time, settings=settings, statistics=statistics ) else: predicted_values = predicted_operation.enumerate_values_with_values( given_values=[[value] for value in values], potential_value_list=constants_values, end_time=end_time, settings=settings, statistics=statistics ) return predicted_values # TODO: DFS will speed up the search further def _find_solutions_first_sequence( benchmark: benchmark_module.Benchmark, operations: List[operation_base.Operation], start_time: float, settings: settings_module.Settings, prediction_model: Optional[PredictionModel] = None, snippet_constant_multipliers: Optional[Dict[Text, float]] = None ) -> Tuple[ List[Solution], Set[value_module.Value], 
ValuesByWeight, Optional[operation_statistics.OperationStatistics], ]: """Helper, returning (solutions, value_set, values_by_weight, statistics).""" timeout_reached = False end_time = start_time + settings.timeout only_minimal_solutions = settings.only_minimal_solutions if settings.max_solutions == 1: # If we only want one solution, it will be minimal. only_minimal_solutions = True # An object to track statistics, if requested. statistics = ( operation_statistics.OperationStatistics() if settings.printing.statistics else None ) # A list of Solution namedtuples. solutions = [] # A set of string solution expressions (don't return duplicate solutions). solution_expression_set = set() # The output value to search for. output_value = value_module.OutputValue(benchmark.examples[0].output) # A list of OrderedDicts mapping Value objects to themselves. The i-th # OrderedDict contains all Value objects of weight i. values_by_weight = [ collections.OrderedDict() for _ in range(settings.max_weight + 1) ] # Find and cache the constant and casting operations for use later. constant_operation = None int_operation = None float_operation = None bool_operation = None for operation in operations: if operation.name == torch_functions.CONSTANT_OPERATION_NAME: constant_operation = operation elif operation.name == torch_functions.INT_OPERATION_NAME: int_operation = operation elif operation.name == torch_functions.FLOAT_OPERATION_NAME: float_operation = operation elif operation.name == torch_functions.BOOL_OPERATION_NAME: bool_operation = operation # Create the output dtype value for use later. dtype_value = value_module.ConstantValue(output_value.dtype) # Populate values_by_weight with inputs and constants. This also prints # inputs/output/constants to stdout. _add_constants_and_inputs_and_print( values_by_weight, benchmark, output_value, constant_operation, settings, snippet_constant_multipliers ) # A set storing all values found so far. 
value_set = set().union(*values_by_weight) constants_values = [value for value in value_set if not value.is_tensor] # non_primitive_values = [value for value in value_set if value.is_tensor or value.is_sequence] non_primitive_values = [value for value in value_set if isinstance(value, value_module.InputValue)] filter_cache = filtered_values_cache.FilteredValuesCache() if settings.model.do_first_in_seq: value_set = [] for _ in range(3): value_set = list(set(value_set).union(set(non_primitive_values))) value_trial_list = [[]] value_trial_list.extend([value] for value in value_set) value_trial_list.extend(product(value_set, value_set)) for values in value_trial_list: example = {"inputs": values, "output": output_value} predicted_operations = prediction_model.get_first_in_sequence(example=example, settings=settings) for predicted_operation in predicted_operations: predicted_values = _get_predicted_values(values, predicted_operation, constants_values, end_time, settings, statistics) for predicted_value in predicted_values: if predicted_value not in value_set: if settings.printing.verbose: expression = predicted_value.reconstruct_expression() print("[prediction] {} produces:\n{}".format(expression, predicted_value)) if predicted_value == output_value: end_time = _check_solution_found(predicted_value, output_value, benchmark, 0, start_time, end_time, solutions, solution_expression_set, settings, True) if len(solutions) >= settings.max_solutions: return ( solutions, value_set, values_by_weight, statistics ) elif all_operations.find_operation_with_name('IntOperation').apply([predicted_value], settings) == output_value: end_time = _check_solution_found(predicted_value, output_value, benchmark, 0, start_time, end_time, solutions, solution_expression_set, settings, True) if len(solutions) >= settings.max_solutions: return ( solutions, value_set, values_by_weight, statistics ) else: value_set.append(predicted_value) if timeit.default_timer() > end_time: timeout_reached = True # Don't return immediately; still try to cast new values because this is # relatively quick. break # Try casting new values to the output dtype if this has a chance of being # a correct solution. for new_value in value_set: if (new_value.shape == output_value.shape and new_value.dtype != output_value.dtype and operation_filtering.is_castable(new_value, dtype_value) ): casted_value = None if output_value.dtype == torch.int: casted_value = int_operation.apply([new_value], settings) elif output_value.dtype == torch.bool: casted_value = bool_operation.apply([new_value], settings) elif output_value.dtype == torch.float: casted_value = float_operation.apply([new_value], settings) if casted_value == output_value: possible_first_solution = not solutions # Found solution(s), but some may be bad. 
_record_solutions( casted_value, 0, start_time, solutions, solution_expression_set, benchmark, settings, ) if possible_first_solution and solutions: end_time = min( end_time, timeit.default_timer() + settings.max_extra_solutions_time, ) if len(solutions) >= settings.max_solutions: return solutions, value_set, values_by_weight, statistics if only_minimal_solutions and solutions: return solutions, value_set, values_by_weight, statistics if timeout_reached: break return solutions, value_set, values_by_weight, statistics def _find_solutions( benchmark: benchmark_module.Benchmark, operations: List[operation_base.Operation], start_time: float, settings: settings_module.Settings, prediction_model: Optional[PredictionModel] = None, snippet_constant_multipliers: Optional[Dict[Text, float]] = None ) -> Tuple[ List[Solution], Set[value_module.Value], ValuesByWeight, Optional[operation_statistics.OperationStatistics], ]: """Helper, returning (solutions, value_set, values_by_weight, statistics).""" timeout_reached = False end_time = start_time + settings.timeout only_minimal_solutions = settings.only_minimal_solutions if settings.max_solutions == 1: # If we only want one solution, it will be minimal. only_minimal_solutions = True # An object to track statistics, if requested. statistics = ( operation_statistics.OperationStatistics() if settings.printing.statistics else None ) # A list of Solution namedtuples. solutions = [] # A set of string solution expressions (don't return duplicate solutions). solution_expression_set = set() # The output value to search for. output_value = value_module.OutputValue(benchmark.examples[0].output) # A list of OrderedDicts mapping Value objects to themselves. The i-th # OrderedDict contains all Value objects of weight i. values_by_weight = [ collections.OrderedDict() for _ in range(settings.max_weight + 1) ] # Find and cache the constant and casting operations for use later. constant_operation = None int_operation = None float_operation = None bool_operation = None for operation in operations: if operation.name == torch_functions.CONSTANT_OPERATION_NAME: constant_operation = operation elif operation.name == torch_functions.INT_OPERATION_NAME: int_operation = operation elif operation.name == torch_functions.FLOAT_OPERATION_NAME: float_operation = operation elif operation.name == torch_functions.BOOL_OPERATION_NAME: bool_operation = operation # Create the output dtype value for use later. dtype_value = value_module.ConstantValue(output_value.dtype) # Populate values_by_weight with inputs and constants. This also prints # inputs/output/constants to stdout. _add_constants_and_inputs_and_print( values_by_weight, benchmark, output_value, constant_operation, settings, snippet_constant_multipliers ) # A set storing all values found so far. 
value_set = set().union(*values_by_weight) constants_values = [value for value in value_set if not value.is_tensor] non_primitive_values = [value for value in value_set if value.is_tensor or value.is_sequence] filter_cache = filtered_values_cache.FilteredValuesCache() if settings.model.do_iterative_prediction: # try with values in value_set and run prediction value_trial_list = [] value_trial_list.extend([[value] for value in non_primitive_values]) value_trial_list.extend(product(non_primitive_values, non_primitive_values)) for values in value_trial_list: example = {"inputs": values, "output": output_value} predicted_operations = prediction_model.get_predicted_operations(example=example, settings=settings) for predicted_operation in predicted_operations: if len(values) > 1 and predicted_operation.name in ['torch.cat(tensors, dim)', 'torch.stack(tensors)', 'torch.stack(tensors, dim)']: stacked_value = all_operations.find_operation_with_name('PairCreationOperation').apply(values, settings) if stacked_value is None: predicted_values = [] else: predicted_values = predicted_operation.enumerate_values_with_values( given_values=[[stacked_value]], potential_value_list=constants_values, end_time=end_time, settings=settings, statistics=statistics ) else: predicted_values = predicted_operation.enumerate_values_with_values( given_values=[[value] for value in values], potential_value_list=constants_values, end_time=end_time, settings=settings, statistics=statistics ) for predicted_value in predicted_values: if predicted_value not in value_set: if settings.printing.verbose: expression = predicted_value.reconstruct_expression() print("[prediction] {} produces:\n{}".format(expression, predicted_value)) if predicted_value == output_value: end_time = _check_solution_found(predicted_value, output_value, benchmark, 0, start_time, end_time, solutions, solution_expression_set, settings, True) if len(solutions) >= settings.max_solutions: return ( solutions, value_set, values_by_weight, statistics, ) else: if settings.model.do_first_in_seq: value_set.add(predicted_value) # Value search by weight. for weight in range(1, settings.max_weight + 1): if settings.printing.progress: print("Searching weight {}...".format(weight)) # Values with the current weight. This might already include leaf values. new_values = values_by_weight[weight] # # Random iteration of operations for operation in random.sample(operations, len(operations)): for value in operation.enumerate_values_with_weight( target_weight=weight, values_by_weight=values_by_weight, filter_cache=filter_cache, end_time=end_time, settings=settings, statistics=statistics, ): if value not in value_set: # This value has never been seen before, or it's the desired output. if settings.printing.verbose: expression = value.reconstruct_expression() print("{} produces:\n{}".format(expression, value)) if value == output_value: end_time = _check_solution_found(value, output_value, benchmark, weight, start_time, end_time, solutions, solution_expression_set, settings) if len(solutions) >= settings.max_solutions: return ( solutions, value_set, values_by_weight, statistics, ) else: # Only store the value if it isn't a solution. Otherwise, we'll get # lots of "almost duplicate" solutions, e.g., by adding 0. new_values[value] = value # We should never add output_value (or anything equal) to value_set # so that we can continue finding other solutions. 
value_set.add(value) if settings.model.do_iterative_prediction: if not value.is_tensor: continue value_trial_list = [[value]] value_trial_list.extend(product([value], non_primitive_values)) value_trial_list.extend(product(non_primitive_values, [value])) for values in value_trial_list: example = {"inputs": values, "output": output_value} predicted_operations = prediction_model.get_predicted_operations(example=example, settings=settings) for predicted_operation in predicted_operations: if len(values) > 1 and predicted_operation.name in ['torch.cat(tensors, dim)', 'torch.stack(tensors)', 'torch.stack(tensors, dim)']: stacked_value = all_operations.find_operation_with_name('PairCreationOperation').apply(values, settings) if stacked_value is None: predicted_values = [] else: predicted_values = predicted_operation.enumerate_values_with_values( given_values=[[stacked_value]], potential_value_list=constants_values, end_time=end_time, settings=settings, statistics=statistics ) else: predicted_values = predicted_operation.enumerate_values_with_values( given_values=[[value] for value in values], potential_value_list=constants_values, end_time=end_time, settings=settings, statistics=statistics ) for predicted_value in predicted_values: if predicted_value not in value_set: if settings.printing.verbose: expression = predicted_value.reconstruct_expression() print("[prediction] {} produces:\n{}".format(expression, predicted_value)) if predicted_value == output_value: end_time = _check_solution_found(predicted_value, output_value, benchmark, 0, start_time, end_time, solutions, solution_expression_set, settings, True) if len(solutions) >= settings.max_solutions: return ( solutions, value_set, values_by_weight, statistics, ) else: if settings.model.do_first_in_seq: value_set.add(predicted_value) else: # This value has been seen before. if value in new_values: # The value was already computed differently with this weight. original_value = new_values[value] if isinstance(original_value, value_module.OperationValue): # Only merge reconstructions if this was originally an # OperationValue. (It could be a ConstantValue instead.) operation_value = ( original_value ) # type: value_module.OperationValue operation_value.merge_reconstructions(value) elif not only_minimal_solutions: # If we want non-minimal solutions, we need to store the value even # if we have already seen that value with a smaller weight. new_values[value] = value if timeit.default_timer() > end_time: timeout_reached = True # Don't return immediately; still try to cast new values because this is # relatively quick. break # Try casting new values to the output dtype if this has a chance of being # a correct solution. for new_value in new_values: if (new_value.shape == output_value.shape and new_value.dtype != output_value.dtype and operation_filtering.is_castable(new_value, dtype_value) ): casted_value = None if output_value.dtype == torch.int: casted_value = int_operation.apply([new_value], settings) elif output_value.dtype == torch.bool: casted_value = bool_operation.apply([new_value], settings) elif output_value.dtype == torch.float: casted_value = float_operation.apply([new_value], settings) if casted_value == output_value: possible_first_solution = not solutions # Found solution(s), but some may be bad. 
_record_solutions( casted_value, weight, start_time, solutions, solution_expression_set, benchmark, settings, ) if possible_first_solution and solutions: end_time = min( end_time, timeit.default_timer() + settings.max_extra_solutions_time, ) if len(solutions) >= settings.max_solutions: return solutions, value_set, values_by_weight, statistics if settings.printing.progress: print( "Found {} distinct values of weight {}, or {} total.".format( len(new_values), weight, len(value_set) ) ) if only_minimal_solutions and solutions: return solutions, value_set, values_by_weight, statistics if timeout_reached: break return solutions, value_set, values_by_weight, statistics def _combine_multipliers( first: Dict[Text, float], second: Dict[Text, float] ) -> Dict[Text, float]: """Combines operation weight multiplier dicts. Modifies the first dict.""" for name in second: first[name] = first.get(name, 1.0) * second[name] return first def get_reweighted_operations( benchmark: benchmark_module.Benchmark, settings: settings_module.Settings, description_handler: Optional[DescriptionHandler] = None, prediction_model: Optional[PredictionModel] = None, snippet_operation_multipliers: Optional[Dict[Text, float]] = None ) -> List[operation_base.Operation]: """Returns a list of operations with correct weights for the problem.""" include_sparse_operations = ( not settings.operations.limit_sparse_operations or _contains_sparse(benchmark) ) operations = all_operations.get_operations( include_sparse_operations=include_sparse_operations ) operation_names = [op.name for op in operations] if len(operation_names) != len(set(operation_names)): raise ValueError("Operation names were not unique.") if settings.paper_experiments.uniform_weights: # Only for experiments in the PLDI paper. for operation in operations: operation.weight = 1 return operations multipliers = {} if description_handler and benchmark.description: multipliers = _combine_multipliers( multipliers, description_handler.get_operation_multipliers(benchmark, settings), ) if prediction_model and settings.model.use_multiplier: multipliers = _combine_multipliers( multipliers, prediction_model.get_operation_multipliers(benchmark, settings), ) if snippet_operation_multipliers: multipliers = _combine_multipliers( multipliers, snippet_operation_multipliers ) for operation in operations: operation.weight = max( 1, int(round(operation.weight * multipliers.get(operation.name, 1))) ) return operations def run_value_search( benchmark: benchmark_module.Benchmark, settings: settings_module.Settings, description_handler: Optional[DescriptionHandler] = None, prediction_model: Optional[PredictionModel] = None, snippet_handler: Optional[SnippetHandler] = None, ) -> ValueSearchResults: """Performs value search, iterating by the expression weight. Starts with the constants and user-provided inputs, and applies the given operations, for a given number of iterations. An expression's "weight" is the number of nodes in the expression tree. Args: benchmark: The Benchmark containing input-output examples and constants. settings: A Settings object containing settings for this search. description_handler: A DescriptionHandler that scores operations based on the benchmark's description. prediction_model: A PredictionModel that scores operations based on the pre-trained prediction model. snippet_handler: A SnippetHandler that scores operations based on the the original snippet. Returns: A ValueSearchResults namedtuple. Raises: ValueError: If max_weight is too large to be reasonable. 
""" _suppress_warnings() if len(benchmark.examples) > 1: print("Warning: for now, value search only uses a single example.") start_time = timeit.default_timer() snippet_operation_multipliers = None snippet_constant_multipliers = None if benchmark.snippet: snippet_operation_multipliers, snippet_constant_multipliers = snippet_handler.get_multipliers(benchmark, settings) operations = get_reweighted_operations( benchmark, settings, description_handler=description_handler, prediction_model=prediction_model, snippet_operation_multipliers=snippet_operation_multipliers ) if settings.model.use_multi_model: solutions, value_set, values_by_weight, statistics = _find_solutions_multi_model( benchmark=benchmark, operations=operations, start_time=start_time, settings=settings, prediction_model=prediction_model, snippet_constant_multipliers=snippet_constant_multipliers ) elif settings.model.do_first_in_seq: solutions, value_set, values_by_weight, statistics = _find_solutions_first_sequence( benchmark=benchmark, operations=operations, start_time=start_time, settings=settings, prediction_model=prediction_model, snippet_constant_multipliers=snippet_constant_multipliers ) else: solutions, value_set, values_by_weight, statistics = _find_solutions( benchmark=benchmark, operations=operations, start_time=start_time, settings=settings, prediction_model=prediction_model, snippet_constant_multipliers=snippet_constant_multipliers ) total_time = timeit.default_timer() - start_time if solutions: if settings.printing.print_solutions: print() print( "Solution was found in {:.1f} seconds:\n{}".format( solutions[0].time, solutions[0].expression ) ) if settings.max_solutions != 1: print( "Found {} solution(s) in {:.1f} seconds total.".format( len(solutions), total_time ) ) else: if settings.printing.print_solutions: print( "Could not find solution within {} seconds.".format( min(settings.timeout, total_time) ) ) sys.stdout.flush() return ValueSearchResults( solutions=solutions, total_time=total_time, value_set=value_set, values_by_weight=values_by_weight, benchmark=benchmark, settings=settings, statistics=statistics, ) def run_value_search_from_example( inputs: Union[List[Any], Dict[Text, Any]], output: Any, settings: Optional[settings_module.Settings] = None, **kwargs ) -> ValueSearchResults: """Performs value search for a single user-provided input-output example. Args: inputs: A list of inputs, or a dict mapping input names to inputs. output: The corresponding desired output. settings: An optional Settings object to use, or None to use defaults. **kwargs: The kwarg 'constants' can be used to specify a list of constants, and 'description' can be used to provide a natural language description of the task. Other arguments are passed directly to run_value_search(). Returns: A ValueSearchResults namedtuple. """ if settings is None: settings = settings_module.default_settings() constants = kwargs.pop("constants", None) description = kwargs.pop("description", None) snippet = kwargs.pop("snippet", None) source = kwargs.pop("source", "From user-provided example.") benchmark = benchmark_module.Benchmark( examples=[benchmark_module.Example(inputs, output)], constants=constants, # Will turn into empty list if constants=None. description=description, # Will turn into '' if description=None. 
        snippet=snippet,
        source=source,
    )

    description_handler = description_handler_factory.create_handler(
        settings.description_handler_name
    )
    if settings.printing.print_init:
        print("Description handler: {!r}\n".format(description_handler))

    prediction_model = prediction_model_factory.load_model(
        "classification"
    )
    if settings.printing.print_init:
        print("Prediction model: {!r}\n".format(prediction_model))

    snippet_handler = snippet_handler_factory.create_handler(
        "function_constant"
    )
    if settings.printing.print_init:
        print("Snippet handler: {!r}\n".format(snippet_handler))

    # Pass the handlers created above into the search; otherwise they are
    # constructed but never used, and the default settings would crash later
    # when the (missing) prediction model is consulted.
    return run_value_search(
        benchmark,
        settings,
        description_handler=description_handler,
        prediction_model=prediction_model,
        snippet_handler=snippet_handler,
        **kwargs
    )
APIsynth-master
Synthesis_incorporation/value_search/value_search.py
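The file above ends with the user-facing entry points. The short sketch below illustrates how run_value_search_from_example could be driven; it is not part of the repository. The import path mirrors the repo's own "tf_coder.value_search" imports, the input/output tensors are made up, and actually running it assumes the prediction-model checkpoints configured in the settings are available.

# Hedged usage sketch, not repository code. Example task: gather rows of a
# tensor by index, expressed only as an input/output pair.
import torch

from tf_coder.value_search import value_search
from tf_coder.value_search import value_search_settings as settings_module

inputs = {
    "data": torch.tensor([[10, 20], [30, 40], [50, 60]]),
    "indices": torch.tensor([2, 0]),
}
output = torch.tensor([[50, 60], [10, 20]])

settings = settings_module.from_list(["timeout=60", "max_solutions=1"])
results = value_search.run_value_search_from_example(inputs, output, settings=settings)
for solution in results.solutions:
    # Each Solution records at least the expression string and the time found.
    print("{:.1f}s: {}".format(solution.time, solution.expression))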
# Copyright 2021 The TF-Coder Authors. # Copyright (c) Facebook, Inc. and its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Defines the base Operation class for value search.""" import abc import itertools import sys import timeit import typing from typing import Callable, Dict, List, Optional, Sequence, Set, Text, Tuple, Union import six from tf_coder import tf_coder_utils from tf_coder import torch_functions from tf_coder.value_search import filtered_values_cache from tf_coder.value_search import operation_statistics from tf_coder.value_search import value from tf_coder.value_search import value_search_settings as settings_module ################################################################################ # Type aliases. # The i-th element contains all Value objects of weight i, mapped to themselves. ValuesByWeightDict = List[Dict[value.Value, value.Value]] # The i-th element is an iterable of all Value objects of weight i. ValuesByWeightIterable = List[Union[List[value.Value], Dict[value.Value, value.Value]]] # The i-th inner list contains Value objects that are candidates for argument i. ArgOptionsType = List[List[value.Value]] # The i-th Value is used as the i-th argument for an Operation application. ArgValuesType = Sequence[value.Value] # An optional filter function that is applied to single Value objects. If None, # it is treated as a function that always returns True (all Value objects are # allowed). ValueFilterType = Optional[Callable[[value.Value], bool]] # An optional filter function that is applied to a list of argument values. If # None, it is treated as a function that always returns True (all argument lists # are allowed). ApplyFilterType = Optional[Callable[[ArgValuesType], bool]] ################################################################################ OperationMetadata = typing.NamedTuple("OperationMetadata", [("docstring", Text)]) @six.add_metaclass(abc.ABCMeta) class Operation(object): """An operation that can be applied to a constant number of arguments. Arguments are always ordered, and subclasses can choose their own conventions for this ordering. The operation must be deterministic, must not have side-effects, and must not modify its arguments. Attributes: name: A unique name for this operation. num_args: The number of arguments required by this Operation. weight: The weight of this node in the AST. metadata: Metadata for this Operation. _value_filters_list: A list of lists of filter functions. Each inner list has length num_args and contains a filter function for each argument, where the i-th filter function takes a Value and returns whether that Value should be an option for the i-th argument. Any filter function can be None, which means all values should be options for that argument. The outer list can have multiple lists of filter functions, where each inner list describes one class of valid argument values. The value_filters_list attribute can also be None, in which case all values should be options for all arguments. 
_apply_filter: A filter function that takes a list of Value objects of length num_args (the arguments to a potential application of this Operation), and returns whether those Value objects are compatible (i.e., whether the operation should be applied). If None, the operation is always applied. _name_cache: A cached copy of this Operation's name. """ def __init__(self, num_args: int, weight: int, metadata: OperationMetadata) -> None: """Initializes an Operation.""" self.num_args = num_args self.weight = weight self.metadata = metadata self._value_filters_list = None # type: Optional[List[List[ValueFilterType]]] self._apply_filter = None # type: ApplyFilterType self._name_cache = None @property def name(self) -> Text: """The (cached) name of the operation.""" if self._name_cache is not None: return self._name_cache self._name_cache = self._compute_name() return self._name_cache def _compute_name(self) -> Text: """Computes a name for this operation.""" return self.__class__.__name__ def add_value_filters(self, value_filters: List[ValueFilterType]) -> None: """Adds the given value filters to the value_filters_list attribute. Args: value_filters: A list of filter functions, one per argument, where the i-th filter function takes a Value and returns whether it should be an option for argument i. Raises: ValueError: If the list of filter functions has the wrong length. """ if len(value_filters) != self.num_args: raise ValueError("value_filters must contain one filter per argument.") if self._value_filters_list is None: self._value_filters_list = [] self._value_filters_list.append(value_filters) def set_apply_filter(self, apply_filter: ApplyFilterType) -> None: """Sets the given apply_filter.""" self._apply_filter = apply_filter @abc.abstractmethod def apply( self, arg_values: ArgValuesType, settings: settings_module.Settings ) -> Optional[value.Value]: """Applies this Operation to a list of arguments (Value objects). Args: arg_values: A list of Value objects representing the arguments. settings: A Settings object storing settings for this search. Returns: A Value object representing the result if successful, or None if the operation raises an exception. """ def _enumerate_values( self, arg_options: ArgOptionsType, end_time: float, settings: settings_module.Settings, statistics: Optional[operation_statistics.OperationStatistics] = None, ) -> List[value.Value]: """Enumerates values that are created from multiple choices of arguments. Args: arg_options: A list of lists of Value objects, where the i-th list contains the possible Value objects for the i-th argument. end_time: A timeit.default_timer() cutoff where this should timeout. settings: A Settings object storing settings for this search. statistics: An optional OperationStatistics object to track statistics during this function's execution. Returns: A list of Value objects, one for every successful application of the operation. """ results = [] # type: List[value.Value] apply_count = 0 apply_successes = 0 start_time = timeit.default_timer() for i, arg_values in enumerate(itertools.product(*arg_options)): # Check for timeout periodically. if i % 1000 == 0 and timeit.default_timer() > end_time: break # Skipping filtering is only used for experiments in the PLDI paper. if not ( settings.paper_experiments.skip_filtering and self.name not in torch_functions.REQUIRES_FILTERING ): # _apply_filter is either None or callable. 
if self._apply_filter is not None and not self._apply_filter( arg_values ): # pylint: disable=not-callable continue if settings.printing.all_apply: print( "Applying {} on arguments: {}".format( self.name, [ arg_value.reconstruct_expression() for arg_value in arg_values ], ) ) # Print the output immediately so it isn't swallowed by a stacktrace. sys.stdout.flush() maybe_value = self.apply(arg_values, settings) apply_count += 1 if maybe_value is not None: yes_value = maybe_value # type: value.Value apply_successes += 1 results.append(yes_value) elapsed_time = timeit.default_timer() - start_time if statistics: statistics.update( operation_name=self.name, count=apply_count, successes=apply_successes, time=elapsed_time, ) return results def enumerate_values_with_values( self, given_values: List[List[value.Value]], potential_value_list: List[value.Value], end_time: float, settings: settings_module.Settings, statistics: Optional[operation_statistics.OperationStatistics] = None, ) -> List[value.Value]: """Enumerates values with given fixed argument values. Args: given_values: A list of lists of Value objects, where the i-th list contains the Value objects for the i-th argument. potential_value_list: A list of Value objects that can be used for arguments that are not already given. end_time: A timeit.default_timer() cutoff where this should timeout. settings: A Settings object storing settings for this search. statistics: An optional OperationStatistics object to track statistics during this function's execution. Returns: A list of Value objects that are output of the given input arguments. """ num_args = self.num_args if num_args == 0: return [] # An operation with no arguments can't have variable weight. if num_args < len(given_values): return [] # It got more values than the operation needs. 
results = [] # type: List[value.Value] for value_filters in self._value_filters_list: assert len(value_filters) == num_args arg_options = [] is_valid_option = True if len(given_values) == 0: for arg in range(num_args): arg_option = list(filter(value_filters[arg], potential_value_list)) if len(arg_option) == 0: is_valid_option = False arg_options.append(arg_option) else: # no need to enumerate if self.name in ['torch.add(input, other)', 'torch.any(input)', 'torch.argmax(input)', 'torch.bincount(input)', 'torch.cdist(x1, x2)', 'torch.div(input, other)', 'torch.eq(input, other)', 'torch.gt(input, other)', 'torch.lt(input, other)', 'torch.masked_select(input, mask)', 'torch.matmul(input, other)', 'torch.max(input)', 'torch.minimum(input, other)', 'torch.mul(input, other)', 'torch.ne(input, other)', 'torch.searchsorted(sorted_sequence, input)', 'torch.squeeze(input)', 'torch.square(input)', 'torch.stack(tensors)', 'torch.sum(input)', 'torch.where(condition, input, other)', 'torch.where(condition, self, other)', ]: if num_args != len(given_values): return results for arg in range(num_args): arg_option = list(filter(value_filters[arg], given_values[arg])) if len(arg_option) == 0: is_valid_option = False arg_options.append(arg_option) # enumerate second arg elif self.name in ['torch.any(input, dim)', 'torch.argmax(input, dim)', 'torch.max(input, dim)', 'torch.nn.functional.one_hot(input, num_classes)', 'torch.reshape(input, shape)', 'torch.stack(tensors, dim)', 'torch.sum(input, dim)', 'torch.tile(input, dims)', 'torch.squeeze(input, dim)', 'torch.unsqueeze(input, dim)', 'ExpandOperation' ]: if num_args != len(given_values) + 1: return results arg_option = list(filter(value_filters[0], given_values[0])) if len(arg_option) == 0: is_valid_option = False arg_options.append(arg_option) arg_option = list(filter(value_filters[1], potential_value_list)) if len(arg_option) == 0: is_valid_option = False arg_options.append(arg_option) # enumerate second arg, third fixed elif self.name in ['torch.gather(input, dim, index)']: if num_args != len(given_values)+1: return results arg_option = list(filter(value_filters[0], given_values[0])) if len(arg_option) == 0: is_valid_option = False arg_options.append(arg_option) arg_option = list(filter(value_filters[1], potential_value_list)) if len(arg_option) == 0: is_valid_option = False arg_options.append(arg_option) arg_option = list(filter(value_filters[2], given_values[1])) if len(arg_option) == 0: is_valid_option = False arg_options.append(arg_option) # enumerate the third arg elif self.name in ['torch.repeat_interleave(input, repeats, dim)', 'torch.tensordot(a, b, dims)']: if num_args != len(given_values)+1: return results for arg in range(2): arg_option = list(filter(value_filters[arg], given_values[arg])) if len(arg_option) == 0: is_valid_option = False arg_options.append(arg_option) arg_option = list(filter(value_filters[2], potential_value_list)) if len(arg_option) == 0: is_valid_option = False arg_options.append(arg_option) # enumerate the second and the third elif self.name in ['torch.roll(input, shifts, dims)', 'torch.transpose(input, dim0, dim1)']: if num_args != len(given_values)+2: return results arg_option = list(filter(value_filters[0], given_values[0])) if len(arg_option) == 0: is_valid_option = False arg_options.append(arg_option) for arg in range(1, 3): arg_option = list(filter(value_filters[arg], potential_value_list)) if len(arg_option) == 0: is_valid_option = False arg_options.append(arg_option) if is_valid_option: results.extend( 
self._enumerate_values(arg_options, end_time, settings, statistics) ) return results def enumerate_values_with_weight( self, target_weight: int, values_by_weight: ValuesByWeightDict, filter_cache: filtered_values_cache.FilteredValuesCache, end_time: float, settings: settings_module.Settings, statistics: Optional[operation_statistics.OperationStatistics] = None, ) -> List[value.Value]: """Enumerates values with a given target weight. Args: target_weight: The desired weight of resulting values. values_by_weight: A collection of Values organized by their weight. filter_cache: The FilteredValuesCache object used during this search. end_time: A timeit.default_timer() cutoff where this should timeout. settings: A Settings object storing settings for this search. statistics: An optional OperationStatistics object to track statistics during this function's execution. Returns: A list of Value objects of the specified weight. """ num_args = self.num_args if num_args == 0: return [] # An operation with no arguments can't have variable weight. if target_weight - self.weight - num_args < 0: return [] # Too many arguments for this weight. results = [] # type: List[value.Value] for value_filters in self._value_filters_list: assert len(value_filters) == num_args # Enumerate ways of partitioning (target_weight - self.weight) into # (num_args) positive pieces. # Equivalently, partition (target_weight - self.weight - num_args) into # (num_args) nonnegative pieces. arg_options_list = [] # type: List[ArgOptionsType] for partition in tf_coder_utils.generate_partitions( target_weight - self.weight - num_args, num_args ): # type: Tuple[int, ...] # pytype: disable=annotation-type-mismatch if ( settings.paper_experiments.skip_filtering and self.name not in torch_functions.REQUIRES_FILTERING ): # Only for experiments in the PLDI paper. arg_options = [ values_by_weight[weight_minus_1 + 1] for arg, weight_minus_1 in enumerate(partition) ] # type: ArgOptionsType # pytype: disable=annotation-type-mismatch else: arg_options = [ filter_cache.filter_values( value_filters[arg], weight_minus_1 + 1, values_by_weight[weight_minus_1 + 1], ) for arg, weight_minus_1 in enumerate(partition) ] # type: ArgOptionsType arg_options_list.append(arg_options) for arg_options in arg_options_list: results.extend( self._enumerate_values(arg_options, end_time, settings, statistics) ) return results def reconstruct_expression(self, arg_values: ArgValuesType, use_cache=True) -> Text: """Returns an expression for this operation applied to the given arguments. This can be slow and should not be called in a tight loop. Args: arg_values: A list of Value objects representing the arguments' values. use_cache: If True, the reconstruction may be looked up from a cache. If False, the reconstruction will be recomputed on each call. Returns: A string representation of the code expression. 
""" arg_strings = [ arg_value.reconstruct_expression(use_cache=use_cache) for arg_value in arg_values ] return self.reconstruct_expression_from_strings(arg_strings) def reconstruct_expression_with_input_names( self, arg_values: ArgValuesType ) -> Tuple[Text, Set[Text]]: """Returns an expression for this operation and the used input names.""" arg_strings_list, input_names_list = zip( *[ arg_value.reconstruct_expression_with_input_names() for arg_value in arg_values ] ) return ( self.reconstruct_expression_from_strings(arg_strings_list), set.union(*input_names_list), ) @abc.abstractmethod def reconstruct_expression_from_strings(self, arg_strings: List[Text]) -> Text: """Returns an expression for this operation applied to the given arguments. This can be slow and should not be called in a tight loop. Args: arg_strings: A list of strings representing the arguments' reconstructions. Returns: A string representation of the code expression. """
APIsynth-master
Synthesis_incorporation/value_search/operation_base.py
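Since operation_base.Operation is abstract, a concrete operation has to provide apply and reconstruct_expression_from_strings. The sketch below shows the shape of such a subclass; it is illustrative only. The AddOperation name is hypothetical, and the OperationValue constructor arguments are an assumption rather than code taken from the repository.

# Illustrative sketch, not repository code.
import torch

from tf_coder.value_search import operation_base
from tf_coder.value_search import value as value_module


class AddOperation(operation_base.Operation):
    """Hypothetical element-wise torch.add operation, for illustration only."""

    def __init__(self):
        metadata = operation_base.OperationMetadata(docstring="Adds two tensors element-wise.")
        super().__init__(num_args=2, weight=1, metadata=metadata)

    def apply(self, arg_values, settings):
        try:
            result = torch.add(arg_values[0].value, arg_values[1].value)
        except Exception:
            # Value search treats a failed application as "no value produced".
            return None
        # Assumption: OperationValue is constructed from (value, operation, arg_values).
        return value_module.OperationValue(result, self, list(arg_values))

    def reconstruct_expression_from_strings(self, arg_strings):
        return "torch.add({}, {})".format(arg_strings[0], arg_strings[1])


# Filters narrow the candidate arguments before apply() is attempted, e.g.:
#   add_op = AddOperation()
#   add_op.add_value_filters([operation_filtering.NUMERIC_TENSOR_FILTER] * 2)
#   add_op.set_apply_filter(operation_filtering.BROADCASTABLE_APPLY_FILTER)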
# Copyright 2021 The TF-Coder Authors. # Copyright (c) Facebook, Inc. and its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Settings specific to the value search approach to the TF-Coder problem.""" import ast import os from typing import Any, Dict, List, Text class Settings(object): """Stores settings for TF-Coder's value search algorithm.""" def __init__(self): # A string describing the current version of the search algorithm. self.algorithm_version = ( "Value search, " "TF-IDF (k=5, min_score=0.15), " "tensor features model with F_1 loss and max weighting, " "2020/08/26" ) # Time limit in seconds. self.timeout = 300 # Maximum number of solutions to search for. self.max_solutions = 1 # Whether to only search for solutions with minimal weight. self.only_minimal_solutions = True # Maximum number of seconds to spend searching for solutions after the # first. self.max_extra_solutions_time = 10 # Maximum weight of an expression to search for. self.max_weight = 300 # Whether to require solutions to use all inputs, at least one input, or no # restriction. self.require_all_inputs_used = True self.require_one_input_used = True # The description handler to use. # self.description_handler_name = "tfidf_5_0.15" self.description_handler_name = "no_change" # Other settings organized into separate objects. self.operations = OperationSettings() self.model = ModelSettings() self.printing = PrintSettings() self.paper_experiments = PaperExperimentSettings() # Used to parse setting names. _GROUP_NAMES = ["operations", "tensor_model", "printing", "paper_experiments"] def set(self, name: Text, value: Any) -> None: """Sets the setting with the given name to the given value. Args: name: The name of the setting to set. For example, 'timeout' is used to set `self.timeout`, and either 'printing.statistics' or 'printing_statistics' can be used to set `self.printing.statistics`. value: The value to set the setting to. """ if hasattr(self, name): setattr(self, name, value) else: for group_name in Settings._GROUP_NAMES: if name.startswith(group_name) and name[len(group_name)] in {".", "_"}: reduced_name = name[len(group_name) + 1 :] group = getattr(self, group_name) if hasattr(group, reduced_name): setattr(group, reduced_name, value) break else: raise ValueError( "The name `{}` does not match any setting.".format(name) ) def as_dict(self) -> Dict[Text, Any]: """Returns all settings as a dict.""" result = {} for name, value in self.__dict__.items(): if name in Settings._GROUP_NAMES: for inner_name, inner_value in value.__dict__.items(): full_name = name + "." + inner_name result[full_name] = inner_value else: result[name] = value return result class OperationSettings(object): """Settings about operations to use during search.""" def __init__(self): # Whether to limit sparse operations to benchmarks that contain # SparseTensors in their examples. self.limit_sparse_operations = False # TODO(kshi): Add options to exclude specific operations, or prioritize # user-chosen operations. 
class ModelSettings(object): """Settings for the prediction model.""" def __init__(self): # whether to use multiply model-predicted APIs before search self.use_multiplier = False # reweight constant. [0, 1) self.multiplier = 0.75 # the number of APIs to reweight, given a ranked list from prediction model self.multiplier_top_n = 3 # whether to use iterative search self.do_iterative_prediction = False # the number of APIs to evaluate for each prediction self.do_first_in_seq = True self.iterative_top_n = 10 self.beam_n = 3 # softmax probability threshold self.threshold = 0.0 # whether to use multi-api prediction model self.use_multi_model = False # self.checkpoint_path = "manifold://bigcode/tree/pyCoder/daye_models/Single_100000_integer_30_aug10_nocasting_model.pt" self.checkpoint_path = "manifold://bigcode/tree/pyCoder/data/multilabel_data_corrected/multilabel_200k_10k_10k_integer_16_aug29_shape_type_value_model.pt" # self.api_map_path = "manifold://bigcode/tree/pyCoder/daye_models/Single_100000_integer_30_aug10_nocasting_api2indx.pt" self.api_map_path = "manifold://bigcode/tree/pyCoder/data/multilabel_data_corrected/multilabel_200k_10k_10k_integer_16_aug29_shape_type_value_api2indx.pt" # self.multi_ffn_path = "manifold://bigcode/tree/pyCoder/data/Composite_100000/ffn_model.pt" # 16-exhaustive # self.multi_ffn_path = "manifold://bigcode/tree/pyCoder/data/exhaustive_16api/2_train_net_model.pt" # 33 self.multi_ffn_path = "manifold://bigcode/tree/pyCoder/data/gen_model/10_train_net_model.pt" # self.multi_rnn_path = "manifold://bigcode/tree/pyCoder/data/Composite_100000/rnn_model.pt" # 16-exhaustive # self.multi_rnn_path = "manifold://bigcode/tree/pyCoder/data/exhaustive_16api/2_train_rnn_model.pt" # 33 self.multi_rnn_path = "manifold://bigcode/tree/pyCoder/data/gen_model/10_train_rnn_model.pt" # self.multi_api_map_path = "manifold://bigcode/tree/pyCoder/data/Composite_100000/api2indx17api.pt" # 16-exhaustive # self.multi_api_map_path = "manifold://bigcode/tree/pyCoder/data/exhaustive_16api/api2indx.pt" self.multi_api_map_path = "manifold://bigcode/tree/pyCoder/data/gen_model/api2indx.pt" self.embedding_size = 150 self.shape_embedding_size = 6 self.rnn_hidden_dims = 128 self.rnn_num_layers = 1 self.use_shape_encoding = True self.use_type_encoding = True self.use_value_encoding = True class PrintSettings(object): """Settings that affect printing to stdout.""" def __init__(self): # Whether to print initialization settings self.print_init = True # Whether to print examples self.print_examples = True # Whether to print solutions self.print_solutions = True # Whether to print intermediate results and progress. Setting this to True # will cause significant slowdown from computing and printing many # expressions. self.verbose = False # Whether to print every FunctionOperation application before it occurs. # Setting this to True will cause a huge amount of output and significant # slowdown. self.all_apply = False # Whether to print warnings about too-large tensors. self.tensor_size_warnings = False # Whether to print progress at each iteration of target expression weight. self.progress = False # Whether to print bad solutions. self.bad_solutions = False # Whether to print statistics about operations and executions. self.statistics = False # Whether to print statistics sorted by time (versus by name). Ignored if # `statistics` is False. self.statistics_sort_by_time = False # Whether to print the operations that are prioritized or deprioritized. 
self.prioritized_operations = False self.deprioritized_operations = False # Whether to print the predicted operations during the iterative predictions. self.predicted_operations = False class PaperExperimentSettings(object): """Settings for experiments in the PLDI 2020 paper.""" def __init__(self): self.skip_filtering = False self.uniform_weights = False def default_settings() -> Settings: """Returns a Settings object with default settings.""" return Settings() def from_dict(overrides: Dict[Text, Any]) -> Settings: """Sets settings using a dict to override defaults.""" settings = default_settings() for name, value in overrides.items(): settings.set(name, value) return settings def from_list(overrides: List[Text]) -> Settings: """Sets settings using a list to override defaults. Args: overrides: A list of strings like 'timeout=120' or 'printing.statistics=True'. Each string should contain exactly one '=' character. The portion before the '=' character names a setting to override. The portion after the '=' character describes the value of the setting, in a form parseable by ast.literal_eval(). Raises: ValueError: If any element of `overrides` cannot be processed successfully. Returns: A Settings object. """ settings = default_settings() for override_string in overrides: if override_string.count("=") != 1: raise ValueError( "The override string {!r} does not contain exactly " "one '=' character.".format(override_string) ) equals_index = override_string.index("=") name = override_string[:equals_index] value_string = override_string[equals_index + 1 :] try: value = ast.literal_eval(value_string) settings.set(name, value) except Exception as e: raise ValueError( "Exception raised in ast.literal_eval on {!r}: {}".format( value_string, e ) ) return settings
APIsynth-master
Synthesis_incorporation/value_search/value_search_settings.py
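A small usage sketch for the settings helpers defined above. The override names map onto the Settings and PrintSettings attributes shown in the file; the import path follows the "tf_coder.value_search" convention used elsewhere in the repo.

# Hedged usage sketch, not repository code.
from tf_coder.value_search import value_search_settings as settings_module

# Dict-based overrides: dotted names reach into the setting groups.
settings = settings_module.from_dict({"timeout": 60, "printing.verbose": True})
assert settings.timeout == 60
assert settings.printing.verbose is True

# String-based overrides (e.g. parsed from command-line flags); an underscore
# also works as the group separator, and values go through ast.literal_eval.
settings = settings_module.from_list(["max_solutions=3", "printing_statistics=True"])
print(settings.as_dict()["printing.statistics"])  # -> True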
# Copyright 2021 The TF-Coder Authors. # Copyright (c) Facebook, Inc. and its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Utilities for operation filtering.""" import functools import math import operator from typing import Any, Tuple, Type import torch from tf_coder import filter_group from tf_coder import tensor_limits as limits from tf_coder import tf_coder_utils from tf_coder.value_search import value as value_module @functools.lru_cache(maxsize=None) def get_type_filter(desired_type): """Returns a value filter that only keeps values of the given type.""" return lambda arg_value: arg_value.type is desired_type @functools.lru_cache(maxsize=None) def get_types_filter(desired_types: Tuple[Type[Any], ...]): """Returns a value filter that only keeps values with the given types.""" return lambda arg_value: arg_value.type in desired_types @functools.lru_cache(maxsize=None) def get_dtype_filter(dtype): """Returns a value filter that only keeps tensor values of the given dtype.""" if not isinstance(dtype, torch.dtype): raise TypeError("dtype must be a torch.dtype.") return lambda arg_value: arg_value.dtype is dtype @functools.lru_cache(maxsize=None) def get_tensor_min_rank_filter(rank): """Returns a value filter that only keeps tensors of high enough rank.""" return lambda arg_value: arg_value.is_tensor and len(arg_value.shape) >= rank def _check_tensor_finite(tensor): """Returns whether the float tensor contains all finite entries. Args: tensor: A float tensor. This cannot be an int tensor, or else torch.math.is_finite() will fail! """ return bool(torch.all(torch.isfinite(tensor))) def is_castable(to_cast, dtype): """Returns whether `to_cast` (a Value) can be safely casted to the dtype. This filtering strategy is a workaround for undefined behavior in TensorFlow (b/119633897). Args: to_cast: A Value object that would be casted. dtype: A Value containing a torch.dtype that `to_cast` would be casted to. """ if not dtype.is_int_dtype(): return True # We can always cast to a non-int dtype. to_cast_value = to_cast.value if to_cast.is_sparse_tensor: to_cast_value = to_cast.value.values if to_cast.is_tensor or to_cast.is_sparse_tensor: if not to_cast.has_float_dtype(): return True # Only float -> int is potentially unsafe. if not _check_tensor_finite(to_cast_value): return False # Non-finite floats cannot be casted to int dtypes. elif to_cast.is_sequence: if to_cast.elem_type is float: if float("nan") in to_cast_value: return False # inf and -inf will be caught by the min/max logic. elif to_cast.elem_type_is_tensor: return all( element.size() and is_castable(value_module.InputValue(element, "dummy"), to_cast) for element in to_cast_value ) elif to_cast.elem_type_is_sparse_tensor: return all( element.values.size() and is_castable(value_module.InputValue(element, "dummy"), to_cast) for element in to_cast_value ) else: return True # Only lists of floats or float tensors can be unsafe. 
elif to_cast.type is float: if math.isnan(to_cast_value): return False else: return True min_int, max_int = tf_coder_utils.INT_DTYPE_MIN_MAX[dtype.value] # Floats are truncated when casted to int (nearest int in the zero direction). # Assuming min_int <= 0, the minimum safe float is (min_int - 1 + epsilon), # and the maximum safe float is (max_int + 1 - epsilon). return to_cast.min() > min_int - 1 and to_cast.max() < max_int + 1 def broadcastable(shape_1, shape_2): """Returns whether the two shapes are broadcastable.""" return ( not shape_1 or not shape_2 or all(x == y or x == 1 or y == 1 for x, y in zip(shape_1[::-1], shape_2[::-1])) ) # Constants for common filters. These are named with uppercase to reinforce the # fact that these are constants and should be used as such, even though they are # also technically functions. # pylint: disable=invalid-name # A filter that only keeps primitives. PRIMITIVE_FILTER = operator.attrgetter("is_primitive") # A filter that only keeps torch.DType objects. DTYPE_FILTER = operator.attrgetter("is_dtype") # A filter that only keeps sequences. SEQUENCE_FILTER = operator.attrgetter("is_sequence") # A filter that only keeps tensors. TENSOR_FILTER = operator.attrgetter("is_tensor") def FLOAT_TENSOR_FILTER(arg_value): """Only keeps float tensors.""" return arg_value.is_tensor and not arg_value.is_sparse_tensor and arg_value.has_float_dtype() def NUMERIC_TENSOR_FILTER(arg_value): """Only keeps int and float tensors.""" return arg_value.is_tensor and not arg_value.is_sparse_tensor and ( arg_value.has_int_dtype() or arg_value.has_float_dtype() ) def NUMERIC_PRIMITIVE_FILTER(arg_value): """Only keeps int and float primitives.""" return arg_value.is_primitive and arg_value.type is not bool def NONSCALAR_NUMERIC_TENSOR_FILTER(arg_value): """Only keeps non-scalar int and float tensors.""" return NUMERIC_TENSOR_FILTER(arg_value) and len(arg_value.shape) def INDICES_FILTER(arg_value): """Only keeps tensors/sequences containing ints suitable for indexing.""" return ( arg_value.is_tensor and arg_value.has_int_dtype() and arg_value.min() >= 0 and len(arg_value.shape) == 1 ) def GATHER_INDICES_FILTER(arg_value): """Only keeps tensors/sequences containing ints suitable for indexing.""" return ( arg_value.is_tensor and arg_value.has_int_dtype() and arg_value.min() >= 0 ) def AXIS_FILTER(arg_value): """Only keeps ints in the range [-1, limits.MAX_NUM_DIMENSIONS).""" return arg_value.type is int and -1 <= arg_value.value < limits.MAX_NUM_DIMENSIONS def AXIS_SEQUENCE_FILTER(arg_value): """Only keeps sequences of axis-like ints.""" return ( INTS_SEQUENCE_FILTER(arg_value) and len(arg_value.value) <= limits.MAX_NUM_DIMENSIONS and -1 <= arg_value.min() and arg_value.max() < limits.MAX_NUM_DIMENSIONS ) def PRIMITIVE_OR_SCALAR_TENSOR_FILTER(arg_value): """Only keeps primitives or scalar tensors.""" return arg_value.is_primitive or arg_value.is_tensor and arg_value.shape is None def NON_SCALAR_TENSOR_FILTER(arg_value): """Only keeps tensors that are not scalars.""" return arg_value.is_tensor and arg_value.shape def NOT_TENSOR_FILTER(arg_value): """Only keeps a value if it is not a Tensor or SparseTensor.""" return not arg_value.is_tensor and not arg_value.is_sparse_tensor and not arg_value.is_dtype def PRIMITIVE_OR_TENSOR_FILTER(arg_value): """Only keeps primitives and tensors.""" return arg_value.is_primitive or arg_value.is_tensor def NUMERIC_PRIMITIVE_OR_TENSOR_FILTER(arg_value): """Only keeps numeric primitives and tensors.""" return (NUMERIC_TENSOR_FILTER(arg_value) or 
NUMERIC_PRIMITIVE_FILTER(arg_value)) def NONZERO_PRIMITIVE_OR_TENSOR_FILTER(arg_value): """Only keeps non-zero primitives and tensors""" if NUMERIC_TENSOR_FILTER(arg_value): return len(torch.nonzero(arg_value.value)) > 0 elif NUMERIC_PRIMITIVE_FILTER(arg_value): return arg_value.value != 0 else: return False def TENSOR_1D_FILTER(arg_value): """Only keeps 1-D tensors.""" return arg_value.is_tensor and len(arg_value.shape) == 1 def CONTAINS_INTS_FILTER(arg_value): """Only keeps int sequences or int tensors.""" return arg_value.elem_type is int or arg_value.has_int_dtypes() def INTS_SEQUENCE_FILTER(arg_value): """Only keeps int sequences .""" return arg_value.elem_type is int def TENSOR_SEQUENCE_FILTER(arg_value): """ Only keeps a tensor sequence having same shapes and dtypes.""" if not arg_value.elem_type_is_tensor: return False dtype = arg_value.value[0].dtype shape = arg_value.value[0].shape for a in arg_value.value[1:]: if a.dtype != dtype: return False if a.shape != shape: return False return True def TENSOR_LIKE_SEQUENCE_FILTER(arg_value): """Only keeps rectangular possibly-nested sequences of primitives.""" return arg_value.is_sequence and arg_value.sequence_dtype is not None def INT_OR_INT_TENSOR_FILTER(arg_value): """Only keeps int primitives or int tensors.""" return arg_value.type is int or ( arg_value.is_tensor and not arg_value.shape and arg_value.has_int_dtype() ) def INT_LENGTH_FILTER(arg_value): """Only keeps int primitives or tensors representing a dimension length.""" return ( arg_value.type is int and 0 < int(arg_value.value) <= limits.MAX_DIMENSION_LENGTH ) def SHAPE_FILTER(arg_value): """Only keeps int sequences representing tensor shapes.""" return ( arg_value.is_sequence and arg_value.elem_type is int and 0 < len(arg_value.value) <= limits.MAX_NUM_DIMENSIONS and arg_value.min() > 0 and arg_value.max() <= limits.MAX_DIMENSION_LENGTH and arg_value.reduce_prod() <= limits.MAX_TENSOR_ELEMENTS ) def TENSOR_OR_SPARSE_FILTER(arg_value): """Only keeps Tensors and SparseTensors.""" return arg_value.is_tensor or arg_value.is_sparse_tensor def VECTOR_LENGTH_FILTER(arg_value): """Ensures that a vector of length N (N is the argument) is small enough.""" return ( INT_OR_INT_TENSOR_FILTER(arg_value) and 0 < int(arg_value.value) <= limits.MAX_DIMENSION_LENGTH ) def SQUARE_MATRIX_SIZE_FILTER(arg_value): """Ensures that an NxN matrix (N is the argument) is small enough.""" if not INT_OR_INT_TENSOR_FILTER(arg_value): return False num_rows = int(arg_value.value) return ( 0 < num_rows <= limits.MAX_DIMENSION_LENGTH and num_rows ** 2 <= limits.MAX_TENSOR_ELEMENTS ) def SEQUENCE_MASK_LENGTHS_FILTER(arg_value): """The value must contain few ints with a small maximum.""" # Only int tensors (not SparseTensors), or list of ints, are ok. 
if not ( arg_value.is_tensor and arg_value.has_int_dtype() or arg_value.elem_type is int ): return False max_value = arg_value.max() num_elements = arg_value.num_elements() return num_elements > 0 and max_value * num_elements <= limits.MAX_TENSOR_ELEMENTS def PADDINGS_FILTER(arg_value): """Must be a [N, 2] shape int32 tensor or nested sequence of ints.""" if arg_value.is_sequence: elem_type = arg_value.elem_type shape = arg_value.sequence_shape else: return False if not ( elem_type in [int, float] and len(shape) == 1 and shape[0] % 2 == 0 and shape[0] / 2 <= limits.MAX_NUM_DIMENSIONS ): return False return 0 <= arg_value.min() and arg_value.max() < limits.MAX_DIMENSION_LENGTH / 2 def BATCH_DIMS_FILTER(arg_value): """Must be an int representing a number of batch dimensions.""" return arg_value.type is int and 0 <= arg_value.value < limits.MAX_NUM_DIMENSIONS def SCATTER_INDICES_FILTER(arg_value): """Must be an int tensor appropriate for indices in scatter operations.""" return ( arg_value.is_tensor and arg_value.has_int_dtype and len(arg_value.shape) >= 2 and arg_value.shape[-1] <= limits.MAX_NUM_DIMENSIONS and arg_value.min() >= 0 and arg_value.max() < limits.MAX_DIMENSION_LENGTH ) def BROADCASTABLE_APPLY_FILTER(arg_values): """The two args must be braodcastable.""" x, y = arg_values return broadcastable(x.shape, y.shape) def SAME_DTYPES_APPLY_FILTER(arg_values): """Ensures that the first two arguments have the same dtype.""" return arg_values[0].dtype == arg_values[1].dtype def SAME_DTYPES_BROADCASTABLE_APPLY_FILTER(arg_values): """The two args must have the same dtypes and be broadcastable.""" x, y = arg_values return x.dtype == y.dtype and broadcastable(x.shape, y.shape) def SAME_SHAPES_APPLY_FILTER(arg_values): """Ensures that the first two arguments have the same shape.""" return arg_values[0].shape == arg_values[1].shape def TENSOR_PRIMITIVE_SAME_TYPES_APPLY_FILTER(arg_values): x, y = arg_values if x.is_tensor: if y.is_tensor: return x.dtype == y.dtype elif y.is_primitive: if x.has_float_dtype() and y.type is float: return True elif x.has_int_dtype() and y.type is int: return True else: return False elif x.is_primitive: if y.is_primitive: return x.type == y.type elif y.is_tensor: if x.type is float and x.has_float_dtype(): return True elif x.type is int and x.has_int_dtype(): return True else: return False return False def TENSOR_AXIS_IN_RANGE_APPLY_FILTER(arg_values): """Ensures the axis is less than the rank of the tensor.""" tensor, axis = arg_values return axis.value < len(tensor.shape) # End of section for filter constants. pylint: enable=invalid-name # LINT.IfChange(add_filters_to_function_operation) def add_filters_to_function_operation(function_operation): """Adds filters to the FunctionOperation depending on its FilterGroup.""" group = function_operation.function_info.filter_group if group == filter_group.FilterGroup.NONE: # Do nothing. 
pass elif group == filter_group.FilterGroup.SHAPE_1: function_operation.add_value_filters([SHAPE_FILTER]) elif group == filter_group.FilterGroup.TENSOR_1: function_operation.add_value_filters([TENSOR_FILTER]) elif group == filter_group.FilterGroup.TENSORSEQUENCE_1: function_operation.add_value_filters([TENSOR_SEQUENCE_FILTER]) elif group == filter_group.FilterGroup.FLOATTENSOR_1: function_operation.add_value_filters([FLOAT_TENSOR_FILTER]) elif group == filter_group.FilterGroup.NUMERICTENSOR_1: function_operation.add_value_filters([NUMERIC_TENSOR_FILTER]) elif group == filter_group.FilterGroup.PRIMITIVE_OR_TENSOR_1: function_operation.add_value_filters([PRIMITIVE_OR_TENSOR_FILTER]) elif group == filter_group.FilterGroup.TENSOR_AXIS_2: function_operation.add_value_filters([TENSOR_FILTER, AXIS_FILTER]) function_operation.set_apply_filter(TENSOR_AXIS_IN_RANGE_APPLY_FILTER) elif group == filter_group.FilterGroup.NUMERICTENSOR_AXIS_2: function_operation.add_value_filters([NUMERIC_TENSOR_FILTER, AXIS_FILTER]) function_operation.set_apply_filter(TENSOR_AXIS_IN_RANGE_APPLY_FILTER) elif group == filter_group.FilterGroup.TENSORSEQUENCE_AXIS_2: function_operation.add_value_filters([TENSOR_SEQUENCE_FILTER, AXIS_FILTER]) elif group == filter_group.FilterGroup.TENSOR_BOOLTENSOR_2: function_operation.add_value_filters( [TENSOR_FILTER, get_dtype_filter(torch.bool)] ) elif group == filter_group.FilterGroup.SAME_SHAPES_NUMERICTENSOR_2: function_operation.add_value_filters([NUMERIC_TENSOR_FILTER] * 2) function_operation.set_apply_filter(SAME_SHAPES_APPLY_FILTER) elif group == filter_group.FilterGroup.SAME_DTYPE_NUMERIC_BROADCASTABLE_2: function_operation.add_value_filters([NUMERIC_TENSOR_FILTER] * 2) function_operation.set_apply_filter(SAME_DTYPES_BROADCASTABLE_APPLY_FILTER) elif group == filter_group.FilterGroup.ELEMENTWISE_COMPARISON_2: function_operation.add_value_filters( [NUMERIC_TENSOR_FILTER, PRIMITIVE_OR_TENSOR_FILTER] ) function_operation.set_apply_filter(BROADCASTABLE_APPLY_FILTER) elif group == filter_group.FilterGroup.NE_BROADCASTABLE_2: function_operation.add_value_filters( [NUMERIC_TENSOR_FILTER, NONZERO_PRIMITIVE_OR_TENSOR_FILTER] ) def _not_equal_broadcastable_filter(arg_values): arg1, arg2 = arg_values return (arg1 != arg2 and BROADCASTABLE_APPLY_FILTER(arg_values)) function_operation.set_apply_filter(_not_equal_broadcastable_filter) # Operations with other special handling. elif group == filter_group.FilterGroup.BINCOUNT_1: def _bincount_filter(arg_value): """The value must contain nonnegative ints with a small maximum.""" # Must be an int tensor, lists of ints, or int primitive. 
if not ( arg_value.is_tensor and arg_value.has_int_dtype() ): return False max_value = arg_value.max() min_value = arg_value.min() return (min_value >= 0 and max_value <= limits.MAX_DIMENSION_LENGTH and len(arg_value.shape) == 1) function_operation.add_value_filters([_bincount_filter]) elif group == filter_group.FilterGroup.TENSORIZABLE_1: def _tensorizable_filter(arg_value): if arg_value.is_primitive: return True elif arg_value.is_sequence: return not arg_value.elem_type_is_tensor else: return False function_operation.add_value_filters([_tensorizable_filter]) elif group == filter_group.FilterGroup.BMM_2: def _numeric_min_rank_3_filter(arg_value): """Must be an int or float tensor of rank = 3.""" return arg_value.is_tensor and len(arg_value.shape) == 3 def _bmm_filter(arg_values): """Ensures the third dimension of the first tensor equals to the second dimension of the second tensor, and the first dimension of the two argumetns should be equal.""" return (SAME_DTYPES_APPLY_FILTER(arg_values) and arg_values[0].shape[2] == arg_values[1].shape[1] and arg_values[0].shape[0] == arg_values[1].shape[0] ) function_operation.add_value_filters([_numeric_min_rank_3_filter] * 2) function_operation.set_apply_filter(_bmm_filter) elif group == filter_group.FilterGroup.CAT_TENSORSEQUENCE_AXIS_2: function_operation.add_value_filters([TENSOR_SEQUENCE_FILTER, AXIS_FILTER]) def _axis_in_range(arg_values): """Ensures the axis is at most the rank of the tensor.""" tensor, axis = arg_values return axis.value < len(tensor.value[0].shape) function_operation.set_apply_filter(_axis_in_range) elif group == filter_group.FilterGroup.CDIST_2: def _cdist_filter(arg_value): return (arg_value.is_tensor and arg_value.has_float_dtype() and len(arg_value.shape) > 1) function_operation.add_value_filters([_cdist_filter] * 2) function_operation.set_apply_filter(SAME_SHAPES_APPLY_FILTER) elif group == filter_group.FilterGroup.EYE_1: function_operation.add_value_filters([SQUARE_MATRIX_SIZE_FILTER]) elif group == filter_group.FilterGroup.RANGE_1: function_operation.add_value_filters([VECTOR_LENGTH_FILTER]) elif group == filter_group.FilterGroup.EXPAND_DIMS_2: function_operation.add_value_filters([TENSOR_FILTER, AXIS_FILTER]) def _axis_in_range(arg_values): """Ensures the axis is at most the rank of the tensor.""" tensor, axis = arg_values return axis.value < len(tensor.shape) function_operation.set_apply_filter(_axis_in_range) elif group == filter_group.FilterGroup.EXPAND_DIMS_ADDITIONAL_2: function_operation.add_value_filters([TENSOR_FILTER, AXIS_FILTER]) def _axis_in_range(arg_values): """Ensures the axis is at most the rank of the tensor.""" tensor, axis = arg_values return axis.value <= len(tensor.shape) function_operation.set_apply_filter(_axis_in_range) elif group == filter_group.FilterGroup.EYE_ROWS_COLS_2: def _eye_rows_cols_apply_filter(arg_values): """Checks that the result will have a small number of elements.""" num_rows, num_cols = arg_values return ( int(num_rows.value) * int(num_cols.value) <= limits.MAX_TENSOR_ELEMENTS ) function_operation.add_value_filters([VECTOR_LENGTH_FILTER] * 2) function_operation.set_apply_filter(_eye_rows_cols_apply_filter) elif group == filter_group.FilterGroup.MATMUL_2: def _numeric_min_rank_2_filter(arg_value): """Must be an int or float tensor of rank >= 2.""" return arg_value.is_tensor and len(arg_value.shape) >= 2 function_operation.add_value_filters([_numeric_min_rank_2_filter] * 2) function_operation.set_apply_filter(SAME_DTYPES_APPLY_FILTER) elif group == 
filter_group.FilterGroup.MM_2: def _numeric_min_rank_2_filter(arg_value): """Must be an int or float tensor of rank = 2.""" return arg_value.is_tensor and len(arg_value.shape) == 2 def _mm_filter(arg_values): """Ensures the second dimension of the first tensor equals to the first dimension of the second tensor.""" return (SAME_DTYPES_APPLY_FILTER(arg_values) and arg_values[0].shape[1] == arg_values[1].shape[0] ) function_operation.add_value_filters([_numeric_min_rank_2_filter] * 2) function_operation.set_apply_filter(_mm_filter) elif group == filter_group.FilterGroup.NORMALIZE_2: def _complex_tensor_filter(arg_value): return (arg_value.is_tensor and arg_value.has_float_dtype()) function_operation.add_value_filters([_complex_tensor_filter, AXIS_FILTER]) def _axis_in_range(arg_values): """Ensures the axis is at most the rank of the tensor.""" tensor, axis = arg_values return axis.value < len(tensor.shape) function_operation.set_apply_filter(_axis_in_range) elif group == filter_group.FilterGroup.ONE_HOT_2: def _one_hot_indices_filter(arg_value): """Must contain ints and less than the max number of dimensions.""" return ( arg_value.is_tensor and arg_value.dtype == torch.int64 and arg_value.min() >= 0 and len(arg_value.shape) < limits.MAX_NUM_DIMENSIONS ) def _one_hot_apply_filter(arg_values): """Checks that the result will have a small number of elements.""" indices, num_classes = arg_values return ( indices.num_elements() * int(num_classes.value) <= limits.MAX_TENSOR_ELEMENTS and indices.max() < num_classes.value ) function_operation.add_value_filters( [_one_hot_indices_filter, INT_LENGTH_FILTER] ) function_operation.set_apply_filter(_one_hot_apply_filter) elif group == filter_group.FilterGroup.PAD_2: function_operation.add_value_filters([TENSOR_FILTER, PADDINGS_FILTER]) def _pad_2_apply_filter(arg_values): tensor, paddings = arg_values paddings_shape = paddings.sequence_shape return ( tensor.shape and paddings_shape[0] / 2 <= len(tensor.shape) ) function_operation.set_apply_filter(_pad_2_apply_filter) elif group == filter_group.FilterGroup.RESHAPE_2: def _reshape_filter(arg_values): """The new size must be compatible with its original size.""" tensor, shape = arg_values num_tensor_elements = torch.prod(torch.tensor(tensor.value.shape)) num_shape_elements = torch.prod(torch.tensor(shape.value)) return (num_tensor_elements % num_shape_elements == 0 and num_shape_elements != 1) function_operation.add_value_filters([TENSOR_FILTER, SHAPE_FILTER]) function_operation.set_apply_filter(_reshape_filter) elif group == filter_group.FilterGroup.SEARCHSORTED_2: def _sorted_last_dimension(arg_value): """Must be a numeric tensor that is sorted in the last dimension.""" return ( NONSCALAR_NUMERIC_TENSOR_FILTER(arg_value) and ( arg_value.has_float_dtype() or arg_value.dtype in [torch.int32, torch.int64] ) and bool( torch.all(torch.eq(arg_value.value, torch.sort(arg_value.value)[0])) ) ) function_operation.add_value_filters( [_sorted_last_dimension, NUMERIC_PRIMITIVE_OR_TENSOR_FILTER] ) def _searchsorted_apply_filter(arg_values): """DTypes must match, dimension lengths equal except the last.""" sorted_sequence, values = arg_values return ( sorted_sequence.dtype == values.dtype and len(sorted_sequence.shape) == len(values.shape) and sorted_sequence.shape[:-1] == values.shape[:-1] ) function_operation.set_apply_filter(_searchsorted_apply_filter) elif group == filter_group.FilterGroup.TILE_2: def _tile_apply_filter(arg_values): """Checks that the result will have a small number of elements.""" tensor, multiples = 
arg_values return ( multiples.min() > 0 and multiples.max() > 1 and multiples.reduce_prod() * tensor.num_elements() <= limits.MAX_TENSOR_ELEMENTS ) function_operation.add_value_filters([TENSOR_FILTER, AXIS_SEQUENCE_FILTER]) function_operation.set_apply_filter(_tile_apply_filter) elif group == filter_group.FilterGroup.SQUEEZE_2: def _very_squeezable_filter(arg_value): """Keeps tensors with more than 1 squeezable dimension.""" # If a tensor only has 1 squeezable dimension, then this operation is # useless because it is simpler to use the one-arg version of squeeze. return TENSOR_FILTER(arg_value) and (arg_value.shape or []).count(1) >= 2 function_operation.add_value_filters([_very_squeezable_filter, AXIS_FILTER]) def _squeeze_2_apply_filter(arg_values): tensor, axis = arg_values return axis.value < len(tensor.shape) and tensor.shape[axis.value] == 1 function_operation.set_apply_filter(_squeeze_2_apply_filter) elif group == filter_group.FilterGroup.GATHER_3: function_operation.add_value_filters( [ NON_SCALAR_TENSOR_FILTER, BATCH_DIMS_FILTER, GATHER_INDICES_FILTER, ] ) def _gather_3_apply_filter(arg_values): params, batch_dims, indices = arg_values batch_dims_int = batch_dims.value indices_shape = ( indices.shape if indices.is_tensor else indices.sequence_shape ) return ( indices.is_tensor and batch_dims_int < min(len(indices_shape), len(params.shape)) and params.shape[:batch_dims_int] == indices_shape[:batch_dims_int] and indices_shape # It is also required that index.size(d) <= input.size(d) for all dimensions d != dim and all([(indices_shape[d] <= params.shape[d]) or d == batch_dims_int for d in range(min(len(params.shape), len(indices_shape)))]) and indices.max() < params.shape[batch_dims_int] and # Upper bound on resulting tensor size. indices.num_elements() * params.num_elements() <= limits.MAX_TENSOR_ELEMENTS ) function_operation.set_apply_filter(_gather_3_apply_filter) elif group == filter_group.FilterGroup.INDEX_SELECT_3: function_operation.add_value_filters( [ NON_SCALAR_TENSOR_FILTER, BATCH_DIMS_FILTER, INDICES_FILTER, ] ) def _index_select_3_apply_filter(arg_values): params, dim, indices = arg_values dim_int = dim.value indices_shape = indices.shape return ( dim_int < len(params.shape) and indices_shape and indices.max() < max(params.shape) and # Upper bound on resulting tensor size. 
indices.num_elements() * params.num_elements() <= limits.MAX_TENSOR_ELEMENTS ) function_operation.set_apply_filter(_index_select_3_apply_filter) elif group == filter_group.FilterGroup.RANGE_3: def _range_3_apply_filter(arg_values): """Checks that the range will end up having a small number of elements.""" start, limit, delta = arg_values return ( delta.value != 0 and 0 < len(range(start.value, limit.value, delta.value)) <= limits.MAX_DIMENSION_LENGTH ) function_operation.add_value_filters([get_type_filter(int)] * 3) function_operation.set_apply_filter(_range_3_apply_filter) elif group == filter_group.FilterGroup.REPEAT_3: def _repeat_filter(arg_value): return (INT_OR_INT_TENSOR_FILTER(arg_value) and arg_value.min() > 0) def _repeat_3_apply_filter(arg_values): """Checks the first two arguments are broadcastable and the third argument is at most the rank of the tensor.""" return (BROADCASTABLE_APPLY_FILTER([arg_values[0], arg_values[1]]) and TENSOR_AXIS_IN_RANGE_APPLY_FILTER([arg_values[0], arg_values[2]])) function_operation.add_value_filters([NUMERIC_TENSOR_FILTER, _repeat_filter, AXIS_FILTER]) function_operation.set_apply_filter(_repeat_3_apply_filter) elif group == filter_group.FilterGroup.ROLL_3: # The case where the shift and axis are both single integers. function_operation.add_value_filters( [TENSOR_FILTER, INT_OR_INT_TENSOR_FILTER, AXIS_FILTER] ) # The case where the shift and axis are both sequences of integers. function_operation.add_value_filters( [TENSOR_FILTER, INTS_SEQUENCE_FILTER, AXIS_SEQUENCE_FILTER] ) def _roll_apply_filter(arg_values): tensor, shift, axis = arg_values if axis.type is int: return axis.value < len(tensor.shape) else: return len(axis.value) == len(shift.value) and axis.max() < len( tensor.shape ) function_operation.set_apply_filter(_roll_apply_filter) elif group == filter_group.FilterGroup.TENSORDOT_3: def _tensordot_arg_3_filter(arg_value): """The argument "axes" must have axis-like ints and the right shape.""" if arg_value.type is int: # An int N means "sum over the last N axes of a and the first N axes of # b in order", so 0 <= N <= maximum rank. return 0 <= arg_value.value <= limits.MAX_NUM_DIMENSIONS if arg_value.elem_type is int: # List of length 2 is ok, elements must be valid axes. return ( len(arg_value.value) == 2 and 0 <= arg_value.min() and arg_value.max() < limits.MAX_NUM_DIMENSIONS ) # Otherwise, must be an int tensor of shape [2] or [2, k]. return ( arg_value.is_tensor and arg_value.has_int_dtype() and 1 <= len(arg_value.shape) <= 2 and arg_value.shape[0] == 2 and 0 <= arg_value.min() and arg_value.max() < limits.MAX_NUM_DIMENSIONS ) function_operation.add_value_filters( [ NONSCALAR_NUMERIC_TENSOR_FILTER, NONSCALAR_NUMERIC_TENSOR_FILTER, _tensordot_arg_3_filter, ] ) def _tensordot_apply_filter(arg_value): """First two tensors must have same dtype, and axes must be in range.""" a, b, axes = arg_value if ( a.dtype != b.dtype or # This check is overly conservative for the sake of efficiency; the # resulting number of elements is most likely smaller but will take # effort to compute more precisely. a.num_elements() * b.num_elements() > limits.MAX_TENSOR_ELEMENTS ): return False a_rank = len(a.shape) b_rank = len(b.shape) min_rank = min(a_rank, b_rank) if axes.type is int: return axes.value <= min_rank elif axes.is_sequence or len(axes.shape) == 1: # axes is a list or tensor of shape [2]. return axes.max() < min_rank else: # axes is a tensor of shape [2, k]. 
return ( axes.shape[1] <= min_rank and tf_coder_utils.max_tensor_value(axes.value[0]) < a_rank and tf_coder_utils.max_tensor_value(axes.value[1]) < b_rank ) function_operation.set_apply_filter(_tensordot_apply_filter) elif group == filter_group.FilterGroup.TRANSPOSE_3: def _transpose_3_apply_filter(arg_values): """Checks that perm has length equal to the number of a's dimensions.""" tensor, dim0, dim1 = arg_values return (dim0.value < len(tensor.shape) and dim1.value < len(tensor.shape) and dim0.value < dim1.value) function_operation.add_value_filters( [TENSOR_FILTER, BATCH_DIMS_FILTER, BATCH_DIMS_FILTER] ) function_operation.set_apply_filter(_transpose_3_apply_filter) elif group == filter_group.FilterGroup.WHERE_TENSOR_3: def _where_apply_filter(arg_values): """Ensures that the last two arguments have matching shapes and dtypes.""" condition, x, y = arg_values return (TENSOR_PRIMITIVE_SAME_TYPES_APPLY_FILTER([x, y]) and broadcastable(condition.shape, x.shape) and broadcastable(condition.shape, y.shape) and x != y) function_operation.add_value_filters( [ get_dtype_filter(torch.bool), NUMERIC_TENSOR_FILTER, NUMERIC_PRIMITIVE_OR_TENSOR_FILTER, ] ) function_operation.set_apply_filter(_where_apply_filter) elif group == filter_group.FilterGroup.WHERE_NUMERIC_3: def _where_apply_filter(arg_values): """Ensures that the last two arguments have matching shapes and dtypes.""" condition, x, y = arg_values return (TENSOR_PRIMITIVE_SAME_TYPES_APPLY_FILTER([x, y]) and broadcastable(condition.shape, x.shape) and broadcastable(condition.shape, y.shape) and x != y) function_operation.add_value_filters( [ get_dtype_filter(torch.bool), NUMERIC_PRIMITIVE_FILTER, NUMERIC_PRIMITIVE_OR_TENSOR_FILTER, ] ) function_operation.set_apply_filter(_where_apply_filter) else: raise ValueError( "Unknown filter group {} for FunctionOperation {}.".format( group, function_operation.name ) ) # LINT.ThenChange() # It is reasonable to strengthen or relax a filtering strategy here without # involving a change to the filter groups.
APIsynth-master
Synthesis_incorporation/value_search/operation_filtering.py
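# Illustrative sketch (not part of the original operation_filtering.py): the apply
# filters above (e.g. BROADCASTABLE_APPLY_FILTER) rely on a broadcastable(shape_a,
# shape_b) helper defined elsewhere in this repository. A minimal standalone
# version, assuming standard PyTorch/NumPy broadcasting semantics, could look like:
def broadcastable(shape_a, shape_b):
    """Returns True if the two shapes can be broadcast against each other."""
    # Walk the trailing dimensions; a pair is compatible if equal or if either is 1.
    for dim_a, dim_b in zip(reversed(list(shape_a)), reversed(list(shape_b))):
        if dim_a != dim_b and dim_a != 1 and dim_b != 1:
            return False
    return True


assert broadcastable([3, 1, 5], [4, 5])   # shapes broadcast to [3, 4, 5]
assert not broadcastable([3, 2], [3])     # trailing dims 2 vs 3 are incompatible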
# Copyright (c) Facebook, Inc. and its affiliates. # Copyright (c) Facebook, Inc. and its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Defines Operation objects for Python operators.""" import torch from tf_coder.value_search import operation_base from tf_coder.value_search import operation_filtering as filtering from tf_coder.value_search import value # Weights for Python operations. SIZE_WEIGHT = 32 INT_WEIGHT = 16 FLOAT_WEIGHT = 16 BOOL_WEIGHT = 16 VIEW_WEIGHT = 28 EXPAND_WEIGHT = 24 # "Docstrings" for Python operations, so they can used for ranking in the same # way as for TensorFlow operations. SIZE_DOCSTRING = """ Returns the size of the self tensor. The returned value is a subclass of tuple. """ INT_DOCSTRING = """ Cast the self tensor to int. """ FLOAT_DOCSTRING = """ Cast the self tensor to float. """ BOOL_DOCSTRING = """ Cast the self tensor to bool. """ VIEW_DOCSTRINGS = """ Returns a new tensor with the same data as the self tensor but of a different shape. """ EXPAND_DOCSTRINGS = """ Returns a new view of the self tensor with singleton dimensions expanded to a larger size. """ class SizeOperation(operation_base.Operation): def __init__(self): metadata = operation_base.OperationMetadata(docstring=SIZE_DOCSTRING) super(SizeOperation, self).__init__( num_args=1, weight=SIZE_WEIGHT, metadata=metadata) self.add_value_filters([filtering.NON_SCALAR_TENSOR_FILTER]) def apply(self, arg_values, settings): """See base class.""" try: return value.OperationValue(arg_values[0].value.size(), self, arg_values) except Exception: # pylint: disable=broad-except return None def reconstruct_expression_from_strings(self, arg_strings): """See base class.""" if len(arg_strings) == 1: return arg_strings[0] + '.size()' else: return arg_strings[0] + '.size(' + arg_strings[1] + ')' class IntOperation(operation_base.Operation): def __init__(self): metadata = operation_base.OperationMetadata(docstring=INT_DOCSTRING) super(IntOperation, self).__init__( num_args=1, weight=INT_WEIGHT, metadata=metadata) def _non_int_tensor_filter(arg_value): """Only keeps values that are non-int tensors.""" return arg_value.is_tensor and not arg_value.has_int_dtype() self.add_value_filters([_non_int_tensor_filter]) def apply(self, arg_values, settings): """See base class.""" try: return value.OperationValue(arg_values[0].value.long(), self, arg_values) except Exception: # pylint: disable=broad-except return None def reconstruct_expression_from_strings(self, arg_strings): """See base class.""" return arg_strings[0] + '.long()' class FloatOperation(operation_base.Operation): def __init__(self): metadata = operation_base.OperationMetadata(docstring=FLOAT_DOCSTRING) super(FloatOperation, self).__init__( num_args=1, weight=FLOAT_WEIGHT, metadata=metadata) def _non_float_tensor_filter(arg_value): """Only keeps values that are non-float tensors.""" return arg_value.is_tensor and not arg_value.has_float_dtype() self.add_value_filters([_non_float_tensor_filter]) def apply(self, arg_values, settings): """See base 
class.""" try: return value.OperationValue(arg_values[0].value.float(), self, arg_values) except Exception: # pylint: disable=broad-except return None def reconstruct_expression_from_strings(self, arg_strings): """See base class.""" return arg_strings[0] + '.float()' class BoolOperation(operation_base.Operation): def __init__(self): metadata = operation_base.OperationMetadata(docstring=BOOL_DOCSTRING) super(BoolOperation, self).__init__( num_args=1, weight=BOOL_WEIGHT, metadata=metadata) def _non_bool_tensor_filter(arg_value): """Only keeps values that are non-bool tensors.""" return arg_value.is_tensor and not arg_value.has_bool_dtype() self.add_value_filters([_non_bool_tensor_filter]) def apply(self, arg_values, settings): """See base class.""" try: return value.OperationValue(arg_values[0].value.bool(), self, arg_values) except Exception: # pylint: disable=broad-except return None def reconstruct_expression_from_strings(self, arg_strings): """See base class.""" return arg_strings[0] + '.bool()' class ViewOperation(operation_base.Operation): def __init__(self): metadata = operation_base.OperationMetadata(docstring=VIEW_DOCSTRINGS) super(ViewOperation, self).__init__( num_args=2, weight=VIEW_WEIGHT, metadata=metadata) def _size_compatable_filter(arg_values): """The new size must be compatible with its original size.""" in1, in2 = arg_values return torch.prod(torch.tensor(in1.value.shape)) % torch.prod(torch.abs(torch.tensor(in2.value))) == 0 self.add_value_filters([filtering.TENSOR_FILTER, filtering.SHAPE_FILTER]) self.set_apply_filter(_size_compatable_filter) def apply(self, arg_values, settings): """See base class.""" try: return value.OperationValue(arg_values[0].value.view(arg_values[1].value), self, arg_values) except Exception: # pylint: disable=broad-except return None def reconstruct_expression_from_strings(self, arg_strings): """See base class.""" return arg_strings[0] + '.view(' + arg_strings[1] + ')' class ExpandOperation(operation_base.Operation): def __init__(self): metadata = operation_base.OperationMetadata(docstring=EXPAND_DOCSTRINGS) super(ExpandOperation, self).__init__( num_args=2, weight=EXPAND_WEIGHT, metadata=metadata) def _size_compatable_filter(arg_values): """The new size must be compatible with its original size.""" in1, in2 = arg_values in1_dims_len = len(in1.value.shape) in2_dims_len = len(in2.value) if in1_dims_len > in2_dims_len: return False for i in range(in1_dims_len, in2_dims_len): if (in2.value[i] == -1 ): return False return True self.add_value_filters([filtering.TENSOR_FILTER, filtering.SHAPE_FILTER]) self.set_apply_filter(_size_compatable_filter) def apply(self, arg_values, settings): """See base class.""" try: return value.OperationValue(arg_values[0].value.expand(arg_values[1].value), self, arg_values) except Exception: # pylint: disable=broad-except return None def reconstruct_expression_from_strings(self, arg_strings): """See base class.""" return arg_strings[0] + '.expand(' + arg_strings[1] + ')'
APIsynth-master
Synthesis_incorporation/value_search/tensor_member_operations.py
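# Illustrative sketch (not part of the original tensor_member_operations.py): the
# size-compatibility filters in ViewOperation and ExpandOperation approximate two
# PyTorch constraints: Tensor.view(shape) must preserve the element count, while
# Tensor.expand(shape) can only enlarge dimensions of size 1. In isolation:
import torch

t = torch.arange(6).reshape(2, 3)   # 6 elements
print(t.view(3, 2).shape)           # ok: 3 * 2 == 6
print(t.view(6).shape)              # ok: 6 == 6

s = torch.zeros(1, 3)
print(s.expand(4, 3).shape)         # ok: the singleton dim 0 is expanded to 4
# s.expand(4, 2) would raise, because dim 1 (size 3) cannot be changed to 2.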
# Copyright 2021 The TF-Coder Authors. # Copyright (c) Facebook, Inc. and its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Manages all Operation objects used by value search.""" import inspect from typing import List, Optional, Text from tf_coder import torch_functions from tf_coder.models import prediction_model from tf_coder.value_search import function_operation from tf_coder.value_search import operation_base from tf_coder.value_search import python_operations from tf_coder.value_search import tensor_member_operations def get_python_operations() -> List[operation_base.Operation]: """Returns a list of Operation objects from the python_operations module.""" operation_classes = inspect.getmembers( python_operations, lambda x: inspect.isclass(x) and not inspect.isabstract(x) ) return [operation_class() for unused_name, operation_class in operation_classes] def get_member_operations() -> List[operation_base.Operation]: """Returns a list of Operation objects from the torch_member_operations module.""" operation_classes = inspect.getmembers( tensor_member_operations, lambda x: inspect.isclass(x) and not inspect.isabstract(x), ) return [operation_class() for unused_name, operation_class in operation_classes] def get_torch_operations() -> List[operation_base.Operation]: """Returns a list of Operation objects for dense PyTorch operations.""" return [ function_operation.FunctionOperation(function_info) for function_info in torch_functions.TORCH_FUNCTIONS ] def get_sparse_operations() -> List[operation_base.Operation]: """Returns a list of Operation objects for sparse operations.""" return [function_operation.FunctionOperation(function_info) for function_info in torch_functions.SPARSE_FUNCTIONS] def get_operations( include_sparse_operations: bool = False, ) -> List[operation_base.Operation]: """Returns a list of Operation objects that value search should use.""" operations = [] operations.extend(get_torch_operations()) if include_sparse_operations: operations.extend(get_sparse_operations()) operations.extend(get_python_operations()) operations.extend(get_member_operations()) return operations def find_operation_with_name( operation_name: Text, operation_list: Optional[List[operation_base.Operation]] = None, ) -> operation_base.Operation: """Finds an operation with the given name, optionally within a given list.""" if operation_list is None: operation_list = get_operations(include_sparse_operations=True) matching_operations = [op for op in operation_list if op.name == operation_name] if len(matching_operations) == 1: return matching_operations[0] raise ValueError( "Found {} operations matching the name {}".format( len(matching_operations), operation_name ) ) def find_operation_with_partial_name( operation_name: Text, operation_list: Optional[List[operation_base.Operation]] = None, ) -> operation_base.Operation: """Finds an operation with the given name, optionally within a given list.""" if operation_list is None: operation_list = get_operations(include_sparse_operations=True) 
mapped_name = prediction_model.PREDICTION_TO_NAME_MAP[operation_name] matching_operations = [op for op in operation_list if mapped_name in op.name] return matching_operations
APIsynth-master
Synthesis_incorporation/value_search/all_operations.py
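# Illustrative sketch (not part of the original all_operations.py): functions such
# as get_python_operations above instantiate every concrete class that
# inspect.getmembers finds in a module. A self-contained toy version of that
# pattern, with the current module standing in for python_operations:
import abc
import inspect
import sys


class BaseOperation(abc.ABC):
    @abc.abstractmethod
    def apply(self, x, y):
        ...


class AddOperation(BaseOperation):
    def apply(self, x, y):
        return x + y


class MulOperation(BaseOperation):
    def apply(self, x, y):
        return x * y


_this_module = sys.modules[__name__]
operations = [
    cls()
    for _, cls in inspect.getmembers(
        _this_module, lambda x: inspect.isclass(x) and not inspect.isabstract(x)
    )
]
print(sorted(type(op).__name__ for op in operations))  # ['AddOperation', 'MulOperation']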
# Copyright 2021 The TF-Coder Authors. # Copyright (c) Facebook, Inc. and its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Defines the Operation objects for functions.""" import re import torch from tf_coder import tensor_limits as limits from tf_coder import tf_coder_utils from tf_coder import torch_functions from tf_coder.value_search import operation_base from tf_coder.value_search import operation_filtering from tf_coder.value_search import value class FunctionOperation(operation_base.Operation): """An operation that applies a function to some arguments. The arguments must be given in the same order as they appear in the function's signature. Arguments with default values in the function signature are optional at the time of FunctionOperation creation. However, once created, a FunctionOperation must be used with all of the arguments provided to its constructor. """ def __init__(self, function_info): """Creates a FunctionOperation. Args: function_info: A tf_functions.FunctionInfo. """ ( function_name, arg_names, constant_kwargs, ) = torch_functions.parse_function_info_name(function_info) self._function_obj = tf_coder_utils.get_torch_function(function_name) docstring = self._function_obj.__doc__ if not docstring: print( "Warning: could not get docstring for function {}".format(function_name) ) docstring = "" # Make sure the function and argument names appear in the docstring. (Args # should already appear in the docstring "Args" section though.) docstring += "\n" + function_info.name # If 'reduce_max' is the function name, make sure 'reduce' and 'max' also # appear as separate words. Ditto for argument names as well. docstring += "\n" + function_info.name.replace("_", " ") # Upweight the function name (moreso than the argument names). 
function_name_without_torch = re.sub(r"^torch\.", "", function_name) docstring += ("\n" + function_name_without_torch) * 4 if "_" in function_name_without_torch: docstring += ("\n" + function_name_without_torch.replace("_", " ")) * 2 metadata = operation_base.OperationMetadata(docstring=docstring) super(FunctionOperation, self).__init__( num_args=len(arg_names), weight=function_info.weight, metadata=metadata ) self.function_info = function_info self.function_name = function_name self.arg_names = arg_names self.constant_kwargs = constant_kwargs operation_filtering.add_filters_to_function_operation(self) def _compute_name(self): return self.function_info.name def _print_warnings(self, arg_values, result_value): if isinstance(result_value, torch.Tensor): num_elements = tf_coder_utils.num_tensor_elements(result_value) else: return if num_elements > 10 * limits.MAX_TENSOR_ELEMENTS: print( "Warning: {} produced much-too-large tensor of shape {} and {} " "elements.".format( self.name, result_value.shape.as_list(), num_elements ) ) for i, arg_value in enumerate(arg_values): if isinstance(arg_value.value, torch.Tensor): print( " argument {} has shape {} and {} elements".format( i, arg_value.shape, arg_value.num_elements() ) ) if arg_value.num_elements() <= 20: print(" argument {} is: {}".format(i, arg_value.value)) elif arg_value.is_primitive: print(" argument {} is: {}".format(i, arg_value.value)) else: print(" argument {} has type {}".format(i, type(arg_value.value))) print( " argument {} has reconstruction: {}".format( i, arg_value.reconstruct_expression() ) ) def apply(self, arg_values, settings): """See base class.""" value_objects = [arg_value.value for arg_value in arg_values] arg_dict = dict(zip(self.arg_names, value_objects)) arg_dict.update(self.constant_kwargs) try: result_value = self._function_obj(**arg_dict) except Exception as e: if settings.printing.verbose: expression = self.reconstruct_expression(arg_values) print("[Error] {}: {}".format(expression, e)) return None try: return value.OperationValue(result_value, self, arg_values) except ValueError: if settings.printing.tensor_size_warnings: self._print_warnings(arg_values, result_value) return None def reconstruct_expression_from_strings(self, arg_strings): """See base class.""" arg_strings = list(arg_strings) for kwarg_name, kwarg_value in self.constant_kwargs.items(): arg_strings.append("{}={!r}".format(kwarg_name, kwarg_value)) return self.function_name + "(" + ", ".join(arg_strings) + ")"
APIsynth-master
Synthesis_incorporation/value_search/function_operation.py
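# Illustrative sketch (not part of the original function_operation.py):
# FunctionOperation.apply above zips the operation's argument names with the
# argument values, merges in any constant kwargs, and calls the resolved torch
# callable. The same call pattern in isolation, with torch.sum standing in for a
# callable resolved from a FunctionInfo name:
import torch

arg_names = ["input", "dim"]
constant_kwargs = {"keepdim": True}
arg_objects = [torch.ones(2, 3), 1]

kwargs = dict(zip(arg_names, arg_objects))
kwargs.update(constant_kwargs)
result = torch.sum(**kwargs)
print(result)  # tensor([[3.], [3.]])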
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # import argparse from logging import getLogger import math import os import shutil import time import torch import torch.nn as nn from src.data.loader import load_data, get_data_transformations from src.model.model_factory import model_factory, to_cuda, sgd_optimizer, sobel2RGB from src.slurm import init_signal_handler, trigger_job_requeue from src.trainer import validate_network, accuracy from src.utils import (bool_flag, init_distributed_mode, initialize_exp, AverageMeter, restart_from_checkpoint, fix_random_seeds,) from src.model.pretrain import load_pretrained logger = getLogger() def get_parser(): """ Generate a parameters parser. """ # parse parameters parser = argparse.ArgumentParser(description="Train classification") # main parameters parser.add_argument("--dump_path", type=str, default=".", help="Experiment dump path") parser.add_argument('--epoch', type=int, default=0, help='Current epoch to run') parser.add_argument('--start_iter', type=int, default=0, help='First iter to run in the current epoch') parser.add_argument("--checkpoint_freq", type=int, default=20, help="Save the model periodically ") parser.add_argument("--evaluate", type=bool_flag, default=False, help="Evaluate the model only") parser.add_argument('--seed', type=int, default=35, help='random seed') # model params parser.add_argument('--sobel', type=bool_flag, default=0) parser.add_argument('--sobel2RGB', type=bool_flag, default=False, help='Incorporate sobel filter in first conv') parser.add_argument('--pretrained', type=str, default='', help='Use this instead of random weights.') # datasets params parser.add_argument('--data_path', type=str, default='', help='Where to find ImageNet dataset') parser.add_argument('--workers', type=int, default=8, help='Number of data loading workers') # optim params parser.add_argument('--lr', type=float, default=0.05, help='Learning rate') parser.add_argument('--wd', type=float, default=1e-5, help='Weight decay') parser.add_argument('--nepochs', type=int, default=100, help='Max number of epochs to run') parser.add_argument('--batch_size', default=128, type=int) # distributed training params parser.add_argument('--rank', default=0, type=int, help='rank') parser.add_argument("--local_rank", type=int, default=-1, help="Multi-GPU - Local rank") parser.add_argument('--world-size', default=1, type=int, help='number of distributed processes') parser.add_argument('--dist-url', default='', type=str, help='url used to set up distributed training') # debug parser.add_argument("--debug", type=bool_flag, default=False, help="Load val set of ImageNet") parser.add_argument("--debug_slurm", type=bool_flag, default=False, help="Debug within a SLURM job") return parser.parse_args() def main(args): # initialize the multi-GPU / multi-node training init_distributed_mode(args, make_communication_groups=False) # initialize the experiment logger, training_stats = initialize_exp(args, 'epoch', 'iter', 'prec', 'loss', 'prec_val', 'loss_val') # initialize SLURM signal handler for time limit / pre-emption init_signal_handler() main_data_path = args.data_path if args.debug: args.data_path = os.path.join(main_data_path, 'val') else: args.data_path = os.path.join(main_data_path, 'train') train_dataset = load_data(args) args.data_path = os.path.join(main_data_path, 'val') val_dataset = load_data(args) # prepare the 
different data transformations tr_val, tr_train = get_data_transformations() train_dataset.transform = tr_train val_dataset.transform = tr_val val_loader = torch.utils.data.DataLoader( val_dataset, batch_size=args.batch_size, num_workers=args.workers, pin_memory=True, ) # build model skeleton fix_random_seeds(args.seed) nmb_classes = 205 if 'places' in args.data_path else 1000 model = model_factory(args, relu=True, num_classes=nmb_classes) # load pretrained weights load_pretrained(model, args) # merge sobel layers with first convolution layer if args.sobel2RGB: sobel2RGB(model) # re initialize classifier if hasattr(model.body, 'classifier'): for m in model.body.classifier.modules(): if isinstance(m, nn.Linear): m.weight.data.normal_(0, 0.01) m.bias.data.fill_(0.1) # distributed training wrapper model = to_cuda(model, [args.gpu_to_work_on], apex=True) logger.info('model to cuda') # set optimizer optimizer = sgd_optimizer(model, args.lr, args.wd) ## variables to reload to fetch in checkpoint to_restore = {'epoch': 0, 'start_iter': 0} # re start from checkpoint restart_from_checkpoint( args, run_variables=to_restore, state_dict=model, optimizer=optimizer, ) args.epoch = to_restore['epoch'] args.start_iter = to_restore['start_iter'] if args.evaluate: validate_network(val_loader, [model], args) return # Supervised training for _ in range(args.epoch, args.nepochs): logger.info("============ Starting epoch %i ... ============" % args.epoch) fix_random_seeds(args.seed + args.epoch) # train the network for one epoch adjust_learning_rate(optimizer, args) scores = train_network(args, model, optimizer, train_dataset) scores_val = validate_network(val_loader, [model], args) # save training statistics logger.info(scores + scores_val) training_stats.update(scores + scores_val) def adjust_learning_rate(optimizer, args): lr = args.lr * (0.1 ** (args.epoch // 30)) for param_group in optimizer.param_groups: param_group['lr'] = lr def train_network(args, model, optimizer, dataset): """ Train the models on the dataset. 
""" # swith to train mode model.train() sampler = torch.utils.data.distributed.DistributedSampler(dataset) loader = torch.utils.data.DataLoader( dataset, sampler=sampler, batch_size=args.batch_size, num_workers=args.workers, pin_memory=True, ) # running statistics batch_time = AverageMeter() data_time = AverageMeter() # training statistics log_top1 = AverageMeter() log_loss = AverageMeter() end = time.perf_counter() cel = nn.CrossEntropyLoss().cuda() for iter_epoch, (inp, target) in enumerate(loader): # measure data loading time data_time.update(time.perf_counter() - end) # start at iter start_iter if iter_epoch < args.start_iter: continue # move to gpu inp = inp.cuda(non_blocking=True) target = target.cuda(non_blocking=True) # forward output = model(inp) # compute cross entropy loss loss = cel(output, target) optimizer.zero_grad() # compute the gradients loss.backward() # step optimizer.step() # log # signal received, relaunch experiment if os.environ['SIGNAL_RECEIVED'] == 'True': if not args.rank: torch.save({ 'epoch': args.epoch, 'start_iter': iter_epoch + 1, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict(), }, os.path.join(args.dump_path, 'checkpoint.pth.tar')) trigger_job_requeue(os.path.join(args.dump_path, 'checkpoint.pth.tar')) # update stats log_loss.update(loss.item(), output.size(0)) prec1 = accuracy(args, output, target) log_top1.update(prec1.item(), output.size(0)) batch_time.update(time.perf_counter() - end) end = time.perf_counter() # verbose if iter_epoch % 100 == 0: logger.info('Epoch[{0}] - Iter: [{1}/{2}]\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' 'Data {data_time.val:.3f} ({data_time.avg:.3f})\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\t' 'Prec {log_top1.val:.3f} ({log_top1.avg:.3f})\t' .format(args.epoch, iter_epoch, len(loader), batch_time=batch_time, data_time=data_time, loss=log_loss, log_top1=log_top1)) # end of epoch args.start_iter = 0 args.epoch += 1 # dump checkpoint if not args.rank: torch.save({ 'epoch': args.epoch, 'start_iter': 0, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict(), }, os.path.join(args.dump_path, 'checkpoint.pth.tar')) if not (args.epoch - 1) % args.checkpoint_freq: shutil.copyfile( os.path.join(args.dump_path, 'checkpoint.pth.tar'), os.path.join(args.dump_checkpoints, 'checkpoint' + str(args.epoch - 1) + '.pth.tar'), ) return (args.epoch - 1, args.epoch * len(loader), log_top1.avg, log_loss.avg) if __name__ == '__main__': # generate parser / parse parameters args = get_parser() # run experiment main(args)
DeeperCluster-main
eval_pretrain.py
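# Illustrative sketch (not part of the original eval_pretrain.py): the
# adjust_learning_rate helper above decays the learning rate by 10x every 30
# epochs by rewriting the optimizer's param_groups. The same schedule shown
# standalone on a throwaway optimizer:
import torch

base_lr = 0.05
params = [torch.nn.Parameter(torch.zeros(1))]
optimizer = torch.optim.SGD(params, lr=base_lr)

for epoch in (0, 29, 30, 60, 90):
    lr = base_lr * (0.1 ** (epoch // 30))
    for param_group in optimizer.param_groups:
        param_group["lr"] = lr
    print(epoch, optimizer.param_groups[0]["lr"])
# 0 0.05, 29 0.05, 30 0.005, 60 0.0005, 90 5e-05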
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # import argparse import time import numpy as np import torch import torch.nn as nn import torch.optim import torch.utils.data import torchvision.transforms as transforms from sklearn import metrics from src.utils import AverageMeter, bool_flag, fix_random_seeds from src.trainer import accuracy from src.data.VOC2007 import VOC2007_dataset from src.model.model_factory import model_factory, sgd_optimizer from src.model.pretrain import load_pretrained parser = argparse.ArgumentParser() # model params parser.add_argument('--pretrained', type=str, required=False, default='', help='evaluate this model') # data params parser.add_argument('--data_path', type=str, default='', help='Where to find pascal 2007 dataset') parser.add_argument('--split', type=str, required=False, default='train', choices=['train', 'trainval'], help='training split') parser.add_argument('--sobel', type=bool_flag, default=False, help='If true, sobel applies') # transfer params parser.add_argument('--fc6_8', type=bool_flag, default=True, help='If true, train only the final classifier') parser.add_argument('--eval_random_crops', type=bool_flag, default=True, help='If true, eval on 10 random crops, otherwise eval on 10 fixed crops') # optim params parser.add_argument('--nit', type=int, default=150000, help='Number of training iterations') parser.add_argument('--stepsize', type=int, default=10000, help='Decay step') parser.add_argument('--lr', type=float, required=False, default=0.003, help='learning rate') parser.add_argument('--wd', type=float, required=False, default=1e-6, help='weight decay') parser.add_argument('--seed', type=int, default=1993, help='random seed') def main(): args = parser.parse_args() args.world_size = 1 print(args) fix_random_seeds(args.seed) # create model model = model_factory(args, relu=True, num_classes=20) # load pretrained weights load_pretrained(model, args) model = model.cuda() print('model to cuda') # on which split to train if args.split == 'train': args.test = 'val' elif args.split == 'trainval': args.test = 'test' # data loader normalize = [transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])] dataset = VOC2007_dataset(args.data_path, split=args.split, transform=transforms.Compose([ transforms.RandomHorizontalFlip(), transforms.RandomResizedCrop(224), transforms.ToTensor(),] + normalize )) loader = torch.utils.data.DataLoader(dataset, batch_size=16, shuffle=False, num_workers=4, pin_memory=True) print('PASCAL VOC 2007 ' + args.split + ' dataset loaded') # re initialize classifier if hasattr(model.body, 'classifier'): for m in model.body.classifier.modules(): if isinstance(m, nn.Linear): m.weight.data.normal_(0, 0.01) m.bias.data.fill_(0.1) for m in model.pred_layer.modules(): if isinstance(m, nn.Linear): m.weight.data.normal_(0, 0.01) m.bias.data.fill_(0.1) # freeze conv layers if args.fc6_8: if hasattr(model.body, 'features'): for param in model.body.features.parameters(): param.requires_grad = False # set optimizer optimizer = torch.optim.SGD( filter(lambda x: x.requires_grad, model.parameters()), lr=args.lr, momentum=0.9, weight_decay=args.wd, ) criterion = nn.BCEWithLogitsLoss(reduction='none') print('Start training') it = 0 losses = AverageMeter() while it < args.nit: it = train( loader, model, optimizer, criterion, args.fc6_8, losses, current_iteration=it, 
total_iterations=args.nit, stepsize=args.stepsize, ) print('Model Evaluation') if args.eval_random_crops: transform_eval = [ transforms.RandomHorizontalFlip(), transforms.RandomResizedCrop(224), transforms.ToTensor(),] + normalize else: transform_eval = [ transforms.Resize(256), transforms.TenCrop(224), transforms.Lambda(lambda crops: torch.stack([transforms.Compose(normalize)(transforms.ToTensor()(crop)) for crop in crops])) ] print('Train set') train_dataset = VOC2007_dataset( args.data_path, split=args.split, transform=transforms.Compose(transform_eval), ) train_loader = torch.utils.data.DataLoader( train_dataset, batch_size=1, shuffle=False, num_workers=4, pin_memory=True, ) evaluate(train_loader, model, args.eval_random_crops) print('Test set') test_dataset = VOC2007_dataset(args.data_path, split=args.test, transform=transforms.Compose(transform_eval)) test_loader = torch.utils.data.DataLoader( test_dataset, batch_size=1, shuffle=False, num_workers=4, pin_memory=True, ) evaluate(test_loader, model, args.eval_random_crops) def evaluate(loader, model, eval_random_crops): model.eval() gts = [] scr = [] for crop in range(9 * eval_random_crops + 1): for i, (input, target) in enumerate(loader): # move input to gpu and optionally reshape it if len(input.size()) == 5: bs, ncrops, c, h, w = input.size() input = input.view(-1, c, h, w) input = input.cuda(non_blocking=True) # forward pass without grad computation with torch.no_grad(): output = model(input) if crop < 1 : scr.append(torch.sum(output, 0, keepdim=True).cpu().numpy()) gts.append(target) else: scr[i] += output.cpu().numpy() gts = np.concatenate(gts, axis=0).T scr = np.concatenate(scr, axis=0).T aps = [] for i in range(20): # Subtract eps from score to make AP work for tied scores ap = metrics.average_precision_score(gts[i][gts[i]<=1], scr[i][gts[i]<=1]-1e-5*gts[i][gts[i]<=1]) aps.append( ap ) print(np.mean(aps), ' ', ' '.join(['%0.2f'%a for a in aps])) def train(loader, model, optimizer, criterion, fc6_8, losses, current_iteration=0, total_iterations=None, stepsize=None, verbose=True): # to log batch_time = AverageMeter() data_time = AverageMeter() top1 = AverageMeter() end = time.time() # use dropout for the MLP if hasattr(model.body, 'classifier'): model.train() # in the batch norms always use global statistics model.body.features.eval() else: model.eval() for i, (input, target) in enumerate(loader): # measure data loading time data_time.update(time.time() - end) # adjust learning rate if current_iteration != 0 and current_iteration % stepsize == 0: for param_group in optimizer.param_groups: param_group['lr'] = param_group['lr'] * 0.5 print('iter {0} learning rate is {1}'.format(current_iteration, param_group['lr'])) # move input to gpu input = input.cuda(non_blocking=True) # forward pass with or without grad computation output = model(input) target = target.float().cuda() mask = (target == 255) loss = torch.sum(criterion(output, target).masked_fill_(mask, 0)) / target.size(0) # backward optimizer.zero_grad() loss.backward() # clip gradients torch.nn.utils.clip_grad_norm_(model.parameters(), 10) # and weights update optimizer.step() # measure accuracy and record loss losses.update(loss.item(), input.size(0)) # measure elapsed time batch_time.update(time.time() - end) end = time.time() if verbose is True and current_iteration % 25 == 0: print('Iteration[{0}]\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' 'Data {data_time.val:.3f} ({data_time.avg:.3f})\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format( current_iteration, 
batch_time=batch_time, data_time=data_time, loss=losses)) current_iteration = current_iteration + 1 if total_iterations is not None and current_iteration == total_iterations: break return current_iteration if __name__ == '__main__': main()
DeeperCluster-main
eval_voc_classif.py
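# Illustrative sketch (not part of the original eval_voc_classif.py): the PASCAL VOC
# training loop above computes a per-element BCEWithLogitsLoss and zeroes out the
# "difficult" labels marked as 255 before averaging over the batch. The same
# masking trick in isolation:
import torch
import torch.nn as nn

criterion = nn.BCEWithLogitsLoss(reduction="none")

logits = torch.tensor([[2.0, -1.0, 0.5]])
target = torch.tensor([[1.0, 0.0, 255.0]])   # 255 marks a label to ignore

mask = target == 255
per_element = criterion(logits, target.float())
loss = torch.sum(per_element.masked_fill_(mask, 0)) / target.size(0)
print(loss)  # only the two valid labels contribute to the loss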
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # import argparse import os import apex import numpy as np import torch import torch.distributed as dist import torch.nn as nn from src.clustering import get_cluster_assignments, load_cluster_assignments from src.data.loader import get_data_transformations from src.data.YFCC100M import YFCC100M_dataset from src.model.model_factory import (build_prediction_layer, model_factory, sgd_optimizer, to_cuda) from src.model.pretrain import load_pretrained from src.slurm import init_signal_handler from src.trainer import train_network from src.utils import (bool_flag, check_parameters, end_of_epoch, fix_random_seeds, init_distributed_mode, initialize_exp, restart_from_checkpoint) def get_parser(): """ Generate a parameters parser. """ # parse parameters parser = argparse.ArgumentParser(description="Unsupervised feature learning.") # handling experiment parameters parser.add_argument("--checkpoint_freq", type=int, default=1, help="Save the model every this epoch.") parser.add_argument("--dump_path", type=str, default="./exp", help="Experiment dump path.") parser.add_argument('--epoch', type=int, default=0, help='Current epoch to run.') parser.add_argument('--start_iter', type=int, default=0, help='First iter to run in the current epoch.') # network params parser.add_argument('--pretrained', type=str, default='', help='Start from this instead of random weights.') # datasets params parser.add_argument('--data_path', type=str, default='', help='Where to find training dataset.') parser.add_argument('--size_dataset', type=int, default=10000000, help='How many images to use.') parser.add_argument('--workers', type=int, default=8, help='Number of data loading workers.') parser.add_argument('--sobel', type=bool_flag, default=0, help='Apply Sobel filter.') # optim params parser.add_argument('--lr', type=float, default=0.1, help='Learning rate.') parser.add_argument('--wd', type=float, default=1e-5, help='Weight decay.') parser.add_argument('--nepochs', type=int, default=100, help='Max number of epochs to run.') parser.add_argument('--batch_size', default=48, type=int, help='Batch-size per process.') # Model params parser.add_argument('--reassignment', type=int, default=3, help='Reassign clusters every this epoch(s).') parser.add_argument('--dim_pca', type=int, default=4096, help='Dimension of the pca applied to the descriptors.') parser.add_argument('--k', type=int, default=10000, help='Total number of clusters.') parser.add_argument('--super_classes', type=int, default=4, help='Total number of super-classes.') parser.add_argument('--rotnet', type=bool_flag, default=True, help='Network needs to classify large rotations.') # k-means params parser.add_argument('--warm_restart', type=bool_flag, default=False, help='Use previous centroids as init.') parser.add_argument('--use_faiss', type=bool_flag, default=True, help='Use faiss for E steps in k-means.') parser.add_argument('--niter', type=int, default=10, help='Number of k-means iterations.') # distributed training params parser.add_argument('--rank', default=0, type=int, help='Global process rank.') parser.add_argument("--local_rank", type=int, default=-1, help="Multi-GPU - Local rank") parser.add_argument('--world-size', default=1, type=int, help='Number of distributed processes.') parser.add_argument('--dist-url', default='', type=str, help='Url used to set up 
distributed training.') # debug parser.add_argument("--debug_slurm", type=bool_flag, default=False, help="Debug within a SLURM job.") return parser.parse_args() def main(args): """ This code implements the paper: https://arxiv.org/abs/1905.01278 The method consists in alternating between a hierachical clustering of the features and learning the parameters of a convnet by predicting both the angle of the rotation applied to the input data and the cluster assignments in a single hierachical loss. """ # initialize communication groups training_groups, clustering_groups = init_distributed_mode(args) # check parameters check_parameters(args) # initialize the experiment logger, training_stats = initialize_exp(args, 'epoch', 'iter', 'prec', 'loss', 'prec_super_class', 'loss_super_class', 'prec_sub_class', 'loss_sub_class') # initialize SLURM signal handler for time limit / pre-emption init_signal_handler() # load data dataset = YFCC100M_dataset(args.data_path, size=args.size_dataset) # prepare the different data transformations tr_cluster, tr_train = get_data_transformations(args.rotation * 90) # build model skeleton fix_random_seeds() model = model_factory(args.sobel) logger.info('model created') # load pretrained weights load_pretrained(model, args) # convert batch-norm layers to nvidia wrapper to enable batch stats reduction model = apex.parallel.convert_syncbn_model(model) # distributed training wrapper model = to_cuda(model, args.gpu_to_work_on, apex=True) logger.info('model to cuda') # set optimizer optimizer = sgd_optimizer(model, args.lr, args.wd) # load cluster assignments cluster_assignments = load_cluster_assignments(args, dataset) # build prediction layer on the super_class pred_layer, optimizer_pred_layer = build_prediction_layer( model.module.body.dim_output_space, args, ) nmb_sub_classes = args.k // args.nmb_super_clusters sub_class_pred_layer, optimizer_sub_class_pred_layer = build_prediction_layer( model.module.body.dim_output_space, args, num_classes=nmb_sub_classes, group=training_groups[args.training_local_world_id], ) # variables to fetch in checkpoint to_restore = {'epoch': 0, 'start_iter': 0} # re start from checkpoint restart_from_checkpoint( args, run_variables=to_restore, state_dict=model, optimizer=optimizer, pred_layer_state_dict=pred_layer, optimizer_pred_layer=optimizer_pred_layer, ) pred_layer_name = str(args.training_local_world_id) + '-pred_layer.pth.tar' restart_from_checkpoint( args, ckp_path=os.path.join(args.dump_path, pred_layer_name), state_dict=sub_class_pred_layer, optimizer=optimizer_sub_class_pred_layer, ) args.epoch = to_restore['epoch'] args.start_iter = to_restore['start_iter'] for _ in range(args.epoch, args.nepochs): logger.info("============ Starting epoch %i ... 
============" % args.epoch) fix_random_seeds(args.epoch) # step 1: Get the final activations for the whole dataset / Cluster them if cluster_assignments is None and not args.epoch % args.reassignment: logger.info("=> Start clustering step") dataset.transform = tr_cluster cluster_assignments = get_cluster_assignments(args, model, dataset, clustering_groups) # reset prediction layers if args.nmb_super_clusters > 1: pred_layer, optimizer_pred_layer = build_prediction_layer( model.module.body.dim_output_space, args, ) sub_class_pred_layer, optimizer_sub_class_pred_layer = build_prediction_layer( model.module.body.dim_output_space, args, num_classes=nmb_sub_classes, group=training_groups[args.training_local_world_id], ) # step 2: Train the network with the cluster assignments as labels # prepare dataset dataset.transform = tr_train dataset.sub_classes = cluster_assignments # concatenate models and their corresponding optimizers models = [model, pred_layer, sub_class_pred_layer] optimizers = [optimizer, optimizer_pred_layer, optimizer_sub_class_pred_layer] # train the network for one epoch scores = train_network(args, models, optimizers, dataset) ## save training statistics logger.info(scores) training_stats.update(scores) # reassign clusters at the next epoch if not args.epoch % args.reassignment: cluster_assignments = None dataset.subset_indexes = None end_of_epoch(args) dist.barrier() if __name__ == '__main__': # generate parser / parse parameters args = get_parser() # run experiment main(args)
DeeperCluster-main
main.py
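# Illustrative sketch (not part of the original main.py): the script above trains
# one prediction layer over super-classes and one over the k // nmb_super_clusters
# sub-classes within each super-class. Assuming a simple contiguous split of flat
# cluster ids (the real assignments come from the hierarchical k-means in
# src.clustering), the two targets relate to a flat id like this:
k = 10000
nmb_super_clusters = 4
nmb_sub_classes = k // nmb_super_clusters  # 2500 sub-classes per super-class


def split_cluster_id(cluster_id):
    """Maps a flat cluster id to a (super-class, sub-class) pair."""
    return cluster_id // nmb_sub_classes, cluster_id % nmb_sub_classes


print(split_cluster_id(0))      # (0, 0)
print(split_cluster_id(2500))   # (1, 0)
print(split_cluster_id(9999))   # (3, 2499)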
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # import argparse from logging import getLogger import os import time import numpy as np from sklearn import metrics import torch import torch.nn as nn import torch.utils.data from src.data.loader import load_data, get_data_transformations, KFold, per_target from src.model.model_factory import model_factory, to_cuda, sgd_optimizer from src.model.pretrain import load_pretrained from src.slurm import init_signal_handler, trigger_job_requeue from src.trainer import validate_network, accuracy from src.data.VOC2007 import VOC2007_dataset from src.utils import (bool_flag, init_distributed_mode, initialize_exp, AverageMeter, restart_from_checkpoint, fix_random_seeds,) logger = getLogger() def get_parser(): """ Generate a parameters parser. """ # parse parameters parser = argparse.ArgumentParser(description="Train a linear classifier on conv layer") # main parameters parser.add_argument("--dump_path", type=str, default=".", help="Experiment dump path") parser.add_argument('--epoch', type=int, default=0, help='Current epoch to run') parser.add_argument('--start_iter', type=int, default=0, help='First iter to run in the current epoch') # model params parser.add_argument('--pretrained', type=str, default='', help='Use this instead of random weights.') parser.add_argument('--conv', type=int, default=1, choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], help='On top of which layer train classifier.') # datasets params parser.add_argument('--data_path', type=str, default='', help='Where to find supervised dataset') parser.add_argument('--workers', type=int, default=8, help='Number of data loading workers') parser.add_argument('--sobel', type=bool_flag, default=False) # optim params parser.add_argument('--lr', type=float, default=0.05, help='Learning rate') parser.add_argument('--wd', type=float, default=1e-5, help='Weight decay') parser.add_argument('--nepochs', type=int, default=100, help='Max number of epochs to run') parser.add_argument('--batch_size', default=64, type=int) # model selection parser.add_argument('--split', type=str, required=False, default='train', choices=['train', 'trainval'], help='for PASCAL dataset, train on train or train+val') parser.add_argument('--kfold', type=int, default=None, help="""dataset randomly partitioned into kfold equal sized subsamples. 
Default None: no cross validation: train on full train set""") parser.add_argument('--cross_valid', type=int, default=None, help='between 0 and kfold - 1: index of the round of cross validation') # distributed training params parser.add_argument('--rank', default=0, type=int, help='rank') parser.add_argument("--local_rank", type=int, default=-1, help="Multi-GPU - Local rank") parser.add_argument('--world-size', default=1, type=int, help='number of distributed processes') parser.add_argument('--dist-url', default='', type=str, help='url used to set up distributed training') # debug parser.add_argument("--debug_slurm", type=bool_flag, default=False, help="Debug within a SLURM job") return parser.parse_args() def main(args): # initialize the multi-GPU / multi-node training init_distributed_mode(args, make_communication_groups=False) # initialize the experiment logger, training_stats = initialize_exp(args, 'epoch', 'iter', 'prec', 'loss', 'prec_val', 'loss_val') # initialize SLURM signal handler for time limit / pre-emption init_signal_handler() if not 'pascal' in args.data_path: main_data_path = args.data_path args.data_path = os.path.join(main_data_path, 'train') train_dataset = load_data(args) else: train_dataset = VOC2007_dataset(args.data_path, split=args.split) args.test = 'val' if args.split == 'train' else 'test' if not 'pascal' in args.data_path: if args.cross_valid is None: args.data_path = os.path.join(main_data_path, 'val') val_dataset = load_data(args) else: val_dataset = VOC2007_dataset(args.data_path, split=args.test) if args.cross_valid is not None: kfold = KFold(per_target(train_dataset.imgs), args.cross_valid, args.kfold) train_loader = torch.utils.data.DataLoader( train_dataset, batch_size=args.batch_size, sampler=kfold.train, num_workers=args.workers, pin_memory=True) val_loader = torch.utils.data.DataLoader( val_dataset, batch_size=args.batch_size, sampler=kfold.val, num_workers=args.workers) else: train_loader = torch.utils.data.DataLoader( train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True) val_loader = torch.utils.data.DataLoader( val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers) # prepare the different data transformations tr_val, tr_train = get_data_transformations() train_dataset.transform = tr_train val_dataset.transform = tr_val # build model skeleton fix_random_seeds() model = model_factory(args) load_pretrained(model, args) # keep only conv layers model.body.classifier = None model.conv = args.conv if 'places' in args.data_path: nmb_classes = 205 elif 'pascal' in args.data_path: nmb_classes = 20 else: nmb_classes = 1000 reglog = RegLog(nmb_classes, args.conv) # distributed training wrapper model = to_cuda(model, [args.gpu_to_work_on], apex=True) reglog = to_cuda(reglog, [args.gpu_to_work_on], apex=True) logger.info('model to cuda') # set optimizer optimizer = sgd_optimizer(reglog, args.lr, args.wd) ## variables to reload to fetch in checkpoint to_restore = {'epoch': 0, 'start_iter': 0} # re start from checkpoint restart_from_checkpoint( args, run_variables=to_restore, state_dict=reglog, optimizer=optimizer, ) args.epoch = to_restore['epoch'] args.start_iter = to_restore['start_iter'] model.eval() reglog.train() # Linear training for _ in range(args.epoch, args.nepochs): logger.info("============ Starting epoch %i ... 
============" % args.epoch) # train the network for one epoch scores = train_network(args, model, reglog, optimizer, train_loader) if not 'pascal' in args.data_path: scores_val = validate_network(val_loader, [model, reglog], args) else: scores_val = evaluate_pascal(val_dataset, [model, reglog]) scores = scores + scores_val # save training statistics logger.info(scores) training_stats.update(scores) def evaluate_pascal(val_dataset, models): val_loader = torch.utils.data.DataLoader( val_dataset, sampler=torch.utils.data.distributed.DistributedSampler(val_dataset), batch_size=1, num_workers=args.workers, pin_memory=True, ) for model in models: model.eval() gts = [] scr = [] for i, (input, target) in enumerate(val_loader): # move input to gpu and optionally reshape it input = input.cuda(non_blocking=True) # forward pass without grad computation with torch.no_grad(): output = models[0](input) output = models[1](output) scr.append(torch.sum(output, 0, keepdim=True).cpu().numpy()) gts.append(target) scr[i] += output.cpu().numpy() gts = np.concatenate(gts, axis=0).T scr = np.concatenate(scr, axis=0).T aps = [] for i in range(20): # Subtract eps from score to make AP work for tied scores ap = metrics.average_precision_score(gts[i][gts[i]<=1], scr[i][gts[i]<=1]-1e-5*gts[i][gts[i]<=1]) aps.append(ap) print(np.mean(aps), ' ', ' '.join(['%0.2f'%a for a in aps])) return np.mean(aps), 0 class RegLog(nn.Module): """Creates logistic regression on top of frozen features""" def __init__(self, num_labels, conv): super(RegLog, self).__init__() if conv < 3: av = 18 s = 9216 elif conv < 5: av = 14 s = 8192 elif conv < 8: av = 9 s = 9216 elif conv < 11: av = 6 s = 8192 elif conv < 14: av = 3 s = 8192 self.av_pool = nn.AvgPool2d(av, stride=av, padding=0) self.linear = nn.Linear(s, num_labels) def forward(self, x): x = self.av_pool(x) x = x.view(x.size(0), -1) return self.linear(x) def train_network(args, model, reglog, optimizer, loader): """ Train the models on the dataset. 
""" # running statistics batch_time = AverageMeter() data_time = AverageMeter() # training statistics log_top1 = AverageMeter() log_loss = AverageMeter() end = time.perf_counter() if 'pascal' in args.data_path: criterion = nn.BCEWithLogitsLoss(reduction='none') else: criterion = nn.CrossEntropyLoss().cuda() for iter_epoch, (inp, target) in enumerate(loader): # measure data loading time data_time.update(time.perf_counter() - end) learning_rate_decay(optimizer, len(loader) * args.epoch + iter_epoch, args.lr) # start at iter start_iter if iter_epoch < args.start_iter: continue # move to gpu inp = inp.cuda(non_blocking=True) target = target.cuda(non_blocking=True) if 'pascal' in args.data_path: target = target.float() # forward with torch.no_grad(): output = model(inp) output = reglog(output) # compute cross entropy loss loss = criterion(output, target) if 'pascal' in args.data_path: mask = (target == 255) loss = torch.sum(loss.masked_fill_(mask, 0)) / target.size(0) optimizer.zero_grad() # compute the gradients loss.backward() # step optimizer.step() # log # signal received, relaunch experiment if os.environ['SIGNAL_RECEIVED'] == 'True': if not args.rank: torch.save({ 'epoch': args.epoch, 'start_iter': iter_epoch + 1, 'state_dict': reglog.state_dict(), 'optimizer': optimizer.state_dict(), }, os.path.join(args.dump_path, 'checkpoint.pth.tar')) trigger_job_requeue(os.path.join(args.dump_path, 'checkpoint.pth.tar')) # update stats log_loss.update(loss.item(), output.size(0)) if not 'pascal' in args.data_path: prec1 = accuracy(args, output, target) log_top1.update(prec1.item(), output.size(0)) batch_time.update(time.perf_counter() - end) end = time.perf_counter() # verbose if iter_epoch % 100 == 0: logger.info('Epoch[{0}] - Iter: [{1}/{2}]\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' 'Data {data_time.val:.3f} ({data_time.avg:.3f})\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\t' 'Prec {log_top1.val:.3f} ({log_top1.avg:.3f})\t' .format(args.epoch, iter_epoch, len(loader), batch_time=batch_time, data_time=data_time, loss=log_loss, log_top1=log_top1)) # end of epoch args.start_iter = 0 args.epoch += 1 # dump checkpoint if not args.rank: torch.save({ 'epoch': args.epoch, 'start_iter': 0, 'state_dict': reglog.state_dict(), 'optimizer': optimizer.state_dict(), }, os.path.join(args.dump_path, 'checkpoint.pth.tar')) return (args.epoch - 1, args.epoch * len(loader), log_top1.avg, log_loss.avg) def learning_rate_decay(optimizer, t, lr_0): for param_group in optimizer.param_groups: lr = lr_0 / np.sqrt(1 + lr_0 * param_group['weight_decay'] * t) param_group['lr'] = lr if __name__ == '__main__': # generate parser / parse parameters args = get_parser() # run experiment main(args)
DeeperCluster-main
eval_linear.py
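# Editorial sketch, not part of the DeeperCluster sources: learning_rate_decay() in
# eval_linear.py above rescales the learning rate as lr_t = lr_0 / sqrt(1 + lr_0 * wd * t),
# with t the global iteration counter. The lr_0 and wd values below match the script's
# defaults (0.05 and 1e-5); the iteration counts are arbitrary.
import numpy as np

def decayed_lr(lr_0, wd, t):
    return lr_0 / np.sqrt(1 + lr_0 * wd * t)

for t in (0, 1000, 10000, 100000, 1000000):
    print('iteration %7d -> lr %.6f' % (t, decayed_lr(0.05, 1e-5, t)))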
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # from logging import getLogger import os import pickle import faiss import torch import torch.distributed as dist from torch.utils.data.sampler import Sampler import numpy as np from .utils import PCA, AverageMeter, normalize, get_indices_sparse from .distributed_kmeans import distributed_kmeans, initialize_cache logger = getLogger() def get_cluster_assignments(args, model, dataset, groups): """ """ # pseudo-labels are confusing dataset.sub_classes = None # swith to eval mode model.eval() # this process deals only with a subset of the dataset local_nmb_data = len(dataset) // args.world_size indices = torch.arange(args.rank * local_nmb_data, (args.rank + 1) * local_nmb_data).int() if os.path.isfile(os.path.join(args.dump_path, 'super_class_assignments.pkl')): # super-class assignments have already been computed in a previous run super_class_assignements = pickle.load(open(os.path.join(args.dump_path, 'super_class_assignments.pkl'), 'rb')) logger.info('loaded super-class assignments') # dump cache where_helper = get_indices_sparse(super_class_assignements[indices]) nmb_data_per_super_cluster = torch.zeros(args.nmb_super_clusters).cuda() for super_class in range(len(where_helper)): nmb_data_per_super_cluster[super_class] = len(where_helper[super_class][0]) else: sampler = Subset_Sampler(indices) # we need a data loader loader = torch.utils.data.DataLoader( dataset, batch_size=args.batch_size, sampler=sampler, num_workers=args.workers, pin_memory=True, ) # initialize cache, pca and centroids cache, centroids = initialize_cache(args, loader, model) # empty cuda cache (useful because we're about to use faiss on gpu) torch.cuda.empty_cache() ## perform clustering into super_clusters super_class_assignements, centroids_sc = distributed_kmeans( args, args.size_dataset, args.nmb_super_clusters, cache, args.rank, args.world_size, centroids, ) # dump activations in the cache where_helper = get_indices_sparse(super_class_assignements[indices]) nmb_data_per_super_cluster = torch.zeros(args.nmb_super_clusters).cuda() for super_class in range(len(where_helper)): ind_sc = where_helper[super_class][0] np.save(open(os.path.join( args.dump_path, 'cache/', 'super_class' + str(super_class) + '-' + str(args.rank), ), 'wb'), cache[ind_sc]) nmb_data_per_super_cluster[super_class] = len(ind_sc) dist.barrier() # dump super_class assignment and centroids of super_class if not args.rank: pickle.dump( super_class_assignements, open(os.path.join(args.dump_path, 'super_class_assignments.pkl'), 'wb'), ) pickle.dump( centroids_sc, open(os.path.join(args.dump_path, 'super_class_centroids.pkl'), 'wb'), ) # size of the different super clusters all_counts = [torch.zeros(args.nmb_super_clusters).cuda() for _ in range(args.world_size)] dist.all_gather(all_counts, nmb_data_per_super_cluster) all_counts = torch.cat(all_counts).cpu().long() all_counts = all_counts.reshape(args.world_size, args.nmb_super_clusters) logger.info(all_counts.sum(dim=0)) # what are the data belonging to this super class dataset.subset_indexes = np.where(super_class_assignements == args.clustering_local_world_id)[0] div = args.batch_size * args.clustering_local_world_size dataset.subset_indexes = dataset.subset_indexes[:len(dataset) // div * div] dist.barrier() # which files this process is going to read local_nmb_data = int(len(dataset) / 
args.clustering_local_world_size) low = np.long(args.clustering_local_rank * local_nmb_data) high = np.long(low + local_nmb_data) curr_ind = 0 cache = torch.zeros(local_nmb_data, args.dim_pca, dtype=torch.float32) cumsum = torch.cumsum(all_counts[:, args.clustering_local_world_id].long(), 0).long() for r in range(args.world_size): # data in this bucket r: [cumsum[r - 1] : cumsum[r] - 1] low_bucket = np.long(cumsum[r - 1]) if r else 0 # this bucket is empty if low_bucket > cumsum[r] - 1: continue if cumsum[r] - 1 < low: continue if low_bucket >= high: break # which are the data we are interested in inside this bucket ? ind_low = np.long(max(low, low_bucket)) ind_high = np.long(min(high, cumsum[r])) cache_r = np.load(open(os.path.join(args.dump_path, 'cache/', 'super_class' + str(args.clustering_local_world_id) + '-' + str(r)), 'rb')) cache[curr_ind: curr_ind + ind_high - ind_low] = torch.FloatTensor(cache_r[ind_low - low_bucket: ind_high - low_bucket]) curr_ind += (ind_high - ind_low) # randomly pick some centroids and dump them centroids_path = os.path.join(args.dump_path, 'centroids' + str(args.clustering_local_world_id) + '.pkl') if not args.clustering_local_rank: centroids = cache[np.random.choice( np.arange(cache.shape[0]), replace=cache.shape[0] < args.k // args.nmb_super_clusters, size=args.k // args.nmb_super_clusters, )] pickle.dump(centroids, open(centroids_path, 'wb'), -1) dist.barrier() # read centroids centroids = pickle.load(open(centroids_path, 'rb')).cuda() # distributed kmeans into sub-classes cluster_assignments, centroids = distributed_kmeans( args, len(dataset), args.k // args.nmb_super_clusters, cache, args.clustering_local_rank, args.clustering_local_world_size, centroids, world_id=args.clustering_local_world_id, group=groups[args.clustering_local_world_id], ) # free RAM del cache # write cluster assignments and centroids if not args.clustering_local_rank: pickle.dump( cluster_assignments, open(os.path.join(args.dump_path, 'cluster_assignments' + str(args.clustering_local_world_id) + '.pkl'), 'wb'), ) pickle.dump( centroids, open(centroids_path, 'wb'), ) dist.barrier() return cluster_assignments class Subset_Sampler(Sampler): """ Sample indices. """ def __init__(self, indices): self.indices = indices def __iter__(self): return iter(self.indices) def __len__(self): return len(self.indices) def load_cluster_assignments(args, dataset): """ Load cluster assignments if they are present in experiment repository. """ super_file = os.path.join(args.dump_path, 'super_class_assignments.pkl') sub_file = os.path.join( args.dump_path, 'sub_class_assignments' + str(args.clustering_local_world_id) + '.pkl', ) if os.path.isfile(super_file) and os.path.isfile(sub_file): super_class_assignments = pickle.load(open(super_file, 'rb')) dataset.subset_indexes = np.where(super_class_assignments == args.clustering_local_world_id)[0] div = args.batch_size * args.clustering_local_world_size clustering_size_dataset = len(dataset) // div * div dataset.subset_indexes = dataset.subset_indexes[:clustering_size_dataset] logger.info('Found cluster assignments in experiment repository') return pickle.load(open(sub_file, "rb")) return None
DeeperCluster-main
src/clustering.py
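# Editorial sketch, not part of the DeeperCluster sources: Subset_Sampler in
# src/clustering.py above just replays a fixed index list, which is how each process in
# get_cluster_assignments() restricts its DataLoader to one contiguous shard of the
# dataset. The toy dataset, rank and world_size below are hypothetical.
import torch
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data.sampler import Sampler

class Subset_Sampler(Sampler):
    def __init__(self, indices):
        self.indices = indices
    def __iter__(self):
        return iter(self.indices)
    def __len__(self):
        return len(self.indices)

dataset = TensorDataset(torch.arange(100).float().unsqueeze(1))
rank, world_size, batch_size = 1, 4, 5
local_nmb_data = len(dataset) // world_size
indices = torch.arange(rank * local_nmb_data, (rank + 1) * local_nmb_data).int()

loader = DataLoader(dataset, batch_size=batch_size, sampler=Subset_Sampler(indices))
for (batch,) in loader:
    print(batch.squeeze(1).tolist())  # this "process" only visits items 25..49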
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # from logging import getLogger import os import signal import time logger = getLogger() def trigger_job_requeue(checkpoint_filename): ''' Submit a new job to resume from checkpoint. Be careful to use only for main process. ''' if int(os.environ['SLURM_PROCID']) == 0 and \ str(os.getpid()) == os.environ['MAIN_PID'] and os.path.isfile(checkpoint_filename): print('time is up, back to slurm queue', flush=True) command = 'scontrol requeue ' + os.environ['SLURM_JOB_ID'] print(command) if os.system(command): raise RuntimeError('requeue failed') print('New job submitted to the queue', flush=True) exit(0) def SIGTERMHandler(a, b): print('received sigterm') pass def signalHandler(a, b): print('Signal received', a, time.time(), flush=True) os.environ['SIGNAL_RECEIVED'] = 'True' return def init_signal_handler(): """ Handle signals sent by SLURM for time limit / pre-emption. """ os.environ['SIGNAL_RECEIVED'] = 'False' os.environ['MAIN_PID'] = str(os.getpid()) signal.signal(signal.SIGUSR1, signalHandler) signal.signal(signal.SIGTERM, SIGTERMHandler) print("Signal handler installed.", flush=True)
DeeperCluster-main
src/slurm.py
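# Editorial sketch, not part of the DeeperCluster sources: the helpers in src/slurm.py
# above communicate through environment variables. A training loop installs the handler
# once, then polls os.environ['SIGNAL_RECEIVED'] after every iteration to decide when to
# checkpoint and requeue. install_handler(), save_checkpoint() and the iteration count
# are hypothetical stand-ins; the real scripts call trigger_job_requeue() where the
# comment indicates.
import os
import signal
import time

def install_handler():
    os.environ['SIGNAL_RECEIVED'] = 'False'
    os.environ['MAIN_PID'] = str(os.getpid())
    signal.signal(signal.SIGUSR1, lambda signum, frame: os.environ.update(SIGNAL_RECEIVED='True'))

def save_checkpoint(path):
    print('would write checkpoint to', path)

def training_loop(nmb_iters=5):
    for it in range(nmb_iters):
        time.sleep(0.01)  # stands in for one optimization step
        if os.environ['SIGNAL_RECEIVED'] == 'True':
            save_checkpoint('checkpoint.pth.tar')
            # here the real code calls trigger_job_requeue('checkpoint.pth.tar')
            return

install_handler()
training_loop()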
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. #
DeeperCluster-main
src/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # import os import logging import time from datetime import timedelta import pandas as pd class LogFormatter(): def __init__(self): self.start_time = time.time() def format(self, record): elapsed_seconds = round(record.created - self.start_time) prefix = "%s - %s - %s" % ( record.levelname, time.strftime('%x %X'), timedelta(seconds=elapsed_seconds) ) message = record.getMessage() message = message.replace('\n', '\n' + ' ' * (len(prefix) + 3)) return "%s - %s" % (prefix, message) if message else '' def create_logger(filepath, rank): """ Create a logger. Use a different log file for each process. """ # create log formatter log_formatter = LogFormatter() # create file handler and set level to debug if filepath is not None: if rank > 0: filepath = '%s-%i' % (filepath, rank) file_handler = logging.FileHandler(filepath, "a") file_handler.setLevel(logging.DEBUG) file_handler.setFormatter(log_formatter) # create console handler and set level to info console_handler = logging.StreamHandler() console_handler.setLevel(logging.INFO) console_handler.setFormatter(log_formatter) # create logger and set level to debug logger = logging.getLogger() logger.handlers = [] logger.setLevel(logging.DEBUG) logger.propagate = False if filepath is not None: logger.addHandler(file_handler) logger.addHandler(console_handler) # reset logger elapsed time def reset_time(): log_formatter.start_time = time.time() logger.reset_time = reset_time return logger class PD_Stats(object): """ Log stuff with pandas library """ def __init__(self, path, columns): self.path = path # reload path stats if os.path.isfile(self.path): self.stats = pd.read_pickle(self.path) # check that columns are the same assert list(self.stats.columns) == list(columns) else: self.stats = pd.DataFrame(columns=columns) def update(self, row, save=True): self.stats.loc[len(self.stats.index)] = row # save the statistics if save: self.stats.to_pickle(self.path)
DeeperCluster-main
src/logger.py
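# Editorial sketch, not part of the DeeperCluster sources; it assumes the repository
# root is on PYTHONPATH. create_logger() in src/logger.py above writes one log file per
# rank, and PD_Stats appends one row of training statistics per epoch to a pickled
# pandas DataFrame. The dump path and column names below are hypothetical.
import os
from src.logger import create_logger, PD_Stats

dump_path = '/tmp/deepercluster_demo'
os.makedirs(dump_path, exist_ok=True)

logger = create_logger(os.path.join(dump_path, 'train.log'), rank=0)
training_stats = PD_Stats(os.path.join(dump_path, 'stats0.pkl'), ('epoch', 'prec', 'loss'))

logger.info('starting toy run')
training_stats.update((0, 12.5, 6.91))  # one row per epoch: (epoch, prec, loss)
logger.info('stats so far:\n%s' % training_stats.stats)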
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # from logging import getLogger import os import pickle import time import faiss import numpy as np import torch import torch.distributed as dist from .utils import fix_random_seeds, AverageMeter, PCA, normalize logger = getLogger() def initialize_cache(args, loader, model): """ Accumulate features to compute pca. Cache the dataset. """ # we limit the size of the cache per process local_cache_size = min(len(loader), 3150000 // args.batch_size) * args.batch_size # total batch_size batch_size = args.batch_size * args.world_size # how many batches do we need to approximate the covariance matrix N = model.module.body.dim_output_space nmb_batches_for_pca = int(N * (N - 1) / 2 / args.batch_size / args.world_size) logger.info("Require {} images ({} iterations) for pca".format( nmb_batches_for_pca * args.batch_size * args.world_size, nmb_batches_for_pca)) if nmb_batches_for_pca > len(loader): nmb_batches_for_pca = len(loader) logger.warning("Compute the PCA on {} images (entire dataset)".format(args.size_dataset)) # statistics batch_time = AverageMeter() data_time = AverageMeter() end = time.time() with torch.no_grad(): for i, (input_tensor, _) in enumerate(loader): # time spent to load data data_time.update(time.time() - end) # move to gpu input_tensor = input_tensor.type(torch.FloatTensor).cuda() # forward feat = model(input_tensor) # before the pca has been computed if i < nmb_batches_for_pca: # gather the features computed by all processes all_feat = [torch.cuda.FloatTensor(feat.size()) for src in range(args.world_size)] dist.all_gather(all_feat, feat) # only main process computes the PCA if not args.rank: all_feat = torch.cat(all_feat).cpu().numpy() # initialize storage arrays if i == 0: if not args.rank: for_pca = np.zeros( (nmb_batches_for_pca * batch_size, all_feat.shape[1]), dtype=np.float32, ) for_cache = torch.zeros( nmb_batches_for_pca * args.batch_size, feat.size(1), dtype=torch.float32, ) # fill in arrays if not args.rank: for_pca[i * batch_size: (i + 1) * batch_size] = all_feat for_cache[i * args.batch_size: (i + 1) * args.batch_size] = feat.cpu() # train the pca if i == nmb_batches_for_pca - 1: pca_path = os.path.join(args.dump_path, 'pca.pkl') centroids_path = os.path.join(args.dump_path, 'centroids.pkl') # compute the PCA if not args.rank: # init PCA object pca = PCA(dim=args.dim_pca, whit=0.5) # center data mean = np.mean(for_pca, axis=0).astype('float32') for_pca -= mean # compute covariance cov = np.dot(for_pca.T, for_pca) / for_pca.shape[0] # calculate the pca pca.train_pca(cov) # randomly pick some centroids centroids = pca.apply(for_pca[np.random.choice( np.arange(for_pca.shape[0]), replace=False, size=args.nmb_super_clusters, )]) centroids = normalize(centroids) pca.mean = mean # free memory del for_pca # write PCA to disk pickle.dump(pca, open(pca_path, 'wb')) pickle.dump(centroids, open(centroids_path, 'wb')) # processes wait that main process compute and write PCA and centroids dist.barrier() # processes read PCA and centroids from disk pca = pickle.load(open(pca_path, "rb")) centroids = pickle.load(open(centroids_path, "rb")) # apply the pca to the cached features for_cache = pca.apply(for_cache) for_cache = normalize(for_cache) # extend the cache current_cache_size = for_cache.size(0) for_cache = torch.cat((for_cache, torch.zeros( local_cache_size - current_cache_size, 
args.dim_pca, ))) logger.info('{0} imgs cached => cache is {1:.2f} % full' .format(current_cache_size, 100 * current_cache_size / local_cache_size)) # keep accumulating data if i > nmb_batches_for_pca - 1: feat = pca.apply(feat) feat = normalize(feat) for_cache[i * args.batch_size: (i + 1) * args.batch_size] = feat.cpu() # verbose batch_time.update(time.time() - end) end = time.time() if i % 200 == 0: logger.info('{0} / {1}\t' 'Time: {batch_time.val:.3f} ({batch_time.avg:.3f})\t' 'Data Time: {data_time.val:.3f} ({data_time.avg:.3f})\t' .format(i, len(loader), batch_time=batch_time, data_time=data_time)) # move centroids to GPU centroids = torch.cuda.FloatTensor(centroids) return for_cache, centroids def distributed_kmeans(args, n_all, nk, cache, rank, world_size, centroids, world_id=0, group=None): """ Distributed mini-batch k-means. """ # local assignments assignments = -1 * np.ones(n_all // world_size) # prepare faiss index if args.use_faiss: res = faiss.StandardGpuResources() cfg = faiss.GpuIndexFlatConfig() cfg.device = args.gpu_to_work_on index = faiss.GpuIndexFlatL2(res, args.dim_pca, cfg) end = time.time() for p in range(args.niter + 1): start_pass = time.time() # running statistics batch_time = AverageMeter() log_loss = AverageMeter() # initialize arrays for update local_counts = torch.zeros(nk).cuda() local_feats = torch.zeros(nk, args.dim_pca).cuda() # prepare E step torch.cuda.empty_cache() if args.use_faiss: index.reset() index.add(centroids.cpu().numpy().astype('float32')) else: centroids_L2_norm = centroids.norm(dim=1)**2 nmb_batches = n_all // world_size // args.batch_size for it in range(nmb_batches): # fetch mini-batch feat = cache[it * args.batch_size: (it + 1) * args.batch_size] # E-step if args.use_faiss: D, I = index.search(feat.numpy().astype('float32'), 1) I = I.squeeze(1) else: # find current cluster assignments l2dist = 1 - 2 * torch.mm(feat.cuda(non_blocking=True), centroids.transpose(0, 1)) + centroids_L2_norm D, I = l2dist.min(dim=1) I = I.cpu().numpy() D = D.cpu().numpy() # update assignment array assignments[it * args.batch_size: (it + 1) * args.batch_size] = I # log log_loss.update(D.mean()) for k in np.unique(I): idx_k = np.where(I == k)[0] # number of elmt in cluster k for this batch local_counts[k] += len(idx_k) # sum of elmt belonging to this cluster local_feats[k, :] += feat.cuda(non_blocking=True)[idx_k].sum(dim=0) batch_time.update(time.time() - end) end = time.time() if it and it % 1000 == 0: logger.info('Pass[{0}] - Iter: [{1}/{2}]\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' .format(p, it, nmb_batches, batch_time=batch_time)) # all reduce operation # processes share what it is needed for M-step if group is not None: dist.all_reduce(local_counts, group=group) dist.all_reduce(local_feats, group=group) else: dist.all_reduce(local_counts) dist.all_reduce(local_feats) # M-step # update centroids (for the last pass we only want the assignments) mask = local_counts.nonzero() if p < args.niter: centroids[mask] = 1. 
/ local_counts[mask].unsqueeze(1) * local_feats[mask] # deal with empty clusters for k in (local_counts == 0).nonzero(): # choose a random cluster from the set of non empty clusters np.random.seed(world_id) m = mask[np.random.randint(len(mask))] # replace empty centroid by a non empty one with a perturbation centroids[k] = centroids[m] for j in range(args.dim_pca): sign = (j % 2) * 2 - 1; centroids[k, j] += sign * 1e-7; centroids[m, j] -= sign * 1e-7; # update the counts local_counts[k] = local_counts[m] // 2; local_counts[m] -= local_counts[k]; # update the assignments assignments[np.where(assignments == m.item())[0][: int(local_counts[m])]] = k.cpu() logger.info('cluster {} empty => split cluster {}'.format(k, m)) logger.info(' # Pass[{0}]\tTime {1:.3f}\tLoss {2:.4f}' .format(p, time.time() - start_pass, log_loss.avg)) # now each process needs to share its own set of pseudo_labels # where to write / read the pseudo_labels dump_labels = os.path.join( args.dump_path, 'pseudo_labels' + str(world_id) + '-' + str(rank) + '.pkl', ) # log the cluster assignment pickle.dump( assignments, open(dump_labels, 'wb'), -1, ) # process wait for all processes to finish writing if group is not None: dist.barrier(group=group) else: dist.barrier() pseudo_labels = np.zeros(n_all) # process read and reconstitute the pseudo_labels local_nmb_data = n_all // world_size for r in range(world_size): pseudo_labels[torch.arange(r * local_nmb_data, (r + 1) * local_nmb_data).int()] = \ pickle.load(open(os.path.join(args.dump_path, 'pseudo_labels' + str(world_id) + '-' + str(r) + '.pkl'), "rb")) # clean del assignments dist.barrier() os.remove(dump_labels) return pseudo_labels, centroids.cpu()
DeeperCluster-main
src/distributed_kmeans.py
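# Editorial sketch, not part of the DeeperCluster sources: the E-step in
# distributed_kmeans() above scores each feature x against every centroid c with
# 1 - 2 * x.c + ||c||^2, which equals the squared L2 distance ||x - c||^2 as long as the
# features are L2-normalized (||x|| = 1), which the PCA + normalize step guarantees.
# The tensor sizes below are arbitrary.
import torch

torch.manual_seed(0)
feat = torch.nn.functional.normalize(torch.randn(8, 16), dim=1)  # ||x|| = 1 for every row
centroids = torch.randn(4, 16)

shortcut = 1 - 2 * torch.mm(feat, centroids.t()) + centroids.norm(dim=1) ** 2
explicit = ((feat[:, None, :] - centroids[None, :, :]) ** 2).sum(dim=2)

print(torch.allclose(shortcut, explicit, atol=1e-5))                 # True
print(torch.equal(shortcut.argmin(dim=1), explicit.argmin(dim=1)))   # same assignments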
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # import argparse from logging import getLogger import os import pickle import shutil import time import numpy as np from scipy.sparse import csr_matrix import torch import torch.distributed as dist from .logger import create_logger, PD_Stats FALSY_STRINGS = {'off', 'false', '0'} TRUTHY_STRINGS = {'on', 'true', '1'} logger = getLogger() def bool_flag(s): """ Parse boolean arguments from the command line. """ if s.lower() in FALSY_STRINGS: return False elif s.lower() in TRUTHY_STRINGS: return True else: raise argparse.ArgumentTypeError("invalid value for a boolean flag") def init_distributed_mode(args, make_communication_groups=True): """ Handle single and multi-GPU / multi-node / SLURM jobs. Initialize the following variables: - global rank - clustering_local_rank - clustering_local_world_size - clustering_local_world_id - training_local_rank - training_local_world_size - training_local_world_id - rotation """ args.is_slurm_job = 'SLURM_JOB_ID' in os.environ and not args.debug_slurm if args.is_slurm_job: args.rank = int(os.environ['SLURM_PROCID']) else: # jobs started with torch.distributed.launch # read environment variables args.rank = int(os.environ['RANK']) args.world_size = int(os.environ['WORLD_SIZE']) # prepare distributed dist.init_process_group(backend='nccl', init_method=args.dist_url, world_size=args.world_size, rank=args.rank) # set cuda device args.gpu_to_work_on = args.rank % torch.cuda.device_count() torch.cuda.set_device(args.gpu_to_work_on) if not make_communication_groups: return None, None # each super_class has the same number of processes assert args.world_size % args.super_classes == 0 # each super-class forms a training communication group args.training_local_world_size = args.world_size // args.super_classes args.training_local_rank = args.rank % args.training_local_world_size args.training_local_world_id = args.rank // args.training_local_world_size # prepare training groups training_groups = [] for group_id in range(args.super_classes): ranks = [args.training_local_world_size * group_id + i \ for i in range(args.training_local_world_size)] training_groups.append(dist.new_group(ranks=ranks)) # compute number of super-clusters if args.rotnet: assert args.super_classes % 4 == 0 args.nmb_super_clusters = args.super_classes // 4 else: args.nmb_super_clusters = args.super_classes # prepare clustering communication groups args.clustering_local_world_size = args.training_local_world_size * \ (args.super_classes // args.nmb_super_clusters) args.clustering_local_rank = args.rank % args.clustering_local_world_size args.clustering_local_world_id = args.rank // args.clustering_local_world_size clustering_groups = [] for group_id in range(args.nmb_super_clusters): ranks = [args.clustering_local_world_size * group_id + i \ for i in range(args.clustering_local_world_size)] clustering_groups.append(dist.new_group(ranks=ranks)) # this process deals only with a certain rotation if args.rotnet: args.rotation = args.clustering_local_rank // args.training_local_world_size else: args.rotation = 0 return training_groups, clustering_groups def check_parameters(args): """ Check if corpus of arguments is consistent. 
""" args.size_dataset = min(args.size_dataset, 95920149) # make dataset size divisible by both the batch-size and the world-size div = args.batch_size * args.world_size args.size_dataset = args.size_dataset // div * div args.epoch_size = args.size_dataset // args.nmb_super_clusters // 4 args.epoch_size = args.epoch_size // div * div assert args.super_classes # number of super classes must be divisible by the number of rotation categories if args.rotnet: assert args.super_classes % 4 == 0 # feature dimension assert args.dim_pca <= 4096 def initialize_exp(params, *args): """ Initialize the experience: - dump parameters - create checkpoint and cache repos - create a logger - create a panda object to log the training statistics """ # dump parameters pickle.dump(params, open(os.path.join(params.dump_path, 'params.pkl'), 'wb')) # create repo to store checkpoints params.dump_checkpoints = os.path.join(params.dump_path, 'checkpoints') if not params.rank and not os.path.isdir(params.dump_checkpoints): os.mkdir(params.dump_checkpoints) # create repo to cache activations between the two stages of the hierarchical k-means if not params.rank and not os.path.isdir(os.path.join(params.dump_path, 'cache')): os.mkdir(os.path.join(params.dump_path, 'cache')) # create a panda object to log loss and acc training_stats = PD_Stats( os.path.join(params.dump_path, 'stats' + str(params.rank) + '.pkl'), args, ) # create a logger logger = create_logger(os.path.join(params.dump_path, 'train.log'), rank=params.rank) logger.info("============ Initialized logger ============") logger.info("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(params)).items()))) logger.info("The experiment will be stored in %s\n" % params.dump_path) logger.info("") return logger, training_stats def end_of_epoch(args): """ Remove cluster assignment from experiment repository """ def src_dst(what, cl=False): src = os.path.join( args.dump_path, what + cl * str(args.clustering_local_world_id) + '.pkl', ) dst = os.path.join( args.dump_checkpoints, what + '{}-epoch{}.pkl'.format(cl * args.clustering_local_world_id, args.epoch - 1), ) return src, dst # main processes only are working here if not args.clustering_local_rank: for what in ['cluster_assignments', 'centroids']: src, dst = src_dst(what, cl=True) if not (args.epoch - 1) % args.checkpoint_freq: shutil.copy(src, dst) if not 'centroids' in src: os.remove(src) if not args.rank: for what in ['super_class_assignments', 'super_class_centroids']: src, dst = src_dst(what) if not (args.epoch - 1) % args.checkpoint_freq: shutil.copy(src, dst) os.remove(src) def restart_from_checkpoint(args, ckp_path=None, run_variables=None, **kwargs): """ Re-start from checkpoint present in experiment repo """ if ckp_path is None: ckp_path = os.path.join(args.dump_path, 'checkpoint.pth.tar') # look for a checkpoint in exp repository if not os.path.isfile(ckp_path): return logger.info('Found checkpoint in experiment repository') # open checkpoint file map_location = None if args.world_size > 1: map_location = "cuda:" + str(args.gpu_to_work_on) checkpoint = torch.load(ckp_path, map_location=map_location) # key is what to look for in the checkpoint file # value is the object to load # example: {'state_dict': model} for key, value in kwargs.items(): if key in checkpoint and value is not None: value.load_state_dict(checkpoint[key]) logger.info("=> loaded {} from checkpoint '{}'" .format(key, ckp_path)) else: logger.warning("=> failed to load {} from checkpoint '{}'" .format(key, ckp_path)) # re load variable 
important for the run if run_variables is not None: for var_name in run_variables: if var_name in checkpoint: run_variables[var_name] = checkpoint[var_name] def fix_random_seeds(seed=1993): """ Fix random seeds. """ torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) np.random.seed(seed) class PCA(): """ Class to compute and apply PCA. """ def __init__(self, dim=256, whit=0.5): self.dim = dim self.whit = whit self.mean = None def train_pca(self, cov): """ Takes a covariance matrix (np.ndarray) as input. """ d, v = np.linalg.eigh(cov) eps = d.max() * 1e-5 n_0 = (d < eps).sum() if n_0 > 0: d[d < eps] = eps # total energy totenergy = d.sum() # sort eigenvectors with eigenvalues order idx = np.argsort(d)[::-1][:self.dim] d = d[idx] v = v[:, idx] logger.warning("keeping %.2f %% of the energy" % (d.sum() / totenergy * 100.0)) # for the whitening d = np.diag(1. / d**self.whit) # principal components self.dvt = np.dot(d, v.T) def apply(self, x): # input is from numpy if isinstance(x, np.ndarray): if self.mean is not None: x -= self.mean return np.dot(self.dvt, x.T).T # input is from torch and is on GPU if x.is_cuda: if self.mean is not None: x -= torch.cuda.FloatTensor(self.mean) return torch.mm(torch.cuda.FloatTensor(self.dvt), x.transpose(0, 1)).transpose(0, 1) # input if from torch, on CPU if self.mean is not None: x -= torch.FloatTensor(self.mean) return torch.mm(torch.FloatTensor(self.dvt), x.transpose(0, 1)).transpose(0, 1) class AverageMeter(object): """computes and stores the average and current value""" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def normalize(data): # data in numpy array if isinstance(data, np.ndarray): row_sums = np.linalg.norm(data, axis=1) data = data / row_sums[:, np.newaxis] return data # data is a tensor row_sums = data.norm(dim=1, keepdim=True) data = data / row_sums return data def compute_M(data): cols = np.arange(data.size) return csr_matrix((cols, (data.ravel(), cols)), shape=(data.max() + 1, data.size)) def get_indices_sparse(data): M = compute_M(data) return [np.unravel_index(row.data, data.shape) for row in M]
DeeperCluster-main
src/utils.py
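# Editorial sketch, not part of the DeeperCluster sources: get_indices_sparse() in
# src/utils.py above recovers, for every label value, the positions where it occurs,
# in one pass through a CSR matrix instead of one np.where() scan per cluster. The toy
# label array below is arbitrary.
import numpy as np
from scipy.sparse import csr_matrix

def get_indices_sparse(data):
    cols = np.arange(data.size)
    M = csr_matrix((cols, (data.ravel(), cols)), shape=(data.max() + 1, data.size))
    return [np.unravel_index(row.data, data.shape) for row in M]

labels = np.array([2, 0, 1, 2, 0, 2])
for k, (idx,) in enumerate(get_indices_sparse(labels)):
    print('label', k, '->', sorted(idx.tolist()),
          'np.where ->', np.where(labels == k)[0].tolist())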
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # from logging import getLogger import os import shutil import time import numpy as np import torch import torch.distributed as dist import torch.nn as nn from torch.utils.data.sampler import Sampler from .utils import AverageMeter, get_indices_sparse from src.slurm import trigger_job_requeue logger = getLogger() class DistUnifTargSampler(Sampler): """ Distributively samples elements based on a uniform distribution over the labels. """ def __init__(self, total_size, pseudo_labels, num_replicas, rank, seed=31): np.random.seed(seed) # world size self.num_replicas = num_replicas # rank of this process self.rank = rank # how many data to be loaded by the corpus of processes self.total_size = total_size # set of labels to consider set_of_pseudo_labels = np.unique(pseudo_labels) nmb_pseudo_lab = int(len(set_of_pseudo_labels)) # number of images per label per_label = int(self.total_size // nmb_pseudo_lab + 1) # initialize indexes epoch_indexes = np.zeros(int(per_label * nmb_pseudo_lab)) # select a number of per_label data for each label indexes = get_indices_sparse(np.asarray(pseudo_labels)) for i, k in enumerate(set_of_pseudo_labels): k = int(k) label_indexes = indexes[k][0] epoch_indexes[i * per_label: (i + 1) * per_label] = np.random.choice( label_indexes, per_label, replace=(len(label_indexes) <= per_label) ) # make sure indexes are integers epoch_indexes = epoch_indexes.astype(int) # shuffle the indexes np.random.shuffle(epoch_indexes) self.epoch_indexes = epoch_indexes[:self.total_size] # this process only deals with this subset self.process_ind = self.epoch_indexes[self.rank:self.total_size:self.num_replicas] def __iter__(self): return iter(self.process_ind) def __len__(self): return len(self.process_ind) def train_network(args, models, optimizers, dataset): """ Train the models with cluster assignments as targets """ # swith to train mode for model in models: model.train() # uniform sampling over pseudo labels sampler = DistUnifTargSampler( args.epoch_size, dataset.sub_classes, args.training_local_world_size, args.training_local_rank, seed=args.epoch + args.training_local_world_id, ) loader = torch.utils.data.DataLoader( dataset, sampler=sampler, batch_size=args.batch_size, num_workers=args.workers, pin_memory=True, ) # running statistics batch_time = AverageMeter() data_time = AverageMeter() # training statistics log_top1_subclass = AverageMeter() log_loss_subclass = AverageMeter() log_top1_superclass = AverageMeter() log_loss_superclass = AverageMeter() log_top1 = AverageMeter() log_loss = AverageMeter() end = time.perf_counter() cel = nn.CrossEntropyLoss().cuda() relu = torch.nn.ReLU().cuda() for iter_epoch, (inp, target) in enumerate(loader): # start at iter start_iter if iter_epoch < args.start_iter: continue # measure data loading time data_time.update(time.perf_counter() - end) # move input to gpu inp = inp.cuda(non_blocking=True) target = target.cuda(non_blocking=True).long() # forward on the model inp = relu(models[0](inp)) # forward on sub-class prediction layer output = models[-1](inp) loss_subclass = cel(output, target) # forward on super-class prediction layer super_class_output = models[1](inp) sc_target = args.training_local_world_id + \ 0 * torch.cuda.LongTensor(args.batch_size) loss_superclass = cel(super_class_output, sc_target) loss = loss_subclass + loss_superclass # 
initialize the optimizers for optimizer in optimizers: optimizer.zero_grad() # compute the gradients loss.backward() # step for optimizer in optimizers: optimizer.step() # log # signal received, relaunch experiment if os.environ['SIGNAL_RECEIVED'] == 'True': save_checkpoint(args, iter_epoch + 1, models, optimizers) if not args.rank: trigger_job_requeue(os.path.join(args.dump_path, 'checkpoint.pth.tar')) # regular checkpoints if iter_epoch and iter_epoch % 1000 == 0: save_checkpoint(args, iter_epoch + 1, models, optimizers) # update stats log_loss.update(loss.item(), output.size(0)) prec1 = accuracy(args, output, target, sc_output=super_class_output) log_top1.update(prec1.item(), output.size(0)) log_loss_superclass.update(loss_superclass.item(), output.size(0)) prec1 = accuracy(args, super_class_output, sc_target) log_top1_superclass.update(prec1.item(), output.size(0)) log_loss_subclass.update(loss_subclass.item(), output.size(0)) prec1 = accuracy(args, output, target) log_top1_subclass.update(prec1.item(), output.size(0)) batch_time.update(time.perf_counter() - end) end = time.perf_counter() # verbose if iter_epoch % 100 == 0: logger.info('Epoch[{0}] - Iter: [{1}/{2}]\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' 'Data {data_time.val:.3f} ({data_time.avg:.3f})\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\t' 'Prec {log_top1.val:.3f} ({log_top1.avg:.3f})\t' 'Super-class loss: {sc_loss.val:.3f} ({sc_loss.avg:.3f})\t' 'Super-class prec: {sc_prec.val:.3f} ({sc_prec.avg:.3f})\t' 'Intra super-class loss: {los.val:.3f} ({los.avg:.3f})\t' 'Intra super-class prec: {prec.val:.3f} ({prec.avg:.3f})\t' .format(args.epoch, iter_epoch, len(loader), batch_time=batch_time, data_time=data_time, loss=log_loss, log_top1=log_top1, sc_loss=log_loss_superclass, sc_prec=log_top1_superclass, los=log_loss_subclass, prec=log_top1_subclass)) # end of epoch args.start_iter = 0 args.epoch += 1 # dump checkpoint save_checkpoint(args, 0, models, optimizers) if not args.rank: if not (args.epoch - 1) % args.checkpoint_freq: shutil.copyfile( os.path.join(args.dump_path, 'checkpoint.pth.tar'), os.path.join(args.dump_checkpoints, 'checkpoint' + str(args.epoch - 1) + '.pth.tar'), ) return (args.epoch - 1, args.epoch * len(loader), log_top1.avg, log_loss.avg, log_top1_superclass.avg, log_loss_superclass.avg, log_top1_subclass.avg, log_loss_subclass.avg, ) def save_checkpoint(args, iter_epoch, models, optimizers, path=''): if not os.path.isfile(path): path = os.path.join(args.dump_path, 'checkpoint.pth.tar') # main process saves the training state if not args.rank: torch.save({ 'epoch': args.epoch, 'start_iter': iter_epoch, 'state_dict': models[0].state_dict(), 'optimizer': optimizers[0].state_dict(), 'pred_layer_state_dict': models[1].state_dict(), 'optimizer_pred_layer': optimizers[1].state_dict(), }, path) # main local training process saves the last layer if not args.training_local_rank: torch.save({ 'epoch': args.epoch, 'start_iter': iter_epoch, 'state_dict': models[-1].state_dict(), 'optimizer': optimizers[-1].state_dict(), }, os.path.join(args.dump_path, str(args.training_local_world_id) + '-pred_layer.pth.tar')) def accuracy(args, output, target, sc_output=None): """Computes the accuracy over the k top predictions for the specified values of k""" with torch.no_grad(): batch_size = target.size(0) _, pred = output.topk(1, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) if sc_output is not None: _, pred = sc_output.topk(1, 1, True, True) pred = pred.t() target = 
args.training_local_world_id + 0 * torch.cuda.LongTensor(batch_size) correct_sc = pred.eq(target.view(1, -1).expand_as(pred)) correct *= correct_sc correct_1 = correct[:1].view(-1).float().sum(0, keepdim=True) return correct_1.mul_(100.0 / batch_size) def validate_network(val_loader, models, args): batch_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() # switch to evaluate mode for model in models: model.eval() criterion = nn.CrossEntropyLoss().cuda() with torch.no_grad(): end = time.perf_counter() for i, (inp, target) in enumerate(val_loader): # move to gpu inp = inp.cuda(non_blocking=True) target = target.cuda(non_blocking=True) # compute output output = inp for model in models: output = model(output) loss = criterion(output, target) # measure accuracy and record loss acc1 = accuracy(args, output, target) losses.update(loss.item(), inp.size(0)) top1.update(acc1[0], inp.size(0)) # measure elapsed time batch_time.update(time.perf_counter() - end) end = time.perf_counter() if i % 100 == 0: logger.info('Test: [{0}/{1}]\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\t' 'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t' .format(i, len(val_loader), batch_time=batch_time, loss=losses, top1=top1)) return (top1.avg.item(), losses.avg)
DeeperCluster-main
src/trainer.py
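# Editorial sketch, not part of the DeeperCluster sources; it assumes the repository
# root is on PYTHONPATH. DistUnifTargSampler in src/trainer.py above first draws roughly
# the same number of images per pseudo-label, then deals the shuffled index list out to
# the processes of one training group. The pseudo-label array, epoch size and world size
# below are toy values.
import numpy as np
from src.trainer import DistUnifTargSampler

pseudo_labels = np.random.randint(0, 10, size=1000)  # 10 hypothetical clusters
epoch_size, world_size = 200, 4

samplers = [DistUnifTargSampler(epoch_size, pseudo_labels, world_size, rank, seed=31)
            for rank in range(world_size)]

print([len(s) for s in samplers])  # every process gets epoch_size / world_size indices

# together the processes cover a label-balanced sample of the dataset
all_inds = np.concatenate([s.process_ind for s in samplers])
print(np.bincount(pseudo_labels[all_inds], minlength=10))  # roughly 20 images per label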
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # import os from logging import getLogger import pickle import numpy as np import torch import torch.nn as nn from src.model.model_factory import create_sobel_layer from src.model.vgg16 import VGG16 logger = getLogger() def load_pretrained(model, args): """ Load weights """ if not os.path.isfile(args.pretrained): logger.info('pretrained weights not found') return # open checkpoint file map_location = None if args.world_size > 1: map_location = "cuda:" + str(args.gpu_to_work_on) checkpoint = torch.load(args.pretrained, map_location=map_location) # clean keys from 'module' checkpoint['state_dict'] = {rename_key(key): val for key, val in checkpoint['state_dict'].items()} # remove sobel keys if 'sobel.0.weight' in checkpoint['state_dict']: del checkpoint['state_dict']['sobel.0.weight'] del checkpoint['state_dict']['sobel.0.bias'] del checkpoint['state_dict']['sobel.1.weight'] del checkpoint['state_dict']['sobel.1.bias'] # remove pred_layer keys if 'pred_layer.weight' in checkpoint['state_dict']: del checkpoint['state_dict']['pred_layer.weight'] del checkpoint['state_dict']['pred_layer.bias'] # load weights model.body.load_state_dict(checkpoint['state_dict']) logger.info("=> loaded pretrained weights from '{}'".format(args.pretrained)) def rename_key(key): "Remove module from key" if not 'module' in key: return key if key.startswith('module.body.'): return key[12:] if key.startswith('module.'): return key[7:] return ''.join(key.split('.module'))
DeeperCluster-main
src/model/pretrain.py
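# Editorial sketch, not part of the DeeperCluster sources: rename_key() in
# src/model/pretrain.py above strips the 'module.' prefixes that DistributedDataParallel
# adds to checkpoint keys, so the weights can be loaded into the bare VGG-16 body.
# The example keys below are hypothetical.
def rename_key(key):
    "Remove module from key"
    if not 'module' in key:
        return key
    if key.startswith('module.body.'):
        return key[12:]
    if key.startswith('module.'):
        return key[7:]
    return ''.join(key.split('.module'))

for key in ('module.body.features.0.weight', 'module.pred_layer.bias', 'features.0.weight'):
    print(key, '->', rename_key(key))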
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. #
DeeperCluster-main
src/model/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # import math import torch import torch.nn as nn import torch.nn.init as init cfg = { 'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], } class VGG16(nn.Module): ''' VGG16 model ''' def __init__(self, dim_in, relu=True, dropout=0.5, batch_norm=True): super(VGG16, self).__init__() self.features = make_layers(cfg['D'], dim_in, batch_norm=batch_norm) self.dim_output_space = 4096 classifier = [ nn.Linear(512 * 7 * 7, 4096), nn.ReLU(True), nn.Dropout(dropout), nn.Linear(4096, 4096), ] if relu: classifier.append(nn.ReLU(True)) self.classifier = nn.Sequential(*classifier) # Initialize weights for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. / n)) m.bias.data.zero_() def forward(self, x): x = self.features(x) if self.classifier is not None: x = x.view(x.size(0), -1) x = self.classifier(x) return x def make_layers(cfg, in_channels, batch_norm=True): layers = [] for v in cfg: if v == 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) if batch_norm: layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] else: layers += [conv2d, nn.ReLU(inplace=True)] in_channels = v return nn.Sequential(*layers)
DeeperCluster-main
src/model/vgg16.py
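# Editorial sketch, not part of the DeeperCluster sources; it assumes the repository
# root is on PYTHONPATH and runs on CPU. The VGG16 trunk in src/model/vgg16.py above
# maps a 224x224 input to a 4096-d embedding; setting classifier to None (as
# eval_linear.py does on model.body) makes the forward pass return the last 7x7x512
# feature map instead. The input resolution below must stay at 224 for the classifier
# branch to match its 512 * 7 * 7 input size.
import torch
from src.model.vgg16 import VGG16

net = VGG16(dim_in=3, relu=False)
net.eval()

with torch.no_grad():
    print(net(torch.randn(1, 3, 224, 224)).shape)  # torch.Size([1, 4096])
    net.classifier = None
    print(net(torch.randn(1, 3, 224, 224)).shape)  # torch.Size([1, 512, 7, 7])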
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # from logging import getLogger import torch import torch.nn as nn import torch.optim from .vgg16 import VGG16 logger = getLogger() def create_sobel_layer(): grayscale = nn.Conv2d(3, 1, kernel_size=1, stride=1, padding=0) grayscale.weight.data.fill_(1.0 / 3.0) grayscale.bias.data.zero_() sobel_filter = nn.Conv2d(1, 2, kernel_size=3, stride=1, padding=0) sobel_filter.weight.data[0, 0].copy_( torch.FloatTensor([[1, 0, -1], [2, 0, -2], [1, 0, -1]]) ) sobel_filter.weight.data[1, 0].copy_( torch.FloatTensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]) ) sobel_filter.bias.data.zero_() sobel = nn.Sequential(grayscale, sobel_filter) for p in sobel.parameters(): p.requires_grad = False return sobel class Net(nn.Module): def __init__(self, padding, sobel, body, pred_layer): super(Net, self).__init__() # padding self.padding = padding # sobel filter self.sobel = create_sobel_layer() if sobel else None # main architecture self.body = body # prediction layer self.pred_layer = pred_layer self.conv = None def forward(self, x): if self.padding is not None: x = self.padding(x) if self.sobel is not None: x = self.sobel(x) if self.conv is not None: count = 1 for m in self.body.features.modules(): if not isinstance(m, nn.Sequential): x = m(x) if isinstance(m, nn.ReLU): if count == self.conv: return x count = count + 1 x = self.body(x) if self.pred_layer is not None: x = self.pred_layer(x) return x def model_factory(sobel, relu=False, num_classes=0, batch_norm=True): """ Create a network. """ dim_in = 2 if sobel else 3 padding = nn.ConstantPad2d(1, 0.0) if sobel: padding = nn.ConstantPad2d(2, 0.0) body = VGG16(dim_in, relu=relu, batch_norm=batch_norm) pred_layer = nn.Linear(body.dim_output_space, num_classes) if num_classes else None return Net(padding, sobel, body, pred_layer) def build_prediction_layer(dim_in, args, group=None, num_classes=0): """ Create prediction layer on gpu and its associated optimizer. 
""" if not num_classes: num_classes = args.super_classes # last fully connected layer pred_layer = nn.Linear(dim_in, num_classes) # move prediction layer to gpu pred_layer = to_cuda(pred_layer, args.gpu_to_work_on, group=group) # set optimizer for the prediction layer optimizer_pred_layer = sgd_optimizer(pred_layer, args.lr, args.wd) return pred_layer, optimizer_pred_layer def to_cuda(net, gpu_id, apex=False, group=None): net = net.cuda() if apex: from apex.parallel import DistributedDataParallel as DDP net = DDP(net, delay_allreduce=True) else: net = nn.parallel.DistributedDataParallel( net, device_ids=[gpu_id], process_group=group, ) return net def sgd_optimizer(module, lr, wd): return torch.optim.SGD( filter(lambda x: x.requires_grad, module.parameters()), lr=lr, momentum=0.9, weight_decay=wd, ) def sobel2RGB(net): if net.sobel is None: return def computeweight(conv, alist, blist): sob = net.sobel._modules['1'].weight res = 0 for atup in alist: for btup in blist: x = conv[:, 0, atup[0], btup[0]]*sob[0, :, atup[1], btup[1]] y = conv[:, 1, atup[0], btup[0]]*sob[1, :, atup[1], btup[1]] res = res + x + y return res def aux(a): if a == 0: return [(0, 0)] elif a == 1: return [(1, 0), (0, 1)] elif a == 2: return [(2, 0), (1, 1), (0, 2)] elif a == 3: return [(2, 1), (1, 2)] elif a == 4: return [(2, 2)] features = list(net.body.features.children()) conv_old = features[0] conv_final = nn.Conv2d(3, 64, kernel_size=5, padding=1, bias=True) for i in range(conv_old.kernel_size[0]): for j in range(conv_old.kernel_size[0]): neweight = 1/3* computeweight(conv_old.weight, aux(i), aux(j)).expand(3, 64).transpose(1, 0) conv_final.weight.data[:, :, i, j].copy_(neweight) conv_final.bias.data.copy_(conv_old.bias.data) features[0] = conv_final net.body.features = nn.Sequential(*features) net.sobel = None return
DeeperCluster-main
src/model/model_factory.py
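# Editorial sketch, not part of the DeeperCluster sources; it assumes the repository
# root is on PYTHONPATH and runs on CPU. model_factory() in src/model/model_factory.py
# above stitches together an optional Sobel filter, the VGG-16 trunk and an optional
# linear prediction head; with sobel=True the 224x224 input is padded to 228x228 so the
# Sobel convolution hands a 226x226, 2-channel map to the trunk. The number of classes
# below is arbitrary.
import torch
from src.model.model_factory import model_factory

model = model_factory(sobel=True, relu=True, num_classes=10)
model.eval()

with torch.no_grad():
    out = model(torch.randn(2, 3, 224, 224))
print(out.shape)  # torch.Size([2, 10])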
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. #
DeeperCluster-main
src/data/__init__.py