python_code | repo_name | file_path
---|---|---
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import uuid
import shutil
import logging
import datetime
import importlib
import traceback
import contextlib
import typing as tp
from pathlib import Path
import numpy as np
import submitit
import omegaconf
import hydra
from .executor import ( # pylint: disable=unused-import
DelayedExecutor as DelayedExecutor,
)
PathLike = tp.Union[str, Path]
logger = logging.getLogger(__name__)
@contextlib.contextmanager
def working_directory(path: tp.Union[str, Path]) -> tp.Iterator[None]:
cwd = Path().cwd()
os.chdir(path)
try:
yield
finally:
os.chdir(cwd)
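# Usage sketch (illustrative): temporarily run from another directory and restore the
# previous cwd afterwards, even if an exception is raised:
#     with working_directory(tmp_path):
#         ...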
class HydraEntryPoint:
"""Creates a callable from a Hydra main
Config and python files are expected to be in the same folder
Parameters
----------
script_path: str/Path
Path to the python script containing main
"""
# callable to be typed when using an actual package
def __init__(self, script_path: PathLike) -> None:
self._script_path = Path(script_path).absolute()
assert self._script_path.suffix == ".py"
assert self._script_path.is_file(), f"{self._script_path} is not a file"
assert self._script_path.with_name("base_config.yaml").is_file()
self._folder: tp.Optional[Path] = None # defined later
@property
def folder(self) -> Path:
if self._folder is None:
raise RuntimeError(
"Folder is not defined if call method has not be called yet"
)
return self._folder
def validated(self, **kwargs: tp.Any) -> "HydraEntryPoint":
self._folder = (
None # reset folder if validated to avoid reusing a previous test folder
)
self.config(**kwargs)
return self
def _relative_path(self) -> Path:
return Path(os.path.relpath(self._script_path, Path(__file__).parent))
def config(self, **kwargs: tp.Any) -> omegaconf.DictConfig:
self._get_module() # needs to be loaded to make sure configs are available
name = self._script_path.stem
rel_path = self._relative_path().with_name("base_config.yaml")
overrides = [f"{x}={y}" for x, y in kwargs.items()]
with hydra.initialize(
config_path=str(rel_path.parent), job_name=name, version_base="1.1"
):
cfg_ = hydra.compose(config_name="base_config", overrides=overrides)
return cfg_
def _get_module(self) -> tp.Any:
benchpath = str(self._script_path.parents[1])
if benchpath not in sys.path:
sys.path.insert(0, benchpath)
# add url_benchmark, for legacy buffers
sys.path.append(str(self._script_path.parent))
already_imported = any("url_benchmark" in x for x in sys.modules)
module = importlib.import_module("url_benchmark." + self._script_path.stem)
module = importlib.reload(module) # reload to override hydra configstore
assert module is not None
if module.__file__ is None or not module.__file__.startswith(benchpath):
if already_imported:
logger.warning(
"url_benchmark had already been imported, using {module.__file__}"
)
else:
raise RuntimeError(
f"Imported {module.__file__} while expecting to be in {benchpath}"
)
return module
def main(self, **kwargs: tp.Any) -> tp.Any:
return self._get_module().main(self.config(**kwargs))
def workspace(self, **kwargs: tp.Any) -> tp.Any:
return self._get_module().Workspace(self.config(**kwargs))
def __repr__(self) -> str:
rel_path = str(self._relative_path())
return f"{self.__class__.__name__}({rel_path!r})"
def get_hiplog(self) -> tp.Any:
if self._folder is None:
raise RuntimeError("No workspace avaible")
import hiplogs # type: ignore
loggers = list(hiplogs.HipLog.find_in_folder(self._folder))
assert len(loggers) == 1
return loggers[0]
def __call__(
self, _working_directory_: tp.Optional[PathLike] = None, **kwargs: tp.Any
) -> float:
config = self.config(**kwargs)
try:
slurm_folder: tp.Optional[Path] = submitit.JobEnvironment().paths.folder
except RuntimeError:
slurm_folder = None
if self._folder is None and _working_directory_ is not None:
self._folder = Path(_working_directory_) # override working directory
self._folder.mkdir(exist_ok=True, parents=True)
logger.warning(
f"Bypassing folder affectation and using provided: {self._folder}"
)
if slurm_folder is not None:
# try and link to latest slurm dir anyway
symlink = self._folder / "slurm"
if symlink.exists():
symlink.unlink()
symlink.symlink_to(slurm_folder)
if self._folder is None:
if slurm_folder is not None:
self._folder = slurm_folder
else:
name = f"{datetime.date.today().isoformat()}_{config.experiment}_{uuid.uuid4().hex[:6]}"
self._folder = Path("exp_local") / name
self._folder.mkdir(exist_ok=True, parents=True)
omegaconf.OmegaConf.save(config=config, f=str(self.folder / "config.yaml"))
with working_directory(self.folder):
workspace = self._get_module().Workspace(config)
try:
workspace.train()
except Exception as e:
if not workspace.eval_rewards_history:
raise e # it did not even run :s
logger.warning(f"Something went wrong:\n{traceback.format_exc()}")
reward = -float("inf")
if workspace.eval_rewards_history:
reward = np.mean(workspace.eval_rewards_history[-12:])
return -float(reward) # minimization for nevergrad
def checkpoint(self, *args: tp.Any, **kwargs: tp.Any) -> tp.Any:
return submitit.helpers.DelayedSubmission(self, *args, **kwargs)
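# Minimal usage sketch (illustrative, mirroring the tests below): wrap a hydra script and run it
# with keyword overrides; calling the entry point trains a Workspace and returns the negated
# average of the last evaluation rewards (for minimization with nevergrad).
def _example_entry_point_usage() -> None:  # hypothetical helper, not used by the package
    ep = HydraEntryPoint("url_benchmark/pretrain.py")
    cfg = ep.config(agent="fb_ddpg", device="cpu")  # compose the hydra config with overrides
    print(cfg.device)
    neg_reward = ep(device="cpu", num_train_frames=1, num_eval_episodes=1)
    print(neg_reward)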
class CopiedBenchmark(HydraEntryPoint):
def __init__(self, folder: PathLike, name: str) -> None:
self.code = Path(folder) / "code"
self.code.parent.mkdir(parents=True, exist_ok=True)
if self.code.exists():
logger.warning(
f"Folder {folder} already exists, it will **not** be updated"
)
else:
shutil.copytree(
Path(__file__).parents[1] / "url_benchmark",
self.code / "url_benchmark",
ignore=shutil.ignore_patterns("exp_*"),
)
super().__init__(self.code / "url_benchmark" / f"{name}.py")
def on_exception_enter_postmortem(f):
"""Decorator for triggering pdb in case of exception"""
import pdb
import sys
from functools import wraps
import traceback
@wraps(f)
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception:
traceback.print_exc()
pdb.post_mortem(sys.exc_info()[2])
raise
return wrapper
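# Usage sketch (illustrative): decorate a fragile entry point so that an uncaught exception
# prints its traceback and drops into pdb post-mortem before re-raising:
#
#     @on_exception_enter_postmortem
#     def fragile_main() -> None:
#         raise RuntimeError("boom")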
| controllable_agent-main | controllable_agent/runner.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
controllable_agent-main
|
controllable_agent/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
import concurrent.futures
from pathlib import Path
import pytest
import submitit
from . import executor as _exec
def func(fail: bool = False) -> str:
if fail:
raise ValueError("This is a failure")
return "success"
def get_executor(tmp_path: Path) -> _exec.DelayedExecutor[str]:
local_exec = submitit.AutoExecutor(folder=tmp_path, cluster="debug")
return _exec.DelayedExecutor(
local_exec, default="ERROR", batch_size=2, max_delay=0.2, max_failure_rate=0.5
)
def test_delayed_exec_num(tmp_path: Path) -> None:
executor = get_executor(tmp_path)
job1 = executor.submit(func)
assert not job1.done()
assert job1.job is None, "Job should not be submitted"
job2 = executor.submit(func)
assert job2.done()
assert job1.job is not None, "Job should now be submitted"
assert job2.job is not None, "Job should now be submitted"
assert not executor._unsubmitted, "Unsubmitted jobs should be purged"
def test_delayed_exec_delay(tmp_path: Path) -> None:
executor = get_executor(tmp_path)
job1 = executor.submit(func)
time.sleep(0.1)
assert job1.job is None, "Job should not be submitted"
time.sleep(0.11)
job1.done() # trigger a possible submission
assert job1.job is not None, "Job should be submitted"
assert not executor._unsubmitted, "Unsubmitted jobs should be purged"
def test_delayed_exec_error(tmp_path: Path) -> None:
executor = get_executor(tmp_path)
jobs = [executor.submit(func, fail=f) for f in [True, True]]
with pytest.raises(RuntimeError):
jobs[0].result()
def test_delayed_exec_caught_error(tmp_path: Path) -> None:
executor = get_executor(tmp_path)
jobs = [executor.submit(func, fail=f) for f in [False, True]]
assert jobs[0].result() == "success"
assert jobs[1].result() == "ERROR"
def _do_nothing() -> int:
return 12
def test_wait_for_jobs() -> None:
jobs = []
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as exc:
for _ in range(2):
jobs.append(exc.submit(_do_nothing))
_exec.wait_for_jobs(jobs, sleep=0.04)
| controllable_agent-main | controllable_agent/test_executor.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
import itertools
import subprocess
from pathlib import Path
import controllable_agent
from . import runner
def test_quadruped_goal(tmp_path: Path) -> None:
conf_path = Path(__file__).parents[1] / "url_benchmark" / "pretrain.py"
with runner.working_directory(tmp_path):
ep = runner.HydraEntryPoint(conf_path)
ep(
_working_directory_=tmp_path / "bypass",
agent="fb_ddpg",
device="cpu",
num_train_frames=1,
num_eval_episodes=1,
replay_buffer_episodes=2,
goal_space="simplified_quadruped",
task="quadruped_walk",
use_hiplog=True,
final_tests=1,
**{"agent.feature_dim": 80, "agent.z_dim": 100},
)
reward_file = tmp_path / "bypass" / "test_rewards.json"
text = reward_file.read_text()
assert "quadruped_run" in text
def test_anytrain(tmp_path: Path) -> None:
with runner.working_directory(tmp_path):
ep = runner.CopiedBenchmark(tmp_path / "no_code", "anytrain")
ep(
agent="fb_ddpg",
device="cpu",
num_train_episodes=1,
num_eval_episodes=1,
replay_buffer_episodes=2,
use_hiplog=True,
final_tests=0,
)
def test_grid_anytrain(tmp_path: Path) -> None:
with runner.working_directory(tmp_path):
ep = runner.CopiedBenchmark(tmp_path / "no_code", "anytrain")
ep(
agent="discrete_fb",
device="cpu",
task="grid_simple",
num_train_episodes=1,
num_eval_episodes=1,
replay_buffer_episodes=2,
use_hiplog=True,
final_tests=0,
)
def test_package_init_annotations() -> None:
# automatically updates the __init__ functions with "-> None:" if missing
# it fails the first time when adding it, then it should work
# feel free to deactivate if that helps, it's not that important :p
failed = []
pattern = re.compile(r"(def __init__\(self.*\)):")
root = Path(__file__).parents[1]
assert (root / "url_benchmark").is_dir()
for fp in root.rglob("*.py"):
if "expected" in str(fp) or "test_" in fp.name:
continue
text = fp.read_text()
text2 = pattern.sub(r"\g<1> -> None:", text)
if text2 != text:
failed.append(str(fp))
fp.write_text(text2)
if failed:
string = "\n -".join(
["Missing -> None at the end of __init__ definition"] + failed
)
string += "\nUpdate, or run this test locally for automatic addition"
raise AssertionError(string)
def test_property_syntax() -> None:
# automatic linters tend to change @property to @ property for no reason
root = Path(__file__).parents[1]
assert (root / "url_benchmark").is_dir()
errors = []
for fp in root.rglob("*.py"):
if fp == Path(__file__):
continue
if "@ property" in fp.read_text():
errors.append(str(fp))
if errors:
msg = ["Additional space in @property, linter got crazy:"] + errors
raise AssertionError("\n - ".join(msg))
def test_pretrain_checkpoint(tmp_path: Path) -> None:
conf_path = Path(__file__).parents[1] / "url_benchmark" / "pretrain.py"
with runner.working_directory(tmp_path):
ep = runner.HydraEntryPoint(conf_path)
params = dict(
agent="fb_ddpg",
device="cpu",
num_train_frames=1001,
num_eval_episodes=1,
replay_buffer_episodes=2,
use_hiplog=True,
checkpoint_every=1000,
final_tests=0,
)
wsp = ep.workspace(**params)
assert not wsp.global_step
wsp.train()
assert wsp.global_step == 1001
wsp2 = ep.workspace(**params)
assert wsp2.global_step == 1001
# keep last because it may make a mess with the paths (for copied benchmark)
def test_pretrain_from_runner(tmp_path: Path) -> None:
conf_path = Path(__file__).parents[1] / "url_benchmark" / "pretrain.py"
with runner.working_directory(tmp_path):
ep = runner.HydraEntryPoint(conf_path)
reward = ep(
agent="fb_ddpg",
device="cpu",
num_train_frames=1011,
num_eval_episodes=1,
num_seed_frames=1010,
replay_buffer_episodes=2,
use_hiplog=True,
final_tests=0,
)
assert isinstance(reward, float)
assert -1000 < reward < 0
from url_benchmark import hiplogs # pylint: disable=import-outside-toplevel
hippath = ep.folder / "hip.log"
assert hippath.exists()
hiploggers = list(hiplogs.HipLog.find_in_folder(tmp_path, recursive=True))
assert len(hiploggers) == 1
hiplog = hiploggers[0]
out = hiplog.read()
assert "eval/episode_reward" in out[0]
confpath = ep.folder / "config.yaml"
assert confpath.exists()
def test_header() -> None:
lines = Path(__file__).read_text("utf8").splitlines()
header = "\n".join(itertools.takewhile(lambda l: l.startswith("#"), lines))
assert len(header.splitlines()) == 4, f"Identified header:\n{header}"
root = Path(controllable_agent.__file__).parents[1]
base = [x for x in root.iterdir() if not x.name.startswith(".")] # avoid .git
tocheck = []
for fp in base:
if fp.is_file() and fp.suffix == ".py":
tocheck.append(fp)
elif fp.is_dir():
output = subprocess.check_output(
["find", str(fp), "-name", "*.py"], shell=False
)
tocheck.extend([Path(p) for p in output.decode().splitlines()])
missing = []
AUTOADD = True
for fp in tocheck:
text = Path(fp).read_text("utf8")
if not text.startswith(header):
if AUTOADD and not any(x in text.lower() for x in ("license", "copyright")):
print(f"Automatically adding header to {fp}")
Path(fp).write_text(header + "\n\n" + text, "utf8")
missing.append(str(fp))
if missing:
missing_str = "\n - ".join(missing)
raise AssertionError(
f"Following files are/were missing standard header (see other files):\n - {missing_str}"
)
| controllable_agent-main | controllable_agent/test_url_benchmark.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
import logging
import traceback
import contextlib
import nevergrad.common.typing as tp
logger = logging.getLogger(__name__)
@contextlib.contextmanager
def batch_if_available(executor: tp.ExecutorLike) -> tp.Iterator[None]:
"""Only submitit executors have a batch context, so we need different
cases for other executor (eg: concurrent.futures)
Batch context in submitit allows for using arrays in slurm, which is
better for the cluster health.
"""
if hasattr(executor, "batch"):
with executor.batch(): # type: ignore
yield
else:
yield
X = tp.TypeVar("X")
Fn = tp.Callable[..., X]
class DelayedJob(tp.Generic[X]):
def __init__(
self, executor: "DelayedExecutor[X]", fn: Fn[X], *args: tp.Any, **kwargs: tp.Any
) -> None:
self.executor = executor
self.time = time.time()
self.job: tp.Optional[tp.JobLike[X]] = None
self._submission: tp.Optional[
tp.Tuple[Fn[X], tp.Tuple[tp.Any, ...], tp.Dict[str, tp.Any]]
] = (fn, args, kwargs)
def _is_submited(self, force: bool = False) -> bool:
if self.job is None:
self.executor._check_submit(force=force)
return self.job is not None
def done(self) -> bool:
if not self._is_submited():
return False
return self.job is not None and self.job.done()
def result(self) -> X:
self._is_submited(force=True)
if self.job is None:
raise RuntimeError("Job should have been submitted")
error = ""
try:
result = self.job.result()
except Exception: # pylint: disable=broad-except
error = traceback.format_exc()
result = self.executor._default
self.executor._add_result(error=error)
return result
class DelayedExecutor(tp.Generic[X]):
def __init__(
self,
executor: tp.ExecutorLike,
default: X,
batch_size: int = 8,
max_delay: float = 45 * 60,
max_failure_rate: float = 0.25,
) -> None:
self.executor = executor
self.batch_size = batch_size
self.max_delay = max_delay
self.max_failure_rate = max_failure_rate
self._default = default
self._unsubmitted: tp.List[DelayedJob[X]] = []
self._total_results = 0
self._total_failures = 0
assert 0 < max_failure_rate < 1
def submit(self, fn: Fn[X], *args: tp.Any, **kwargs: tp.Any) -> DelayedJob[X]:
job = DelayedJob(self, fn, *args, **kwargs)
self._unsubmitted.append(job)
return job
def _check_submit(self, force: bool = False) -> None:
    if self._unsubmitted:
        delay = time.time() - self._unsubmitted[0].time  # only computed when jobs are pending
if (
force
or len(self._unsubmitted) >= self.batch_size
or delay > self.max_delay
):
logger.info(
f"Submitting {len(self._unsubmitted)} job(s) after {int(delay / 60)}min wait"
)
with batch_if_available(self.executor):
for job in self._unsubmitted:
assert job._submission is not None
fn, args, kwargs = job._submission
job._submission = None
job.job = self.executor.submit(fn, *args, **kwargs)
self._unsubmitted = []
def _add_result(self, error: str) -> None:
self._total_results += 1
self._total_failures += bool(error)
if error:
logger.warning(
f"Caught {self._total_failures} out of {self._total_results} runs:\n{error}"
)
if self._total_failures / self._total_results > self.max_failure_rate:
raise RuntimeError(
f"Stopping since failure rate is above the threshold: {self.max_failure_rate}."
)
logger.warning("Ignoring since this is below max failure rate")
def wait_for_jobs(jobs: tp.Iterable[tp.Any], sleep: float = 2.0) -> None:
"""Very crude function for regularly printing the percent
of finished jobs in a list
"""
jobs = list(jobs)
done = 0
print(f"Submitted {len(jobs)} jobs")
while done < 100:
new_done = int(100 * sum(j.done() for j in jobs) / len(jobs))
if new_done > done:
print(f"{new_done}% done")
jdone = [j for j in jobs if j.done()]
if not done:
print(jdone[0].result())
# pylint: disable=expression-not-assigned
# [j.result() for j in jdone] # raise asap
done = new_done
else:
time.sleep(sleep)
print("Waiting is over")
| controllable_agent-main | controllable_agent/executor.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import dataclasses
import typing as tp
import numpy as np
from url_benchmark.agent.ddpg import MetaDict
from url_benchmark.dmc import EnvWrapper, ExtendedTimeStep, TimeStep
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from dm_env import specs, StepType
from dm_env.auto_reset_environment import AutoResetEnvironment
@dataclasses.dataclass
class D4RLConfig:
minimum_episode_length: tp.Optional[int] = None
ignore_terminals: bool = False
class EmptyPhysics:
    def __init__(self) -> None:
        self.empty_physics = np.zeros((1, 1))
def get_state(self) -> np.ndarray:
return self.empty_physics
@dataclasses.dataclass
class ExtendedTimeStepD4RL(ExtendedTimeStep):
reward: tp.Any
discount: tp.Any
class D4RLWrapper(AutoResetEnvironment):
def __init__(self, env) -> None:
self.physics = EmptyPhysics()
super().__init__()
self._env = env
def observation_spec(self) -> tp.Any:
return specs.BoundedArray(shape=self._env.observation_space.shape,
dtype=self._env.observation_space.dtype,
minimum=self._env.observation_space.low,
maximum=self._env.observation_space.high,
name='observation')
def action_spec(self) -> specs.Array:
return specs.BoundedArray(shape=self._env.action_space.shape,
dtype=self._env.action_space.dtype,
minimum=self._env.action_space.low,
maximum=self._env.action_space.high,
name='action')
def get_normalized_score(self, reward: float) -> float:
return self._env.get_normalized_score(reward)
def _step(self, action) -> TimeStep:
obs, reward, done, _ = self._env.step(action)
step_type = StepType.LAST if done else StepType.MID
return ExtendedTimeStepD4RL(step_type=step_type, observation=obs, reward=reward,
                            discount=1.0, action=action)
def _reset(self) -> TimeStep:
obs = self._env.reset()
return ExtendedTimeStepD4RL(step_type=StepType.FIRST, observation=obs, reward=None,
                            discount=None, action=self._env.action_space.sample())
@property
def base_env(self) -> tp.Any:
env = self._env
if isinstance(env, D4RLWrapper):
return env.base_env
return env
def get_dataset(self) -> tp.Any:
return self._env.get_dataset()
class D4RLReplayBufferBuilder:
def filter_dataset_by_episode_length(self, dataset: tp.Any, minimum_episode_length: tp.Optional[int]):
if minimum_episode_length is None or minimum_episode_length <= 1:
return dataset
end_indices = (dataset["terminals"] + dataset["timeouts"]).nonzero()[0]
episode_lengths = np.diff(np.concatenate(([-1], end_indices)))
episode_lengths_expanded = episode_lengths.repeat(episode_lengths)
diff_len = dataset['observations'].shape[0] - len(episode_lengths_expanded)
assert diff_len >= 0  # the last step of the dataset is not guaranteed to be the last step of an episode
episode_lengths_expanded = np.concatenate((episode_lengths_expanded, np.zeros(diff_len, dtype=int)))  # ignore trailing steps that do not belong to a complete episode
filter_indices = episode_lengths_expanded >= minimum_episode_length
dataset_size = len(dataset["observations"])
for key, values in dataset.items():
if isinstance(values, np.ndarray) and len(values) == dataset_size:
dataset[key] = values[filter_indices]
return dataset
def prepare_replay_buffer_d4rl(self, env: EnvWrapper, meta: MetaDict, cfg: tp.Any) -> ReplayBuffer:
dataset = env.base_env.get_dataset()
dataset = self.filter_dataset_by_episode_length(dataset, cfg.d4rl_config.minimum_episode_length)
# Note: we could use d4rl.qlearning_dataset instead, but its termination conditions are not computed
# as we expect (it only considers "terminals"). For the missing last next_obs we reuse the first obs
# (qlearning_dataset simply drops it, which would leave the last episode unterminated; this fakes it).
observations = dataset['observations']
actions = dataset['actions']
rewards = dataset['rewards']
is_ignore_terminals = cfg.d4rl_config and (cfg.d4rl_config.ignore_terminals)
terminals = np.zeros_like(dataset['terminals']) if is_ignore_terminals else dataset['terminals']
timeouts = dataset['timeouts']
end_indices = (terminals + timeouts).nonzero()[0]
episode_lengths = np.diff(np.concatenate(([-1], end_indices)))
max_episode_length = episode_lengths.max()
if not cfg.d4rl_config or cfg.d4rl_config.minimum_episode_length is None:
assert (episode_lengths==1).sum()==0
else:
assert (episode_lengths<cfg.d4rl_config.minimum_episode_length).sum()==0
replay_storage = ReplayBuffer(max_episodes=len(end_indices), discount=cfg.discount, future=cfg.future, max_episode_length=max_episode_length)
first = True
dataset_len = dataset['rewards'].shape[0]
for idx in range(dataset_len):
if first:
time_step = ExtendedTimeStep(
step_type = StepType.FIRST, observation=observations[idx], reward=0, discount=1, action=actions[0])
first = False
else:
time_step = ExtendedTimeStep(
step_type = StepType.MID, observation=observations[idx], reward=rewards[idx-1], discount=1, action=actions[idx-1])
if terminals[idx] or timeouts[idx]:
first = True
final_discount = 1
if terminals[idx]:
final_discount = 0
time_step.step_type = StepType.LAST
time_step.discount = final_discount
replay_storage.add(time_step, meta)
assert (episode_lengths-1 == replay_storage._episodes_length).all()
if episode_lengths.min()!=episode_lengths.max():
assert not replay_storage._is_fixed_episode_length
return replay_storage
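# Worked example (illustrative) of the episode-boundary arithmetic used above: with
# terminals + timeouts = [0, 0, 1, 0, 1], the five transitions form two episodes of 3 and 2 steps.
def _episode_length_example() -> None:  # hypothetical helper, for illustration only
    ends = np.array([0, 0, 1, 0, 1])                                # terminals + timeouts
    end_indices = ends.nonzero()[0]                                 # -> [2, 4]
    episode_lengths = np.diff(np.concatenate(([-1], end_indices)))  # -> [3, 2]
    print(episode_lengths.repeat(episode_lengths))                  # -> [3 3 3 2 2], one length per step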
| controllable_agent-main | url_benchmark/d4rl_benchmark.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import json
import pdb # pylint: disable=unused-import
import logging
import dataclasses
import typing as tp
import warnings
from pathlib import Path
warnings.filterwarnings('ignore', category=DeprecationWarning)
os.environ['MKL_SERVICE_FORCE_INTEL'] = '1'
# if the default egl does not work, you may want to try:
# export MUJOCO_GL=glfw
os.environ['MUJOCO_GL'] = os.environ.get('MUJOCO_GL', 'egl')
import hydra
from hydra.core.config_store import ConfigStore
import numpy as np
import torch
import wandb
import omegaconf as omgcf
# from dm_env import specs
from url_benchmark import dmc
from dm_env import specs
from url_benchmark import utils
from url_benchmark import goals as _goals
from url_benchmark.logger import Logger
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from url_benchmark.video import TrainVideoRecorder, VideoRecorder
from url_benchmark import agent as agents
from url_benchmark.d4rl_benchmark import D4RLReplayBufferBuilder, D4RLWrapper
from url_benchmark.gridworld.env import build_gridworld_task
logger = logging.getLogger(__name__)
torch.backends.cudnn.benchmark = True
# os.environ['WANDB_MODE']='offline'
# from url_benchmark.dmc_benchmark import PRIMAL_TASKS
# # # Config # # #
@dataclasses.dataclass
class Config:
agent: tp.Any
# misc
seed: int = 1
device: str = "cuda"
save_video: bool = False
use_tb: bool = False
use_wandb: bool = False
use_hiplog: bool = False
# experiment
experiment: str = "online"
# task settings
task: str = "walker_stand"
obs_type: str = "states" # [states, pixels]
frame_stack: int = 3 # only works if obs_type=pixels
action_repeat: int = 1 # set to 2 for pixels
discount: float = 0.99
future: float = 0.99 # discount of future sampling, future=1 means no future sampling
goal_space: tp.Optional[str] = None
append_goal_to_observation: bool = False
# eval
num_eval_episodes: int = 10
custom_reward: tp.Optional[str] = None # activates custom eval if not None
final_tests: int = 10
# checkpoint
snapshot_at: tp.Tuple[int, ...] = (100000, 200000, 500000, 800000, 1000000, 1500000,
2000000, 3000000, 4000000, 5000000, 9000000, 10000000)
checkpoint_every: int = 100000
load_model: tp.Optional[str] = None
# training
num_seed_frames: int = 4000
replay_buffer_episodes: int = 5000
update_encoder: bool = True
batch_size: int = omgcf.II("agent.batch_size")
@dataclasses.dataclass
class PretrainConfig(Config):
# mode
reward_free: bool = True
# train settings
num_train_frames: int = 2000010
# snapshot
eval_every_frames: int = 10000
load_replay_buffer: tp.Optional[str] = None
# replay buffer
# replay_buffer_num_workers: int = 4
# nstep: int = omgcf.II("agent.nstep")
# misc
save_train_video: bool = False
# loaded as base_pretrain in pretrain.yaml
# we keep the yaml since it's easier to configure plugins from it
ConfigStore.instance().store(name="workspace_config", node=PretrainConfig)
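# Invocation sketch (illustrative): any field of the dataclasses above can be overridden from the
# command line through hydra, e.g.
#   python pretrain.py agent=fb_ddpg task=walker_stand device=cpu num_train_frames=100010
# (agent configs and defaults are resolved through base_config.yaml in this folder)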
# # # Implem # # #
def make_agent(
obs_type: str, obs_spec, action_spec, num_expl_steps: int, cfg: omgcf.DictConfig
) -> tp.Union[agents.FBDDPGAgent, agents.DDPGAgent]:
cfg.obs_type = obs_type
cfg.obs_shape = obs_spec.shape
cfg.action_shape = (action_spec.num_values, ) if isinstance(action_spec, specs.DiscreteArray) \
else action_spec.shape
cfg.num_expl_steps = num_expl_steps
return hydra.utils.instantiate(cfg)
C = tp.TypeVar("C", bound=Config)
def _update_legacy_class(obj: tp.Any, classes: tp.Sequence[tp.Type[tp.Any]]) -> tp.Any:
"""Updates a legacy class (eg: agent.FBDDPGAgent) to the new
class (url_benchmark.agent.FBDDPGAgent)
Parameters
----------
obj: Any
Object to update
classes: Types
Possible classes to update the object to. If current name is one of the classes
name, the object class will be remapped to it.
"""
classes = tuple(classes)
if not isinstance(obj, classes):
clss = {x.__name__: x for x in classes}
cls = clss.get(obj.__class__.__name__, None)
if cls is not None:
logger.warning(f"Promoting legacy object {obj.__class__} to {cls}")
obj.__class__ = cls
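# Minimal sketch of the promotion above (hypothetical classes, for illustration only):
def _legacy_promotion_example() -> None:
    class LegacyThing:  # stands in for an object pickled under an older code layout
        pass
    obj = LegacyThing()

    class LegacyThing:  # type: ignore[no-redef]  # the current class carrying the same name
        pass
    _update_legacy_class(obj, [LegacyThing])
    assert type(obj) is LegacyThing  # the instance has been remapped to the current class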
def _init_eval_meta(workspace: "BaseWorkspace", custom_reward: tp.Optional[_goals.BaseReward] = None) -> agents.MetaDict:
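    # For reward-aware agents (FB, SF, APS, ...), the task embedding is inferred from
    # (next observation, reward) pairs sampled from the replay buffer; goal-based setups instead
    # derive it from a goal given by the custom reward or the registered goal functions.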
if workspace.domain == "grid":
assert isinstance(workspace.agent, agents.DiscreteFBAgent)
return workspace.agent.get_goal_meta(workspace.eval_env.get_goal_obs())
special = (agents.FBDDPGAgent, agents.SFAgent, agents.SFSVDAgent, agents.APSAgent, agents.NEWAPSAgent, agents.GoalSMAgent, agents.UVFAgent)
ag = workspace.agent
_update_legacy_class(ag, special)
# we need to check against name for legacy reason when reloading old checkpoints
if not isinstance(ag, special) or not len(workspace.replay_loader):
return workspace.agent.init_meta()
if custom_reward is not None:
try: # if the custom reward implements a goal, return it
goal = custom_reward.get_goal(workspace.cfg.goal_space)
return workspace.agent.get_goal_meta(goal)
except Exception: # pylint: disable=broad-except
pass
if not isinstance(workspace.agent, agents.SFSVDAgent):
# we cannot fully type because of the FBDDPG string check :s
num_steps = workspace.agent.cfg.num_inference_steps # type: ignore
obs_list, reward_list = [], []
batch_size = 0
while batch_size < num_steps:
batch = workspace.replay_loader.sample(workspace.cfg.batch_size, custom_reward=custom_reward)
batch = batch.to(workspace.cfg.device)
obs_list.append(batch.next_goal if workspace.cfg.goal_space is not None else batch.next_obs)
reward_list.append(batch.reward)
batch_size += batch.next_obs.size(0)
obs, reward = torch.cat(obs_list, 0), torch.cat(reward_list, 0) # type: ignore
obs_t, reward_t = obs[:num_steps], reward[:num_steps]
# phy = workspace.replay_loader._storage["physics"]
# phy = phy.reshape(-1, phy.shape[-1])
# back_input = "observation" if workspace.cfg.goal_space is None else "goal"
# obs = workspace.replay_loader._storage[back_input].reshape(phy.shape[0], -1) # should have been next obs
# inds = np.random.choice(phy.shape[0], size=workspace.agent.cfg.num_inference_steps, replace=False)
# phy, obs = (x[inds, :] for x in (phy, obs))
# rewards = [[custom_reward.from_physics(p)] for p in phy]
# obs_t, reward_t = (torch.Tensor(x).float().to(workspace.agent.cfg.device) for x in (obs, rewards))
return workspace.agent.infer_meta_from_obs_and_rewards(obs_t, reward_t)
else:
assert isinstance(workspace.agent, agents.SFSVDAgent)
obs_list, reward_list, action_list = [], [], []
batch_size = 0
while batch_size < workspace.agent.cfg.num_inference_steps:
batch = workspace.replay_loader.sample(workspace.cfg.batch_size, custom_reward=custom_reward)
batch = batch.to(workspace.cfg.device)
obs_list.append(batch.goal if workspace.cfg.goal_space is not None else batch.obs)
action_list.append(batch.action)
reward_list.append(batch.reward)
batch_size += batch.next_obs.size(0)
obs, reward, action = torch.cat(obs_list, 0), torch.cat(reward_list, 0), torch.cat(action_list, 0) # type: ignore
obs_t, reward_t, action_t = obs[:workspace.agent.cfg.num_inference_steps], reward[:workspace.agent.cfg.num_inference_steps],\
action[:workspace.agent.cfg.num_inference_steps]
return workspace.agent.infer_meta_from_obs_action_and_rewards(obs_t, action_t, reward_t)
if workspace.cfg.goal_space is not None:
funcs = _goals.goals.funcs.get(workspace.cfg.goal_space, {})
if workspace.cfg.task in funcs:
g = funcs[workspace.cfg.task]()
return workspace.agent.get_goal_meta(g)
return workspace.agent.infer_meta(workspace.replay_loader)
class BaseWorkspace(tp.Generic[C]):
def __init__(self, cfg: C) -> None:
self.work_dir = Path.cwd()
print(f'Workspace: {self.work_dir}')
print(f'Running code in : {Path(__file__).parent.resolve().absolute()}')
logger.info(f'Workspace: {self.work_dir}')
logger.info(f'Running code in : {Path(__file__).parent.resolve().absolute()}')
self.cfg = cfg
utils.set_seed_everywhere(cfg.seed)
if not torch.cuda.is_available():
if cfg.device != "cpu":
logger.warning(f"Falling back to cpu as {cfg.device} is not available")
cfg.device = "cpu"
cfg.agent.device = "cpu"
self.device = torch.device(cfg.device)
# goal_spec: tp.Optional[specs.Array] = None
# if cfg.goal_space is not None:
# g = _goals.goals.funcs[cfg.goal_space][cfg.task]()
# goal_spec = specs.Array((len(g),), np.float32, 'goal')
# create envs
# task = PRIMAL_TASKS[self.domain]
task = cfg.task
if task.startswith('point_mass_maze'):
self.domain = 'point_mass_maze'
else:
self.domain = task.split('_', maxsplit=1)[0]
self.train_env = self._make_env()
self.eval_env = self._make_env()
# create agent
self.agent = make_agent(cfg.obs_type,
self.train_env.observation_spec(),
self.train_env.action_spec(),
cfg.num_seed_frames // cfg.action_repeat,
cfg.agent)
# create logger
self.logger = Logger(self.work_dir,
use_tb=cfg.use_tb,
use_wandb=cfg.use_wandb,
use_hiplog=cfg.use_hiplog)
if cfg.use_wandb:
exp_name = '_'.join([
cfg.experiment, cfg.agent.name, self.domain
])
wandb.init(project="controllable_agent", group=cfg.agent.name, name=exp_name, # mode="disabled",
config=omgcf.OmegaConf.to_container(cfg, resolve=True, throw_on_missing=True)) # type: ignore
if cfg.use_hiplog:
# record config now that it is filled
parts = ("snapshot", "_type", "_shape", "num_", "save_", "frame", "device", "use_tb", "use_wandb")
skipped = [x for x in cfg if any(y in x for y in parts)] # type: ignore
self.logger.hiplog.flattened({x: y for x, y in cfg.items() if x not in skipped}) # type: ignore
self.logger.hiplog(workdir=self.work_dir.stem)
for rm in ("agent/use_tb", "agent/use_wandb", "agent/device"):
del self.logger.hiplog._content[rm]
self.logger.hiplog(observation_size=np.prod(self.train_env.observation_spec().shape))
# # create replay buffer
# self._data_specs: tp.List[tp.Any] = [self.train_env.observation_spec(),
# self.train_env.action_spec(), ]
if cfg.goal_space is not None:
if cfg.goal_space not in _goals.goal_spaces.funcs[self.domain]:
raise ValueError(f"Unregistered goal space {cfg.goal_space} for domain {self.domain}")
# g = _goals.goals.funcs[cfg.goal_space][cfg.task]()
# self._data_specs.append(specs.Array((len(g),), np.float32, 'goal'))
# self._data_specs.extend([specs.Array((1,), np.float32, 'reward'),
# specs.Array((1,), np.float32, 'discount')])
self.replay_loader = ReplayBuffer(max_episodes=cfg.replay_buffer_episodes, discount=cfg.discount, future=cfg.future)
# # create data storage
# self.replay_storage = ReplayBufferStorage(data_specs, meta_specs,
# self.work_dir / 'buffer')
#
# # create replay buffer
# self.replay_loader = make_replay_loader(self.replay_storage,
# cfg.replay_buffer_size,
# cfg.batch_size,
# cfg.replay_buffer_num_workers,
# False, True, cfg.nstep, cfg.discount)
# create video recorders
# cam_id = 2 if 'quadruped' not in self.domain else 1
# cam_id = 1 # centered on subject
cam_id = 0 if 'quadruped' not in self.domain else 2
self.video_recorder = VideoRecorder(self.work_dir if cfg.save_video else None,
camera_id=cam_id, use_wandb=self.cfg.use_wandb)
self.timer = utils.Timer()
self.global_step = 0
self.global_episode = 0
self.eval_rewards_history: tp.List[float] = []
self._checkpoint_filepath = self.work_dir / "models" / "latest.pt"
if self._checkpoint_filepath.exists():
self.load_checkpoint(self._checkpoint_filepath)
elif cfg.load_model is not None:
self.load_checkpoint(cfg.load_model, exclude=["replay_loader"])
self.reward_cls: tp.Optional[_goals.BaseReward] = None
if self.cfg.custom_reward == "maze_multi_goal":
self.reward_cls = self._make_custom_reward(seed=self.cfg.seed)
def _make_env(self) -> dmc.EnvWrapper:
cfg = self.cfg
if self.domain == "grid":
return dmc.EnvWrapper(build_gridworld_task(self.cfg.task.split('_')[1]))
if self.domain == "d4rl":
import d4rl # type: ignore # pylint: disable=unused-import
import gym
return dmc.EnvWrapper(D4RLWrapper(gym.make(self.cfg.task.split('_')[1])))
return dmc.make(cfg.task, cfg.obs_type, cfg.frame_stack, cfg.action_repeat, cfg.seed,
goal_space=cfg.goal_space, append_goal_to_observation=cfg.append_goal_to_observation)
@property
def global_frame(self) -> int:
return self.global_step * self.cfg.action_repeat
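    # Note: with action_repeat=2, one agent step corresponds to two environment frames, so
    # frame-based settings (num_train_frames, num_seed_frames, eval_every_frames, ...) are
    # converted to steps by dividing by action_repeat.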
def _make_custom_reward(self, seed: int) -> tp.Optional[_goals.BaseReward]:
"""Creates a custom reward function if provided in configuration
else returns None
"""
if self.cfg.custom_reward is None:
return None
return _goals.get_reward_function(self.cfg.custom_reward, seed)
def eval_maze_goals(self) -> None:
if isinstance(self.agent, (agents.SFAgent, agents.SFSVDAgent, agents.NEWAPSAgent)) and len(self.replay_loader) > 0:
self.agent.precompute_cov(self.replay_loader)
reward_cls = _goals.MazeMultiGoal()
rewards = list()
for g in reward_cls.goals:
goal_rewards = list()
goal_distances = list()
meta = self.agent.get_goal_meta(g)
for episode in range(self.cfg.num_eval_episodes):
self.video_recorder.init(self.eval_env, enabled=(episode == 0))
time_step = self.eval_env.reset()
episode_reward = 0.0
while not time_step.last():
with torch.no_grad(), utils.eval_mode(self.agent):
action = self.agent.act(time_step.observation,
meta,
0,
eval_mode=True)
time_step = self.eval_env.step(action)
self.video_recorder.record(self.eval_env)
assert isinstance(time_step, dmc.ExtendedGoalTimeStep)
step_reward, distance = reward_cls.from_goal(time_step.goal, g)
episode_reward += step_reward
goal_rewards.append(episode_reward)
goal_distances.append(distance)
self.video_recorder.save(f'{g}.mp4')
print(f"goal: {g}, avg_reward: {round(float(np.mean(goal_rewards)), 2)}, avg_distance: {round(float(np.mean(goal_distances)), 5)}")
rewards.append(float(np.mean(goal_rewards)))
self.eval_rewards_history.append(float(np.mean(rewards)))
with self.logger.log_and_dump_ctx(self.global_frame, ty='eval') as log:
log('episode_reward', self.eval_rewards_history[-1])
log('step', self.global_step)
log('episode', self.global_episode)
def eval(self) -> None:
step, episode = 0, 0
eval_until_episode = utils.Until(self.cfg.num_eval_episodes)
physics_agg = dmc.PhysicsAggregator()
rewards: tp.List[float] = []
normalized_scores: tp.List[float] = []
meta = _init_eval_meta(self) # Don't work
z_correl = 0.0
is_d4rl_task = self.cfg.task.split('_')[0] == 'd4rl'
actor_success: tp.List[float] = []
while eval_until_episode(episode):
time_step = self.eval_env.reset()
# create custom reward if need be (if field exists)
seed = 12 * self.cfg.num_eval_episodes + len(rewards)
custom_reward = self._make_custom_reward(seed=seed)
if custom_reward is not None:
meta = _init_eval_meta(self, custom_reward)
if self.domain == "grid":
meta = _init_eval_meta(self)
total_reward = 0.0
self.video_recorder.init(self.eval_env, enabled=(episode == 0))
while not time_step.last():
with torch.no_grad(), utils.eval_mode(self.agent):
action = self.agent.act(time_step.observation,
meta,
self.global_step,
eval_mode=True)
time_step = self.eval_env.step(action)
physics_agg.add(self.eval_env)
self.video_recorder.record(self.eval_env)
# for legacy reasons, we need to check the name :s
if isinstance(self.agent, agents.FBDDPGAgent):
if self.agent.cfg.additional_metric:
z_correl += self.agent.compute_z_correl(time_step, meta)
actor_success.extend(self.agent.actor_success)
if custom_reward is not None:
time_step.reward = custom_reward.from_env(self.eval_env)
total_reward += time_step.reward
step += 1
if is_d4rl_task:
normalized_scores.append(self.eval_env.get_normalized_score(total_reward))
rewards.append(total_reward)
episode += 1
self.video_recorder.save(f'{self.global_frame}.mp4')
self.eval_rewards_history.append(float(np.mean(rewards)))
with self.logger.log_and_dump_ctx(self.global_frame, ty='eval') as log:
if is_d4rl_task:
log('episode_normalized_score', float(100 * np.mean(normalized_scores)))
log('episode_reward', self.eval_rewards_history[-1])
if len(rewards) > 1:
log('episode_reward#std', float(np.std(rewards)))
log('episode_length', step * self.cfg.action_repeat / episode)
log('episode', self.global_episode)
log('z_correl', z_correl / episode)
log('step', self.global_step)
if actor_success:
log('actor_sucess', float(np.mean(actor_success)))
if isinstance(self.agent, agents.FBDDPGAgent):
log('z_norm', np.linalg.norm(meta['z']).item())
for key, val in physics_agg.dump():
log(key, val)
_CHECKPOINTED_KEYS = ('agent', 'global_step', 'global_episode', "replay_loader")
def save_checkpoint(self, fp: tp.Union[Path, str], exclude: tp.Sequence[str] = ()) -> None:
logger.info(f"Saving checkpoint to {fp}")
exclude = list(exclude)
assert all(x in self._CHECKPOINTED_KEYS for x in exclude)
fp = Path(fp)
fp.parent.mkdir(exist_ok=True, parents=True)
assert isinstance(self.replay_loader, ReplayBuffer), "Is this buffer designed for checkpointing?"
# this is just a dumb security check to not forget about it
payload = {k: self.__dict__[k] for k in self._CHECKPOINTED_KEYS if k not in exclude}
with fp.open('wb') as f:
torch.save(payload, f, pickle_protocol=4)
def load_checkpoint(self, fp: tp.Union[Path, str], only: tp.Optional[tp.Sequence[str]] = None, exclude: tp.Sequence[str] = ()) -> None:
"""Reloads a checkpoint or part of it
Parameters
----------
only: None or sequence of str
reloads only a specific subset (defaults to all)
exclude: sequence of str
does not reload the provided keys
"""
print(f"loading checkpoint from {fp}")
fp = Path(fp)
with fp.open('rb') as f:
payload = torch.load(f)
_update_legacy_class(payload, (ReplayBuffer,))
if isinstance(payload, ReplayBuffer): # compatibility with pure buffers pickles
payload = {"replay_loader": payload}
if only is not None:
only = list(only)
assert all(x in self._CHECKPOINTED_KEYS for x in only)
payload = {x: payload[x] for x in only}
exclude = list(exclude)
assert all(x in self._CHECKPOINTED_KEYS for x in exclude)
for x in exclude:
payload.pop(x, None)
for name, val in payload.items():
logger.info("Reloading %s from %s", name, fp)
if name == "agent":
self.agent.init_from(val)
elif name == "replay_loader":
_update_legacy_class(val, (ReplayBuffer,))
assert isinstance(val, ReplayBuffer)
# pylint: disable=protected-access
# drop unnecessary meta which could make a mess
val._current_episode.clear() # make sure we can start over
val._future = self.cfg.future
val._discount = self.cfg.discount
val._max_episodes = len(val._storage["discount"])
self.replay_loader = val
else:
assert hasattr(self, name)
setattr(self, name, val)
if name == "global_episode":
logger.warning(f"Reloaded agent at global episode {self.global_episode}")
def finalize(self) -> None:
print("Running final test", flush=True)
repeat = self.cfg.final_tests
if not repeat:
return
if self.cfg.custom_reward == "maze_multi_goal":
eval_hist = self.eval_rewards_history
rewards = {}
self.eval_rewards_history = []
self.cfg.num_eval_episodes = repeat
self.eval_maze_goals()
rewards["rewards"] = self.eval_rewards_history
self.eval_rewards_history = eval_hist # restore
else:
domain_tasks = {
"cheetah": ['walk', 'walk_backward', 'run', 'run_backward'],
"quadruped": ['stand', 'walk', 'run', 'jump'],
"walker": ['stand', 'walk', 'run', 'flip'],
}
if self.domain not in domain_tasks:
return
eval_hist = self.eval_rewards_history
rewards = {}
for name in domain_tasks[self.domain]:
task = "_".join([self.domain, name])
self.cfg.task = task
self.cfg.custom_reward = task # for the replay buffer
self.cfg.seed += 1 # for the sake of avoiding similar seeds
self.eval_env = self._make_env()
self.eval_rewards_history = []
self.cfg.num_eval_episodes = 1
for _ in range(repeat):
self.eval()
rewards[task] = self.eval_rewards_history
self.eval_rewards_history = eval_hist # restore
with (self.work_dir / "test_rewards.json").open("w") as f:
json.dump(rewards, f)
class Workspace(BaseWorkspace[PretrainConfig]):
def __init__(self, cfg: PretrainConfig) -> None:
super().__init__(cfg)
self.train_video_recorder = TrainVideoRecorder(self.work_dir if cfg.save_train_video else None,
camera_id=self.video_recorder.camera_id, use_wandb=self.cfg.use_wandb)
if not self._checkpoint_filepath.exists():  # don't reload the replay buffer if there is a checkpoint
if cfg.load_replay_buffer is not None:
if self.cfg.task.split('_')[0] == "d4rl":
d4rl_replay_buffer_builder = D4RLReplayBufferBuilder()
self.replay_storage = d4rl_replay_buffer_builder.prepare_replay_buffer_d4rl(self.train_env, self.agent.init_meta(), self.cfg)
self.replay_loader = self.replay_storage
else:
self.load_checkpoint(cfg.load_replay_buffer, only=["replay_loader"])
def _init_meta(self):
if isinstance(self.agent, agents.GoalTD3Agent) and isinstance(self.reward_cls, _goals.MazeMultiGoal):
meta = self.agent.init_meta(self.reward_cls)
elif isinstance(self.agent, agents.GoalSMAgent) and len(self.replay_loader) > 0:
meta = self.agent.init_meta(self.replay_loader)
else:
meta = self.agent.init_meta()
return meta
def train(self) -> None:
# predicates
train_until_step = utils.Until(self.cfg.num_train_frames,
self.cfg.action_repeat)
seed_until_step = utils.Until(self.cfg.num_seed_frames,
self.cfg.action_repeat)
eval_every_step = utils.Every(self.cfg.eval_every_frames,
self.cfg.action_repeat)
# if self.cfg.custom_reward is not None:
# raise NotImplementedError("Custom reward not implemented in pretrain.py train loop (see anytrain.py)")
episode_step, episode_reward, z_correl = 0, 0.0, 0.0
time_step = self.train_env.reset()
meta = self._init_meta()
self.replay_loader.add(time_step, meta)
self.train_video_recorder.init(time_step.observation)
metrics = None
physics_agg = dmc.PhysicsAggregator()
while train_until_step(self.global_step):
if time_step.last():
self.global_episode += 1
self.train_video_recorder.save(f'{self.global_frame}.mp4')
# wait until all the metrics schema is populated
if metrics is not None:
# log stats
elapsed_time, total_time = self.timer.reset()
episode_frame = episode_step * self.cfg.action_repeat
with self.logger.log_and_dump_ctx(self.global_frame,
ty='train') as log:
log('fps', episode_frame / elapsed_time)
log('total_time', total_time)
log('episode_reward', episode_reward)
log('episode_length', episode_frame)
log('episode', self.global_episode)
log('buffer_size', len(self.replay_loader))
log('step', self.global_step)
log('z_correl', z_correl)
for key, val in physics_agg.dump():
log(key, val)
if self.cfg.use_hiplog and self.logger.hiplog.content:
self.logger.hiplog.write()
# reset env
time_step = self.train_env.reset()
meta = self._init_meta()
self.replay_loader.add(time_step, meta)
self.train_video_recorder.init(time_step.observation)
# try to save snapshot
if self.global_frame in self.cfg.snapshot_at:
self.save_checkpoint(self._checkpoint_filepath.with_name(f'snapshot_{self.global_frame}.pt'))
episode_step = 0
episode_reward = 0.0
z_correl = 0.0
# try to evaluate
if eval_every_step(self.global_step):
self.logger.log('eval_total_time', self.timer.total_time(),
self.global_frame)
if self.cfg.custom_reward == "maze_multi_goal":
self.eval_maze_goals()
# elif self.domain == "grid":
# self.eval_grid_goals()
else:
self.eval()
meta = self.agent.update_meta(meta, self.global_step, time_step, finetune=False, replay_loader=self.replay_loader)
# sample action
with torch.no_grad(), utils.eval_mode(self.agent):
action = self.agent.act(time_step.observation,
meta,
self.global_step,
eval_mode=False)
# try to update the agent
if not seed_until_step(self.global_step):
# TODO: reward_free should be handled in the agent update itself !
# TODO: the commented code below raises incompatible type "Generator[EpisodeBatch[ndarray[Any, Any]], None, None]"; expected "ReplayBuffer"
# replay = (x.with_no_reward() if self.cfg.reward_free else x for x in self.replay_loader)
if isinstance(self.agent, agents.GoalTD3Agent) and isinstance(self.reward_cls, _goals.MazeMultiGoal):
metrics = self.agent.update(self.replay_loader, self.global_step, self.reward_cls)
else:
metrics = self.agent.update(self.replay_loader, self.global_step)
self.logger.log_metrics(metrics, self.global_frame, ty='train')
# take env step
time_step = self.train_env.step(action)
physics_agg.add(self.train_env)
episode_reward += time_step.reward
self.replay_loader.add(time_step, meta)
self.train_video_recorder.record(time_step.observation)
if isinstance(self.agent, agents.FBDDPGAgent):
z_correl += self.agent.compute_z_correl(time_step, meta)
episode_step += 1
self.global_step += 1
# save checkpoint to reload
if not self.global_frame % self.cfg.checkpoint_every:
self.save_checkpoint(self._checkpoint_filepath)
self.save_checkpoint(self._checkpoint_filepath) # make sure we save the final checkpoint
self.finalize()
@hydra.main(config_path='.', config_name='base_config', version_base="1.1")
def main(cfg: omgcf.DictConfig) -> None:
# we assume cfg is a PretrainConfig (but actually not really)
workspace = Workspace(cfg) # type: ignore
workspace.train()
if __name__ == '__main__':
main()
| controllable_agent-main | url_benchmark/pretrain.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pdb # pylint: disable=unused-import
import logging
import dataclasses
import typing as tp
from url_benchmark import pretrain # NEEDS TO BE FIRST NON-STANDARD IMPORT (sets up env variables)
import omegaconf as omgcf
import hydra
from hydra.core.config_store import ConfigStore
import torch
from url_benchmark import dmc
from url_benchmark import utils
from url_benchmark import agent as agents
from url_benchmark.video import TrainVideoRecorder
logger = logging.getLogger(__name__)
torch.backends.cudnn.benchmark = True
from pathlib import Path
import sys
base = Path(__file__).absolute().parents[1]
# we need to add base repo to be able to import url_benchmark
# we need to add url_benchmark to be able to reload legacy checkpoints
for fp in [base, base / "url_benchmark"]:
assert fp.exists()
if str(fp) not in sys.path:
sys.path.append(str(fp))
@dataclasses.dataclass
class OnlinetrainConfig(pretrain.Config):
# mode
reward_free: bool = True
# train settings
num_train_episodes: int = 2000
# snapshot
eval_every_episodes: int = 10
load_replay_buffer: tp.Optional[str] = None
# replay buffer
# replay_buffer_num_workers: int = 4
# nstep: int = omgcf.II("agent.nstep")
# misc
save_train_video: bool = False
update_replay_buffer: bool = True
num_rollout_episodes: int = 10
num_agent_updates: int = 10
ConfigStore.instance().store(name="workspace_config", node=OnlinetrainConfig)
class Workspace(pretrain.BaseWorkspace[OnlinetrainConfig]):
def __init__(self, cfg: OnlinetrainConfig) -> None:
super().__init__(cfg)
self.train_video_recorder = TrainVideoRecorder(self.work_dir if cfg.save_train_video else None,
camera_id=self.video_recorder.camera_id, use_wandb=self.cfg.use_wandb)
self._last_processed_step = 0 # for checkpointing
if not cfg.update_replay_buffer:
cfg.num_seed_frames = -1
if cfg.load_replay_buffer is None:
raise ValueError("If update_replay_buffer is False, load_replay_buffer must be provided")
if not self._checkpoint_filepath.exists():  # don't reload the replay buffer if there is a checkpoint
if cfg.load_replay_buffer is not None:
self.load_checkpoint(cfg.load_replay_buffer, only=["replay_loader"])
def _play_episode(self, log_metrics: bool = True) -> None:
time_step = self.train_env.reset()
meta = self.agent.init_meta()
self.replay_loader.add(time_step, meta)
self.train_video_recorder.init(time_step.observation)
episode_step = 0
episode_reward = 0.0
z_correl = 0.0
physics_agg = dmc.PhysicsAggregator()
custom_reward = self._make_custom_reward(seed=self.global_step)
while not time_step.last():
meta = self.agent.update_meta(meta, self.global_step, time_step, replay_loader=self.replay_loader)
with torch.no_grad(), utils.eval_mode(self.agent):
action = self.agent.act(time_step.observation,
meta,
self.global_step,
eval_mode=False)
time_step = self.train_env.step(action)
if custom_reward is not None:
time_step.reward = custom_reward.from_env(self.train_env)
physics_agg.add(self.train_env)
episode_reward += time_step.reward
self.replay_loader.add(time_step, meta)
self.train_video_recorder.record(time_step.observation)
if isinstance(self.agent, agents.FBDDPGAgent):
z_correl += self.agent.compute_z_correl(time_step, meta)
episode_step += 1
self.global_step += 1
# log episode stats
if log_metrics:
self.train_video_recorder.save(f'{self.global_frame}.mp4')
elapsed_time, total_time = self.timer.reset()
episode_frame = episode_step * self.cfg.action_repeat
with self.logger.log_and_dump_ctx(self.global_frame,
ty='train') as log:
log('fps', episode_frame / elapsed_time)
log('z_correl', z_correl)
log('total_time', total_time)
log('episode_reward', episode_reward)
log('episode_length', episode_frame)
log('episode', self.global_episode)
log('buffer_size', len(self.replay_loader))
log('step', self.global_step)
for key, val in physics_agg.dump():
log(key, val)
def _checkpoint_if_need_be(self) -> None:
# save checkpoint to reload
if self.global_step // self.cfg.checkpoint_every != self._last_processed_step // self.cfg.checkpoint_every:
self.save_checkpoint(self._checkpoint_filepath)
if any(self._last_processed_step < x <= self.global_step for x in self.cfg.snapshot_at):
self.save_checkpoint(self._checkpoint_filepath.with_name(f'snapshot_{self.global_frame}.pt'))
self._last_processed_step = self.global_step
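    # The online loop below alternates collection and learning: play cfg.num_rollout_episodes
    # episodes, run cfg.num_agent_updates agent updates once past num_seed_frames, evaluate
    # every cfg.eval_every_episodes episodes, and checkpoint/snapshot as configured.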
def train(self) -> None:
metrics: tp.Optional[tp.Dict[str, float]] = None
while self.global_episode < self.cfg.num_train_episodes:
logger.info(f"rollout {self.cfg.num_rollout_episodes} episodes...")
for _ in range(self.cfg.num_rollout_episodes):
self._play_episode(log_metrics=metrics is not None) # logging requires all metrics available
self.global_episode += 1
# update the agent
if self.global_frame > self.cfg.num_seed_frames:
# TODO: reward_free should be handled in the agent update itself !
# replay = (x.with_no_reward() if self.cfg.reward_free else x for x in self.replay_loader)
logger.info(f"Agent update for {self.cfg.num_agent_updates}...")
for _ in range(self.cfg.num_agent_updates):
metrics = self.agent.update(self.replay_loader, self.global_step)
if metrics is not None:
self.logger.log_metrics(metrics, self.global_step, ty='train')
# evaluate
if not self.global_episode % self.cfg.eval_every_episodes:
self.logger.log('eval_total_time', self.timer.total_time(),
self.global_frame)
self.eval()
if self.cfg.use_hiplog and self.logger.hiplog.content:
self.logger.hiplog.write() # write to hiplog only once per episode
# checkpoint
self._checkpoint_if_need_be()
self.save_checkpoint(self._checkpoint_filepath)
self.finalize()
@hydra.main(config_path='.', config_name='base_config')
def main(cfg: omgcf.DictConfig) -> None:
# we assume cfg is an OnlinetrainConfig (but actually not really)
workspace = Workspace(cfg) # type: ignore
workspace.train()
if __name__ == '__main__':
main()
| controllable_agent-main | url_benchmark/train_online.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pdb # pylint: disable=unused-import
import logging
import dataclasses
import typing as tp
from url_benchmark import pretrain # NEEDS TO BE FIRST NON-STANDARD IMPORT (sets up env variables)
import omegaconf as omgcf
import hydra
from hydra.core.config_store import ConfigStore
import torch
from url_benchmark import dmc
from url_benchmark import utils
from url_benchmark import agent as agents
from url_benchmark.d4rl_benchmark import D4RLConfig, D4RLReplayBufferBuilder
from url_benchmark.video import TrainVideoRecorder
logger = logging.getLogger(__name__)
torch.backends.cudnn.benchmark = True
from pathlib import Path
import sys
base = Path(__file__).absolute().parents[1]
# we need to add base repo to be able to import url_benchmark
# we need to add url_benchmark to be able to reload legacy checkpoints
for fp in [base, base / "url_benchmark"]:
assert fp.exists()
if str(fp) not in sys.path:
sys.path.append(str(fp))
@dataclasses.dataclass
class AnytrainConfig(pretrain.Config):
# mode
reward_free: bool = True
# train settings
num_train_episodes: int = 2000
# snapshot
eval_every_episodes: int = 10
load_replay_buffer: tp.Optional[str] = None
# replay buffer
# replay_buffer_num_workers: int = 4
# nstep: int = omgcf.II("agent.nstep")
# misc
save_train_video: bool = False
update_replay_buffer: bool = True
num_total_updates: tp.Optional[int] = None
d4rl_config: D4RLConfig = dataclasses.field(default_factory=D4RLConfig)
ConfigStore.instance().store(name="workspace_config", node=AnytrainConfig)
class Workspace(pretrain.BaseWorkspace[AnytrainConfig]):
def __init__(self, cfg: AnytrainConfig) -> None:
super().__init__(cfg)
self.train_video_recorder = TrainVideoRecorder(self.work_dir if cfg.save_train_video else None,
camera_id=self.video_recorder.camera_id, use_wandb=self.cfg.use_wandb)
self._last_processed_step = 0 # for checkpointing
if not cfg.update_replay_buffer:
cfg.num_seed_frames = -1
if cfg.load_replay_buffer is None:
raise ValueError("If update_replay_buffer is False, load_replay_buffer must be provided")
if not self._checkpoint_filepath.exists():  # don't reload the replay buffer if there is a checkpoint
if cfg.load_replay_buffer is not None:
if self.cfg.task.split('_')[0] == "d4rl":
d4rl_replay_buffer_builder = D4RLReplayBufferBuilder()
self.replay_storage = d4rl_replay_buffer_builder.prepare_replay_buffer_d4rl(self.train_env, self.agent.init_meta(), self.cfg)
self.replay_loader = self.replay_storage
else:
self.load_checkpoint(cfg.load_replay_buffer, only=["replay_loader"])
def _play_episode(self, log_metrics: bool = True) -> None:
time_step = self.train_env.reset()
meta = self.agent.init_meta()
self.replay_loader.add(time_step, meta)
self.train_video_recorder.init(time_step.observation)
episode_step = 0
episode_reward = 0.0
z_correl = 0.0
physics_agg = dmc.PhysicsAggregator()
custom_reward = self._make_custom_reward(seed=self.global_step)
while not time_step.last():
meta = self.agent.update_meta(meta, self.global_step, time_step, replay_loader=self.replay_loader)
with torch.no_grad(), utils.eval_mode(self.agent):
action = self.agent.act(time_step.observation,
meta,
self.global_step,
eval_mode=False)
time_step = self.train_env.step(action)
if custom_reward is not None:
time_step.reward = custom_reward.from_env(self.train_env)
physics_agg.add(self.train_env)
episode_reward += time_step.reward
self.replay_loader.add(time_step, meta)
self.train_video_recorder.record(time_step.observation)
if isinstance(self.agent, agents.FBDDPGAgent):
z_correl += self.agent.compute_z_correl(time_step, meta)
episode_step += 1
self.global_step += 1
# log episode stats
if log_metrics:
self.train_video_recorder.save(f'{self.global_frame}.mp4')
elapsed_time, total_time = self.timer.reset()
episode_frame = episode_step * self.cfg.action_repeat
with self.logger.log_and_dump_ctx(self.global_frame,
ty='train') as log:
log('fps', episode_frame / elapsed_time)
log('z_correl', z_correl)
log('total_time', total_time)
log('episode_reward', episode_reward)
log('episode_length', episode_frame)
log('episode', self.global_episode)
log('buffer_size', len(self.replay_loader))
log('step', self.global_step)
for key, val in physics_agg.dump():
log(key, val)
def _checkpoint_if_need_be(self) -> None:
# save checkpoint to reload
if self.global_step // self.cfg.checkpoint_every != self._last_processed_step // self.cfg.checkpoint_every:
self.save_checkpoint(self._checkpoint_filepath)
if any(self._last_processed_step < x <= self.global_step for x in self.cfg.snapshot_at):
self.save_checkpoint(self._checkpoint_filepath.with_name(f'snapshot_{self.global_frame}.pt'))
self._last_processed_step = self.global_step
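# Worked example of the checkpointing condition above (illustrative, not part of the
# original file): with checkpoint_every=1000, a jump from _last_processed_step=995 to
# global_step=1005 crosses a multiple of 1000 (995 // 1000 == 0 != 1005 // 1000 == 1),
# so a checkpoint is written; a jump from 1005 to 1900 stays in the same bucket and
# writes nothing.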
def train(self) -> None:
metrics: tp.Optional[tp.Dict[str, float]] = None
last_step = 0
while self.global_episode < self.cfg.num_train_episodes:
# play 1 episode
if self.cfg.update_replay_buffer:
self._play_episode(log_metrics=metrics is not None) # logging requires all metrics available
else:
global_step_update = self.replay_loader.avg_episode_length
if self.cfg.num_total_updates is not None:
global_step_update = self.cfg.num_total_updates // self.cfg.num_train_episodes
self.global_step += global_step_update
self.global_episode += 1
# update the agent
if self.global_frame > self.cfg.num_seed_frames:
# TODO: reward_free should be handled in the agent update itself !
# replay = (x.with_no_reward() if self.cfg.reward_free else x for x in self.replay_loader)
for step in range(last_step + 1, self.global_step + 1): # make it comparable to the standard pretrain pipeline
metrics = self.agent.update(self.replay_loader, step)
self.logger.log_metrics(metrics, step, ty='train')
last_step = self.global_step
# evaluate
if not self.global_episode % self.cfg.eval_every_episodes:
self.logger.log('eval_total_time', self.timer.total_time(),
self.global_frame)
self.eval()
if self.cfg.use_hiplog and self.logger.hiplog.content:
self.logger.hiplog.write() # write to hiplog only once per episode
# checkpoint
self._checkpoint_if_need_be()
self.save_checkpoint(self._checkpoint_filepath)
self.finalize()
@hydra.main(config_path='.', config_name='base_config', version_base="1.1")
def main(cfg: omgcf.DictConfig) -> None:
# hydra passes a DictConfig, which is used as if it were an AnytrainConfig
workspace = Workspace(cfg) # type: ignore
workspace.train()
if __name__ == '__main__':
main()
|
controllable_agent-main
|
url_benchmark/anytrain.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pdb # pylint: disable=unused-import
import token
import tokenize
import functools
import typing as tp
from io import BytesIO
from collections import OrderedDict
import numpy as np
from url_benchmark import dmc
from dm_control.utils import rewards
from url_benchmark.custom_dmc_tasks.jaco import TASKS as jaco_tasks_list
from url_benchmark.custom_dmc_tasks.point_mass_maze import TASKS as point_mass_maze_tasks_list
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
jaco_tasks = dict(jaco_tasks_list)
point_mass_maze_tasks = dict(point_mass_maze_tasks_list)
F = tp.TypeVar("F", bound=tp.Callable[..., np.ndarray])
class Register(tp.Generic[F]):
def __init__(self) -> None:
self.funcs: tp.Dict[str, tp.Dict[str, F]] = {}
def __call__(self, name: str) -> tp.Callable[[F], F]:
return functools.partial(self._register, name=name)
def _register(self, func: F, name: str) -> F:
fname = func.__name__
subdict = self.funcs.setdefault(name, {})
if fname in subdict:
raise ValueError(f"Already registered a function {fname} for {name}")
subdict[fname] = func
return func
goal_spaces: Register[tp.Callable[[dmc.EnvWrapper], np.ndarray]] = Register()
goals: Register[tp.Callable[[], np.ndarray]] = Register()
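# Illustrative usage of the registers above (not part of the original file): decorating
# a function registers it under the given domain / goal-space name, and it can later be
# retrieved from the `funcs` dictionary, e.g.:
#   space_fn = goal_spaces.funcs["walker"]["simplified_walker"]   # registered below
#   goal = goals.funcs["simplified_walker"]["walker_stand"]()     # registered below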
# # # # #
# goal spaces, defined on one environment to specify:
# # # # #
# pylint: disable=function-redefined
@goal_spaces("jaco")
def simplified_jaco(env: dmc.EnvWrapper) -> np.ndarray:
return np.array(env.physics.bind(env.task._hand.tool_center_point).xpos,
dtype=np.float32)
@goal_spaces("point_mass_maze")
def simplified_point_mass_maze(env: dmc.EnvWrapper) -> np.ndarray:
return np.array(env.physics.named.data.geom_xpos['pointmass'][:2],
dtype=np.float32)
@goal_spaces("walker")
def simplified_walker(env: dmc.EnvWrapper) -> np.ndarray:
# check the physics here:
# https://github.com/deepmind/dm_control/blob/d72c22f3bb89178bff38728957daf62965632c2f/dm_control/suite/walker.py
return np.array([env.physics.torso_height(),
env.physics.torso_upright(),
env.physics.horizontal_velocity()],
dtype=np.float32)
@goal_spaces("walker")
def walker_pos_speed(env: dmc.EnvWrapper) -> np.ndarray:
"""simplifed walker, with x position as additional variable"""
# check the physics here:
# https://github.com/deepmind/dm_control/blob/d72c22f3bb89178bff38728957daf62965632c2f/dm_control/suite/walker.py
x = env.physics.named.data.xpos['torso', 'x']
return np.concatenate([simplified_walker(env), [x]], axis=0, dtype=np.float32) # type: ignore
@goal_spaces("walker")
def walker_pos_speed_z(env: dmc.EnvWrapper) -> np.ndarray:
"""simplifed walker, with x position as additional variable"""
# check the physics here:
# https://github.com/deepmind/dm_control/blob/d72c22f3bb89178bff38728957daf62965632c2f/dm_control/suite/walker.py
# vz = env.physics.named.data.sensordata["torso_subtreelinvel"][-1]
# om_y = env.physics.named.data.subtree_angmom['torso'][1]
vz = env.physics.named.data.subtree_linvel['torso', 'z']
om_y = env.physics.named.data.subtree_angmom['torso', 'y']
return np.concatenate([walker_pos_speed(env), [vz, om_y]], axis=0, dtype=np.float32) # type: ignore
@goal_spaces("quadruped")
def simplified_quadruped(env: dmc.EnvWrapper) -> np.ndarray:
# check the physics here:
# https://github.com/deepmind/dm_control/blob/d72c22f3bb89178bff38728957daf62965632c2f/dm_control/suite/quadruped.py#L145
return np.array([env.physics.torso_upright(),
np.linalg.norm(env.physics.torso_velocity())],
dtype=np.float32)
@goal_spaces("quadruped")
def quad_pos_speed(env: dmc.EnvWrapper) -> np.ndarray:
# check the physics here:
# https://github.com/deepmind/dm_control/blob/d72c22f3bb89178bff38728957daf62965632c2f/dm_control/suite/quadruped.py#L145
x = np.array(env.physics.named.data.site_xpos['workspace'])
states = [[env.physics.torso_upright()], x, env.physics.torso_velocity()]
return np.concatenate(states, dtype=np.float32)
# @goal_spaces("quadruped") # this one needs a specific task for the ball to be present
# def quadruped_positions(env: dmc.EnvWrapper) -> np.ndarray:
# data = env.physics.named.data
# states = [data.xpos['ball'] - data.site_xpos['target'], data.xpos["torso"] - data.site_xpos['target']]
# return np.concatenate(states, dtype=np.float32)
# # # # #
# goals, defined on one goal_space to specify:
# # # # #
@goals("simplified_walker")
def walker_stand() -> np.ndarray:
return np.array([1.2, 1.0, 0], dtype=np.float32)
@goals("simplified_walker")
def walker_walk() -> np.ndarray:
return np.array([1.2, 1.0, 2], dtype=np.float32)
@goals("simplified_walker")
def walker_run() -> np.ndarray:
return np.array([1.2, 1.0, 4], dtype=np.float32)
@goals("simplified_quadruped")
def quadruped_stand() -> np.ndarray:
return np.array([1.0, 0], dtype=np.float32)
@goals("simplified_quadruped")
def quadruped_walk() -> np.ndarray:
return np.array([1.0, 0.6], dtype=np.float32)
@goals("simplified_quadruped")
def quadruped_run() -> np.ndarray:
return np.array([1.0, 6], dtype=np.float32)
@goals("quadruped_positions")
def quadruped_fetch() -> np.ndarray:
return np.zeros((6,), dtype=np.float32)
@goals("simplified_point_mass_maze")
def point_mass_maze_reach_top_left() -> np.ndarray:
return np.array(point_mass_maze_tasks['reach_top_left'],
dtype=np.float32)
@goals("simplified_point_mass_maze")
def point_mass_maze_reach_top_right() -> np.ndarray:
return np.array(point_mass_maze_tasks['reach_top_right'],
dtype=np.float32)
@goals("simplified_point_mass_maze")
def point_mass_maze_reach_bottom_left() -> np.ndarray:
return np.array(point_mass_maze_tasks['reach_bottom_left'],
dtype=np.float32)
@goals("simplified_point_mass_maze")
def point_mass_maze_reach_bottom_right() -> np.ndarray:
return np.array(point_mass_maze_tasks['reach_bottom_right'],
dtype=np.float32)
@goals("simplified_jaco")
def jaco_reach_top_left() -> np.ndarray:
return jaco_tasks['reach_top_left'].astype(np.float32)
@goals("simplified_jaco")
def jaco_reach_top_right() -> np.ndarray:
return jaco_tasks['reach_top_right'].astype(np.float32)
@goals("simplified_jaco")
def jaco_reach_bottom_left() -> np.ndarray:
return jaco_tasks['reach_bottom_left'].astype(np.float32)
@goals("simplified_jaco")
def jaco_reach_bottom_right() -> np.ndarray:
return jaco_tasks['reach_bottom_right'].astype(np.float32)
@goals("walker_pos_speed_z")
def walker_dummy() -> np.ndarray:
return np.zeros((6,), dtype=np.float32)
# # # Custom Reward # # #
def _make_env(domain: str) -> dmc.EnvWrapper:
task = {"quadruped": "stand", "walker": "walk", "jaco": "reach_top_left", "point_mass_maze": "reach_bottom_right"}[domain]
return dmc.make(f"{domain}_{task}", obs_type="states", frame_stack=1, action_repeat=1, seed=12)
def get_goal_space_dim(name: str) -> int:
domain = {space: domain for domain, spaces in goal_spaces.funcs.items() for space in spaces}[name]
env = _make_env(domain)
return goal_spaces.funcs[domain][name](env).size
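# Illustrative sketch (not part of the original file): the dimension is obtained by
# instantiating the domain's default environment and evaluating the goal space once,
# e.g. get_goal_space_dim("simplified_walker") builds a walker env and returns 3,
# while get_goal_space_dim("walker_pos_speed_z") returns 6.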
class BaseReward:
def __init__(self, seed: tp.Optional[int] = None) -> None:
self._env: dmc.EnvWrapper # to be instantiated in subclasses
self._rng = np.random.RandomState(seed)
def get_goal(self, goal_space: str) -> np.ndarray:
raise NotImplementedError
def from_physics(self, physics: np.ndarray) -> float:
"careful this is not threadsafe"
with self._env.physics.reset_context():
self._env.physics.set_state(physics)
return self.from_env(self._env)
def from_env(self, env: dmc.EnvWrapper) -> float:
raise NotImplementedError
def get_reward_function(name: str, seed: tp.Optional[int] = None) -> BaseReward:
if name == "quadruped_mix":
return QuadrupedReward(seed)
if name == "walker_random_equation":
return WalkerRandomReward(seed)
if name == "quadruped_position":
return QuadrupedPosReward(seed)
if name == "maze_multi_goal":
return MazeMultiGoal(seed)
if name == "walker_position":
return WalkerPosReward(seed)
return DmcReward(name)
def _inv(distance: float) -> float:
# print("dist", distance)
return 1 / (1 + abs(distance))
class DmcReward(BaseReward):
def __init__(self, name: str) -> None:
super().__init__()
self.name = name
env_name, task_name = name.split("_", maxsplit=1)
try:
from dm_control import suite # import
from url_benchmark import custom_dmc_tasks as cdmc
except ImportError as e:
raise dmc.UnsupportedPlatform("DMC does not run on Mac") from e
make = suite.load if (env_name, task_name) in suite.ALL_TASKS else cdmc.make
self._env = make(env_name, task_name)
def from_env(self, env: dmc.EnvWrapper) -> float:
return float(self._env.task.get_reward(env.physics))
# def from_env(self, env: dmc.EnvWrapper) -> float:
# return self.from_physics(env.physics.get_state())
#
# def from_physics(self, physics: np.ndarray) -> float:
# # pdb.set_trace()
# with self._env.physics.reset_context():
# self._env.physics.set_state(physics)
# return float(self._env.task.get_reward(self._env.physics))
class QuadrupedReward(BaseReward):
NUM_CASES = 7
def __init__(self, seed: tp.Optional[int] = None) -> None:
super().__init__(seed)
self._env = _make_env("quadruped")
self.x = self._rng.uniform(-5, 5, size=2)
self.vx = self._rng.uniform(-3, 3, size=2)
self.quadrant = self._rng.choice([1, -1], size=2, replace=True)
self.speed = float(np.linalg.norm(self.vx))
self._case = self._rng.randint(self.NUM_CASES)
def from_env(self, env: dmc.EnvWrapper) -> float:
# x = env.physics.named.data.xpos["torso"][:2]
x = env.physics.named.data.site_xpos['workspace'][:2]
vx = env.physics.torso_velocity()[:2]
up = max(0, float(env.physics.torso_upright()))
speed = float(np.linalg.norm(vx))
if not self._case: # specific speed norm
return up * _inv(speed - self.speed)
if self._case == 1: # specific position
return up * _inv(float(np.linalg.norm(x - self.x)))
if self._case == 2: # specific quadrant
return up * float(np.all(x * self.quadrant > self.x))
if self._case == 3: # specific quadrant and speed norm
return up * float(np.all(x * self.quadrant > self.x)) * _inv(self.speed - speed)
if self._case == 4: # specific speed
return up * _inv(np.linalg.norm(self.vx - vx) / np.sqrt(2))
if self._case == 5: # specific quadrant and sufficient speed
return up * float(np.all(x * self.quadrant > self.x)) * (speed > self.speed)
if self._case == 6: # sufficient speed
return up * (speed > self.speed)
else:
raise ValueError(f"No case #{self._case}")
class QuadrupedPosReward(BaseReward):
"""Deterministic positional reward"""
def __init__(self, seed: tp.Optional[int] = None) -> None:
super().__init__(seed)
self._env = _make_env("quadruped")
self.x = np.array([2, 2, 0.8])
def get_goal(self, goal_space: str) -> np.ndarray:
if goal_space != "quad_pos_speed":
raise ValueError(f"Goal space {goal_space} not supported with this reward")
states = [[1.0], self.x, [0] * 3]
return np.concatenate(states, dtype=np.float32) # type: ignore
def from_env(self, env: dmc.EnvWrapper) -> float:
x = env.physics.named.data.site_xpos['workspace']
up = float(env.physics.torso_upright())
up = (up + 1) / 2
out = 0.5 * up + 0.5 * _inv(float(np.linalg.norm(x - self.x))) # * _inv(speed)
return out
class WalkerPosReward(BaseReward):
"""Random positional reward"""
def __init__(self, seed: tp.Optional[int] = None) -> None:
super().__init__(seed)
self._env = _make_env("walker")
self.x = self._rng.randint(-20, 20)  # use the seeded rng so the target position is reproducible
def get_goal(self, goal_space: str) -> np.ndarray:
if goal_space != "walker_pos_speed_z":
raise ValueError(f"Goal space {goal_space} not supported with this reward")
states = [1, 1, 0, self.x, 0, 0] # [z, up, vx, x, vz, om_y]
# states = [self.x]
return np.array(states, dtype=np.float32) # type: ignore
def from_env(self, env: dmc.EnvWrapper) -> float:
x = env.physics.named.data.xpos['torso', 'x']
target_size = 1
d = abs(x - self.x)
reward = rewards.tolerance(d, bounds=(0, target_size), margin=target_size)
return reward
class MazeMultiGoal(BaseReward):
def __init__(self, seed: tp.Optional[int] = None) -> None:
super().__init__(seed)
self.goals = np.array([
[-0.15, 0.15], # room 1: top left
[-0.22, 0.22], # room 1
[-0.08, 0.08], # room 1
[-0.22, 0.08], # room 1
[-0.08, 0.22], # room 1
[0.15, 0.15], # room 2: top right
[0.22, 0.22], # room 2
[0.08, 0.08], # room 2
[0.22, 0.08], # room 2
[0.08, 0.22], # room 2
[-0.15, -0.15], # room 3: bottom left
[-0.22, -0.22], # room 3
[-0.08, -0.08], # room 3
[-0.22, -0.08], # room 3
[-0.08, -0.22], # room 3
[0.15, -0.15], # room 4: bottom right
[0.22, -0.22], # room 4
[0.08, -0.08], # room 4
[0.22, -0.08], # room 4
[0.08, -0.22], # room 4
], dtype=np.float32)
# save images for debug
# import imageio
# self._env = dmc.make("point_mass_maze_multi_goal", obs_type="states", frame_stack=1, action_repeat=1, seed=12)
# self._env.reset()
# img = self._env.physics.render(height=256, width=256, camera_id=0)
# imageio.imsave("maze.png", img)
def from_goal(self, achieved_goal: np.ndarray, desired_goal: np.ndarray) -> tp.Tuple[float, float]:
"""returns reward and distance"""
assert achieved_goal.shape == desired_goal.shape
target_size = .03
d: np.ndarray = achieved_goal - desired_goal
distance = np.linalg.norm(d, axis=-1) if len(d.shape) > 0 else np.linalg.norm(d)
reward = rewards.tolerance(distance,
bounds=(0, target_size), margin=target_size)
return reward, distance
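# Illustrative sketch (not part of the original file): from_goal compares an achieved
# 2d position with one of the 20 goals above, e.g.
#   mg = MazeMultiGoal()
#   reward, dist = mg.from_goal(np.array([-0.15, 0.15], dtype=np.float32), mg.goals[0])
#   # -> reward == 1.0 (within the 0.03 tolerance), dist == 0.0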
class WalkerYogaReward():
def __init__(self) -> None:
self._env = _make_env("walker")
self._goals = get_walkeryoga_goals()
self.target_obs = {}
for key, g in self._goals.items():
self.target_obs[key] = get_obs_from_yoga_goal(self._env, g).astype(np.float32)
# save images for debug
# import imageio
# img = self._env.physics.render(height=256, width=256, camera_id=0)
# imageio.imsave(f"yoga_goals/{key}.png", img)
def compute_reward(self, phy: np.ndarray, g: str) -> float:
assert g in self._goals.keys()
distance = _oracle_distance(phy, self._goals[g])
return - distance
def _shortest_angle(angle):
if not angle.shape:
return _shortest_angle(angle[None])[0]
angle = angle % (2 * np.pi)
angle[angle > np.pi] = 2 * np.pi - angle[angle > np.pi]
return angle
def _oracle_distance(x1, x2):
assert x1.shape[0] in [9, 18] and x2.shape[0] in [9, 18]
x1, x2 = x1[:9], x2[:9]
def get_su(_goal):
dist = np.abs(x1 - _goal)
dist = dist[..., [0, 2, 3, 4, 6, 7]]
dist[..., 1] = _shortest_angle(dist[..., 1])
return dist.max(-1)
return min(get_su(x2), get_su(x2[..., [0, 1, 2, 6, 7, 8, 3, 4, 5]]))
def get_obs_from_yoga_goal(env, goal):
new_state = np.pad(goal, (0, 9), mode="constant")
env.physics.set_state(new_state)
env.physics.forward()
obs = env.task.get_observation(env.physics)
return _flatten_obs(obs)
def _flatten_obs(obs):
obs_pieces = []
for v in obs.values():
flat = np.array([v]) if np.isscalar(v) else v.ravel()
obs_pieces.append(flat)
return np.concatenate(obs_pieces, axis=0)
def get_walkeryoga_goals():
# pose[0] is height
# pose[1] is x
# pose[2] is global rotation
# pose[3:6] - first leg hip, knee, ankle
# pose[6:9] - second leg hip, knee, ankle
# Note: seems like walker can't bend legs backwards
lie_back = [-1.2, 0., -1.57, 0., 0., 0., 0, -0., 0.]
lie_front = [-1.2, -0, 1.57, 0., 0, 0., 0., 0., 0.]
legs_up = [-1.24, 0., -1.57, 1.57, 0., 0.0, 1.57, -0., 0.0]
kneel = [-0.5, 0., 0., 0., -1.57, -0.8, 1.57, -1.57, 0.0]
side_angle = [-0.3, 0., 0.9, 0., 0., -0.7, 1.87, -1.07, 0.0]
stand_up = [-0.15, 0., 0.34, 0.74, -1.34, -0., 1.1, -0.66, -0.1]
lean_back = [-0.27, 0., -0.45, 0.22, -1.5, 0.86, 0.6, -0.8, -0.4]
boat = [-1.04, 0., -0.8, 1.6, 0., 0.0, 1.6, -0., 0.0]
bridge = [-1.1, 0., -2.2, -0.3, -1.5, 0., -0.3, -0.8, -0.4]
head_stand = [-1, 0., -3, 0.6, -1, -0.3, 0.9, -0.5, 0.3]
one_feet = [-0.2, 0., 0, 0.7, -1.34, 0.5, 1.5, -0.6, 0.1]
arabesque = [-0.34, 0., 1.57, 1.57, 0, 0., 0, -0., 0.]
return {'lie_back': np.array(lie_back, dtype=np.float32),
'lie_front': np.array(lie_front, dtype=np.float32),
'legs_up': np.array(legs_up, dtype=np.float32),
'kneel': np.array(kneel, dtype=np.float32),
'side_angle': np.array(side_angle, dtype=np.float32),
'stand_up': np.array(stand_up, dtype=np.float32),
'lean_back': np.array(lean_back, dtype=np.float32),
'boat': np.array(boat, dtype=np.float32),
'bridge': np.array(bridge, dtype=np.float32),
'one_feet': np.array(one_feet, dtype=np.float32),
'head_stand': np.array(head_stand, dtype=np.float32),
'arabesque': np.array(arabesque, dtype=np.float32)
}
def extract_names(string: str) -> tp.Set[str]:
rl = BytesIO(string.encode('utf-8')).readline
tokens = list(tokenize.tokenize(rl))
return {t.string for t in tokens if t.type == token.NAME}
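# Illustrative sketch (not part of the original file): extract_names tokenizes an
# equation string and returns the identifiers it uses, which WalkerEquation below
# checks against its whitelist, e.g.
#   extract_names("exp(-(x-5.0)**2) * up")  # -> {"exp", "x", "up"}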
class WalkerEquation(BaseReward):
def __init__(self, string: str) -> None:
super().__init__()
self._env = _make_env("walker")
self._np = ["sin", "cos", "tan", "abs", "exp", "sqrt"]
variables = list(self._extract(self._env)) + self._np
not_allowed = extract_names(string) - set(variables)
# keep this safety measure to avoid being hacked in the demo!
if not_allowed:
raise ValueError(f"The following variables are not allowed: {not_allowed}\nPlease only use {variables}")
self.string = string
self._precomputed: tp.Dict[str, np.ndarray] = {}
def _extract(self, env: dmc.EnvWrapper) -> tp.Dict[str, float]:
data = env.physics.named.data
return dict(
x=data.xpos["torso", "x"],
z=data.xpos["torso", "z"],
vx=env.physics.horizontal_velocity(),
vz=env.physics.named.data.sensordata["torso_subtreelinvel"][-1],
up=env.physics.torso_upright(),
am=env.physics.named.data.subtree_angmom['torso', 'y']
)
def from_env(self, env: dmc.EnvWrapper) -> float:
# pylint: disable=eval-used
variables = self._extract(env)
for name in self._np:
variables[name] = getattr(np, name)
return eval(self.string, {}, variables) # type: ignore
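# Illustrative usage of from_env (not part of the original file; assumes a dm_control
# walker environment is available):
#   reward_fn = WalkerEquation("exp(-(x-5.0)**2) * up")
#   env = _make_env("walker")
#   env.reset()
#   r = reward_fn.from_env(env)  # scalar reward evaluated on the current physics state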
def _precompute_for_demo(self, workspace: tp.Any) -> None:
"""special method for the demo which precomputes data
please only use in demo, since it's messy
"""
ws = workspace
if hasattr(ws, "_precomputed_"):
self._precomputed = ws._precomputed_
return
import torch # pylint: disable=import-outside-toplevel
replay: ReplayBuffer = ws.replay_loader # recover some typing
batch = replay.sample(ws.agent.cfg.num_inference_steps, with_physics=True)
with torch.no_grad():
obs = torch.Tensor(batch.goal).to(ws.cfg.device)
B = workspace.agent.backward_net(obs).detach().cpu().numpy()
precomputed = {"#B": B.astype(np.float32)}
for k, phy in enumerate(batch._physics): # type: ignore
with self._env.physics.reset_context():
self._env.physics.set_state(phy)
step_feat = self._extract(self._env)
for key, val in step_feat.items():
if key not in precomputed:
precomputed[key] = np.zeros((B.shape[0], 1), dtype=np.float32)
precomputed[key][k] = val
ws._precomputed_ = precomputed # store it for reuse
self._precomputed = precomputed
def _from_precomputed(self) -> tp.Dict[str, np.ndarray]:
variables = dict(self._precomputed)
var_name0 = [x for x in variables if not x.startswith("#")][0]
for name in self._np:
variables[name] = getattr(np, name)
rewards = eval(self.string, {}, variables) # type: ignore
if not isinstance(rewards, np.ndarray):
rewards = rewards * np.ones_like(variables[var_name0])
z = self._precomputed["#B"].T.dot(rewards).squeeze()
if True: # ASSUMING SCALED
norm = float(np.linalg.norm(z))
if not norm:
norm = 1e-9
z *= np.sqrt(z.size) / norm
meta = OrderedDict()
meta['z'] = z
return meta
class WalkerRandomReward(WalkerEquation):
"""Deterministic positional reward"""
def __init__(self, seed: tp.Optional[int] = None) -> None:
rng = np.random.RandomState(seed)
x = rng.uniform(3, 15)
nx = rng.uniform(3, 8)
# equation + weight
cases = [
(f"exp(-(x-{x:.1f})**2)", 5),
(f"exp(-(x-{x:.1f})**2) * up", 5),
(f"exp(-(x+{nx:.1f})**2)", 2),
("vx > 1", 1),
("vx > 3", 1),
("vx < -1", 1),
]
p = np.array([float(x[1]) for x in cases])
p /= p.sum()
selected = cases[rng.choice(range(p.size), p=p)][0]
super().__init__(selected)
self._rng = rng
|
controllable_agent-main
|
url_benchmark/goals.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import collections
from collections import abc
from concurrent import futures
import time
import uuid
import json
import typing as tp
import logging
from datetime import datetime
import subprocess
from pathlib import Path
try:
from typing import Protocol
except ImportError:
# backward compatible
from typing_extensions import Protocol # type: ignore
import numpy as np
import pandas as pd
# pylint: disable=import-outside-toplevel
START_LINE = "# Hiplot logs"
logger: logging.Logger = logging.getLogger(__name__)
class _StatCall(Protocol):
def __call__(self, **kwargs: float) -> "HipLog":
...
class HipLogfileError(RuntimeError):
pass
class STYLE: # pylint: disable=too-few-public-methods
metrics = "badge badge-pill badge-primary"
internal = "badge badge-pill badge-secondary"
optim = "badge badge-pill badge-dark"
model = "badge badge-pill badge-success"
other = "badge badge-pill badge-danger"
# "badge badge-pill badge-warning"
def _set_style(exp: tp.Any) -> None:
import hiplot as hip
assert isinstance(exp, hip.Experiment)
# Don't display `uid` and `from_uid` columns to the user
cols = set(x for dp in exp.datapoints for x in dp.values.keys())
internals = ["workdir", "#now", "train/episode", "eval/episode", "#time", "#reloaded", "#job_id"]
hidden = [x for x in cols if x.startswith(("eval/", "train/"))]
hidden = [x for x in hidden if not any(y in x for y in ("episode", "loss"))]
exp.display_data(hip.Displays.PARALLEL_PLOT).update(
{
"hide": ["uid", "from_uid"] + hidden,
}
)
# for the record, some more options:
exp.display_data(hip.Displays.XY).update(
{"lines_thickness": 1.4, "lines_opacity": 0.9}
)
exp.display_data(hip.Displays.XY).update(
{"axis_x": "eval/episode", "axis_y": "eval/episode_reward"}
)
# colors
styles = {}
styles.update(
{
name: STYLE.metrics
for name in cols
if name.startswith(("eval/", "train/"))
and not any(y in name for y in ("/episode", "episode_reward"))
}
)
styles.update(
{name: STYLE.other for name in ("eval/episode_reward", "train/episode_reward")}
)
styles.update({name: STYLE.internal for name in internals})
styles["experiment"] = STYLE.other
for col in cols:
for start, style in styles.items():
if col.startswith(start):
exp.parameters_definition[col].label_css = style
def create_hiplot_experiment(uri: tp.Union[str, Path]) -> tp.Any:
import hiplot as hip
# one xp case
uri = Path(uri)
assert uri.suffix == ".csv", f"Path should be a csv, got {uri}"
assert uri.is_file(), f"Path should be a valid file, but got {uri}"
df = pd.read_csv(uri)
prev_uid: tp.Optional[str] = None
exp = hip.Experiment()
base = dict(xp=uri.parent.name, date=uri.parents[1].name, mode=uri.stem)
for k, xp in enumerate(df.itertuples(index=False)):
data = xp._asdict()
data.update(base)
dp = hip.Datapoint(
uid=f"{uri.parent.name}_{uri.stem}_{k}", from_uid=prev_uid, values=data
)
prev_uid = dp.uid
exp.datapoints.append(dp)
_set_style(exp)
return exp
def load(uri: tp.Union[Path, str], step: int = 10) -> tp.Any:
"""Loader for hiplot
Running:
python -m hiplot controllable_agent.hiplogs.load --port=XXXX
will run a hiplot server in which you can paste one (or more) log paths
to plot them
Note
----
if you install first: "pip install -e ."
you can simplify to:
hiplot xxxx.load --port=XXXX
Then either provide the folder of the experiments in the freeform,
or their parent directory, so that all subfolders will be parsed for logs.
"""
import hiplot as hip
uri = Path(uri)
if str(uri).startswith("#"): # deactivate a line
return hip.Experiment()
assert uri.is_dir(), f"uri should be a valid directory, got {uri}"
jobs = []
with futures.ProcessPoolExecutor() as executor:
for path in uri.rglob("eval.csv"):
for hlog in HipLog.find_in_folder(path.parent):
jobs.append(executor.submit(hlog.to_hiplot_experiment, step))
# exps.append(create_hiplot_experiment(path))
# exps.append(create_hiplot_experiment(path.with_name("eval.csv")))
exps = [j.result() for j in jobs]
exp = hip.Experiment.merge({str(k): xp for k, xp in enumerate(exps)})
_set_style(exp)
return exp
class HipLog:
"""Simple object for logging hiplot compatible content
Parameters
----------
filepath: str or Path
path to the logfile. It will be created if it does not exist, otherwise
data will be appended to it.
Usage
-----
Content is added by calling the instance with keyword arguments; this updates
the pending entry in place and returns the instance so calls can be chained.
`write` appends the pending entry to the logfile and clears it, so each written
line only contains what was added since the previous `write` (older values are
merged back in when reading with `read`).
Example
-------
hiplog = hiplogs.HipLog(filepath)
hiplog(shared_key=12)
hiplog.write()  # writes {"shared_key": 12}
hiplog(hello="world").write()  # writes {"hello": "world"}
hiplog(something="blublu").write()  # writes {"something": "blublu"}
"""
def __init__(self, filepath: tp.Union[Path, str]) -> None:
self._filepath = Path(filepath)
if self._filepath.suffix not in (".txt", ".log"):
raise ValueError("Filepath must have .txt or .log as extension")
self._content: tp.Dict[str, tp.Any] = {
"#start_time": f"{datetime.now():%Y-%m-%d %H:%M}"
}
self._floats: tp.Dict[str, tp.List[float]] = collections.defaultdict(list)
self._stats: tp.Dict[str, tp.Tuple[str, ...]] = {}
self._reloaded = 0
try:
self._filepath.parent.mkdir(parents=True, exist_ok=True)
if not self._filepath.exists():
self._filepath.write_text(START_LINE + " v1\n", encoding="utf8")
except Exception as e: # pylint: disable=broad-except
logger.warning("Failing to write data to json: %s", e)
try:
import submitit
self._content["#job_id"] = submitit.JobEnvironment().job_id
except Exception: # pylint: disable=broad-except
pass
data = self.read()
if data:
self._reloaded = data[-1].get("#reloaded", -1) + 1 # type: ignore
@classmethod
def find_in_folder(
cls, folder: tp.Union[str, Path], recursive: bool = False
) -> tp.Iterator["HipLog"]:
"""Instantiate all hiplog instances from the folder or subfolders
Parameters
----------
folder: str/Path
folder to look into
recursive: bool
instantiate all hiplog logs recursively
Yields
------
HipLog
hiplog instance
"""
folder = Path(folder)
for suffix in (".txt", ".log"):
iterator = (folder.rglob if recursive else folder.glob)("*" + suffix)
for fp in iterator:
if fp.suffix in (".log", ".txt"):
with fp.open("r", encoding="utf8") as f:
is_hiplog = START_LINE in f.readline()
if is_hiplog:
yield cls(fp)
def __call__(self, **kwargs: tp.Hashable) -> "HipLog":
sanitized = {
x: y if not isinstance(y, np.generic) else y.item()
for x, y in kwargs.items()
}
self._content.update(sanitized)
return self
def with_stats(self, *stats: str) -> _StatCall:
return functools.partial(self._with_stats, tuple(stats))
def _with_stats(self, _internal_name_stats: tp.Tuple[str, ...], **kwargs: float) -> "HipLog":
for key, val in kwargs.items():
self._stats[key] = _internal_name_stats # overridden by last call
self._floats[key].append(float(val))
return self
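# Illustrative usage of with_stats (not part of the original file): values are
# accumulated until the next write(), which exports one "<key>#<stat>" column per
# requested statistic, e.g. (filepath is hypothetical):
#   log = HipLog(Path("hip.log"))
#   for r in (3.0, 5.0):
#       log.with_stats("mean", "max")(reward=r)
#   log.write()  # exports reward#mean=4.0 and reward#max=5.0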
def read(self, step: int = 1) -> tp.List[tp.Dict[str, tp.Hashable]]:
"""Returns the data recorded through the logger
Parameter
---------
step: int
step for decimating the data if too big
Returns
-------
list of dict
all the timepoints. Data from past timepoints are used if not
provided in newer timepoints (eg: initial hyperparameters are
passed to all timepoints)
"""
with self._filepath.open("r", encoding="utf8") as f:
lines = f.readlines()
if lines and not lines[0].startswith(START_LINE):
raise HipLogfileError(
f"Did not recognize first line: {lines[0]!r} instead of {START_LINE!r}"
)
data: tp.List[tp.Dict[str, tp.Hashable]] = []
last = {}
for k, line in enumerate(lines):
if not line.startswith("#"):
line_dict = json.loads(line.strip())
last.update(line_dict)
if not k % step:
data.append(dict(last))
return data
def last_line(self) -> tp.Dict[str, tp.Hashable]:
data = self.read()
return {} if not data else data[-1]
@property
def content(self) -> tp.Dict[str, tp.Hashable]:
return dict(self._content)
def _export_floats(self) -> tp.Dict[str, float]:
out: tp.Dict[str, float] = {}
for key, vals in self._floats.items():
for stat in self._stats[key]:
out[f"{key}#{stat}"] = getattr(np, stat)(vals)
return out
def write(self) -> None:
# avoid as much as possible any disruption
self._content["#now"] = f"{datetime.now():%Y-%m-%d %H:%M}"
self._content["#time"] = time.time()
self._content["#reloaded"] = self._reloaded
self._content.update(self._export_floats())
if not self._filepath.exists():
return # initialization failed, can't do anything more
try:
string = json.dumps(self._content)
except Exception as e: # pylint: disable=broad-except
logger.warning("Failing to write data to json: %s", e)
return # can't be json-ed, stop there
# if it reaches here, it should be safe to write
with self._filepath.open("a", encoding="utf8") as f:
f.write(string + "\n")
self._content.clear()
self._floats.clear()
self._stats.clear()
def flattened(self, data: tp.Any) -> "HipLog":
"""Flattens a structured configuration and adds it to the content"""
self(**_flatten(data))
return self
def to_hiplot_experiment(self, step: int = 1) -> tp.Any:
"""Returns the Experiment recorded through the logger
Parameter
---------
step: int
step for decimating the data if too big
Returns
-------
Experiment
Hiplot Experiment instance containing the logger data
"""
import hiplot as hip
exp = hip.Experiment()
prev_uid: tp.Optional[str] = None
name = uuid.uuid4().hex[:8]
for k, data in enumerate(self.read(step=step)):
# update the displayed name to something readable
if not k:
xp = data.get("experiment", "#UNKNOWN#")
job_id = data.get("#job_id", name)
name = f"{xp} / {job_id}"
dp = hip.Datapoint(uid=f"{name} / {k}", from_uid=prev_uid, values=data) # type: ignore
prev_uid = dp.uid
exp.datapoints.append(dp)
_set_style(exp)
logger.info("Finished loading %s", self._filepath)
return exp
def _flatten(data: abc.Mapping) -> tp.Dict[str, tp.Hashable]: # type: ignore
output: tp.Dict[str, tp.Hashable] = {}
if isinstance(data, abc.Mapping):
for x, y in data.items():
if isinstance(y, abc.Mapping):
content = _flatten(y)
output.update({f"{x}/{x2}": y2 for x2, y2 in content.items()})
elif isinstance(y, abc.Sequence) and not isinstance(y, str):
if y and isinstance(
y[0], (int, float, str)
): # ignoring weird structures
output[x] = ",".join(str(z) for z in y)
elif isinstance(y, abc.Hashable):
output[x] = y
return output
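# Worked example for _flatten (illustrative, not part of the original file):
#   _flatten({"agent": {"lr": 1e-4, "obs_shape": (2, 2)}, "task": "walker_walk"})
#   # -> {"agent/lr": 0.0001, "agent/obs_shape": "2,2", "task": "walker_walk"}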
def repository_information() -> tp.Dict[str, str]:
commands = {
"commit": "git rev-parse --short HEAD",
"branch": "git rev-parse --abbrev-ref HEAD",
"closest_main": "git rev-parse --short main",
}
here = Path(__file__).parent
output: tp.Dict[str, str] = {}
for name, command in commands.items():
try:
output[name] = (
subprocess.check_output(command.split(), shell=False, cwd=here)
.strip()
.decode()
)
except Exception: # pylint: disable=broad-except
pass
return output
|
controllable_agent-main
|
url_benchmark/hiplogs.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
import sys
from pathlib import Path
import subprocess
import pytest
def _run(tmp_path: Path, **params: tp.Any) -> None:
folder = Path(__file__).parents[1] / "url_benchmark"
assert folder.exists()
if sys.platform == "darwin":
pytest.skip(reason="Does not run on Mac")
string = " ".join(f"{x}={y}" for (x, y) in params.items())
command = (
f"python -m url_benchmark.pretrain device=cpu hydra.run.dir={tmp_path} final_tests=0 "
+ string
)
print(f"Running: {command}")
subprocess.check_call(command.split())
@pytest.mark.parametrize(
"agent", ["aps", "diayn", "rnd", "proto"]
) # test most important ones
def test_pretrain_from_commandline(agent: str, tmp_path: Path) -> None:
_run(
tmp_path,
agent=agent,
num_train_frames=1011,
num_eval_episodes=1,
num_seed_frames=1010,
replay_buffer_episodes=2,
)
def test_pretrain_from_commandline_fb_with_goal(tmp_path: Path) -> None:
_run(
tmp_path,
agent="fb_ddpg",
num_train_frames=1,
num_eval_episodes=1,
replay_buffer_episodes=2,
goal_space="simplified_walker",
use_hiplog=True,
)
assert (tmp_path / "hip.log").exists()
|
controllable_agent-main
|
url_benchmark/test_pretrain.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pdb # pylint: disable=unused-import
import logging
import dataclasses
import typing as tp
from url_benchmark import pretrain # NEEDS TO BE FIRST NON-STANDARD IMPORT (sets up env variables)
import omegaconf as omgcf
import hydra
from hydra.core.config_store import ConfigStore
import torch
from url_benchmark import goals as _goals
from url_benchmark import utils
from url_benchmark.in_memory_replay_buffer import ReplayBuffer # pylint: disable=unused-import
from url_benchmark.replay_buffer import EpisodeBatch # pylint: disable=unused-import
from url_benchmark import agent as agents
logger = logging.getLogger(__name__)
torch.backends.cudnn.benchmark = True
from pathlib import Path
import sys
base = Path(__file__).absolute().parents[1]
for fp in [base, base / "url_benchmark"]:
assert fp.exists()
if str(fp) not in sys.path:
sys.path.append(str(fp))
@dataclasses.dataclass
class OfflineConfig(pretrain.Config):
# training
num_grad_steps: int = 1000000
num_seed_frames: int = 0
log_every_steps: int = 1000
# eval
num_eval_episodes: int = 10
eval_every_steps: int = 10000
# dataset
load_replay_buffer: tp.Optional[str] = None
expl_agent: str = "proto"
replay_buffer_dir: str = omgcf.SI("../../../../datasets") # make sure to update this if you change hydra run dir
# misc
experiment: str = "offline"
reward_free: bool = False
ConfigStore.instance().store(name="workspace_config", node=OfflineConfig)
class Workspace(pretrain.BaseWorkspace[OfflineConfig]):
def __init__(self, cfg: OfflineConfig) -> None:
super().__init__(cfg)
self.agent.cfg.update_every_steps = 1
datasets_dir = self.work_dir / cfg.replay_buffer_dir
replay_dir = datasets_dir.resolve() / self.domain / cfg.expl_agent / 'buffer'
print(f'replay dir: {replay_dir}')
# self.replay_loader = ReplayBuffer([], # self._data_specs, [], # meta_specs = []
# cfg.batch_size, cfg.replay_buffer_episodes,
# cfg.discount, True)
if self.cfg.load_replay_buffer is not None:
print("loading Replay from %s", self.cfg.load_replay_buffer)
self.load_checkpoint(self.cfg.load_replay_buffer, only=["replay_loader"])
# with open(self.cfg.load_replay_buffer, 'rb') as f:
# content = torch.load(f)
# if isinstance(content, dict):
# content = content["replay_loader"]
# # assert isinstance(content, ReplayBuffer)
# self.replay_loader = content
else:
relabeled_replay_file_path = replay_dir / f"../relabeled_replay_{cfg.task}_{cfg.replay_buffer_episodes}.pt"
if relabeled_replay_file_path.exists():
print("loading Replay from %s", relabeled_replay_file_path.resolve())
self.load_checkpoint(relabeled_replay_file_path, only=["replay_loader"])
# with relabeled_replay_file_path.open('rb') as f:
# self.replay_loader = torch.load(f)
else:
print("loading and relabeling...")
goal_func = None if cfg.goal_space is None else _goals.goal_spaces.funcs[self.domain][cfg.goal_space]
self.replay_loader.load(self.train_env, replay_dir, relabel=True, goal_func=goal_func)
print("loading is done")
with relabeled_replay_file_path.open('wb') as f:
torch.save(self.replay_loader, f)
self.replay_loader._future = cfg.future
self.replay_loader._discount = cfg.discount
# self.replay_loader._full = True
self.replay_loader._max_episodes = len(self.replay_loader._storage["discount"])
if isinstance(self.agent, agents.GoalTD3Agent) and self.agent.cfg.fb_reward:
self.agent.precompute_cov(self.replay_loader)
def train(self):
train_until_step = utils.Until(self.cfg.num_grad_steps)
eval_every_step = utils.Every(self.cfg.eval_every_steps)
log_every_step = utils.Every(self.cfg.log_every_steps)
while train_until_step(self.global_step):
# try to evaluate
if eval_every_step(self.global_step):
self.logger.log('eval_total_time', self.timer.total_time(), self.global_step)
if self.cfg.custom_reward == "maze_multi_goal":
self.eval_maze_goals()
else:
self.eval()
if isinstance(self.agent, agents.GoalTD3Agent):
metrics = self.agent.update(self.replay_loader, self.global_step, self.reward_cls)
else:
metrics = self.agent.update(self.replay_loader, self.global_step)
self.logger.log_metrics(metrics, self.global_step, ty='train')
if log_every_step(self.global_step):
elapsed_time, total_time = self.timer.reset()
with self.logger.log_and_dump_ctx(self.global_step, ty='train') as log:
log('fps', self.cfg.log_every_steps / elapsed_time)
log('total_time', total_time)
log('step', self.global_step)
self.global_step += 1
# try to save snapshot
if self.global_frame in self.cfg.snapshot_at:
self.save_checkpoint(self._checkpoint_filepath.with_name(f'snapshot_{self.global_frame}.pt'), exclude=["replay_loader"])
# save checkpoint to reload
if not self.global_frame % self.cfg.checkpoint_every:
self.save_checkpoint(self._checkpoint_filepath, exclude=["replay_loader"])
self.save_checkpoint(self._checkpoint_filepath) # make sure we save the final checkpoint
self.finalize()
# def load_checkpoint(self, fp: tp.Union[Path, str]) -> None:
# fp = Path(fp)
# with fp.open('rb') as f:
# payload = torch.load(f)
# self.agent.init_from(payload['agent'])
@hydra.main(config_path='.', config_name='base_config', version_base="1.1")
def main(cfg: omgcf.DictConfig) -> None:
workspace = Workspace(cfg) # type: ignore
# for _ in range(10):
# workspace.eval()
if isinstance(workspace.agent, agents.DDPGAgent):
if workspace.agent.reward_free:
workspace.agent.train_reward(workspace.replay_loader)
workspace.train()
if __name__ == '__main__':
main()
|
controllable_agent-main
|
url_benchmark/train_offline.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import subprocess
import sys
import tempfile
import typing as tp
from pathlib import Path
import pytest
import hydra
import numpy as np
from url_benchmark import hiplogs
from url_benchmark import utils
def test_until_repr() -> None:
until = utils.Until(3, 1)
assert str(until) == "Until(action_repeat=1, until=3)"
def test_parse_logs() -> None:
path = (
Path(__file__).parents[1]
/ "controllable_agent"
/ "data"
/ "mockpretrain"
/ "hip.log"
)
hlog = hiplogs.HipLog(path)
logs = hlog.to_hiplot_experiment().datapoints
assert len(logs) == 13
vals = logs[-1].values
assert vals["workdir"] == "054238_fb_ddpg", "Xp id not exported"
bad_type = {x: y for x, y in vals.items() if not isinstance(y, (int, float, str))}
assert not bad_type, "Found unsupported type(s)"
def test_load() -> None:
xp = hiplogs.load(str(Path(__file__).parents[1] / "controllable_agent"), step=2)
assert len(xp.datapoints) == 6
def test_hiplog(tmp_path: Path) -> None:
hiplog = hiplogs.HipLog(tmp_path / "log.txt")
hiplog(hello="world")
hiplog.write()
hiplog(hello="monde")
hiplog(number=12).write()
hiplog(something=np.int32(12)).write()
data = hiplog.read()
for d in data:
for key in list(d):
if key.startswith("#"):
d.pop(key)
expected = [
dict(hello="world"),
dict(hello="monde", number=12),
dict(hello="monde", number=12, something=12),
]
assert data == expected
# reloaded
assert not hiplog._reloaded
hiplog = hiplogs.HipLog(tmp_path / "log.txt")
assert hiplog._reloaded == 1
def test_hiplog_stats(tmp_path: Path) -> None:
hiplog = hiplogs.HipLog(tmp_path / "log.txt")
for vals in ([3, 5], [7, 8, 9]):
for val in vals:
hiplog.with_stats("mean")(val=val)
hiplog.write()
data = hiplog.read()
for d in data:
for key in list(d):
if key.startswith("#"):
d.pop(key)
expected = [{"val#mean": 4}, {"val#mean": 8}]
assert data == expected
def test_repository_information() -> None:
out = hiplogs.repository_information()
assert len(out) == 3
def test_hiplogs_from_hydra_config(tmp_path: Path) -> None:
if sys.platform == "darwin":
pytest.skip(reason="Does not run on Mac")
train_cmd = [
sys.executable,
"-m",
"url_benchmark.test_hiplogs",
f"hydra.run.dir={tmp_path}",
]
subprocess.check_call(train_cmd)
@hydra.main(config_name="base_config", config_path=".", version_base="1.1")
def main(args: tp.Any) -> None:
args.agent.obs_type = "blublu"
args.agent.obs_shape = (2, 2)
args.agent.action_shape = (2, 2)
args.agent.num_expl_steps = 12
with tempfile.TemporaryDirectory() as tmp:
log = hiplogs.HipLog(Path(tmp) / "hiplog.test.log").flattened(args)
assert "agent/obs_type" in log.content
if __name__ == "__main__":
# needed to load the config:
from url_benchmark import pretrain # pylint: disable=unused-import,import-outside-toplevel
main()
|
controllable_agent-main
|
url_benchmark/test_hiplogs.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
controllable_agent-main
|
url_benchmark/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import csv
import logging
import typing as tp
from pathlib import Path
import datetime
from collections import defaultdict
import torch
import wandb
from termcolor import colored
from torch.utils.tensorboard import SummaryWriter
from url_benchmark.hiplogs import HipLog
Formating = tp.List[tp.Tuple[str, str, str]]
COMMON_TRAIN_FORMAT = [('frame', 'F', 'int'), ('step', 'S', 'int'),
('episode', 'E', 'int'), ('episode_length', 'L', 'int'),
('episode_reward', 'R', 'float'),
('fps', 'FPS', 'float'), ('total_time', 'T', 'time')]
COMMON_EVAL_FORMAT = [('frame', 'F', 'int'), ('step', 'S', 'int'),
('episode', 'E', 'int'), ('episode_length', 'L', 'int'),
('episode_reward', 'R', 'float'),
('total_time', 'T', 'time')]
pylogger = logging.getLogger(__name__)
class AverageMeter:
def __init__(self) -> None:
self._sum = 0.0
self._count = 0
def update(self, value: float, n: int = 1) -> None:
self._sum += value
self._count += n
def value(self) -> float:
return self._sum / max(1, self._count)
Metrics = tp.Dict[str, float]
class MetersGroup:
def __init__(self, csv_file_name: tp.Union[Path, str], formating: Formating, use_wandb: bool) -> None:
self._csv_file_name = Path(csv_file_name)
self._formating = formating
self._meters: tp.Dict[str, AverageMeter] = defaultdict(AverageMeter)
self._csv_file: tp.Optional[tp.TextIO] = None
self._csv_writer: tp.Optional[csv.DictWriter[str]] = None
self.use_wandb = use_wandb
def log(self, key: str, value: float, n: int = 1) -> None:
self._meters[key].update(value, n)
def _prime_meters(self) -> Metrics:
data = {}
for key, meter in self._meters.items():
if key.startswith('train'):
key = key[len('train') + 1:]
else:
key = key[len('eval') + 1:]
key = key.replace('/', '_')
data[key] = meter.value()
return data
def _remove_old_entries(self, data: Metrics) -> None:
rows = []
with self._csv_file_name.open('r') as f:
reader = csv.DictReader(f)
for row in reader:
if float(row['episode']) >= data['episode']:
break
rows.append(row)
with self._csv_file_name.open('w') as f:
writer = csv.DictWriter(f,
fieldnames=sorted(data.keys()),
restval=0.0)
writer.writeheader()
for row in rows:
writer.writerow(row)
def _dump_to_csv(self, data: Metrics) -> None:
if self._csv_writer is None:
should_write_header = True
if self._csv_file_name.exists():
self._remove_old_entries(data)
should_write_header = False
self._csv_file = self._csv_file_name.open('a')
self._csv_writer = csv.DictWriter(self._csv_file,
fieldnames=sorted(data.keys()),
restval=0.0)
if should_write_header:
self._csv_writer.writeheader()
if self._csv_writer is None or self._csv_file is None:
raise RuntimeError("CSV writer and file should have been instantiated")
self._csv_writer.writerow(data)
self._csv_file.flush()
@staticmethod
def _format(key: str, value: float, ty: str) -> str:
if ty == 'int':
value = int(value)
return f'{key}: {value}'
elif ty == 'float':
return f'{key}: {value:.04f}'
elif ty == 'time':
value_ = str(datetime.timedelta(seconds=int(value)))
return f'{key}: {value_}'
raise ValueError(f'invalid format type: {ty}')
def _dump_to_console(self, data: Metrics, prefix: str) -> None:
prefix = colored(prefix, 'yellow' if prefix == 'train' else 'green')
pieces = [f'| {prefix: <14}']
for key, disp_key, ty in self._formating:
value = data.get(key, 0)
pieces.append(self._format(disp_key, value, ty))
print(' | '.join(pieces))
@staticmethod
def _dump_to_wandb(data: Metrics) -> None:
wandb.log(data)
def dump(self, step: int, prefix: str) -> None:
if len(self._meters) == 0:
return
data = self._prime_meters()
data['frame'] = step
if self.use_wandb:
wandb_data = {prefix + '/' + key: val for key, val in data.items()}
self._dump_to_wandb(data=wandb_data)
self._dump_to_csv(data)
self._dump_to_console(data, prefix)
self._meters.clear()
class Logger:
def __init__(self, log_dir: Path, use_tb: bool, use_wandb: bool, use_hiplog: bool) -> None:
self._log_dir = log_dir
self._train_mg = MetersGroup(log_dir / 'train.csv',
formating=COMMON_TRAIN_FORMAT,
use_wandb=use_wandb)
self._eval_mg = MetersGroup(log_dir / 'eval.csv',
formating=COMMON_EVAL_FORMAT,
use_wandb=use_wandb)
self._sw: tp.Optional[SummaryWriter] = None
# self.hiplog: tp.Optional[HipLog] = None
self.use_hiplog = use_hiplog
if use_hiplog:
self.hiplog = HipLog(log_dir / "hip.log")
if use_tb:
self._sw = SummaryWriter(str(log_dir / 'tb'))
self.use_wandb = use_wandb
def _try_sw_log(self, key, value, step) -> None:
if self._sw is not None:
self._sw.add_scalar(key, value, step)
def log(self, key: str, value: tp.Union[float, torch.Tensor], step: int) -> None:
assert key.startswith('train') or key.startswith('eval')
if isinstance(value, torch.Tensor):
value = value.item()
self._try_sw_log(key, value, step)
mg = self._train_mg if key.startswith('train') else self._eval_mg
mg.log(key, value)
if self.use_hiplog:
self.hiplog(**{key: value})
def log_metrics(self, metrics: tp.Dict[str, float], step: int, ty: str) -> None:
for key, value in metrics.items():
self.log(f'{ty}/{key}', value, step)
def dump(self, step, ty=None) -> None:
try:
if ty is None or ty == 'eval':
self._eval_mg.dump(step, 'eval')
if ty is None or ty == 'train':
self._train_mg.dump(step, 'train')
except ValueError as e:
pylogger.warning(f"Could not dump metrics: {e}")
def log_and_dump_ctx(self, step: int, ty: str) -> "LogAndDumpCtx":
return LogAndDumpCtx(self, step, ty)
class LogAndDumpCtx:
def __init__(self, logger: Logger, step: int, ty: str) -> None:
self._logger = logger
self._step = step
self._ty = ty
def __enter__(self) -> "LogAndDumpCtx":
return self
def __call__(self, key: str, value: float) -> None:
self._logger.log(f'{self._ty}/{key}', value, self._step)
def __exit__(self, *args: tp.Any) -> None:
self._logger.dump(self._step, self._ty)
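# Illustrative usage of the Logger above (not part of the original file; paths and
# values are hypothetical):
#   logger = Logger(Path("."), use_tb=False, use_wandb=False, use_hiplog=False)
#   with logger.log_and_dump_ctx(1000, ty='train') as log:
#       log('episode_reward', 12.3)
#       log('step', 1000)
#   # on exit, the accumulated train metrics are dumped to train.csv and the console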
|
controllable_agent-main
|
url_benchmark/logger.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from url_benchmark import replay_buffer as rb
def test_batch() -> None:
shapes = dict(obs=(4, 12), action=(5, 11), next_obs=(6, 10))
meta = dict(a=np.random.rand(16), b=np.random.rand(17))
batch = rb.EpisodeBatch(
reward=np.array([1.0]),
discount=np.array([0.5]),
meta=meta,
**{x: np.random.rand(*y) for x, y in shapes.items()}
)
batches = rb.EpisodeBatch.collate_fn([batch, batch])
assert batches.obs.shape == (2, 4, 12)
assert isinstance(batches.meta, dict)
assert len(batches.meta) == 2
assert batches.meta["a"].shape == (2, 16)
# check that moving to Tensor does not change anything
cpu = batch.to("cpu")
assert cpu.reward.shape == (1,)
batches = rb.EpisodeBatch.collate_fn([cpu, cpu])
assert batches.reward.shape == (2, 1)
no_reward = batches.with_no_reward()
assert not no_reward.reward.abs().sum(), "reward should be masked"
assert batches.reward.abs().sum(), "reward should not be masked"
assert no_reward.obs is batches.obs, "Observations have been copied, which is time consuming"
|
controllable_agent-main
|
url_benchmark/test_replay_buffer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import random
import re
import time
import typing as tp
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torch import distributions as pyd
from torch.distributions.utils import _standard_normal
try:
from typing import Protocol
except ImportError:
# backward compatible
from typing_extensions import Protocol # type: ignore
class Trainable(Protocol): # cannot from url_benchmark import agent
@property
def training(self) -> bool:
...
def train(self, train: bool) -> None:
...
class eval_mode:
def __init__(self, *models: Trainable) -> None:
self.models = models
self.prev_states: tp.List[bool] = []
def __enter__(self) -> None:
self.prev_states = []
for model in self.models:
self.prev_states.append(model.training)
model.train(False)
def __exit__(self, *args: tp.Any) -> None:
for model, state in zip(self.models, self.prev_states):
model.train(state)
def set_seed_everywhere(seed: int) -> None:
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
X = tp.TypeVar("X")
def chain(*iterables: tp.Iterable[X]) -> tp.Iterator[X]: # TODO remove
for it in iterables:
yield from it
def soft_update_params(net, target_net, tau) -> None:
for param, target_param in zip(net.parameters(), target_net.parameters()):
target_param.data.copy_(tau * param.data +
(1 - tau) * target_param.data)
def hard_update_params(net, target_net) -> None:
for param, target_param in zip(net.parameters(), target_net.parameters()):
target_param.data.copy_(param.data)
def to_torch(xs, device) -> tuple:
return tuple(torch.as_tensor(x, device=device) for x in xs)
def weight_init(m) -> None:
"""Custom weight init for Conv2D and Linear layers."""
if isinstance(m, nn.Linear):
nn.init.orthogonal_(m.weight.data)
if m.bias is not None:
# if hasattr(m.bias, 'data'):
m.bias.data.fill_(0.0)
elif isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
gain = nn.init.calculate_gain('relu')
nn.init.orthogonal_(m.weight.data, gain)
if m.bias is not None:
# if hasattr(m.bias, 'data'):
m.bias.data.fill_(0.0)
def grad_norm(params, norm_type: float = 2.0):
params = [p for p in params if p.grad is not None]
total_norm = torch.norm(
torch.stack([torch.norm(p.grad.detach(), norm_type) for p in params]),
norm_type)
return total_norm.item()
def param_norm(params, norm_type: float = 2.0):
total_norm = torch.norm(
torch.stack([torch.norm(p.detach(), norm_type) for p in params]),
norm_type)
return total_norm.item()
def _repr(obj: tp.Any) -> str:
items = {x: y for x, y in obj.__dict__.items() if not x.startswith("_")}
params = ", ".join(f"{x}={y!r}" for x, y in sorted(items.items()))
return f"{obj.__class__.__name__}({params})"
class Until:
def __init__(self, until: tp.Optional[int], action_repeat: int = 1) -> None:
self.until = until
self.action_repeat = action_repeat
def __call__(self, step: int) -> bool:
if self.until is None:
return True
until = self.until // self.action_repeat
return step < until
def __repr__(self) -> str:
return _repr(self)
class Every:
def __init__(self, every: tp.Optional[int], action_repeat: int = 1) -> None:
self.every = every
self.action_repeat = action_repeat
def __call__(self, step: int) -> bool:
if self.every is None:
return False
every = self.every // self.action_repeat
return step % every == 0
def __repr__(self) -> str:
return _repr(self)
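# Worked example for Until/Every (illustrative, not part of the original file): both
# divide their threshold by action_repeat so they can be configured in frames, e.g.
#   train_until = Until(1000, action_repeat=2)   # True while step < 500
#   eval_every = Every(100, action_repeat=2)     # True when step % 50 == 0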
class Timer:
def __init__(self) -> None:
self._start_time = time.time()
self._last_time = time.time()
def reset(self) -> tp.Tuple[float, float]:
elapsed_time = time.time() - self._last_time
self._last_time = time.time()
total_time = time.time() - self._start_time
return elapsed_time, total_time
def total_time(self) -> float:
return time.time() - self._start_time
class TruncatedNormal(pyd.Normal):
def __init__(self, loc, scale, low=-1.0, high=1.0, eps=1e-6) -> None:
super().__init__(loc, scale, validate_args=False)
self.low = low
self.high = high
self.eps = eps
def _clamp(self, x) -> torch.Tensor:
clamped_x = torch.clamp(x, self.low + self.eps, self.high - self.eps)
x = x - x.detach() + clamped_x.detach()
return x
def sample(self, clip=None, sample_shape=torch.Size()) -> torch.Tensor: # type: ignore
shape = self._extended_shape(sample_shape)
eps = _standard_normal(shape,
dtype=self.loc.dtype,
device=self.loc.device)
eps *= self.scale
if clip is not None:
eps = torch.clamp(eps, -clip, clip)
x = self.loc + eps
return self._clamp(x)
class TanhTransform(pyd.transforms.Transform):
domain = pyd.constraints.real
codomain = pyd.constraints.interval(-1.0, 1.0)
bijective = True
sign = +1
def __init__(self, cache_size=1) -> None:
super().__init__(cache_size=cache_size)
@staticmethod
def atanh(x) -> torch.Tensor:
return 0.5 * (x.log1p() - (-x).log1p())
def __eq__(self, other):
return isinstance(other, TanhTransform)
def _call(self, x) -> torch.Tensor:
return x.tanh()
def _inverse(self, y) -> torch.Tensor:
# We do not clamp to the boundary here as it may degrade the performance of certain algorithms.
# one should use `cache_size=1` instead
return self.atanh(y)
def log_abs_det_jacobian(self, x, y) -> torch.Tensor:
# We use a formula that is more numerically stable, see details in the following link
# https://github.com/tensorflow/probability/commit/ef6bb176e0ebd1cf6e25c6b5cecdd2428c22963f#diff-e120f70e92e6741bca649f04fcd907b7
return 2. * (math.log(2.) - x - F.softplus(-2. * x))
class SquashedNormal(pyd.transformed_distribution.TransformedDistribution):
def __init__(self, loc, scale) -> None:
self.loc = loc
self.scale = scale
self.base_dist = pyd.Normal(loc, scale)
transforms = [TanhTransform()]
super().__init__(self.base_dist, transforms)
@property
def mean(self):
mu = self.loc
for tr in self.transforms:
mu = tr(mu)
return mu
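# --- Illustrative usage sketch (added for exposition; not part of the original module) ---
# SquashedNormal pushes a Normal through tanh, so samples lie in (-1, 1) and the mean
# is tanh(loc). Values below are made up.
def _example_squashed_normal() -> None:
    dist = SquashedNormal(loc=torch.zeros(3), scale=torch.ones(3))
    sample = dist.sample()
    assert sample.shape == (3,) and (sample.abs() <= 1).all()
    assert torch.allclose(dist.mean, torch.tanh(torch.zeros(3)))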
def schedule(schdl, step) -> float:
try:
return float(schdl)
except ValueError:
match = re.match(r'linear\((.+),(.+),(.+)\)', schdl)
if match:
init, final, duration = [float(g) for g in match.groups()]
mix = np.clip(step / duration, 0.0, 1.0)
return (1.0 - mix) * init + mix * final
match = re.match(r'step_linear\((.+),(.+),(.+),(.+),(.+)\)', schdl)
if match:
init, final1, duration1, final2, duration2 = [
float(g) for g in match.groups()
]
if step <= duration1:
mix = np.clip(step / duration1, 0.0, 1.0)
return (1.0 - mix) * init + mix * final1
else:
mix = np.clip((step - duration1) / duration2, 0.0, 1.0)
return (1.0 - mix) * final1 + mix * final2
raise NotImplementedError(schdl)
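# --- Illustrative usage sketch (added for exposition; not part of the original module) ---
# `schedule` accepts either a constant or a `linear(init,final,duration)` /
# `step_linear(init,final1,duration1,final2,duration2)` specification string.
def _example_schedule() -> None:
    assert schedule("0.1", step=0) == 0.1                    # constant
    assert schedule("linear(1.0,0.1,100)", step=0) == 1.0    # start of the ramp
    assert schedule("linear(1.0,0.1,100)", step=100) == 0.1  # fully decayed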
class RandomShiftsAug(nn.Module):
def __init__(self, pad) -> None:
super().__init__()
self.pad = pad
def forward(self, x) -> torch.Tensor:
x = x.float()
n, _, h, w = x.size()
assert h == w
padding = tuple([self.pad] * 4)
x = F.pad(x, padding, 'replicate')
eps = 1.0 / (h + 2 * self.pad)
arange = torch.linspace(-1.0 + eps,
1.0 - eps,
h + 2 * self.pad,
device=x.device,
dtype=x.dtype)[:h]
arange = arange.unsqueeze(0).repeat(h, 1).unsqueeze(2)
base_grid = torch.cat([arange, arange.transpose(1, 0)], dim=2)
base_grid = base_grid.unsqueeze(0).repeat(n, 1, 1, 1)
shift = torch.randint(0,
2 * self.pad + 1,
size=(n, 1, 1, 2),
device=x.device,
dtype=x.dtype)
shift *= 2.0 / (h + 2 * self.pad)
grid = base_grid + shift
return F.grid_sample(x,
grid,
padding_mode='zeros',
align_corners=False)
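# --- Illustrative usage sketch (added for exposition; not part of the original module) ---
# RandomShiftsAug implements the DrQ-style random-shift image augmentation: pad by
# `pad` pixels with edge replication, then bilinearly resample at a random offset.
# The frame-stacked pixel batch below is made up (batch of 8, 3 stacked RGB frames).
def _example_random_shifts_aug() -> None:
    aug = RandomShiftsAug(pad=4)
    frames = torch.randint(0, 256, (8, 9, 84, 84), dtype=torch.uint8)
    out = aug(frames)
    assert out.shape == frames.shape and out.dtype == torch.float32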
class RMS:
"""running mean and std """
def __init__(self, device, epsilon=1e-4, shape=(1,)) -> None:
self.M = torch.zeros(shape).to(device)
self.S = torch.ones(shape).to(device)
self.n = epsilon
def __call__(self, x):
bs = x.size(0)
delta = torch.mean(x, dim=0) - self.M
new_M = self.M + delta * bs / (self.n + bs)
new_S = (self.S * self.n + torch.var(x, dim=0) * bs +
torch.square(delta) * self.n * bs /
(self.n + bs)) / (self.n + bs)
self.M = new_M
self.S = new_S
self.n += bs
return self.M, self.S
class PBE:
"""particle-based entropy based on knn normalized by running mean """
def __init__(self, rms, knn_clip, knn_k, knn_avg, knn_rms, device) -> None:
self.rms = rms
self.knn_rms = knn_rms
self.knn_k = knn_k
self.knn_avg = knn_avg
self.knn_clip = knn_clip
self.device = device
def __call__(self, rep):
source = target = rep
b1, b2 = source.size(0), target.size(0)
# (b1, 1, c) - (1, b2, c) -> (b1, 1, c) - (1, b2, c) -> (b1, b2, c) -> (b1, b2)
sim_matrix = torch.norm(source[:, None, :].view(b1, 1, -1) -
target[None, :, :].view(1, b2, -1),
dim=-1,
p=2)
reward, _ = sim_matrix.topk(self.knn_k,
dim=1,
largest=False,
sorted=True) # (b1, k)
if not self.knn_avg: # only keep k-th nearest neighbor
reward = reward[:, -1]
reward = reward.reshape(-1, 1) # (b1, 1)
reward /= self.rms(reward)[0] if self.knn_rms else 1.0
reward = torch.maximum(
reward - self.knn_clip,
torch.zeros_like(reward).to(self.device)
) if self.knn_clip >= 0.0 else reward # (b1, 1)
else: # average over all k nearest neighbors
reward = reward.reshape(-1, 1) # (b1 * k, 1)
reward /= self.rms(reward)[0] if self.knn_rms else 1.0
reward = torch.maximum(
reward - self.knn_clip,
torch.zeros_like(reward).to(
self.device)) if self.knn_clip >= 0.0 else reward
reward = reward.reshape((b1, self.knn_k)) # (b1, k)
reward = reward.mean(dim=1, keepdim=True) # (b1, 1)
reward = torch.log(reward + 1.0)
return reward
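# --- Illustrative usage sketch (added for exposition; not part of the original module) ---
# PBE turns k-nearest-neighbor distances between representations into an intrinsic,
# entropy-like reward, optionally normalized by the RMS running statistics.
# Sizes below are made up.
def _example_pbe() -> None:
    device = "cpu"
    pbe = PBE(RMS(device), knn_clip=0.0, knn_k=5, knn_avg=True, knn_rms=False, device=device)
    rep = torch.randn(32, 16)  # batch of 32 representation vectors
    reward = pbe(rep)
    assert reward.shape == (32, 1)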
class FloatStats:
def __init__(self) -> None:
self.min = np.inf
self.max = -np.inf
self.mean = 0.0
self.count = 0
def add(self, value: float) -> "FloatStats":
self.min = min(value, self.min)
self.max = max(value, self.max)
self.count += 1
self.mean = (self.count - 1) / self.count * self.mean + 1 / self.count * value
return self
|
controllable_agent-main
|
url_benchmark/utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
DOMAINS = [
'walker',
'quadruped',
'jaco',
'point_mass_maze',
'cheetah'
]
CHEETAH_TASKS = [
'cheetah_walk',
'cheetah_walk_backward',
'cheetah_run',
'cheetah_run_backward'
]
WALKER_TASKS = [
'walker_stand',
'walker_walk',
'walker_run',
'walker_flip',
]
QUADRUPED_TASKS = [
'quadruped_walk',
'quadruped_run',
'quadruped_stand',
'quadruped_jump',
]
JACO_TASKS = [
'jaco_reach_top_left',
'jaco_reach_top_right',
'jaco_reach_bottom_left',
'jaco_reach_bottom_right',
]
POINT_MASS_MAZE_TASKS = [
'point_mass_maze_reach_top_left',
'point_mass_maze_reach_top_right',
'point_mass_maze_reach_bottom_left',
'point_mass_maze_reach_bottom_right',
]
TASKS: List[str] = WALKER_TASKS + QUADRUPED_TASKS + JACO_TASKS + POINT_MASS_MAZE_TASKS
PRIMAL_TASKS = {
'walker': 'walker_stand',
'jaco': 'jaco_reach_top_left',
'quadruped': 'quadruped_walk'
}
|
controllable_agent-main
|
url_benchmark/dmc_benchmark.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pdb # pylint: disable=unused-import
import logging
import typing as tp
import dataclasses
import collections
from pathlib import Path
import numpy as np
import torch
from dm_env import specs, TimeStep
from tqdm import tqdm
from url_benchmark.replay_buffer import EpisodeBatch
from url_benchmark.dmc import ExtendedGoalTimeStep
Specs = tp.Sequence[specs.Array]
logger = logging.getLogger(__name__)
EpisodeTuple = tp.Tuple[np.ndarray, ...]
Episode = tp.Dict[str, np.ndarray]
T = tp.TypeVar("T", np.ndarray, torch.Tensor)
def episode_len(episode: Episode) -> int:
# subtract 1 because of the dummy first transition
return next(iter(episode.values())).shape[0] - 1
def load_episode(fn: Path) -> tp.Dict[str, np.ndarray]:
with fn.open('rb') as f:
episode = np.load(f)
episode = {k: episode[k] for k in episode.keys()}
return episode # type: ignore
def relabel_episode(env: tp.Any, episode: tp.Dict[str, np.ndarray], goal_func: tp.Any) -> tp.Dict[str, np.ndarray]:
goals = []
rewards = []
states = episode['physics']
for i in range(states.shape[0]):
with env.physics.reset_context():
env.physics.set_state(states[i])
reward = env.task.get_reward(env.physics)
reward = np.full((1,), reward, dtype=np.float32)
rewards.append(reward)
if goal_func is not None:
goals.append(goal_func(env))
episode['reward'] = np.array(rewards, dtype=np.float32)
if goals:
episode['goal'] = np.array(goals, dtype=np.float32)
return episode
# class ReplayBufferIterable:
# def __init__(self, replay_buffer: "ReplayBuffer") -> None:
# self._replay_buffer = replay_buffer
#
# def __next__(self) -> EpisodeBatch:
# return self._replay_buffer.sample()
class ReplayBuffer:
def __init__(self,
max_episodes: int, discount: float, future: float, max_episode_length: tp.Optional[int] = None) -> None:
# data_specs: Specs,
# self._data_specs = tuple(data_specs)
# self._meta_specs = tuple(meta_specs)
# self._batch_size = batch_size
self._max_episodes = max_episodes
self._discount = discount
assert 0 <= future <= 1
self._future = future
self._current_episode: tp.Dict[str, tp.List[np.ndarray]] = collections.defaultdict(list)
self._idx = 0
self._full = False
self._num_transitions = 0
self._storage: tp.Dict[str, np.ndarray] = collections.defaultdict()
self._collected_episodes = 0
self._batch_names = set(field.name for field in dataclasses.fields(ExtendedGoalTimeStep))
self._episodes_length = np.zeros(max_episodes, dtype=np.int32)
self._episodes_selection_probability = None
self._is_fixed_episode_length = True
self._max_episode_length = max_episode_length
def __len__(self) -> int:
return self._max_episodes if self._full else self._idx
def __setstate__(self, state):
self.__dict__.update(state)
self._backward_compatibility()
def _backward_compatibility(self):
if self._storage and not hasattr(self, '_episodes_length'):
self._episodes_length = np.array([len(array) - 1 for array in self._storage["discount"]], dtype=np.int32)
self._episodes_length[len(self):] = 0
assert self._episodes_length[:len(self)].min() == self._episodes_length[:len(self)].max()
self._episodes_selection_probability = None
self._is_fixed_episode_length = True
self._max_episode_length = None
def add(self, time_step: TimeStep, meta: tp.Mapping[str, np.ndarray]) -> None:
dtype = np.float32
for key, value in meta.items():
self._current_episode[key].append(value)
for field in dataclasses.fields(time_step):
value = time_step[field.name]
if np.isscalar(value):
value = np.full((1,), value, dtype=dtype)
if isinstance(value, np.ndarray):
self._current_episode[field.name].append(np.array(value, dtype=dtype))
if time_step.last():
if not hasattr(self, "_batch_names"):
self._batch_names = set(field.name for field in dataclasses.fields(ExtendedGoalTimeStep))
for name, value_list in self._current_episode.items():
values = np.array(value_list, dtype)
if name not in self._storage:
# first iteration, the buffer is created with appropriate size
_shape = values.shape
if self._max_episode_length is not None:
_shape = (self._max_episode_length,) + _shape[1:]
self._storage[name] = np.empty((self._max_episodes,) + _shape, dtype=dtype)
self._storage[name][self._idx][:len(values)] = values
self._episodes_length[self._idx] = len(self._current_episode['discount']) - 1 # compensate for the dummy transition at the beginning
if self._episodes_length[self._idx] != self._episodes_length[self._idx - 1] and self._episodes_length[self._idx - 1] != 0:
self._is_fixed_episode_length = False
self._current_episode = collections.defaultdict(list)
self._collected_episodes += 1
self._idx = (self._idx + 1) % self._max_episodes
self._full = self._full or self._idx == 0
self._episodes_selection_probability = None
@property
def avg_episode_length(self) -> int:
return round(self._episodes_length[:len(self)].mean())
def sample(self, batch_size, custom_reward: tp.Optional[tp.Any] = None, with_physics: bool = False) -> EpisodeBatch:
if not hasattr(self, "_batch_names"):
self._batch_names = set(field.name for field in dataclasses.fields(ExtendedGoalTimeStep))
if not isinstance(self._future, float):
assert isinstance(self._future, bool)
self._future = float(self._future)
if self._is_fixed_episode_length:
ep_idx = np.random.randint(0, len(self), size=batch_size)
else:
if self._episodes_selection_probability is None:
self._episodes_selection_probability = self._episodes_length / self._episodes_length.sum()
ep_idx = np.random.choice(np.arange(len(self._episodes_length)), size=batch_size, p=self._episodes_selection_probability)
eps_lengths = self._episodes_length[ep_idx]
# add +1 for the first dummy transition
step_idx = np.random.randint(0, eps_lengths) + 1
assert (step_idx <= eps_lengths).all()
if self._future < 1:
# future_idx = step_idx + np.random.randint(0, self.episode_length - step_idx + 1, size=self._batch_size)
future_idx = step_idx + np.random.geometric(p=(1 - self._future), size=batch_size)
future_idx = np.clip(future_idx, 0, eps_lengths)
assert (future_idx <= eps_lengths).all()
meta = {name: data[ep_idx, step_idx - 1] for name, data in self._storage.items() if name not in self._batch_names}
obs = self._storage['observation'][ep_idx, step_idx - 1]
action = self._storage['action'][ep_idx, step_idx]
next_obs = self._storage['observation'][ep_idx, step_idx]
phy = self._storage['physics'][ep_idx, step_idx]
if custom_reward is not None:
reward = np.array([[custom_reward.from_physics(p)] for p in phy], dtype=np.float32)
else:
reward = self._storage['reward'][ep_idx, step_idx]
discount = self._discount * self._storage['discount'][ep_idx, step_idx]
goal: tp.Optional[np.ndarray] = None
next_goal: tp.Optional[np.ndarray] = None
future_obs: tp.Optional[np.ndarray] = None
future_goal: tp.Optional[np.ndarray] = None
if 'goal' in self._storage.keys():
goal = self._storage['goal'][ep_idx, step_idx - 1]
next_goal = self._storage['goal'][ep_idx, step_idx]
if self._future < 1:
future_goal = self._storage['goal'][ep_idx, future_idx - 1]
# elif self._future:
if self._future < 1:
future_obs = self._storage['observation'][ep_idx, future_idx - 1]
additional = {}
if with_physics:
additional["_physics"] = phy
# TODO remove type ignore when working
return EpisodeBatch(obs=obs, goal=goal, action=action, reward=reward, discount=discount,
next_obs=next_obs, next_goal=next_goal,
future_obs=future_obs, future_goal=future_goal, meta=meta, **additional)
def load(self, env: tp.Any, replay_dir: Path, relabel: bool = True, goal_func: tp.Any = None) -> None:
eps_fns = sorted(replay_dir.glob('*.npz'))
for eps_fn in tqdm(eps_fns):
if self._full:
break
episode = load_episode(eps_fn)
if relabel:
episode = relabel_episode(env, episode, goal_func)
# for field in dataclasses.fields(TimeStep):
for name, values in episode.items():
# values = episode[field.name]
if name not in self._storage:
# first iteration, the buffer is created with appropriate size
self._storage[name] = np.empty((self._max_episodes,) + values.shape, dtype=np.float32)
self._storage[name][self._idx] = np.array(values, dtype=np.float32)
self._idx = (self._idx + 1) % self._max_episodes
self._full = self._full or self._idx == 0
def relabel(self, custom_reward) -> None:
for (ep_idx, phy) in tqdm(enumerate(self._storage["physics"])):
reward = np.array([[custom_reward.from_physics(p)] for p in phy], dtype=np.float32)
self._storage["reward"][ep_idx] = reward
self._max_episodes = len(self._storage["physics"])
self._full = True
# def __iter__(self) -> ReplayBufferIterable:
# ''' Returns the Iterator object '''
# return ReplayBufferIterable(self)
# def __iter__(self) -> tp.Iterator[EpisodeBatch[np.ndarray]]:
# while True:
# yield self.sample()
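# --- Illustrative usage sketch (added for exposition; not part of the original module) ---
# The buffer is filled one TimeStep at a time via `add` (including the initial dummy
# transition) and sampled with `sample(batch_size)`. The dummy episode below mirrors
# the unit tests; importing ExtendedTimeStep requires dm_control to be installed.
def _example_replay_buffer() -> None:
    from dm_env import StepType
    from url_benchmark.dmc import ExtendedTimeStep
    buffer = ReplayBuffer(max_episodes=4, discount=0.99, future=0.99, max_episode_length=11)
    for i in range(11):  # 10-step episode plus the initial dummy transition
        step_type = StepType.FIRST if i == 0 else (StepType.LAST if i == 10 else StepType.MID)
        ts = ExtendedTimeStep(step_type=step_type, reward=1.0, discount=1.0,
                              observation=np.zeros(3, dtype=np.float32),
                              action=np.zeros(2, dtype=np.float32))
        buffer.add(ts, meta={})
    batch = buffer.sample(batch_size=8)
    assert batch.obs.shape == (8, 3) and batch.action.shape == (8, 2)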
|
controllable_agent-main
|
url_benchmark/in_memory_replay_buffer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import io
import random
import traceback
import typing as tp
from pathlib import Path
from collections import defaultdict
import dataclasses
import numpy as np
import torch
from torch.utils.data import IterableDataset
from dm_env import specs, TimeStep
EpisodeTuple = tp.Tuple[np.ndarray, ...]
Episode = tp.Dict[str, np.ndarray]
T = tp.TypeVar("T", np.ndarray, torch.Tensor)
B = tp.TypeVar("B", bound="EpisodeBatch")
@dataclasses.dataclass
class EpisodeBatch(tp.Generic[T]):
"""For later use
A container for batchable replayed episodes
"""
obs: T
action: T
reward: T
next_obs: T
discount: T
meta: tp.Dict[str, T] = dataclasses.field(default_factory=dict)
_physics: tp.Optional[T] = None
goal: tp.Optional[T] = None
next_goal: tp.Optional[T] = None
future_obs: tp.Optional[T] = None
future_goal: tp.Optional[T] = None
def __post_init__(self) -> None:
# some security to be removed later
assert isinstance(self.reward, (np.ndarray, torch.Tensor))
assert isinstance(self.discount, (np.ndarray, torch.Tensor))
assert isinstance(self.meta, dict)
def to(self, device: str) -> "EpisodeBatch[torch.Tensor]":
"""Creates a new instance on the appropriate device"""
out: tp.Dict[str, tp.Any] = {}
for field in dataclasses.fields(self):
data = getattr(self, field.name)
if field.name == "meta":
out[field.name] = {x: torch.as_tensor(y, device=device) for x, y in data.items()} # type: ignore
elif isinstance(data, (torch.Tensor, np.ndarray)):
out[field.name] = torch.as_tensor(data, device=device) # type: ignore
elif data is None:
out[field.name] = data
else:
raise RuntimeError(f"Not sure what to do with {field.name}: {data}")
return EpisodeBatch(**out)
@classmethod
def collate_fn(cls, batches: tp.List["EpisodeBatch[T]"]) -> "EpisodeBatch[torch.Tensor]":
"""Creates a new instance from several by stacking in a new first dimension
for all attributes
"""
out: tp.Dict[str, tp.Any] = {}
if isinstance(batches[0].obs, np.ndarray): # move everything to pytorch if first one is numpy
batches = [b.to("cpu") for b in batches] # type: ignore
for field in dataclasses.fields(cls):
data = [getattr(mf, field.name) for mf in batches]
# skip fields with None data
if data[0] is None:
if any(x is not None for x in data):
raise RuntimeError("Found a non-None value mixed with Nones")
out[field.name] = None
continue
# reward and discount can be float which should be converted to
# tensors for stacking
if field.name == "meta":
meta = {k: torch.stack([d[k] for d in data]) for k in data[0]}
out[field.name] = meta
elif isinstance(data[0], torch.Tensor):
out[field.name] = torch.stack(data)
else:
raise RuntimeError(f"Not sure what to do with {field.name}: {data}")
# out[field.name] = [x for y in data for x in y]
return EpisodeBatch(**out)
def unpack(self) -> tp.Tuple[T, T, T, T, T]:
"""Unpacks the structure into the legacy unnamed tuple.
Try to avoid it if possible, this is more likely to be wrong than using names
"""
return (self.obs, self.action, self.reward, self.discount, self.next_obs)
# return (self.obs, self.action, self.reward, self.discount, self.next_obs, *self.meta)
def with_no_reward(self: B) -> B:
reward = self.reward
reward = torch.zeros_like(reward) if isinstance(reward, torch.Tensor) else 0 * reward
return dataclasses.replace(self, reward=reward)
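# --- Illustrative usage sketch (added for exposition; not part of the original module) ---
# `collate_fn` moves numpy batches to torch and stacks every field along a new first
# dimension, leaving None fields as None. Shapes below are made up.
def _example_episode_batch() -> None:
    def _one() -> "EpisodeBatch[np.ndarray]":
        return EpisodeBatch(obs=np.zeros(4, dtype=np.float32),
                            action=np.zeros(2, dtype=np.float32),
                            reward=np.zeros(1, dtype=np.float32),
                            discount=np.ones(1, dtype=np.float32),
                            next_obs=np.zeros(4, dtype=np.float32))
    batch = EpisodeBatch.collate_fn([_one() for _ in range(8)])
    assert isinstance(batch.obs, torch.Tensor) and batch.obs.shape == (8, 4)
    assert batch.future_obs is None  # optional fields stay None when absent everywhere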
def episode_len(episode: Episode) -> int:
# subtract 1 because of the dummy first transition
return next(iter(episode.values())).shape[0] - 1
def save_episode(episode: Episode, fn: Path) -> None:
with io.BytesIO() as bs:
np.savez_compressed(bs, **episode)
bs.seek(0)
with fn.open('wb') as f:
f.write(bs.read())
def load_episode(fn: Path) -> Episode:
with fn.open('rb') as f:
episode = np.load(f)
episode = {k: episode[k] for k in episode.keys()}
return episode
Specs = tp.Sequence[specs.Array]
class ReplayBufferStorage:
def __init__(self, data_specs: Specs, replay_dir: tp.Union[str, Path]) -> None:
self._data_specs = tuple(data_specs)
self._meta_specs: tp.Tuple[tp.Any, ...] = tuple() # deactivated
self._replay_dir = Path(replay_dir)
self._replay_dir.mkdir(exist_ok=True)
# probably bad annotation, let's update when it starts failing
self._current_episode: tp.Dict[str, tp.List[np.ndarray]] = defaultdict(list)
self._preload()
raise Exception("This code is dead due to missing handling of meta data")
def __len__(self) -> int:
return self._num_transitions
def add(self, time_step: TimeStep, meta: tp.Mapping[str, np.ndarray]) -> None:
for key, value in meta.items():
self._current_episode[key].append(value)
for spec in self._data_specs:
value = time_step[spec.name]
if np.isscalar(value):
value = np.full(spec.shape, value, spec.dtype)
assert spec.shape == value.shape and spec.dtype == value.dtype
self._current_episode[spec.name].append(value)
if time_step.last():
episode = {}
for spec in self._data_specs:
values = self._current_episode[spec.name]
episode[spec.name] = np.array(values, spec.dtype)
for spec in self._meta_specs:
values = self._current_episode[spec.name]
episode[spec.name] = np.array(values, spec.dtype)
self._current_episode = defaultdict(list)
self._store_episode(episode)
def _preload(self) -> None:
self._num_episodes = 0
self._num_transitions = 0
for fn in self._replay_dir.glob('*.npz'):
_, _, eps_len = fn.stem.split('_')
self._num_episodes += 1
self._num_transitions += int(eps_len)
def _store_episode(self, episode: Episode) -> None:
eps_idx = self._num_episodes
eps_len = episode_len(episode)
self._num_episodes += 1
self._num_transitions += eps_len
ts = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')
eps_fn = f'{ts}_{eps_idx}_{eps_len}.npz'
save_episode(episode, self._replay_dir / eps_fn)
class ReplayBuffer(IterableDataset):
def __init__(self, storage: ReplayBufferStorage, max_size: int, num_workers: int, nstep: int, discount: float,
fetch_every: int, save_snapshot: bool, future: bool) -> None:
super().__init__()
self._storage = storage
self._size = 0
self._max_size = max_size
self._num_workers = max(1, num_workers)
self._episode_fns: tp.List[Path] = []
self._episodes: tp.Dict[Path, Episode] = {}
self._nstep = nstep
self._discount = discount
self._fetch_every = fetch_every
self._samples_since_last_fetch = fetch_every
self._save_snapshot = save_snapshot
self._future = future
def _sample_episode(self) -> Episode:
eps_fn = random.choice(self._episode_fns)
return self._episodes[eps_fn]
def _store_episode(self, eps_fn: Path) -> bool:
try:
episode = load_episode(eps_fn)
except Exception: # pylint: disable=broad-except
return False
eps_len = episode_len(episode)
while eps_len + self._size > self._max_size:
early_eps_fn = self._episode_fns.pop(0)
early_eps = self._episodes.pop(early_eps_fn)
self._size -= episode_len(early_eps)
early_eps_fn.unlink(missing_ok=True) # type: ignore
self._episode_fns.append(eps_fn)
self._episode_fns.sort()
self._episodes[eps_fn] = episode
self._size += eps_len
if not self._save_snapshot:
eps_fn.unlink(missing_ok=True) # type: ignore
return True
def _try_fetch(self) -> None:
if self._samples_since_last_fetch < self._fetch_every:
return
self._samples_since_last_fetch = 0
try:
worker_id = int(torch.utils.data.get_worker_info().id)
except Exception: # pylint: disable=broad-except
worker_id = 0
eps_fns = sorted(self._storage._replay_dir.glob('*.npz'), reverse=True)
fetched_size = 0
for eps_fn in eps_fns:
eps_idx, eps_len = [int(x) for x in eps_fn.stem.split('_')[1:]]
if eps_idx % self._num_workers != worker_id:
continue
if eps_fn in self._episodes:
break
if fetched_size + eps_len > self._max_size:
break
fetched_size += eps_len
if not self._store_episode(eps_fn):
break
def _sample(self) -> EpisodeBatch[np.ndarray]:
try:
self._try_fetch()
except Exception: # pylint: disable=broad-except
traceback.print_exc()
self._samples_since_last_fetch += 1
episode = self._sample_episode()
# add +1 for the first dummy transition
idx = np.random.randint(0, episode_len(episode) - self._nstep + 1) + 1
meta = {spec.name: episode[spec.name][idx - 1] for spec in self._storage._meta_specs}
obs = episode['observation'][idx - 1]
action = episode['action'][idx]
next_obs = episode['observation'][idx + self._nstep - 1]
reward = np.zeros_like(episode['reward'][idx])
discount = np.ones_like(episode['discount'][idx])
for i in range(self._nstep):
step_reward = episode['reward'][idx + i]
reward += discount * step_reward
discount *= episode['discount'][idx + i] * self._discount
goal: tp.Optional[np.ndarray] = None
future_obs: tp.Optional[np.ndarray] = None
future_goal: tp.Optional[np.ndarray] = None
if 'goal' in episode.keys():
goal = episode['goal'][idx - 1]
if self._future:
future_idx = idx + np.random.randint(0, episode_len(episode) - idx + 1)
future_goal = episode['goal'][future_idx - 1]
# return (obs, goal, action, reward, discount, next_obs, *meta) # type: ignore
elif self._future:
future_idx = idx + np.random.randint(0, episode_len(episode) - idx + 1)
future_obs = episode['observation'][future_idx - 1]
# TODO remove type ignore when working
return EpisodeBatch(obs=obs, action=action, reward=reward, discount=discount,
next_obs=next_obs, goal=goal, future_obs=future_obs,
future_goal=future_goal, meta=meta)
def __iter__(self) -> tp.Iterator[EpisodeBatch[np.ndarray]]:
while True:
yield self._sample()
def _worker_init_fn(worker_id: int) -> None:
seed = np.random.get_state()[1][0] + worker_id # type: ignore
np.random.seed(seed)
random.seed(seed)
def make_replay_loader(storage: ReplayBufferStorage, max_size: int, batch_size: int, num_workers: int,
save_snapshot: bool, future: bool, nstep: int, discount: float) -> tp.Iterable[EpisodeBatch[torch.Tensor]]:
max_size_per_worker = max_size // max(1, num_workers)
iterable = ReplayBuffer(storage,
max_size_per_worker,
num_workers,
nstep,
discount,
fetch_every=1000,
save_snapshot=save_snapshot,
future=future)
loader = torch.utils.data.DataLoader(iterable,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=True,
collate_fn=EpisodeBatch.collate_fn,
worker_init_fn=_worker_init_fn)
return loader
|
controllable_agent-main
|
url_benchmark/replay_buffer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pdb # pylint: disable=unused-import
import sys
import unittest
import dataclasses
from collections import OrderedDict, deque
import typing as tp
from typing import Any
from dm_env import Environment
from dm_env import StepType, specs
import numpy as np
class UnsupportedPlatform(unittest.SkipTest, RuntimeError):
"""The platform is not supported for running"""
try:
from dm_control import suite # , manipulation
from dm_control.suite.wrappers import action_scale, pixels
from url_benchmark import custom_dmc_tasks as cdmc
except ImportError as e:
raise UnsupportedPlatform(f"Import error (Note: DMC does not run on Mac):\n{e}") from e
S = tp.TypeVar("S", bound="TimeStep")
Env = tp.Union["EnvWrapper", Environment]
@dataclasses.dataclass
class TimeStep:
step_type: StepType
reward: float
discount: float
observation: np.ndarray
physics: np.ndarray = dataclasses.field(default=np.ndarray([]), init=False)
def first(self) -> bool:
return self.step_type == StepType.FIRST # type: ignore
def mid(self) -> bool:
return self.step_type == StepType.MID # type: ignore
def last(self) -> bool:
return self.step_type == StepType.LAST # type: ignore
def __getitem__(self, attr: str) -> tp.Any:
return getattr(self, attr)
def _replace(self: S, **kwargs: tp.Any) -> S:
for name, val in kwargs.items():
setattr(self, name, val)
return self
@dataclasses.dataclass
class GoalTimeStep(TimeStep):
goal: np.ndarray
@dataclasses.dataclass
class ExtendedGoalTimeStep(GoalTimeStep):
action: tp.Any
@dataclasses.dataclass
class ExtendedTimeStep(TimeStep):
action: tp.Any
class EnvWrapper:
def __init__(self, env: Env) -> None:
self._env = env
def _augment_time_step(self, time_step: TimeStep, action: tp.Optional[np.ndarray] = None) -> TimeStep:
if not isinstance(time_step, TimeStep):
# dm_env time step is a named tuple
time_step = TimeStep(**time_step._asdict())
if self.physics is not None:
return time_step._replace(physics=self.physics.get_state())
else:
return time_step
def reset(self) -> TimeStep:
time_step = self._env.reset()
return self._augment_time_step(time_step)
def step(self, action: np.ndarray) -> TimeStep:
time_step = self._env.step(action)
return self._augment_time_step(time_step, action)
def observation_spec(self) -> tp.Any:
assert isinstance(self, EnvWrapper)
return self._env.observation_spec()
def action_spec(self) -> specs.Array:
return self._env.action_spec()
def render(self, *args: tp.Any, **kwargs: tp.Any) -> np.ndarray:
return self._env.render(*args, **kwargs) # type: ignore
@property
def base_env(self) -> tp.Any:
env = self._env
if isinstance(env, EnvWrapper):
return env.base_env
return env
@property
def physics(self) -> tp.Any:
if hasattr(self._env, "physics"):
return self._env.physics
def __getattr__(self, name):
return getattr(self._env, name)
class FlattenJacoObservationWrapper(EnvWrapper):
def __init__(self, env: Env) -> None:
super().__init__(env)
self._obs_spec = OrderedDict()
wrapped_obs_spec = env.observation_spec().copy()
if 'front_close' in wrapped_obs_spec:
spec = wrapped_obs_spec['front_close']
# drop batch dim
self._obs_spec['pixels'] = specs.BoundedArray(shape=spec.shape[1:],
dtype=spec.dtype,
minimum=spec.minimum,
maximum=spec.maximum,
name='pixels')
wrapped_obs_spec.pop('front_close')
for spec in wrapped_obs_spec.values():
assert spec.dtype == np.float64
assert type(spec) == specs.Array
dim = np.sum(
np.fromiter((int(np.prod(spec.shape)) # type: ignore
for spec in wrapped_obs_spec.values()), np.int32))
self._obs_spec['observations'] = specs.Array(shape=(dim,),
dtype=np.float32,
name='observations')
def observation_spec(self) -> tp.Any:
return self._obs_spec
def _augment_time_step(self, time_step: TimeStep, action: tp.Optional[np.ndarray] = None) -> TimeStep:
super()._augment_time_step(time_step=time_step, action=action)
obs = OrderedDict()
# TODO: this is badly typed since observation is a dict in this case
if 'front_close' in time_step.observation:
pixels = time_step.observation['front_close']
time_step.observation.pop('front_close') # type: ignore
pixels = np.squeeze(pixels)
obs['pixels'] = pixels
features = []
for feature in time_step.observation.values(): # type: ignore
features.append(feature.ravel())
obs['observations'] = np.concatenate(features, axis=0)
return time_step._replace(observation=obs)
class ActionRepeatWrapper(EnvWrapper):
def __init__(self, env: tp.Any, num_repeats: int) -> None:
super().__init__(env)
self._num_repeats = num_repeats
def step(self, action: np.ndarray) -> TimeStep:
reward = 0.0
discount = 1.0
for _ in range(self._num_repeats):
time_step = self._env.step(action)
reward += (time_step.reward or 0.0) * discount
discount *= time_step.discount
if time_step.last():
break
return time_step._replace(reward=reward, discount=discount)
class FrameStackWrapper(EnvWrapper):
def __init__(self, env: Env, num_frames: int, pixels_key: str = 'pixels') -> None:
super().__init__(env)
self._num_frames = num_frames
self._frames: tp.Deque[np.ndarray] = deque([], maxlen=num_frames)
self._pixels_key = pixels_key
wrapped_obs_spec = env.observation_spec()
assert pixels_key in wrapped_obs_spec
pixels_shape = wrapped_obs_spec[pixels_key].shape
# remove batch dim
if len(pixels_shape) == 4:
pixels_shape = pixels_shape[1:]
self._obs_spec = specs.BoundedArray(shape=np.concatenate(
[[pixels_shape[2] * num_frames], pixels_shape[:2]], axis=0),
dtype=np.uint8,
minimum=0,
maximum=255,
name='observation')
def _augment_time_step(self, time_step: TimeStep, action: tp.Optional[np.ndarray] = None) -> TimeStep:
super()._augment_time_step(time_step=time_step, action=action)
assert len(self._frames) == self._num_frames
obs = np.concatenate(list(self._frames), axis=0)
return time_step._replace(observation=obs)
def _extract_pixels(self, time_step: TimeStep) -> np.ndarray:
pixels_ = time_step.observation[self._pixels_key]
# remove batch dim
if len(pixels_.shape) == 4:
pixels_ = pixels_[0]
return pixels_.transpose(2, 0, 1).copy()
def reset(self) -> TimeStep:
time_step = self._env.reset()
pixels_ = self._extract_pixels(time_step)
for _ in range(self._num_frames):
self._frames.append(pixels_)
return self._augment_time_step(time_step)
def step(self, action: np.ndarray) -> TimeStep:
time_step = self._env.step(action)
pixels_ = self._extract_pixels(time_step)
self._frames.append(pixels_)
return self._augment_time_step(time_step)
class GoalWrapper(EnvWrapper):
def __init__(self, env: Env, goal_func: tp.Callable[[Env], np.ndarray], append_goal_to_observation: bool = False) -> None:
"""Adds a goal space with a predefined function.
This can also append the observation with the goal to make sure the goal is achievable
"""
super().__init__(env)
self.append_goal_to_observation = append_goal_to_observation
self.goal_func = goal_func
def _augment_time_step(self, time_step: TimeStep, action: tp.Optional[np.ndarray] = None) -> TimeStep:
goal = self.goal_func(self)
obs = time_step.observation.copy()
if self.append_goal_to_observation:
k = "observations"
obs[k] = np.concatenate([obs[k], goal], axis=0)
# obs[k] = np.concatenate([obs[k], np.random.normal(size=goal.shape)], axis=0)
ts = GoalTimeStep(
step_type=time_step.step_type,
reward=time_step.reward,
discount=time_step.discount,
observation=obs,
goal=goal,
)
return super()._augment_time_step(time_step=ts, action=action)
def observation_spec(self) -> specs.Array:
spec = super().observation_spec().copy()
k = "observations"
if not self.append_goal_to_observation:
return spec
goal = self.goal_func(self)
spec[k] = specs.Array((spec[k].shape[0] + goal.shape[0],), dtype=np.float32, name=k)
return spec
class ActionDTypeWrapper(EnvWrapper):
def __init__(self, env: Env, dtype) -> None:
super().__init__(env)
wrapped_action_spec = env.action_spec()
self._action_spec = specs.BoundedArray(wrapped_action_spec.shape,
dtype,
wrapped_action_spec.minimum,
wrapped_action_spec.maximum,
'action')
def action_spec(self) -> specs.BoundedArray:
return self._action_spec
def step(self, action) -> Any:
action = action.astype(self._env.action_spec().dtype)
return self._env.step(action)
class ObservationDTypeWrapper(EnvWrapper):
def __init__(self, env: Env, dtype) -> None:
super().__init__(env)
self._dtype = dtype
wrapped_obs_spec = env.observation_spec()['observations']
self._obs_spec = specs.Array(wrapped_obs_spec.shape, dtype,
'observation')
def _augment_time_step(self, time_step: TimeStep, action: tp.Optional[np.ndarray] = None) -> TimeStep:
obs = time_step.observation['observations'].astype(self._dtype)
return time_step._replace(observation=obs)
def observation_spec(self) -> Any:
return self._obs_spec
class ExtendedGoalTimeStepWrapper(EnvWrapper):
def _augment_time_step(self, time_step: TimeStep, action: tp.Optional[np.ndarray] = None) -> TimeStep:
if action is None:
action_spec = self.action_spec()
action = np.zeros(action_spec.shape, dtype=action_spec.dtype)
assert isinstance(time_step, GoalTimeStep)
ts = ExtendedGoalTimeStep(observation=time_step.observation,
step_type=time_step.step_type,
action=action,
reward=time_step.reward or 0.0,
discount=time_step.discount or 1.0,
goal=time_step.goal)
return super()._augment_time_step(time_step=ts, action=action)
class ExtendedTimeStepWrapper(EnvWrapper):
def _augment_time_step(self, time_step: TimeStep, action: tp.Optional[np.ndarray] = None) -> TimeStep:
if action is None:
action_spec = self.action_spec()
action = np.zeros(action_spec.shape, dtype=action_spec.dtype)
ts = ExtendedTimeStep(observation=time_step.observation,
step_type=time_step.step_type,
action=action,
reward=time_step.reward or 0.0,
discount=time_step.discount or 1.0)
return super()._augment_time_step(time_step=ts, action=action)
def _make_jaco(obs_type, domain, task, frame_stack, action_repeat, seed,
goal_space: tp.Optional[str] = None, append_goal_to_observation: bool = False
) -> FlattenJacoObservationWrapper:
env = cdmc.make_jaco(task, obs_type, seed)
if goal_space is not None:
# inline because circular import
from url_benchmark import goals as _goals  # pylint: disable=import-outside-toplevel
funcs = _goals.goal_spaces.funcs[domain]
if goal_space not in funcs:
raise ValueError(f"No goal space {goal_space} for {domain}, avail: {list(funcs)}")
goal_func = funcs[goal_space]
env = GoalWrapper(env, goal_func, append_goal_to_observation=append_goal_to_observation)
env = ActionDTypeWrapper(env, np.float32)
env = ActionRepeatWrapper(env, action_repeat)
env = FlattenJacoObservationWrapper(env)
return env
def _make_dmc(obs_type, domain, task, frame_stack, action_repeat, seed,
goal_space: tp.Optional[str] = None, append_goal_to_observation: bool = False):
visualize_reward = False
if (domain, task) in suite.ALL_TASKS:
env = suite.load(domain,
task,
task_kwargs=dict(random=seed),
environment_kwargs=dict(flat_observation=True),
visualize_reward=visualize_reward)
else:
env = cdmc.make(domain,
task,
task_kwargs=dict(random=seed),
environment_kwargs=dict(flat_observation=True),
visualize_reward=visualize_reward)
if goal_space is not None:
# inline because circular import
from url_benchmark import goals as _goals  # pylint: disable=import-outside-toplevel
funcs = _goals.goal_spaces.funcs[domain]
if goal_space not in funcs:
raise ValueError(f"No goal space {goal_space} for {domain}, avail: {list(funcs)}")
goal_func = funcs[goal_space]
env = GoalWrapper(env, goal_func, append_goal_to_observation=append_goal_to_observation)
env = ActionDTypeWrapper(env, np.float32)
env = ActionRepeatWrapper(env, action_repeat)
if obs_type == 'pixels':
# zoom in camera for quadruped
camera_id = dict(quadruped=2).get(domain, 0)
render_kwargs = dict(height=84, width=84, camera_id=camera_id)
env = pixels.Wrapper(env,
pixels_only=True,
render_kwargs=render_kwargs)
return env
def make(
name: str, obs_type='states', frame_stack=1, action_repeat=1,
seed=1, goal_space: tp.Optional[str] = None, append_goal_to_observation: bool = False
) -> EnvWrapper:
if append_goal_to_observation and goal_space is None:
raise ValueError("Cannot append goal space since none is defined")
assert obs_type in ['states', 'pixels']
if name.startswith('point_mass_maze'):
domain = 'point_mass_maze'
_, _, _, task = name.split('_', 3)
else:
domain, task = name.split('_', 1)
domain = dict(cup='ball_in_cup').get(domain, domain)
if sys.platform == "darwin":
raise UnsupportedPlatform("Mac platform is not supported")
make_fn = _make_jaco if domain == 'jaco' else _make_dmc
# TODO fix this when it fails (signatures differ)
env = make_fn(obs_type, domain, task, frame_stack, action_repeat, seed,
goal_space=goal_space, append_goal_to_observation=append_goal_to_observation) # type: ignore
if obs_type == 'pixels':
env = FrameStackWrapper(env, frame_stack)
else:
env = ObservationDTypeWrapper(env, np.float32)
env = action_scale.Wrapper(env, minimum=-1.0, maximum=+1.0)
if goal_space is not None:
env = ExtendedGoalTimeStepWrapper(env)
else:
env = ExtendedTimeStepWrapper(env)
return env
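# --- Illustrative usage sketch (added for exposition; not part of the original module) ---
# `make` composes the wrappers above into a single environment; it needs dm_control
# (and therefore does not run on Mac). Arguments below are made up.
def _example_make() -> None:
    env = make("walker_walk", obs_type="states", frame_stack=1, action_repeat=1, seed=1)
    time_step = env.reset()
    assert time_step.first()
    assert time_step.observation.dtype == np.float32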
def extract_physics(env: Env) -> tp.Dict[str, float]:
"""Extract some physics available in the env"""
output = {}
names = ["torso_height", "torso_upright", "horizontal_velocity", "torso_velocity"]
for name in names:
if not hasattr(env.physics, name):
continue
val: tp.Union[float, np.ndarray] = getattr(env.physics, name)()
if isinstance(val, (int, float)) or not val.ndim:
output[name] = float(val)
else:
for k, v in enumerate(val):
output[f"{name}#{k}"] = float(v)
return output
class FloatStats:
"""Handle for keeping track of the statistics of a float variable"""
def __init__(self) -> None:
self.min = np.inf
self.max = -np.inf
self.mean = 0.0
self._count = 0
def add(self, value: float) -> "FloatStats":
self.min = min(value, self.min)
self.max = max(value, self.max)
self._count += 1
self.mean = (self._count - 1) / self._count * self.mean + 1 / self._count * value
return self
def items(self) -> tp.Iterator[tp.Tuple[str, float]]:
for name, val in self.__dict__.items():
if not name.startswith("_"):
yield name, val
class PhysicsAggregator:
"""Aggregate stats on the physics of an environment"""
def __init__(self) -> None:
self.stats: tp.Dict[str, FloatStats] = {}
def add(self, env: Env) -> "PhysicsAggregator":
phy = extract_physics(env)
for key, val in phy.items():
self.stats.setdefault(key, FloatStats()).add(val)
return self
def dump(self) -> tp.Iterator[tp.Tuple[str, float]]:
"""Exports all statistics and reset the statistics"""
for key, stats in self.stats.items():
for stat, val in stats.items():
yield (f'{key}/{stat}', val)
self.stats.clear()
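# --- Illustrative usage sketch (added for exposition; not part of the original module) ---
# PhysicsAggregator accumulates FloatStats per physics quantity across `add` calls, and
# `dump()` both exports and clears them; keys look like "torso_height/mean".
def _example_physics_aggregator() -> None:
    env = make("walker_walk", obs_type="states", frame_stack=1, action_repeat=1, seed=1)
    env.reset()
    agg = PhysicsAggregator()
    agg.add(env)
    stats = dict(agg.dump())
    assert stats and not list(agg.dump())  # the second dump is empty because dump() clears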
|
controllable_agent-main
|
url_benchmark/dmc.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pickle
from url_benchmark.dmc import TimeStep
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from typing import List
import numpy as np
from dm_env import StepType
import pytest
fixed_episode_lengths = [10, 10, 10, 10, 10]
variable_episode_lengths = [2, 3, 5, 6, 7]
@pytest.mark.parametrize('test_data', [(10, fixed_episode_lengths, None, False, 10),
(5, fixed_episode_lengths, None, True, 10),
(10, variable_episode_lengths, 8, False, 5),
(5, variable_episode_lengths, 8, True, 5)])
def test_avg_episode_length_fixed_length_not_full(test_data) -> None:
max_episodes, episode_lengths, max_episode_length, is_full, avg_episode_length = test_data
replay_storage = ReplayBuffer(
max_episodes=max_episodes, discount=1, future=1, max_episode_length=max_episode_length)
meta = {'z': np.ones((3, 3))}
for episode_length in episode_lengths:
for time_step in _create_dummy_episode(episode_length):
replay_storage.add(time_step, meta=meta)
assert replay_storage._full == is_full
assert replay_storage.avg_episode_length == avg_episode_length
@pytest.mark.parametrize('test_data', [(10, 5, 7), (10, 10, 7)])
def test_backward_compatibility(test_data) -> None:
max_episodes, episodes_count, episode_length = test_data
is_full = max_episodes == episodes_count
replay_storage = ReplayBuffer(max_episodes=max_episodes, discount=1, future=1, max_episode_length=episode_length + 1)
meta = {'z': np.ones((3, 3))}
for _ in range(episodes_count):
for time_step in _create_dummy_episode(episode_length):
replay_storage.add(time_step, meta=meta)
# remove attributes recently added
del replay_storage._episodes_length
del replay_storage._episodes_selection_probability
del replay_storage._is_fixed_episode_length
del replay_storage._max_episode_length
loaded_replay_storage = pickle.loads(pickle.dumps(replay_storage))
assert loaded_replay_storage._idx == episodes_count % max_episodes
assert loaded_replay_storage._full == is_full
assert (loaded_replay_storage._episodes_length[:episodes_count] == episode_length).all()
assert (loaded_replay_storage._episodes_length[episodes_count:] == 0).all()
assert loaded_replay_storage._max_episode_length is None
def _create_dummy_episode(episode_length: int) -> List[TimeStep]:
time_steps = []
for i in range(episode_length+1):
step_type = StepType.MID
if i == 0:
step_type = StepType.FIRST
elif i == episode_length:
step_type = StepType.LAST
time_step = TimeStep(step_type=step_type, observation=np.zeros(
(3, 3)), reward=1, discount=1)
time_steps.append(time_step)
return time_steps
|
controllable_agent-main
|
url_benchmark/test_in_memory_replay_buffer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
import collections
import numpy as np
import pytest
from url_benchmark import goals
def test_basics() -> None:
assert "simplified_walker" in goals.goal_spaces.funcs["walker"]
assert len(goals.goals.funcs["simplified_walker"]["walker_stand"]()) == 3
@pytest.mark.parametrize("domain,space", [(d, s) for d in goals.goal_spaces.funcs for s in goals.goal_spaces.funcs[d]])
def test_goal_space_extraction(domain: str, space: str) -> None:
env = goals._make_env(domain)
out = goals.goal_spaces.funcs[domain][space](env)
assert isinstance(out, np.ndarray)
assert out.dtype == np.float32
for name, func in goals.goals.funcs.get(space, {}).items():
goal = func()
assert goal.shape == out.shape, f"Wrong shape for goal {name}"
assert goal.dtype == np.float32
@pytest.mark.parametrize("case", (range(goals.QuadrupedReward.NUM_CASES)))
def test_quad_rewards(case: int) -> None:
reward = goals.QuadrupedReward()
reward._case = case
out = reward.from_physics(reward._env.physics.get_state())
assert 0 <= out <= 1
def test_quad_pos_rewards() -> None:
reward = goals.QuadrupedPosReward()
env = goals._make_env("quadruped")
env.reset()
out = reward.from_physics(env.physics.get_state())
out2 = reward.from_env(env)
assert 0 <= out <= 1
assert out == out2, "Should be deterministic"
assert reward.get_goal("quad_pos_speed").dtype == np.float32
def test_walker_equation() -> None:
reward = goals.WalkerEquation("1 / (1 + abs(x - 2))")
env = goals._make_env("walker")
env.reset()
out = reward.from_physics(env.physics.get_state())
out2 = reward.from_env(env)
assert 0 <= out <= 1
assert out == out2, "Should be deterministic"
def test_walker_bad_equation() -> None:
with pytest.raises(ValueError):
goals.WalkerEquation("1 / (1 + os(x - 2))")
def test_walker_random_equation() -> None:
env = goals._make_env("walker")
reward = goals.WalkerRandomReward()
out = reward.from_env(env)
assert 0 <= out <= 1
def test_dmc_rewards() -> None:
env = goals._make_env("quadruped")
reward = env.task.get_reward(env.physics)
rewarders = {name: goals.get_reward_function(f"quadruped_{name}") for name in ["walk", "stand"]}
rewards = {name: r.from_env(env) for name, r in rewarders.items()}
assert rewards["stand"] == reward
assert rewards["walk"] != reward
assert rewarders["stand"].from_physics(env.physics.get_state()) == reward
def test_walker_qpos() -> None:
env = goals._make_env("walker")
env.reset()
env.step(np.random.uniform(-1, 1, size=6))
out = goals.goal_spaces.funcs["walker"]["walker_pos_speed"](env)
qpos = env.physics.data.qpos
assert pytest.approx(qpos[1]) == out[-1], qpos
@pytest.mark.parametrize("name,expected", [("walker_pos_speed", 4)])
def test_goal_space_dim(name: str, expected: int) -> None:
out = goals.get_goal_space_dim(name)
assert out == expected
def test_uniquely_named_goal_space() -> None:
space_counts = collections.Counter(space for spaces in goals.goal_spaces.funcs.values() for space in spaces)
duplicated = {x for x, y in space_counts.items() if y > 1}
if duplicated:
raise RuntimeError(f"Duplicated goal space names: {duplicated}\n(goal space names need to be unique)")
@pytest.mark.parametrize(
"string,expected", [
("(x + y) * z", {"x", "y", "z"}),
("import x;os.system(stuff) # hello", {"import", "x", "os", "system", "stuff"}),
])
def test_extract_variables(string: str, expected: tp.Set[str]) -> None:
out = goals.extract_names(string)
assert out == expected
|
controllable_agent-main
|
url_benchmark/test_goals.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from url_benchmark.goals import DmcReward
import torch
name = "walker_flip"
load_replay_buffer = "/checkpoint/jrapin/ca/buffers/walker_rnd_ddpg_220803.pt"
relabeled_replay_file_path = "/private/home/atouati/controllable_agent/datasets/walker/rnd/walker_flip_rnd_ddpg.pt"
custom_reward = DmcReward(name)
print("loading Replay from %s", load_replay_buffer)
with open(load_replay_buffer, 'rb') as f:
replay_loader = torch.load(f)
replay_loader.relabel(custom_reward)
with open(relabeled_replay_file_path, 'wb') as f:
torch.save(replay_loader, f)
|
controllable_agent-main
|
url_benchmark/relabel_buffer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pdb # pylint: disable=unused-import
import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)
import os
os.environ['MKL_SERVICE_FORCE_INTEL'] = '1'
os.environ['MUJOCO_GL'] = 'egl'
from pathlib import Path
import dataclasses
import typing as tp
import hydra
from hydra.core.config_store import ConfigStore
import torch
import omegaconf as omgcf
from url_benchmark.pretrain import make_agent
from url_benchmark import dmc
from url_benchmark import utils
from url_benchmark.video import VideoRecorder
from url_benchmark import agent as agents
from url_benchmark import goals as _goals
from typing import Any
torch.backends.cudnn.benchmark = True
# # # Config # # #
@dataclasses.dataclass
class PlayConfig:
agent: tp.Any
# mode
reward_free: bool = True
# task settings
task: str = "walker_stand"
obs_type: str = "states" # [states, pixels]
frame_stack: int = 3 # only works if obs_type=pixels
action_repeat: int = 1 # set to 2 for pixels
discount: float = 0.99
goal_space: str = "simplified"
# train settings
num_train_frames: int = 100010
num_seed_frames: int = 0
# eval
eval_every_frames: int = 10000
num_eval_episodes: int = 10
# snapshot
snapshot_ts: int = 2000000
snapshot_base_dir: str = omgcf.SI("./pretrained_models")
# replay buffer
replay_buffer_size: int = 1000000
replay_buffer_num_workers: int = 4
batch_size: int = omgcf.II("agent.batch_size")
nstep: int = omgcf.II("agent.nstep")
update_encoder: bool = False # should always be true for pre-training
# misc
seed: int = 1
device: str = "cuda"
save_video: bool = True
save_train_video: bool = False
use_tb: bool = False
use_wandb: bool = False
use_hiplog: bool = False
# experiment
experiment: str = "exp"
# loaded as base_finetune in finetune.yaml
# we keep the yaml since it's easier to configure plugins from it
ConfigStore.instance().store(name="workspace_config", node=PlayConfig)
# # # Implem # # #
class Workspace:
def __init__(self, cfg: PlayConfig) -> None:
self.work_dir = Path.cwd()
print(f'workspace: {self.work_dir}')
self.cfg = cfg
utils.set_seed_everywhere(cfg.seed)
self.device = torch.device(cfg.device)
# create envs
self.env = dmc.make(cfg.task, cfg.obs_type, cfg.frame_stack,
cfg.action_repeat, cfg.seed, cfg.goal_space)
# create agent
self.agent = make_agent(cfg.obs_type,
self.env.observation_spec(),
self.env.action_spec(),
cfg.num_seed_frames // cfg.action_repeat,
cfg.agent)
# initialize from pretrained
if cfg.snapshot_ts > 0:
pretrained_agent = self.load_snapshot()['agent']
self.agent.init_from(pretrained_agent)
# create video recorders
self.video_recorder = VideoRecorder(
self.work_dir if cfg.save_video else None)
def play(self) -> None:
episode, total_reward = 0, 0.0
eval_until_episode = utils.Until(self.cfg.num_eval_episodes)
while eval_until_episode(episode):
total_reward = 0
if isinstance(self.agent, agents.FBDDPGAgent):
g = _goals.goals.funcs[self.cfg.goal_space][self.cfg.task]()
meta = self.agent.get_goal_meta(g)
else:
meta = self.agent.init_meta()
time_step = self.env.reset()
self.video_recorder.init(self.env)
step = 0
eval_until_step = utils.Until(1000)
while eval_until_step(step):
# print(f'episode {episode}, step {step}')
# while not time_step.last():
with torch.no_grad(), utils.eval_mode(self.agent):
action = self.agent.act(time_step.observation,
meta,
1,
eval_mode=True)
time_step = self.env.step(action)
self.video_recorder.record(self.env)
total_reward += time_step.reward
# print(time_step.goal[2])
step += 1
episode += 1
print(total_reward)
self.video_recorder.save(f'{episode}.mp4')
def load_snapshot(self) -> Any:
snapshot_base_dir = Path(self.cfg.snapshot_base_dir)
# domain, _ = self.cfg.task.split('_', 1)
# snapshot_dir = snapshot_base_dir / self.cfg.obs_type / domain / self.cfg.agent.name
snapshot_dir = snapshot_base_dir
def try_load():
# snapshot = snapshot_dir / str(
# seed) / f'snapshot_{self.cfg.snapshot_ts}.pt'
snapshot = snapshot_dir / f'snapshot_{self.cfg.snapshot_ts}.pt'
# if not snapshot.exists():
# return None
with snapshot.open('rb') as f:
payload = torch.load(f)
return payload
# try to load current seed
payload = try_load()
return payload
@hydra.main(config_path='.', config_name='base_config')
def main(cfg) -> None:
workspace = Workspace(cfg)
workspace.play()
if __name__ == '__main__':
main()
|
controllable_agent-main
|
url_benchmark/play_behaviors.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
from pathlib import Path
import cv2
import imageio
import numpy as np
import wandb
class VideoRecorder:
def __init__(self,
root_dir: tp.Optional[tp.Union[str, Path]],
render_size: int = 256,
fps: int = 20,
camera_id: int = 0,
use_wandb: bool = False) -> None:
self.save_dir: tp.Optional[Path] = None
if root_dir is not None:
self.save_dir = Path(root_dir) / 'eval_video'
self.save_dir.mkdir(exist_ok=True)
self.enabled = False
self.render_size = render_size
self.fps = fps
self.frames: tp.List[np.ndarray] = []
self.camera_id = camera_id
self.use_wandb = use_wandb
def init(self, env, enabled: bool = True) -> None:
self.frames = []
self.enabled = self.save_dir is not None and enabled
self.record(env)
def record(self, env) -> None:
if self.enabled:
if hasattr(env, 'physics'):
if env.physics is not None:
frame = env.physics.render(height=self.render_size,
width=self.render_size,
camera_id=self.camera_id)
else:
frame = env.base_env.render()
else:
frame = env.render()
self.frames.append(frame)
def log_to_wandb(self) -> None:
frames = np.transpose(np.array(self.frames), (0, 3, 1, 2))
fps, skip = 6, 8
wandb.log({
'eval/video':
wandb.Video(frames[::skip, :, ::2, ::2], fps=fps, format="gif")
})
def save(self, file_name: str) -> None:
if self.enabled:
if self.use_wandb:
self.log_to_wandb()
assert self.save_dir is not None
path = self.save_dir / file_name
imageio.mimsave(str(path), self.frames, fps=self.fps) # type: ignore
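# --- Illustrative usage sketch (added for exposition; not part of the original module) ---
# VideoRecorder is driven manually: `init` at episode start (which already records one
# frame), `record` after every step, `save` at the end. The dummy env below is made up;
# any object exposing `render()` (or a dm_control `physics`) works. Note that passing a
# root_dir creates an `eval_video` sub-directory.
def _example_video_recorder() -> None:
    class _DummyEnv:
        def render(self) -> np.ndarray:
            return np.zeros((64, 64, 3), dtype=np.uint8)
    recorder = VideoRecorder(root_dir=".", fps=10)
    env = _DummyEnv()
    recorder.init(env)
    for _ in range(5):
        recorder.record(env)
    assert len(recorder.frames) == 6  # init() captured the first frame
    # recorder.save("episode.mp4") would then write the clip via imageio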
class TrainVideoRecorder:
def __init__(self,
root_dir: tp.Optional[tp.Union[str, Path]],
render_size: int = 256,
fps: int = 20,
camera_id: int = 0,
use_wandb: bool = False) -> None:
self.save_dir: tp.Optional[Path] = None
if root_dir is not None:
self.save_dir = Path(root_dir) / 'train_video'
self.save_dir.mkdir(exist_ok=True)
self.enabled = False
self.render_size = render_size
self.fps = fps
self.frames: tp.List[np.ndarray] = []
self.camera_id = camera_id
self.use_wandb = use_wandb
def init(self, obs, enabled=True) -> None:
self.frames = []
self.enabled = self.save_dir is not None and enabled
self.record(obs)
def record(self, obs) -> None:
if self.enabled:
frame = cv2.resize(obs[-3:].transpose(1, 2, 0),
dsize=(self.render_size, self.render_size),
interpolation=cv2.INTER_CUBIC)
self.frames.append(frame)
def log_to_wandb(self) -> None:
frames = np.transpose(np.array(self.frames), (0, 3, 1, 2))
fps, skip = 6, 8
wandb.log({
'train/video':
wandb.Video(frames[::skip, :, ::2, ::2], fps=fps, format="gif")
})
def save(self, file_name) -> None:
if self.enabled:
if self.use_wandb:
self.log_to_wandb()
assert self.save_dir is not None
path = self.save_dir / file_name
imageio.mimsave(str(path), self.frames, fps=self.fps) # type: ignore
|
controllable_agent-main
|
url_benchmark/video.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
import pytest
from url_benchmark import dmc
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
@pytest.mark.parametrize("name,expected", [
("walker_walk", {'torso_height': 1.3, 'torso_upright': 1.0, 'horizontal_velocity': 0.0}),
("quadruped_walk", {'torso_upright': 1.0, 'torso_velocity#0': 0.0, 'torso_velocity#1': 0.0, 'torso_velocity#2': 0.0}),
])
def test_extract_physics(name: str, expected: tp.Dict[str, float]) -> None:
env = dmc.make(name, obs_type="states", frame_stack=1, action_repeat=1, seed=12)
phy = dmc.extract_physics(env)
assert phy == expected
time_step = env.reset()
assert time_step.physics.size > 0
# check that it works in the ReplayBuffer
rb = ReplayBuffer(12, 0.9, True)
rb.add(time_step, {})
assert "physics" in rb._current_episode
def test_goal_wrapper() -> None:
env = dmc.make("quadruped_walk", obs_type="states", frame_stack=1, action_repeat=1,
seed=12, goal_space="simplified_quadruped", append_goal_to_observation=True)
out = env.reset()
assert out.observation.shape == env.observation_spec().shape
env = dmc.make("quadruped_walk", obs_type="states", frame_stack=1, action_repeat=1,
seed=12, goal_space="simplified_quadruped", append_goal_to_observation=False)
out2 = env.reset()
assert out2.observation.shape[0] < out.observation.shape[0]
def test_physics_aggregator() -> None:
env = dmc.make("walker_walk", obs_type="states", frame_stack=1, action_repeat=1, seed=12)
agg = dmc.PhysicsAggregator()
agg.add(env)
names = [x[0] for x in agg.dump()]
assert len(names) == 9
assert not list(agg.dump())
def test_float_stats() -> None:
stats = dmc.FloatStats().add(12)
assert all(getattr(stats, name) == 12 for name in ["mean", "max", "min"])
stats.add(24)
assert stats.min == 12
assert stats.max == 24
assert stats.mean == 18
assert stats._count == 2
stats.add(24)
assert stats.mean == 20
|
controllable_agent-main
|
url_benchmark/test_dmc.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
import enum
import dm_env
from dm_env import specs
import numpy as np
import matplotlib.pyplot as plt
from url_benchmark.dmc import ExtendedTimeStep
class ObservationType(enum.IntEnum):
STATE_INDEX = enum.auto()
AGENT_ONEHOT = enum.auto()
GRID = enum.auto()
AGENT_GOAL_POS = enum.auto()
AGENT_POS = enum.auto()
def build_gridworld_task(task,
discount=1.0,
penalty_for_walls=0,
observation_type=ObservationType.AGENT_POS,
max_episode_length=200):
"""Construct a particular Gridworld layout with start/goal states.
Args:
task: string name of the task to use. One of {'simple', 'obstacle',
'random_goal'}.
discount: Discounting factor included in all Timesteps.
penalty_for_walls: Reward added when hitting a wall (should be negative).
observation_type: Enum observation type to use. One of:
* ObservationType.STATE_INDEX: int32 index of agent occupied tile.
* ObservationType.AGENT_ONEHOT: NxN float32 grid, with a 1 where the
agent is and 0 elsewhere.
* ObservationType.GRID: NxNx3 float32 grid of feature channels.
First channel contains walls (1 if wall, 0 otherwise), second the
agent position (1 if agent, 0 otherwise) and third goal position
(1 if goal, 0 otherwise)
* ObservationType.AGENT_GOAL_POS: float32 tuple with
(agent_y, agent_x, goal_y, goal_x).
max_episode_length: If set, will terminate an episode after this many
steps.
"""
tasks_specifications = {
'simple': {
'layout': [
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, 0, 0, 0, 0, 0, 0, 0, 0, -1],
[-1, 0, 0, 0, -1, -1, 0, 0, 0, -1],
[-1, 0, 0, 0, -1, -1, 0, 0, 0, -1],
[-1, 0, 0, 0, -1, -1, 0, 0, 0, -1],
[-1, 0, 0, 0, 0, 0, 0, 0, 0, -1],
[-1, 0, 0, 0, 0, 0, 0, 0, 0, -1],
[-1, 0, 0, 0, 0, 0, 0, 0, 0, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
],
'start_state': (2, 2),
'randomize_goals': True
# 'goal_state': (7, 2)
},
'obstacle': {
'layout': [
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, 0, 0, 0, 0, 0, -1, 0, 0, -1],
[-1, 0, 0, 0, -1, 0, 0, 0, 0, -1],
[-1, 0, 0, 0, -1, -1, 0, 0, 0, -1],
[-1, 0, 0, 0, -1, -1, 0, 0, 0, -1],
[-1, 0, 0, 0, 0, 0, 0, 0, 0, -1],
[-1, 0, 0, 0, 0, 0, 0, 0, 0, -1],
[-1, 0, 0, 0, 0, 0, 0, 0, 0, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
],
'start_state': (2, 2),
'goal_state': (2, 8)
},
'random_goal': {
'layout': [
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, 0, 0, 0, 0, 0, 0, 0, 0, -1],
[-1, 0, 0, 0, -1, -1, 0, 0, 0, -1],
[-1, 0, 0, 0, -1, -1, 0, 0, 0, -1],
[-1, 0, 0, 0, -1, -1, 0, 0, 0, -1],
[-1, 0, 0, 0, 0, 0, 0, 0, 0, -1],
[-1, 0, 0, 0, 0, 0, 0, 0, 0, -1],
[-1, 0, 0, 0, 0, 0, 0, 0, 0, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
],
'start_state': (2, 2),
# 'randomize_goals': True
},
}
return GridWorld(
discount=discount,
penalty_for_walls=penalty_for_walls,
observation_type=observation_type,
max_episode_length=max_episode_length,
**tasks_specifications[task])
class GridWorld(dm_env.Environment):
def __init__(self,
layout,
start_state,
goal_state=None,
observation_type=ObservationType.STATE_INDEX,
discount=1.0,
penalty_for_walls=0,
reward_goal=1,
max_episode_length=None,
randomize_goals=False) -> None:
"""Build a grid environment.
Simple gridworld defined by a map layout, a start and a goal state.
Layout should be a NxN grid, containing:
* 0: empty
* -1: wall
* Any other positive value: value indicates reward; episode will terminate
Args:
layout: NxN array of numbers, indicating the layout of the environment.
start_state: Tuple (y, x) of starting location.
goal_state: Optional tuple (y, x) of goal location. Will be randomly
sampled once if None.
observation_type: Enum observation type to use. One of:
* ObservationType.STATE_INDEX: int32 index of agent occupied tile.
* ObservationType.AGENT_ONEHOT: NxN float32 grid, with a 1 where the
agent is and 0 elsewhere.
* ObservationType.GRID: NxNx3 float32 grid of feature channels.
First channel contains walls (1 if wall, 0 otherwise), second the
agent position (1 if agent, 0 otherwise) and third goal position
(1 if goal, 0 otherwise)
* ObservationType.AGENT_GOAL_POS: float32 tuple with
(agent_y, agent_x, goal_y, goal_x)
discount: Discounting factor included in all Timesteps.
penalty_for_walls: Reward added when hitting a wall (should be negative).
reward_goal: Reward added when finding the goal (should be positive).
max_episode_length: If set, will terminate an episode after this many
steps.
randomize_goals: If true, randomize goal at every episode.
"""
if observation_type not in ObservationType:
            raise ValueError('observation_type should be an ObservationType instance.')
self._layout = np.array(layout)
self._start_state = start_state
self._state = self._start_state
self._number_of_states = np.prod(np.shape(self._layout))
self._discount = discount
self._penalty_for_walls = penalty_for_walls
self._reward_goal = reward_goal
self._observation_type = observation_type
self._layout_dims = self._layout.shape
self._max_episode_length = max_episode_length
self._num_episode_steps = 0
self._randomize_goals = randomize_goals
self._goal_state: tp.Tuple[int, int]
if goal_state is None:
# Randomly sample goal_state if not provided
goal_state = self._sample_goal()
self.goal_state = goal_state
def _sample_goal(self):
"""Randomly sample reachable non-starting state."""
# Sample a new goal
n = 0
max_tries = 1e5
while n < max_tries:
goal_state = tuple(np.random.randint(d) for d in self._layout_dims)
if goal_state != self._state and self._layout[goal_state] == 0:
# Reachable state found!
return goal_state
n += 1
raise ValueError('Failed to sample a goal state.')
@property
def number_of_states(self):
return self._number_of_states
@property
def goal_state(self):
return self._goal_state
@goal_state.setter
def goal_state(self, new_goal):
if new_goal == self._state or self._layout[new_goal] < 0:
raise ValueError('This is not a valid goal!')
# Zero out any other goal
self._layout[self._layout > 0] = 0
# Setup new goal location
self._layout[new_goal] = self._reward_goal
self._goal_state = new_goal
def set_state(self, x, y):
self._state = (y, x)
def observation_spec(self):
if self._observation_type is ObservationType.AGENT_ONEHOT:
return specs.Array(
shape=(self._number_of_states, ),
dtype=np.float32,
name='observation_agent_onehot')
elif self._observation_type is ObservationType.GRID:
return specs.Array(
shape=self._layout_dims + (3,),
dtype=np.float32,
name='observation_grid')
elif self._observation_type is ObservationType.AGENT_POS:
return specs.Array(
shape=(2,), dtype=np.float32, name='observation_agent_pos')
elif self._observation_type is ObservationType.AGENT_GOAL_POS:
return specs.Array(
shape=(4,), dtype=np.float32, name='observation_agent_goal_pos')
elif self._observation_type is ObservationType.STATE_INDEX:
return specs.DiscreteArray(
self._number_of_states, dtype=int, name='observation_state_index')
def action_spec(self):
return specs.DiscreteArray(5, dtype=int, name='action')
def get_state(self):
return self._state
def get_goal_obs(self):
if self._observation_type is ObservationType.AGENT_ONEHOT:
obs = np.zeros(self._layout.shape, dtype=np.float32)
# Place agent
obs[self._goal_state] = 1
return obs.flatten()
elif self._observation_type is ObservationType.AGENT_POS:
return np.array(self._goal_state, dtype=np.float32) / np.array(self._layout.shape, dtype=np.float32)
elif self._observation_type is ObservationType.STATE_INDEX:
y, x = self._goal_state
return y * self._layout.shape[1] + x
def get_obs(self):
if self._observation_type is ObservationType.AGENT_ONEHOT:
obs = np.zeros(self._layout.shape, dtype=np.float32)
# Place agent
obs[self._state] = 1
return obs.flatten()
elif self._observation_type is ObservationType.GRID:
obs = np.zeros(self._layout.shape + (3,), dtype=np.float32)
obs[..., 0] = self._layout < 0
obs[self._state[0], self._state[1], 1] = 1
obs[self._goal_state[0], self._goal_state[1], 2] = 1
return obs
elif self._observation_type is ObservationType.AGENT_POS:
return np.array(self._state, dtype=np.float32) / np.array(self._layout.shape, dtype=np.float32)
elif self._observation_type is ObservationType.AGENT_GOAL_POS:
return np.array(self._state + self._goal_state, dtype=np.float32)
elif self._observation_type is ObservationType.STATE_INDEX:
y, x = self._state
return y * self._layout.shape[1] + x
def reset(self):
self._state = self._start_state
self._num_episode_steps = 0
if self._randomize_goals:
self.goal_state = self._sample_goal()
return ExtendedTimeStep(
step_type=dm_env.StepType.FIRST,
action=0,
reward=0.0,
discount=1,
observation=self.get_obs())
def step(self, action):
y, x = self._state
if action == 0: # up
new_state = (y - 1, x)
elif action == 1: # right
new_state = (y, x + 1)
elif action == 2: # down
new_state = (y + 1, x)
elif action == 3: # left
new_state = (y, x - 1)
elif action == 4: # stay
new_state = (y, x)
else:
raise ValueError(
'Invalid action: {} is not 0, 1, 2, 3, or 4.'.format(action))
new_y, new_x = new_state
step_type = dm_env.StepType.MID
if self._layout[new_y, new_x] == -1: # wall
reward = self._penalty_for_walls
discount = self._discount
new_state = (y, x)
elif self._layout[new_y, new_x] == 0: # empty cell
reward = 0.
discount = self._discount
else: # a goal
reward = self._layout[new_y, new_x]
            ## if we choose to terminate
# discount = 0.
# new_state = self._start_state
# step_type = dm_env.StepType.LAST
discount = self._discount
self._state = new_state
self._num_episode_steps += 1
if (self._max_episode_length is not None and
self._num_episode_steps >= self._max_episode_length):
step_type = dm_env.StepType.LAST
return ExtendedTimeStep(
step_type=step_type,
action=action,
reward=np.float32(reward),
discount=discount,
observation=self.get_obs())
def plot_grid(self, add_start=True):
asbestos = (127 / 255, 140 / 255, 141 / 255, 0.8)
dodger_blue = (25 / 255, 140 / 255, 255 / 255, 0.8)
# carrot = (235 / 255, 137 / 255, 33 / 255, 0.8)
grid_kwargs = {'color': (220 / 255, 220 / 255, 220 / 255, 0.5)}
# marker_style = dict(linestyle=':', color=carrot, markersize=20)
plt.figure(figsize=(4, 4))
img = np.ones((self._layout.shape[0], self._layout.shape[1], 4))
wall_y, wall_x = np.where(self._layout <= -1)
for i in range(len(wall_y)):
img[wall_y[i], wall_x[i]] = np.array(asbestos)
plt.imshow(img, interpolation=None)
# plt.imshow(self._layout <= -1, interpolation='nearest')
ax = plt.gca()
ax.grid(0)
plt.xticks([])
plt.yticks([])
# Add start/goal
if add_start:
plt.text(
self._start_state[1],
self._start_state[0],
r'$\mathbf{S}$',
fontsize=16,
ha='center',
va='center')
plt.text(
self._goal_state[1],
self._goal_state[0],
r'$\mathbf{G}$',
fontsize=16,
ha='center',
va='center',
color=dodger_blue)
h, w = self._layout.shape
for y in range(h - 1):
plt.plot([-0.5, w - 0.5], [y + 0.5, y + 0.5], **grid_kwargs)
for x in range(w - 1):
plt.plot([x + 0.5, x + 0.5], [-0.5, h - 0.5], **grid_kwargs)
def render(self, return_rgb=True):
carrot = (235 / 255, 137 / 255, 33 / 255, 0.8)
self.plot_grid(add_start=False)
# Add the agent location
plt.text(
self._state[1],
self._state[0],
u'😃',
fontname='symbola',
fontsize=18,
ha='center',
va='center',
color=carrot)
if return_rgb:
fig = plt.gcf()
plt.axis('tight')
plt.subplots_adjust(0, 0, 1, 1, 0, 0)
fig.canvas.draw()
            # np.fromstring is deprecated for binary data; frombuffer reads the RGB bytes directly
            data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
w, h = fig.canvas.get_width_height()
data = data.reshape((h, w, 3))
plt.close(fig)
return data
def plot_policy(self, policy):
action_names = [
r'$\uparrow$', r'$\rightarrow$', r'$\downarrow$', r'$\leftarrow$'
]
self.plot_grid()
plt.title('Policy Visualization')
h, w = self._layout.shape
for y in range(h):
for x in range(w):
# if ((y, x) != self._start_state) and ((y, x) != self._goal_state):
if (y, x) != self._goal_state:
action_name = action_names[policy[y, x]]
plt.text(x, y, action_name, ha='center', va='center')
def plot_greedy_policy(self, q):
greedy_actions = np.argmax(q, axis=2)
self.plot_policy(greedy_actions)
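# Illustrative usage sketch (not exercised anywhere in the benchmark; task name, observation type
# and the random actions below are arbitrary choices): build the 'simple' task and roll out a few steps.
if __name__ == '__main__':
    _env = build_gridworld_task('simple', observation_type=ObservationType.AGENT_POS)
    _ts = _env.reset()
    for _ in range(10):
        # actions 0-4 are up/right/down/left/stay
        _ts = _env.step(np.random.randint(5))
        print(_ts.step_type, _ts.action, _ts.reward, _ts.observation)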
|
controllable_agent-main
|
url_benchmark/gridworld/env.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
controllable_agent-main
|
url_benchmark/gridworld/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=unused-import
import pdb
import copy
import math
import logging
import dataclasses
from collections import OrderedDict
import typing as tp
from pathlib import Path
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from hydra.core.config_store import ConfigStore
import omegaconf
from url_benchmark import utils
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from .ddpg import MetaDict
from .ddpg import Encoder
from .fb_modules import Actor, DiagGaussianActor, ForwardMap, BackwardMap, mlp, OnlineCov
from url_benchmark.dmc import TimeStep
from url_benchmark import goals as _goals
logger = logging.getLogger(__name__)
@dataclasses.dataclass
class SFAgentConfig:
# @package agent
_target_: str = "url_benchmark.agent.sf.SFAgent"
name: str = "sf"
# reward_free: ${reward_free}
obs_type: str = omegaconf.MISSING # to be specified later
obs_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
action_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
device: str = omegaconf.II("device") # ${device}
lr: float = 1e-4
lr_coef: float = 5
sf_target_tau: float = 0.01 # 0.001-0.01
update_every_steps: int = 2
use_tb: bool = omegaconf.II("use_tb") # ${use_tb}
use_wandb: bool = omegaconf.II("use_wandb") # ${use_wandb}
    use_hiplog: bool = omegaconf.II("use_hiplog")  # ${use_hiplog}
num_expl_steps: int = omegaconf.MISSING # ??? # to be specified later
num_inference_steps: int = 5120
hidden_dim: int = 1024 # 128, 2048
backward_hidden_dim: int = 512 # 128, 2048
feature_dim: int = 512 # 128, 1024
z_dim: int = 100 # 30-200
stddev_schedule: str = "0.2" # "linear(1,0.2,200000)" # 0, 0.1, 0.2
stddev_clip: float = 0.3 # 1
update_z_every_step: int = 100
nstep: int = 1
batch_size: int = 1024
init_sf: bool = True
update_encoder: bool = omegaconf.II("update_encoder") # ${update_encoder}
goal_space: tp.Optional[str] = omegaconf.II("goal_space")
# ortho_coef: float = 0.1 # 0.01-10
log_std_bounds: tp.Tuple[float, float] = (-5, 2) # param for DiagGaussianActor
temp: float = 1 # temperature for DiagGaussianActor
boltzmann: bool = False # set to true for DiagGaussianActor
debug: bool = False
preprocess: bool = True
num_sf_updates: int = 1
feature_learner: str = "icm"
mix_ratio: float = 0.0
q_loss: bool = True
update_cov_every_step: int = 1000
add_trunk: bool = False
cs = ConfigStore.instance()
cs.store(group="agent", name="sf", node=SFAgentConfig)
class FeatureLearner(nn.Module):
def __init__(self, obs_dim, action_dim, z_dim, hidden_dim) -> None:
super().__init__()
self.feature_net: nn.Module = mlp(obs_dim, hidden_dim, "ntanh", hidden_dim, "relu", z_dim, "L2")
self.apply(utils.weight_init)
def forward(self, obs: torch.Tensor, action: torch.Tensor, next_obs: torch.Tensor, future_obs: torch.Tensor):
return None
class Identity(FeatureLearner):
def __init__(self, obs_dim, action_dim, z_dim, hidden_dim) -> None:
super().__init__(obs_dim, action_dim, z_dim, hidden_dim)
self.feature_net = nn.Identity()
class Laplacian(FeatureLearner):
def forward(self, obs: torch.Tensor, action: torch.Tensor, next_obs: torch.Tensor, future_obs: torch.Tensor):
del action
del future_obs
phi = self.feature_net(obs)
next_phi = self.feature_net(next_obs)
loss = (phi - next_phi).pow(2).mean()
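        # orthonormality regulariser (graph-Laplacian style): encourage features of different samples
        # to be orthogonal while keeping feature norms from collapsing to zero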
Cov = torch.matmul(phi, phi.T)
I = torch.eye(*Cov.size(), device=Cov.device)
off_diag = ~I.bool()
orth_loss_diag = - 2 * Cov.diag().mean()
orth_loss_offdiag = Cov[off_diag].pow(2).mean()
orth_loss = orth_loss_offdiag + orth_loss_diag
loss += orth_loss
return loss
class ContrastiveFeature(FeatureLearner):
def __init__(self, obs_dim, action_dim, z_dim, hidden_dim) -> None:
super().__init__(obs_dim, action_dim, z_dim, hidden_dim)
self.mu_net = mlp(obs_dim, hidden_dim, "ntanh", hidden_dim, "relu", z_dim, "L2")
self.apply(utils.weight_init)
# self.W = nn.Linear(z_dim, z_dim, bias=False)
# nn.init.orthogonal_(self.W.weight.data, 1)
def forward(self, obs: torch.Tensor, action: torch.Tensor, next_obs: torch.Tensor, future_obs: torch.Tensor):
del action
del next_obs
assert future_obs is not None
# phi = self.feature_net(obs)
# future_phi = self.feature_net(future_obs)
# phi = F.normalize(phi, dim=1)
# future_phi = F.normalize(future_phi, dim=1)
phi = self.feature_net(obs)
future_mu = self.mu_net(future_obs)
phi = F.normalize(phi, dim=1)
future_mu = F.normalize(future_mu, dim=1)
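        # InfoNCE-style contrastive loss: diagonal entries of the similarity matrix are positive
        # (obs, future_obs) pairs from the same sampled transition, off-diagonal entries act as negatives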
logits = torch.einsum('sd, td-> st', phi, future_mu) # batch x batch
I = torch.eye(*logits.size(), device=logits.device)
off_diag = ~I.bool()
logits_off_diag = logits[off_diag].reshape(logits.shape[0], logits.shape[0] - 1)
loss = - logits.diag() + torch.logsumexp(logits_off_diag, dim=1)
loss = loss.mean()
return loss
# loss = - logits.diag().mean() + 0.5 * logits[off_diag].pow(2).mean()
# orthonormality loss
# Cov = torch.matmul(phi, phi.T)
# I = torch.eye(*Cov.size(), device=Cov.device)
# off_diag = ~I.bool()
# orth_loss_diag = - 2 * Cov.diag().mean()
# orth_loss_offdiag = Cov[off_diag].pow(2).mean()
# orth_loss = orth_loss_offdiag + orth_loss_diag
# loss += orth_loss
# normalize to compute cosine distance
# phi = F.normalize(phi, dim=1)
# future_phi = F.normalize(future_phi, dim=1)
# logits = torch.einsum('sd, td-> st', phi, future_phi) # batch x batch
# labels = torch.eye(*logits.size(), out=torch.empty_like(logits))
# # - labels * torch.log(torch.sigmoid(logits)) - (1 - labels) * torch.log(1 - torch.sigmoid(logits))
# loss = F.binary_cross_entropy(torch.sigmoid(logits), labels)
class ContrastiveFeaturev2(FeatureLearner):
def __init__(self, obs_dim, action_dim, z_dim, hidden_dim) -> None:
super().__init__(obs_dim, action_dim, z_dim, hidden_dim)
self.mu_net = mlp(obs_dim, hidden_dim, "ntanh", hidden_dim, "relu", z_dim, "L2")
self.apply(utils.weight_init)
# self.W = nn.Linear(z_dim, z_dim, bias=False)
# nn.init.orthogonal_(self.W.weight.data, 1)
def forward(self, obs: torch.Tensor, action: torch.Tensor, next_obs: torch.Tensor, future_obs: torch.Tensor):
del action
del next_obs
assert future_obs is not None
# phi = self.feature_net(obs)
# future_phi = self.feature_net(future_obs)
# phi = F.normalize(phi, dim=1)
# future_phi = F.normalize(future_phi, dim=1)
future_phi = self.feature_net(future_obs)
mu = self.mu_net(obs)
future_phi = F.normalize(future_phi, dim=1)
mu = F.normalize(mu, dim=1)
logits = torch.einsum('sd, td-> st', mu, future_phi) # batch x batch
I = torch.eye(*logits.size(), device=logits.device)
off_diag = ~I.bool()
logits_off_diag = logits[off_diag].reshape(logits.shape[0], logits.shape[0] - 1)
loss = - logits.diag() + torch.logsumexp(logits_off_diag, dim=1)
loss = loss.mean()
return loss
class ICM(FeatureLearner):
def __init__(self, obs_dim, action_dim, z_dim, hidden_dim) -> None:
super().__init__(obs_dim, action_dim, z_dim, hidden_dim)
# self.forward_dynamic_net = mlp(z_dim + action_dim, hidden_dim, 'irelu', hidden_dim, 'irelu', z_dim)
self.inverse_dynamic_net = mlp(2 * z_dim, hidden_dim, 'irelu', hidden_dim, 'irelu', action_dim, 'tanh')
self.apply(utils.weight_init)
def forward(self, obs: torch.Tensor, action: torch.Tensor, next_obs: torch.Tensor, future_obs: torch.Tensor):
del future_obs
phi = self.feature_net(obs)
next_phi = self.feature_net(next_obs)
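        # inverse-dynamics objective (ICM without the forward-model term): predict the action
        # from the embeddings of two consecutive observations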
# predicted_next_obs = self.forward_dynamic_net(torch.cat([phi, action], dim=-1))
# forward_error = (next_phi.detach() - predicted_next_obs).pow(2).mean()
predicted_action = self.inverse_dynamic_net(torch.cat([phi, next_phi], dim=-1))
backward_error = (action - predicted_action).pow(2).mean()
icm_loss = backward_error
# icm_loss = forward_error + backward_error
return icm_loss
class TransitionModel(FeatureLearner):
def __init__(self, obs_dim, action_dim, z_dim, hidden_dim) -> None:
super().__init__(obs_dim, action_dim, z_dim, hidden_dim)
self.forward_dynamic_net = mlp(z_dim + action_dim, hidden_dim, 'irelu', hidden_dim, 'irelu', obs_dim)
self.apply(utils.weight_init)
def forward(self, obs: torch.Tensor, action: torch.Tensor, next_obs: torch.Tensor, future_obs: torch.Tensor):
del future_obs
phi = self.feature_net(obs)
predicted_next_obs = self.forward_dynamic_net(torch.cat([phi, action], dim=-1))
forward_error = (predicted_next_obs - next_obs).pow(2).mean()
return forward_error
class TransitionLatentModel(FeatureLearner):
def __init__(self, obs_dim, action_dim, z_dim, hidden_dim) -> None:
super().__init__(obs_dim, action_dim, z_dim, hidden_dim)
self.forward_dynamic_net = mlp(z_dim + action_dim, hidden_dim, 'irelu', hidden_dim, 'irelu', z_dim)
self.target_feature_net = mlp(obs_dim, hidden_dim, "ntanh", hidden_dim, "relu", z_dim, "L2")
self.apply(utils.weight_init)
def forward(self, obs: torch.Tensor, action: torch.Tensor, next_obs: torch.Tensor, future_obs: torch.Tensor):
del future_obs
phi = self.feature_net(obs)
with torch.no_grad():
next_phi = self.target_feature_net(next_obs)
predicted_next_obs = self.forward_dynamic_net(torch.cat([phi, action], dim=-1))
forward_error = (predicted_next_obs - next_phi.detach()).pow(2).mean()
utils.soft_update_params(self.feature_net, self.target_feature_net, 0.01)
return forward_error
class AutoEncoder(FeatureLearner):
def __init__(self, obs_dim, action_dim, z_dim, hidden_dim) -> None:
super().__init__(obs_dim, action_dim, z_dim, hidden_dim)
self.decoder = mlp(z_dim, hidden_dim, 'irelu', hidden_dim, 'irelu', obs_dim)
self.apply(utils.weight_init)
def forward(self, obs: torch.Tensor, action: torch.Tensor, next_obs: torch.Tensor, future_obs: torch.Tensor):
del future_obs
del next_obs
del action
phi = self.feature_net(obs)
predicted_obs = self.decoder(phi)
reconstruction_error = (predicted_obs - obs).pow(2).mean()
return reconstruction_error
class SVDSR(FeatureLearner):
def __init__(self, obs_dim, action_dim, z_dim, hidden_dim) -> None:
super().__init__(obs_dim, action_dim, z_dim, hidden_dim)
self.mu_net = mlp(obs_dim, hidden_dim, "ntanh", hidden_dim, "relu", z_dim)
self.target_feature_net = mlp(obs_dim, hidden_dim, "ntanh", hidden_dim, "relu", z_dim, "L2")
self.target_mu_net = mlp(obs_dim, hidden_dim, "ntanh", hidden_dim, "relu", z_dim)
self.apply(utils.weight_init)
def forward(self, obs: torch.Tensor, action: torch.Tensor, next_obs: torch.Tensor, future_obs: torch.Tensor):
del future_obs
phi = self.feature_net(obs)
mu = self.mu_net(next_obs)
SR = torch.einsum("sd, td -> st", phi, mu)
with torch.no_grad():
target_phi = self.target_feature_net(next_obs)
target_mu = self.target_mu_net(next_obs)
target_SR = torch.einsum("sd, td -> st", target_phi, target_mu)
I = torch.eye(*SR.size(), device=SR.device)
off_diag = ~I.bool()
loss = - 2 * SR.diag().mean() + (SR - 0.99 * target_SR.detach())[off_diag].pow(2).mean()
# orthonormality loss
Cov = torch.matmul(phi, phi.T)
I = torch.eye(*Cov.size(), device=Cov.device)
off_diag = ~I.bool()
orth_loss_diag = - 2 * Cov.diag().mean()
orth_loss_offdiag = Cov[off_diag].pow(2).mean()
orth_loss = orth_loss_offdiag + orth_loss_diag
loss += orth_loss
utils.soft_update_params(self.feature_net, self.target_feature_net, 0.01)
utils.soft_update_params(self.mu_net, self.target_mu_net, 0.01)
return loss
class SVDSRv2(FeatureLearner):
def __init__(self, obs_dim, action_dim, z_dim, hidden_dim) -> None:
super().__init__(obs_dim, action_dim, z_dim, hidden_dim)
self.mu_net = mlp(obs_dim, hidden_dim, "ntanh", hidden_dim, "relu", z_dim)
self.target_feature_net = mlp(obs_dim, hidden_dim, "ntanh", hidden_dim, "relu", z_dim, "L2")
self.target_mu_net = mlp(obs_dim, hidden_dim, "ntanh", hidden_dim, "relu", z_dim)
self.apply(utils.weight_init)
def forward(self, obs: torch.Tensor, action: torch.Tensor, next_obs: torch.Tensor, future_obs: torch.Tensor):
del future_obs
phi = self.feature_net(next_obs)
mu = self.mu_net(obs)
SR = torch.einsum("sd, td -> st", mu, phi)
with torch.no_grad():
target_phi = self.target_feature_net(next_obs)
target_mu = self.target_mu_net(next_obs)
target_SR = torch.einsum("sd, td -> st", target_mu, target_phi)
I = torch.eye(*SR.size(), device=SR.device)
off_diag = ~I.bool()
loss = - 2 * SR.diag().mean() + (SR - 0.98 * target_SR.detach())[off_diag].pow(2).mean()
# orthonormality loss
Cov = torch.matmul(phi, phi.T)
I = torch.eye(*Cov.size(), device=Cov.device)
off_diag = ~I.bool()
orth_loss_diag = - 2 * Cov.diag().mean()
orth_loss_offdiag = Cov[off_diag].pow(2).mean()
orth_loss = orth_loss_offdiag + orth_loss_diag
loss += orth_loss
utils.soft_update_params(self.feature_net, self.target_feature_net, 0.01)
utils.soft_update_params(self.mu_net, self.target_mu_net, 0.01)
return loss
class SVDP(FeatureLearner):
def __init__(self, obs_dim, action_dim, z_dim, hidden_dim) -> None:
super().__init__(obs_dim, action_dim, z_dim, hidden_dim)
self.mu_net = mlp(obs_dim + action_dim, hidden_dim, "ntanh", hidden_dim, "relu", z_dim)
self.apply(utils.weight_init)
def forward(self, obs: torch.Tensor, action: torch.Tensor, next_obs: torch.Tensor, future_obs: torch.Tensor):
del future_obs
phi = self.feature_net(next_obs)
mu = self.mu_net(torch.cat([obs, action], dim=1))
P = torch.einsum("sd, td -> st", mu, phi)
I = torch.eye(*P.size(), device=P.device)
off_diag = ~I.bool()
loss = - 2 * P.diag().mean() + P[off_diag].pow(2).mean()
# orthonormality loss
Cov = torch.matmul(phi, phi.T)
I = torch.eye(*Cov.size(), device=Cov.device)
off_diag = ~I.bool()
orth_loss_diag = - 2 * Cov.diag().mean()
orth_loss_offdiag = Cov[off_diag].pow(2).mean()
orth_loss = orth_loss_offdiag + orth_loss_diag
loss += orth_loss
return loss
class FBFeatures(FeatureLearner):
def __init__(self, obs_dim, action_dim, z_dim, hidden_dim) -> None:
super().__init__(obs_dim, action_dim, z_dim, hidden_dim)
pt = Path("/private/home/atouati/controllable_agent/url_benchmark/exp_sweep/2022.08.03/"
"161531_fb_ddpg_point_mass_maze_reach_top_right_offline/1/models/snapshot_1000000.pt")
print(f"loading {pt.resolve()}")
with pt.open("rb") as f:
payload = torch.load(f)
self.fb_agent = payload["agent"]
self.feature_net = self.fb_agent.backward_net
self.feature_net.eval()
class SFAgent:
# pylint: disable=unused-argument
def __init__(self,
**kwargs: tp.Any
):
cfg = SFAgentConfig(**kwargs)
self.cfg = cfg
assert len(cfg.action_shape) == 1
self.action_dim = cfg.action_shape[0]
self.solved_meta: tp.Any = None
# models
if cfg.obs_type == 'pixels':
self.aug: nn.Module = utils.RandomShiftsAug(pad=4)
self.encoder: nn.Module = Encoder(cfg.obs_shape).to(cfg.device)
self.obs_dim = self.encoder.repr_dim
else:
self.aug = nn.Identity()
self.encoder = nn.Identity()
self.obs_dim = cfg.obs_shape[0]
if cfg.feature_dim < self.obs_dim:
logger.warning(f"feature_dim {cfg.feature_dim} should not be smaller that obs_dim {self.obs_dim}")
goal_dim = self.obs_dim
if cfg.goal_space is not None:
g = next(iter(_goals.goals.funcs[cfg.goal_space].values()))()
assert len(g.shape) == 1
goal_dim = len(g)
if cfg.z_dim < goal_dim:
logger.warning(f"z_dim {cfg.z_dim} should not be smaller that goal_dim {goal_dim}")
if cfg.feature_learner == "identity":
cfg.z_dim = goal_dim
self.cfg.z_dim = goal_dim
# create the network
if self.cfg.boltzmann:
self.actor: nn.Module = DiagGaussianActor(cfg.obs_type, self.obs_dim, cfg.z_dim, self.action_dim,
cfg.hidden_dim, cfg.log_std_bounds).to(cfg.device)
else:
self.actor = Actor(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
self.successor_net = ForwardMap(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
# build up the target network
self.successor_target_net = ForwardMap(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
learner = dict(icm=ICM, transition=TransitionModel, latent=TransitionLatentModel,
contrastive=ContrastiveFeature, autoencoder=AutoEncoder, lap=Laplacian,
random=FeatureLearner, FB=FBFeatures, svd_sr=SVDSR, svd_p=SVDP,
contrastivev2=ContrastiveFeaturev2, svd_srv2=SVDSRv2,
identity=Identity)[self.cfg.feature_learner]
self.feature_learner = learner(goal_dim, self.action_dim, cfg.z_dim, cfg.backward_hidden_dim).to(cfg.device)
# if cfg.debug:
# self.feature_learner: nn.Module = IdentityMap().to(cfg.device)
# self.feature_net = BackwardMap(cfg.obs_type, goal_dim, cfg.z_dim, cfg.backward_hidden_dim).to(cfg.device)
# load the weights into the target networks
self.successor_target_net.load_state_dict(self.successor_net.state_dict())
# optimizers
self.encoder_opt: tp.Optional[torch.optim.Adam] = None
if cfg.obs_type == 'pixels':
self.encoder_opt = torch.optim.Adam(self.encoder.parameters(), lr=cfg.lr)
self.actor_opt = torch.optim.Adam(self.actor.parameters(), lr=cfg.lr)
self.sf_opt = torch.optim.Adam(self.successor_net.parameters(), lr=cfg.lr)
self.phi_opt: tp.Optional[torch.optim.Adam] = None
if cfg.feature_learner not in ["random", "FB", "identity"]:
self.phi_opt = torch.optim.Adam(self.feature_learner.parameters(), lr=cfg.lr_coef * cfg.lr)
self.train()
self.successor_target_net.train()
self.inv_cov = torch.eye(self.cfg.z_dim, dtype=torch.float32, device=self.cfg.device)
# self.online_cov = OnlineCov(mom=0.99, dim=self.cfg.z_dim).to(self.cfg.device)
# self.online_cov.train()
def train(self, training: bool = True) -> None:
self.training = training
for net in [self.encoder, self.actor, self.successor_net]:
net.train(training)
if self.phi_opt is not None:
self.feature_learner.train()
def init_from(self, other) -> None:
# copy parameters over
names = ["encoder", "actor"]
if self.cfg.init_sf:
names += ["successor_net", "feature_learner", "successor_target_net"]
for name in names:
utils.hard_update_params(getattr(other, name), getattr(self, name))
for key, val in self.__dict__.items():
if isinstance(val, torch.optim.Optimizer):
val.load_state_dict(copy.deepcopy(getattr(other, key).state_dict()))
def precompute_cov(self, replay_loader: ReplayBuffer) -> None:
print("computing Cov of phi to be used at inference")
obs_list = []
batch_size = 0
while batch_size < self.cfg.num_inference_steps:
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
obs = batch.next_goal if self.cfg.goal_space is not None else batch.next_obs
if obs is None:
raise ValueError("Obs should never be None")
obs_list.append(obs)
batch_size += batch.next_obs.size(0)
obs = torch.cat(obs_list, 0)
self.inv_cov = self._compute_cov(obs)
# with torch.no_grad():
# phi = self.feature_learner.feature_net(obs)
# cov = torch.matmul(phi.T, phi) / phi.shape[0]
# self.inv_cov = torch.linalg.pinv(cov)
def _compute_cov(self, goal: torch.Tensor) -> torch.Tensor:
# compute inverse of cov of phi
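        # Cov is the empirical E[phi(g) phi(g)^T] over the sampled goals; its pseudo-inverse is cached
        # by the caller as self.inv_cov and used to whiten goal features at inference time (see get_goal_meta)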
with torch.no_grad():
phi = self.feature_learner.feature_net(goal)
cov = torch.matmul(phi.T, phi) / phi.shape[0]
inv_cov = torch.linalg.pinv(cov)
return inv_cov
def get_goal_meta(self, goal_array: np.ndarray) -> MetaDict:
# assert self.cfg.feature_learner in ["FB"]
desired_goal = torch.tensor(goal_array).unsqueeze(0).to(self.cfg.device)
with torch.no_grad():
z = self.feature_learner.feature_net(desired_goal)
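            # least-squares task vector for reaching this goal: z <- Cov(phi)^-1 phi(goal),
            # then rescaled below to the sqrt(z_dim)-radius sphere, as in sample_z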
z = torch.matmul(z, self.inv_cov) # 1 x z_dim
z = math.sqrt(self.cfg.z_dim) * F.normalize(z, dim=1)
z = z.squeeze(0).cpu().numpy()
meta = OrderedDict()
meta['z'] = z
return meta
def infer_meta(self, replay_loader: ReplayBuffer) -> MetaDict:
obs_list, reward_list = [], []
batch_size = 0
while batch_size < self.cfg.num_inference_steps:
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
obs_list.append(batch.next_goal if self.cfg.goal_space is not None else batch.next_obs)
reward_list.append(batch.reward)
batch_size += batch.next_obs.size(0)
obs, reward = torch.cat(obs_list, 0), torch.cat(reward_list, 0) # type: ignore
obs, reward = obs[:self.cfg.num_inference_steps], reward[:self.cfg.num_inference_steps]
return self.infer_meta_from_obs_and_rewards(obs, reward)
def infer_meta_from_obs_and_rewards(self, obs: torch.Tensor, reward: torch.Tensor) -> MetaDict:
print('max reward: ', reward.max().cpu().item())
print('99 percentile: ', torch.quantile(reward, 0.99).cpu().item())
print('median reward: ', reward.median().cpu().item())
print('min reward: ', reward.min().cpu().item())
print('mean reward: ', reward.mean().cpu().item())
print('num reward: ', reward.shape[0])
with torch.no_grad():
phi = self.feature_learner.feature_net(obs)
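        # closed-form linear regression: find z minimising ||phi z - reward||^2, so that
        # the reward is approximated by <phi(s), z>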
z = torch.linalg.lstsq(phi, reward).solution # z_dim x 1
        z = math.sqrt(self.cfg.z_dim) * F.normalize(z, dim=0)  # be careful with the dimension
meta = OrderedDict()
meta['z'] = z.squeeze().cpu().numpy()
# self.solved_meta = meta
return meta
def sample_z(self, size):
gaussian_rdv = torch.randn((size, self.cfg.z_dim), dtype=torch.float32)
z = math.sqrt(self.cfg.z_dim) * F.normalize(gaussian_rdv, dim=1)
return z
def init_meta(self) -> MetaDict:
if self.solved_meta is not None:
print('solved_meta')
return self.solved_meta
else:
z = self.sample_z(1)
z = z.squeeze().numpy()
meta = OrderedDict()
meta['z'] = z
return meta
# pylint: disable=unused-argument
def update_meta(
self,
meta: MetaDict,
global_step: int,
time_step: TimeStep,
finetune: bool = False,
replay_loader: tp.Optional[ReplayBuffer] = None
) -> MetaDict:
if global_step % self.cfg.update_z_every_step == 0:
return self.init_meta()
return meta
def act(self, obs, meta, step, eval_mode) -> tp.Any:
obs = torch.as_tensor(obs, device=self.cfg.device).unsqueeze(0) # type: ignore
h = self.encoder(obs)
z = torch.as_tensor(meta['z'], device=self.cfg.device).unsqueeze(0) # type: ignore
if self.cfg.boltzmann:
dist = self.actor(h, z)
else:
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(h, z, stddev)
if eval_mode:
action = dist.mean
else:
action = dist.sample()
if step < self.cfg.num_expl_steps:
action.uniform_(-1.0, 1.0)
return action.cpu().numpy()[0]
def update_sf(
self,
obs: torch.Tensor,
goal: torch.Tensor,
action: torch.Tensor,
discount: torch.Tensor,
next_obs: torch.Tensor,
next_goal: torch.Tensor,
future_goal: tp.Optional[torch.Tensor],
z: torch.Tensor,
step: int
) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
# compute target successor measure
with torch.no_grad():
if self.cfg.boltzmann:
dist = self.actor(next_obs, z)
next_action = dist.sample()
else:
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(next_obs, z, stddev)
next_action = dist.sample(clip=self.cfg.stddev_clip)
next_F1, next_F2 = self.successor_target_net(next_obs, z, next_action) # batch x z_dim
target_phi = self.feature_learner.feature_net(next_goal).detach() # batch x z_dim
next_Q1, next_Q2 = [torch.einsum('sd, sd -> s', next_Fi, z) for next_Fi in [next_F1, next_F2]]
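            # clipped double-Q at the feature level: per sample, keep the successor features of the
            # head with the smaller projected Q value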
next_F = torch.where((next_Q1 < next_Q2).reshape(-1, 1), next_F1, next_F2)
target_F = target_phi + discount * next_F
F1, F2 = self.successor_net(obs, z, action)
if not self.cfg.q_loss:
# compute SF loss
sf_loss = F.mse_loss(F1, target_F) + F.mse_loss(F2, target_F)
else:
# alternative loss
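            # "Q-loss" variant: regress the projections Q_i = <F_i, z> towards <target_F, z>,
            # i.e. a scalar TD error instead of the vector-valued SF error above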
Q1, Q2 = [torch.einsum('sd, sd -> s', Fi, z) for Fi in [F1, F2]]
target_Q = torch.einsum('sd, sd -> s', target_F, z)
sf_loss = F.mse_loss(Q1, target_Q) + F.mse_loss(Q2, target_Q)
# sf_loss /= self.cfg.z_dim
# compute feature loss
phi_loss = self.feature_learner(obs=goal, action=action, next_obs=next_goal, future_obs=future_goal)
if self.cfg.use_tb or self.cfg.use_wandb or self.cfg.use_hiplog:
metrics['target_F'] = target_F.mean().item()
metrics['F1'] = F1.mean().item()
metrics['phi'] = target_phi.mean().item()
metrics['phi_norm'] = torch.norm(target_phi, dim=-1).mean().item()
metrics['z_norm'] = torch.norm(z, dim=-1).mean().item()
metrics['sf_loss'] = sf_loss.item()
if phi_loss is not None:
metrics['phi_loss'] = phi_loss.item()
if isinstance(self.sf_opt, torch.optim.Adam):
metrics["sf_opt_lr"] = self.sf_opt.param_groups[0]["lr"]
# if self.cfg.goal_space in ["simplified_walker", "simplified_quadruped"]:
# metrics['max_velocity'] = goal_prime[:, -1].max().item()
# optimize SF
if self.encoder_opt is not None:
self.encoder_opt.zero_grad(set_to_none=True)
self.sf_opt.zero_grad(set_to_none=True)
sf_loss.backward()
self.sf_opt.step()
if self.encoder_opt is not None:
self.encoder_opt.step()
# optimise phi
if self.phi_opt is not None:
self.phi_opt.zero_grad(set_to_none=True)
phi_loss.backward()
self.phi_opt.step()
return metrics
def update_actor(self, obs: torch.Tensor, z: torch.Tensor, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if self.cfg.boltzmann:
dist = self.actor(obs, z)
action = dist.rsample()
else:
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(obs, z, stddev)
action = dist.sample(clip=self.cfg.stddev_clip)
log_prob = dist.log_prob(action).sum(-1, keepdim=True)
F1, F2 = self.successor_net(obs, z, action)
Q1 = torch.einsum('sd, sd -> s', F1, z)
Q2 = torch.einsum('sd, sd -> s', F2, z)
Q = torch.min(Q1, Q2)
actor_loss = (self.cfg.temp * log_prob - Q).mean() if self.cfg.boltzmann else -Q.mean()
# optimize actor
self.actor_opt.zero_grad(set_to_none=True)
actor_loss.backward()
self.actor_opt.step()
if self.cfg.use_tb or self.cfg.use_wandb:
metrics['actor_loss'] = actor_loss.item()
metrics['actor_logprob'] = log_prob.mean().item()
# metrics['actor_ent'] = dist.entropy().sum(dim=-1).mean().item()
return metrics
def aug_and_encode(self, obs: torch.Tensor) -> torch.Tensor:
obs = self.aug(obs)
return self.encoder(obs)
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if step % self.cfg.update_every_steps != 0:
return metrics
for _ in range(self.cfg.num_sf_updates):
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
obs = goal = batch.obs
action = batch.action
discount = batch.discount
next_obs = next_goal = batch.next_obs
future_goal = batch.future_obs
if self.cfg.goal_space:
assert batch.goal is not None
assert batch.next_goal is not None
goal = batch.goal
next_goal = batch.next_goal
future_goal = batch.future_goal
z = self.sample_z(self.cfg.batch_size).to(self.cfg.device)
            if not z.shape[-1] == self.cfg.z_dim:
                raise RuntimeError(f"Sampled z has last dimension {z.shape[-1]}, expected z_dim={self.cfg.z_dim}")
if self.cfg.mix_ratio > 0:
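                # relabel a fraction mix_ratio of the batch: replace the random task vector z by the
                # whitened feature of a permuted achieved goal, z <- Cov(phi)^-1 phi(g), so that part of
                # the updates train on goal-reaching tasks drawn from the buffer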
perm = torch.randperm(self.cfg.batch_size)
desired_goal = next_goal[perm]
with torch.no_grad():
phi = self.feature_learner.feature_net(desired_goal)
# compute inverse of cov of phi
cov = torch.matmul(phi.T, phi) / phi.shape[0]
inv_cov = torch.linalg.pinv(cov)
mix_idxs: tp.Any = np.where(np.random.uniform(size=self.cfg.batch_size) < self.cfg.mix_ratio)[0]
with torch.no_grad():
new_z = phi[mix_idxs]
new_z = torch.matmul(new_z, inv_cov) # batch_size x z_dim
new_z = math.sqrt(self.cfg.z_dim) * F.normalize(new_z, dim=1)
z[mix_idxs] = new_z
metrics.update(self.update_sf(obs=obs, goal=goal, action=action, discount=discount,
next_obs=next_obs, next_goal=next_goal, future_goal=future_goal,
z=z, step=step))
# update actor
metrics.update(self.update_actor(obs, z, step))
# update critic target
utils.soft_update_params(self.successor_net, self.successor_target_net,
self.cfg.sf_target_tau)
# update inv cov
# if step % self.cfg.update_cov_every_step == 0:
# logger.info("update online cov")
# obs_list = list()
# batch_size = 0
# while batch_size < 10000:
# batch = next(replay_loader)
# batch = batch.to(self.cfg.device)
# obs_list.append(batch.next_goal if self.cfg.goal_space is not None else batch.next_obs)
# batch_size += batch.next_obs.size(0)
# obs = torch.cat(obs_list, 0)
# with torch.no_grad():
# phi = self.feature_learner.feature_net(obs)
# self.inv_cov = torch.inverse(self.online_cov(phi))
return metrics
|
controllable_agent-main
|
url_benchmark/agent/sf.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=unused-import
import dataclasses
import typing as tp
import torch
from hydra.core.config_store import ConfigStore
from .sf import SFAgent, SFAgentConfig
@dataclasses.dataclass
class DiscreteSFAgentConfig(SFAgentConfig):
# @package agent
_target_: str = "url_benchmark.agent.discrete_sf.DiscreteSFAgent"
name: str = "discrete_sf"
cs = ConfigStore.instance()
cs.store(group="agent", name="discrete_sf", node=DiscreteSFAgentConfig)
class DiscreteSFAgent(SFAgent):
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
num_xy = 5
x = y = torch.linspace(-1, 1, num_xy, dtype=torch.float32, device=self.cfg.device)
XX, YY = torch.meshgrid(x, y)
X = XX.reshape(-1, 1)
Y = YY.reshape(-1, 1)
self.ACTION_GRID = torch.cat([X, Y], dim=1)
def greedy_action(self, obs, z):
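        # Discrete maximisation over the fixed 5x5 action grid: tile observations and task vectors so
        # every (obs, z) pair is scored against every candidate action, compute Q = min(<F1, z>, <F2, z>),
        # and return the arg-max action for each observation.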
OBS = obs.repeat(1, self.ACTION_GRID.shape[0]).reshape(self.ACTION_GRID.shape[0] * obs.shape[0], obs.shape[1])
Z = z.repeat(1, self.ACTION_GRID.shape[0]).reshape(self.ACTION_GRID.shape[0] * z.shape[0], z.shape[1])
ACTION = self.ACTION_GRID.repeat(obs.shape[0], 1)
F1, F2 = self.successor_net(OBS, Z, ACTION)
Q1, Q2 = [torch.einsum('sd, sd -> s', Fi, Z) for Fi in [F1, F2]]
Q = torch.min(Q1, Q2)
max_idx = Q.reshape(obs.shape[0], self.ACTION_GRID.shape[0]).max(dim=1)[1]
return self.ACTION_GRID[max_idx]
def act(self, obs, meta, step, eval_mode) -> tp.Any:
obs = torch.as_tensor(obs, device=self.cfg.device).unsqueeze(0) # type: ignore
obs = self.encoder(obs)
z = torch.as_tensor(meta['z'], device=self.cfg.device).unsqueeze(0) # type: ignore
action = self.greedy_action(obs, z)
if not eval_mode:
if step < self.cfg.num_expl_steps:
action.uniform_(-1.0, 1.0)
return action.cpu().numpy()[0]
def update_actor(self, obs: torch.Tensor, z: torch.Tensor, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
return metrics
def update_sf( # type: ignore
self,
obs: torch.Tensor,
action: torch.Tensor,
discount: torch.Tensor,
next_obs: torch.Tensor,
next_goal: torch.Tensor,
future_obs: tp.Optional[torch.Tensor],
z: torch.Tensor,
step: int
) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
# compute target successor measure
with torch.no_grad():
next_action = self.greedy_action(next_obs, z)
target_F1, target_F2 = self.successor_target_net(next_obs, z, next_action) # batch x z_dim
target_phi = self.feature_learner.feature_net(next_goal).detach() # batch x z_dim
target_F = torch.min(target_F1, target_F2)
target_F = target_phi + discount * target_F
# compute SF loss
F1, F2 = self.successor_net(obs, z, action)
# sf_loss = torch.norm(F1 - target_F, dim=-1, p='fro').mean()
# sf_loss += torch.norm(F2 - target_F, dim=-1, p='fro').mean()
sf_loss = (F1 - target_F).pow(2).mean()
sf_loss += (F2 - target_F).pow(2).mean()
# compute feature loss
phi_loss = self.feature_learner(obs=obs, action=action, next_obs=next_obs, future_obs=future_obs)
if self.cfg.use_tb or self.cfg.use_wandb or self.cfg.use_hiplog:
metrics['target_F'] = target_F.mean().item()
metrics['F1'] = F1.mean().item()
metrics['phi'] = target_phi.mean().item()
metrics['phi_norm'] = torch.norm(target_phi, dim=-1).mean().item()
metrics['z_norm'] = torch.norm(z, dim=-1).mean().item()
metrics['sf_loss'] = sf_loss.item()
if phi_loss is not None:
metrics['phi_loss'] = phi_loss.item()
if isinstance(self.sf_opt, torch.optim.Adam):
metrics["sf_opt_lr"] = self.sf_opt.param_groups[0]["lr"]
# if self.cfg.goal_space in ["simplified_walker", "simplified_quadruped"]:
# metrics['max_velocity'] = goal_prime[:, -1].max().item()
# optimize SF
if self.encoder_opt is not None:
self.encoder_opt.zero_grad(set_to_none=True)
self.sf_opt.zero_grad(set_to_none=True)
sf_loss.backward()
self.sf_opt.step()
if self.encoder_opt is not None:
self.encoder_opt.step()
# optimise phi
if self.phi_opt is not None:
self.phi_opt.zero_grad(set_to_none=True)
phi_loss.backward()
self.phi_opt.step()
return metrics
|
controllable_agent-main
|
url_benchmark/agent/discrete_sf.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pdb # pylint: disable=unused-import
import typing as tp
import dataclasses
from collections import OrderedDict
import logging
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from url_benchmark import utils
from hydra.core.config_store import ConfigStore
import omegaconf
from url_benchmark.dmc import TimeStep
from .ddpg import MetaDict
from .ddpg import Encoder
from .fb_modules import Actor, ForwardMap, mlp
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from url_benchmark import goals as _goals
logger = logging.getLogger(__name__)
@dataclasses.dataclass
class APSAgentConfig:
_target_: str = "url_benchmark.agent.new_aps.NEWAPSAgent"
name: str = "new_aps"
reward_free: bool = omegaconf.II("reward_free")
custom_reward: tp.Optional[str] = omegaconf.II("custom_reward")
obs_type: str = omegaconf.MISSING # to be specified later
obs_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
action_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
device: str = omegaconf.II("device") # ${device}
lr: float = 1e-4
sf_target_tau: float = 0.01
    update_every_steps: int = 2
use_tb: bool = omegaconf.II("use_tb") # ${use_tb}
use_wandb: bool = omegaconf.II("use_wandb") # ${use_wandb}
    use_hiplog: bool = omegaconf.II("use_hiplog") # ${use_hiplog}
num_expl_steps: int = omegaconf.MISSING
hidden_dim: int = 1024
feature_dim: int = 512
backward_hidden_dim: int = 512
stddev_schedule: str = "0.2" # "linear(1,0.2,200000)" # "0.2"
    stddev_clip: float = 0.3 # 1
nstep: int = 1
batch_size: int = 512 # 256 for pixels
init_critic: bool = True
goal_space: tp.Optional[str] = omegaconf.II("goal_space")
preprocess: bool = False
update_encoder: bool = omegaconf.II("update_encoder")
z_dim: int = 10
update_z_every_step: int = 100
knn_rms: bool = True
knn_k: int = 12
knn_avg: bool = True
knn_clip: float = 0.0001
num_init_steps: int = 4096 # set to ${num_train_frames} to disable finetune policy parameters
num_inference_steps: int = 5120
add_trunk: bool = False
lr_coef: float = 1
future_ratio: float = 0
cs = ConfigStore.instance()
cs.store(group="agent", name="new_aps", node=APSAgentConfig)
class FeatureNet(nn.Module):
def __init__(self, obs_dim, z_dim, hidden_dim) -> None:
super().__init__()
self.net = mlp(obs_dim, hidden_dim, "ntanh", hidden_dim, "relu", z_dim)
self.apply(utils.weight_init)
def forward(self, obs, norm=True):
phi = self.net(obs)
return F.normalize(phi, dim=1) if norm else phi
class FeatureLearner(nn.Module):
def __init__(self, obs_dim, z_dim, hidden_dim) -> None:
super().__init__()
self.feature_net = FeatureNet(obs_dim, z_dim, hidden_dim)
def forward(self, obs: torch.Tensor, z: torch.Tensor):
"""MLE loss"""
phi = self.feature_net(obs)
loss = -torch.einsum("bd,bd->b", phi, z).mean()
return loss
class NEWAPSAgent:
# pylint: disable=unused-argument
def __init__(self,
**kwargs: tp.Any
) -> None:
cfg = APSAgentConfig(**kwargs)
self.cfg = cfg
assert len(cfg.action_shape) == 1
self.action_dim = cfg.action_shape[0]
self.solved_meta: tp.Any = None
# models
if cfg.obs_type == 'pixels':
self.aug: nn.Module = utils.RandomShiftsAug(pad=4)
self.encoder: nn.Module = Encoder(cfg.obs_shape).to(cfg.device)
self.obs_dim = self.encoder.repr_dim
else:
self.aug = nn.Identity()
self.encoder = nn.Identity()
self.obs_dim = cfg.obs_shape[0]
if cfg.feature_dim < self.obs_dim:
logger.warning(f"feature_dim {cfg.feature_dim} should not be smaller that obs_dim {self.obs_dim}")
goal_dim = self.obs_dim
if cfg.goal_space is not None:
g = next(iter(_goals.goals.funcs[cfg.goal_space].values()))()
assert len(g.shape) == 1
goal_dim = len(g)
if cfg.z_dim < goal_dim:
logger.warning(f"z_dim {cfg.z_dim} should not be smaller that goal_dim {goal_dim}")
# create the network
self.actor = Actor(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
self.successor_net = ForwardMap(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
# build up the target network
self.successor_target_net = ForwardMap(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
self.feature_learner = FeatureLearner(goal_dim, cfg.z_dim, cfg.backward_hidden_dim).to(cfg.device)
# load the weights into the target networks
self.successor_target_net.load_state_dict(self.successor_net.state_dict())
# optimizers
self.encoder_opt: tp.Optional[torch.optim.Adam] = None
if cfg.obs_type == 'pixels':
self.encoder_opt = torch.optim.Adam(self.encoder.parameters(), lr=cfg.lr)
self.actor_opt = torch.optim.Adam(self.actor.parameters(), lr=cfg.lr)
self.sf_opt = torch.optim.Adam(self.successor_net.parameters(), lr=cfg.lr)
self.phi_opt = torch.optim.Adam(self.feature_learner.parameters(), lr=cfg.lr_coef * cfg.lr)
self.train()
self.successor_target_net.train()
# particle-based entropy
rms = utils.RMS(self.cfg.device)
self.pbe = utils.PBE(rms, cfg.knn_clip, cfg.knn_k, cfg.knn_avg, cfg.knn_rms,
cfg.device)
self.inv_cov = torch.eye(self.cfg.z_dim, dtype=torch.float32, device=self.cfg.device)
def train(self, training: bool = True) -> None:
self.training = training
for net in [self.encoder, self.actor, self.successor_net, self.feature_learner]:
net.train(training)
def sample_z(self, size):
gaussian_rdv = torch.randn((size, self.cfg.z_dim), dtype=torch.float32)
z = F.normalize(gaussian_rdv, dim=1)
return z
def init_meta(self) -> MetaDict:
if self.solved_meta is not None:
print('solved_meta')
return self.solved_meta
else:
z = self.sample_z(1)
z = z.squeeze().numpy()
meta = OrderedDict()
meta['z'] = z
return meta
# pylint: disable=unused-argument
def update_meta(
self,
meta: MetaDict,
global_step: int,
time_step: TimeStep,
finetune: bool = False,
replay_loader: tp.Optional[ReplayBuffer] = None
) -> MetaDict:
if global_step % self.cfg.update_z_every_step == 0:
return self.init_meta()
return meta
def act(self, obs, meta, step, eval_mode) -> tp.Any:
obs = torch.as_tensor(obs, device=self.cfg.device).unsqueeze(0) # type: ignore
h = self.encoder(obs)
z = torch.as_tensor(meta['z'], device=self.cfg.device).unsqueeze(0) # type: ignore
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(h, z, stddev)
if eval_mode:
action = dist.mean
else:
action = dist.sample()
if step < self.cfg.num_expl_steps:
action.uniform_(-1.0, 1.0)
return action.cpu().numpy()[0]
def precompute_cov(self, replay_loader: ReplayBuffer) -> None:
print("computing Cov of phi to be used at inference")
obs_list = list()
batch_size = 0
while batch_size < self.cfg.num_inference_steps:
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
obs = batch.next_goal if self.cfg.goal_space is not None else batch.next_obs
if obs is None:
raise ValueError("Obs should never be None")
obs_list.append(obs)
batch_size += batch.next_obs.size(0)
obs = torch.cat(obs_list, 0)
self.inv_cov = self._compute_cov(obs)
# with torch.no_grad():
# phi = self.feature_learner.feature_net(obs)
# cov = torch.matmul(phi.T, phi) / phi.shape[0]
# self.inv_cov = torch.linalg.pinv(cov)
def _compute_cov(self, goal: torch.Tensor) -> torch.Tensor:
# compute inverse of cov of phi
with torch.no_grad():
phi = self.feature_learner.feature_net(goal)
cov = torch.matmul(phi.T, phi) / phi.shape[0]
inv_cov = torch.inverse(cov)
return inv_cov
def get_goal_meta(self, goal_array: np.ndarray) -> MetaDict:
desired_goal = torch.tensor(goal_array).unsqueeze(0).to(self.cfg.device)
with torch.no_grad():
z = self.feature_learner.feature_net(desired_goal)
z = torch.matmul(z, self.inv_cov) # 1 x z_dim
z = F.normalize(z, dim=1)
z = z.squeeze(0).cpu().numpy()
meta = OrderedDict()
meta['z'] = z
return meta
def infer_meta(self, replay_loader: ReplayBuffer) -> MetaDict:
obs_list, reward_list = [], []
batch_size = 0
while batch_size < self.cfg.num_inference_steps:
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
obs_list.append(batch.next_goal if self.cfg.goal_space is not None else batch.next_obs)
reward_list.append(batch.reward)
batch_size += batch.next_obs.size(0)
obs, reward = torch.cat(obs_list, 0), torch.cat(reward_list, 0) # type: ignore
obs, reward = obs[:self.cfg.num_inference_steps], reward[:self.cfg.num_inference_steps]
return self.infer_meta_from_obs_and_rewards(obs, reward)
def infer_meta_from_obs_and_rewards(self, obs: torch.Tensor, reward: torch.Tensor) -> MetaDict:
print('max reward: ', reward.max().cpu().item())
print('99 percentile: ', torch.quantile(reward, 0.99).cpu().item())
print('median reward: ', reward.median().cpu().item())
print('min reward: ', reward.min().cpu().item())
print('mean reward: ', reward.mean().cpu().item())
print('num reward: ', reward.shape[0])
with torch.no_grad():
phi = self.feature_learner.feature_net(obs)
z = torch.linalg.lstsq(phi, reward).solution # z_dim x 1
        z = F.normalize(z, dim=0)  # be careful with the dimension
meta = OrderedDict()
meta['z'] = z.squeeze().cpu().numpy()
# self.solved_meta = meta
return meta
def update_phi(self, obs, z, step) -> tp.Dict[str, tp.Any]:
metrics: tp.Dict[str, float] = {}
loss = self.feature_learner(obs, z)
self.phi_opt.zero_grad(set_to_none=True)
loss.backward()
self.phi_opt.step()
if self.cfg.use_tb or self.cfg.use_wandb:
metrics['phi_loss'] = loss.item()
return metrics
def compute_intrinsic_reward(self, next_obs, z, step) -> tp.Tuple[tp.Any, tp.Any]:
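        # Two intrinsic terms: a particle-based (k-NN) entropy estimate over unnormalised features
        # (exploration bonus) and a task-alignment term <phi(next_obs), z> on normalised features.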
# maxent reward
with torch.no_grad():
phi = self.feature_learner.feature_net(next_obs, norm=False)
reward = self.pbe(phi)
entropy_reward = reward.reshape(-1, 1)
# successor feature reward
phi = F.normalize(phi, dim=1)
diayn_reward = torch.einsum("bi,bi->b", phi, z).reshape(-1, 1)
return entropy_reward, diayn_reward
def update_critic(self,
obs: torch.Tensor,
action: torch.Tensor,
reward: torch.Tensor,
discount: torch.Tensor,
next_obs: torch.Tensor,
z: torch.Tensor,
step: int) -> tp.Dict[str, tp.Any]:
"""diff is critic takes task as input"""
metrics: tp.Dict[str, float] = {}
# compute target critic
with torch.no_grad():
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(next_obs, z, stddev)
next_action = dist.sample(clip=self.cfg.stddev_clip)
next_F1, next_F2 = self.successor_target_net(next_obs, z, next_action) # batch x z_dim
next_Q1, next_Q2 = [torch.einsum('sd, sd -> s', next_Fi, z) for next_Fi in [next_F1, next_F2]]
next_Q = torch.min(next_Q1, next_Q2)
target_Q = reward + discount * next_Q.reshape(-1, 1)
target_Q = target_Q.squeeze(1)
F1, F2 = self.successor_net(obs, z, action)
Q1, Q2 = [torch.einsum('sd, sd -> s', Fi, z) for Fi in [F1, F2]]
sf_loss = F.mse_loss(Q1, target_Q) + F.mse_loss(Q2, target_Q)
# sf_loss /= self.cfg.z_dim
if self.cfg.use_tb or self.cfg.use_wandb or self.cfg.use_hiplog:
metrics['target_Q'] = target_Q.mean().item()
metrics['Q1'] = Q1.mean().item()
metrics['z_norm'] = torch.norm(z, dim=-1).mean().item()
metrics['sf_loss'] = sf_loss.item()
# optimize SF
self.sf_opt.zero_grad(set_to_none=True)
sf_loss.backward()
self.sf_opt.step()
return metrics
def update_actor(self, obs: torch.Tensor, z: torch.Tensor, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(obs, z, stddev)
action = dist.sample(clip=self.cfg.stddev_clip)
log_prob = dist.log_prob(action).sum(-1, keepdim=True)
F1, F2 = self.successor_net(obs, z, action)
Q1 = torch.einsum('sd, sd -> s', F1, z)
Q2 = torch.einsum('sd, sd -> s', F2, z)
Q = torch.min(Q1, Q2)
actor_loss = -Q.mean()
# optimize actor
self.actor_opt.zero_grad(set_to_none=True)
actor_loss.backward()
self.actor_opt.step()
if self.cfg.use_tb or self.cfg.use_wandb:
metrics['actor_loss'] = actor_loss.item()
metrics['actor_logprob'] = log_prob.mean().item()
return metrics
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if step % self.cfg.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
obs = batch.obs
action = batch.action
discount = batch.discount
reward = batch.reward
next_obs = next_goal = batch.next_obs
if self.cfg.goal_space is not None:
assert batch.next_goal is not None
next_goal = batch.next_goal
z = batch.meta["z"]
assert z.shape[-1] == self.cfg.z_dim
if self.cfg.reward_free:
# freeze successor features at finetuning phase
metrics.update(self.update_phi(next_goal, z, step))
with torch.no_grad():
entropy_reward, diayn_reward = self.compute_intrinsic_reward(next_goal, z, step)
intrinsic_reward = entropy_reward + diayn_reward
if self.cfg.use_tb or self.cfg.use_wandb:
metrics['intrinsic_reward'] = intrinsic_reward.mean().item()
metrics['entropy_reward'] = entropy_reward.mean().item()
metrics['diayn_reward'] = diayn_reward.mean().item()
reward = intrinsic_reward
if self.cfg.use_tb or self.cfg.use_wandb:
metrics['extrinsic_reward'] = batch.reward.mean().item()
# hindsight replay
if self.cfg.future_ratio > 0:
future_goal = batch.future_goal if self.cfg.goal_space else batch.future_obs
assert future_goal is not None
future_idxs = np.where(np.random.uniform(size=self.cfg.batch_size) < self.cfg.future_ratio)
with torch.no_grad():
phi = self.feature_learner.feature_net(future_goal)
# compute inverse of cov of phi
cov = torch.matmul(phi.T, phi) / phi.shape[0]
inv_cov = torch.linalg.pinv(cov)
new_z = phi[future_idxs]
new_z = torch.matmul(new_z, inv_cov) # batch_size x z_dim
new_z = F.normalize(new_z, dim=1)
z[future_idxs] = new_z
# update critic
metrics.update(
self.update_critic(obs=obs, action=action, reward=reward, discount=discount,
next_obs=next_obs, z=z, step=step))
# update actor
metrics.update(self.update_actor(obs=obs, z=z, step=step))
# update critic target
utils.soft_update_params(self.successor_net, self.successor_target_net,
self.cfg.sf_target_tau)
return metrics
|
controllable_agent-main
|
url_benchmark/agent/new_aps.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=unused-import
import pdb
import copy
import math
import logging
import dataclasses
from collections import OrderedDict
import typing as tp
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from hydra.core.config_store import ConfigStore
from url_benchmark import utils
# from url_benchmark import replay_buffer as rb
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from url_benchmark.dmc import TimeStep
from url_benchmark import goals as _goals
from .ddpg import MetaDict
from .fb_modules import IdentityMap
from .ddpg import Encoder
from .fb_modules import BackwardMap, mlp
logger = logging.getLogger(__name__)
from .fb_ddpg import FBDDPGAgentConfig
@dataclasses.dataclass
class DiscreteFBAgentConfig(FBDDPGAgentConfig):
# @package agent
_target_: str = "url_benchmark.agent.discrete_fb.DiscreteFBAgent"
name: str = "discrete_fb"
preprocess: bool = False
expl_eps: float = 0.2
boltzmann: bool = True
temp: float = 100
cs = ConfigStore.instance()
cs.store(group="agent", name="discrete_fb", node=DiscreteFBAgentConfig)
class ForwardMap(nn.Module):
""" forward representation class"""
def __init__(self, obs_dim, z_dim, action_dim, feature_dim, hidden_dim,
preprocess=False, add_trunk=True) -> None:
super().__init__()
self.obs_dim = obs_dim
self.z_dim = z_dim
self.action_dim = action_dim
self.preprocess = preprocess
if self.preprocess:
self.obs_net = mlp(self.obs_dim, hidden_dim, "ntanh", feature_dim, "irelu")
self.obs_z_net = mlp(self.obs_dim + self.z_dim, hidden_dim, "ntanh", feature_dim, "irelu")
if not add_trunk:
self.trunk: nn.Module = nn.Identity()
feature_dim = 2 * feature_dim
else:
self.trunk = mlp(2 * feature_dim, hidden_dim, "irelu")
feature_dim = hidden_dim
else:
self.trunk = mlp(self.obs_dim + self.z_dim, hidden_dim, "ntanh",
hidden_dim, "irelu",
hidden_dim, "irelu")
feature_dim = hidden_dim
seq = [feature_dim, hidden_dim, "irelu", self.z_dim * self.action_dim]
self.F1 = mlp(*seq)
self.F2 = mlp(*seq)
self.apply(utils.weight_init)
def forward(self, obs, z):
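# returns two forward embeddings of shape (batch, z_dim, action_dim): one z_dim-sized vector
# per discrete action, so Q(s, a, z) can be read off as <F(s, z)[:, :, a], z>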
assert z.shape[-1] == self.z_dim
if self.preprocess:
obs = self.obs_net(obs)
obs_z = self.obs_z_net(torch.cat([obs, z], dim=-1))
h = torch.cat([obs, obs_z], dim=-1)
else:
h = torch.cat([obs, z], dim=-1)
if hasattr(self, "trunk"):
h = self.trunk(h)
F1 = self.F1(h)
F2 = self.F2(h)
return F1.reshape(-1, self.z_dim, self.action_dim), F2.reshape(-1, self.z_dim, self.action_dim)
class DiscreteFBAgent:
# pylint: disable=unused-argument
def __init__(self,
**kwargs: tp.Any
):
cfg = DiscreteFBAgentConfig(**kwargs)
self.cfg = cfg
assert len(cfg.action_shape) == 1
self.action_dim = cfg.action_shape[0]
self.solved_meta: tp.Any = None
# models
if cfg.obs_type == 'pixels':
self.aug: nn.Module = utils.RandomShiftsAug(pad=4)
self.encoder: nn.Module = Encoder(cfg.obs_shape).to(cfg.device)
self.obs_dim = self.encoder.repr_dim
else:
self.aug = nn.Identity()
self.encoder = nn.Identity()
self.obs_dim = cfg.obs_shape[0]
if cfg.feature_dim < self.obs_dim:
logger.warning(f"feature_dim {cfg.feature_dim} should not be smaller that obs_dim {self.obs_dim}")
goal_dim = self.obs_dim
if cfg.goal_space is not None:
goal_dim = _goals.get_goal_space_dim(cfg.goal_space)
if cfg.z_dim < goal_dim:
logger.warning(f"z_dim {cfg.z_dim} should not be smaller that goal_dim {goal_dim}")
self.forward_net = ForwardMap(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
if cfg.debug:
self.backward_net: nn.Module = IdentityMap().to(cfg.device)
self.backward_target_net: nn.Module = IdentityMap().to(cfg.device)
else:
self.backward_net = BackwardMap(goal_dim, cfg.z_dim, cfg.backward_hidden_dim, norm_z=cfg.norm_z).to(
cfg.device)
self.backward_target_net = BackwardMap(goal_dim,
cfg.z_dim, cfg.backward_hidden_dim, norm_z=cfg.norm_z).to(cfg.device)
# build up the target network
self.forward_target_net = ForwardMap(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
# load the weights into the target networks
self.forward_target_net.load_state_dict(self.forward_net.state_dict())
self.backward_target_net.load_state_dict(self.backward_net.state_dict())
# optimizers
self.encoder_opt: tp.Optional[torch.optim.Adam] = None
if cfg.obs_type == 'pixels':
self.encoder_opt = torch.optim.Adam(self.encoder.parameters(), lr=cfg.lr)
self.fb_opt = torch.optim.Adam([{'params': self.forward_net.parameters()}, # type: ignore
{'params': self.backward_net.parameters(), 'lr': cfg.lr_coef * cfg.lr}],
lr=cfg.lr)
self.train()
self.forward_target_net.train()
self.backward_target_net.train()
def train(self, training: bool = True) -> None:
self.training = training
for net in [self.encoder, self.forward_net, self.backward_net]:
net.train(training)
def init_from(self, other) -> None:
# copy parameters over
names = ["encoder"]
if self.cfg.init_fb:
names += ["forward_net", "backward_net", "backward_target_net", "forward_target_net"]
for name in names:
utils.hard_update_params(getattr(other, name), getattr(self, name))
for key, val in self.__dict__.items():
if isinstance(val, torch.optim.Optimizer):
val.load_state_dict(copy.deepcopy(getattr(other, key).state_dict()))
def get_goal_meta(self, goal_array: np.ndarray) -> MetaDict:
desired_goal = torch.tensor(goal_array).unsqueeze(0).to(self.cfg.device)
with torch.no_grad():
z = self.backward_net(desired_goal)
if self.cfg.norm_z:
z = math.sqrt(self.cfg.z_dim) * F.normalize(z, dim=1)
z = z.squeeze(0).cpu().numpy()
meta = OrderedDict()
meta['z'] = z
return meta
def infer_meta(self, replay_loader: ReplayBuffer) -> MetaDict:
obs_list, reward_list = [], []
batch_size = 0
while batch_size < self.cfg.num_inference_steps:
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
obs_list.append(batch.next_goal if self.cfg.goal_space is not None else batch.next_obs)
reward_list.append(batch.reward)
batch_size += batch.next_obs.size(0)
obs, reward = torch.cat(obs_list, 0), torch.cat(reward_list, 0) # type: ignore
obs, reward = obs[:self.cfg.num_inference_steps], reward[:self.cfg.num_inference_steps]
return self.infer_meta_from_obs_and_rewards(obs, reward)
def infer_meta_from_obs_and_rewards(self, obs: torch.Tensor, reward: torch.Tensor) -> MetaDict:
print('max reward: ', reward.max().cpu().item())
print('99 percentile: ', torch.quantile(reward, 0.99).cpu().item())
print('median reward: ', reward.median().cpu().item())
print('min reward: ', reward.min().cpu().item())
print('mean reward: ', reward.mean().cpu().item())
print('num reward: ', reward.shape[0])
# filter out small reward
# pdb.set_trace()
# idx = torch.where(reward >= torch.quantile(reward, 0.99))[0]
# obs = obs[idx]
# reward = reward[idx]
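# infer the task vector as the reward-weighted average of backward embeddings,
# z ~= E[r(s) B(s)], a Monte-Carlo estimate over the sampled transitions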
with torch.no_grad():
B = self.backward_net(obs)
z = torch.matmul(reward.T, B) / reward.shape[0]
if self.cfg.norm_z:
z = math.sqrt(self.cfg.z_dim) * F.normalize(z, dim=1)
meta = OrderedDict()
meta['z'] = z.squeeze().cpu().numpy()
# self.solved_meta = meta
return meta
def sample_z(self, size, device: str = "cpu"):
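# sample task vectors z: a random direction rescaled to norm sqrt(z_dim) when norm_z is set,
# otherwise the direction is additionally rescaled coordinate-wise by uniform noise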
gaussian_rdv = torch.randn((size, self.cfg.z_dim), dtype=torch.float32, device=device)
gaussian_rdv = F.normalize(gaussian_rdv, dim=1)
if self.cfg.norm_z:
z = math.sqrt(self.cfg.z_dim) * gaussian_rdv
else:
uniform_rdv = torch.rand((size, self.cfg.z_dim), dtype=torch.float32, device=device)
z = np.sqrt(self.cfg.z_dim) * uniform_rdv * gaussian_rdv
return z
def init_meta(self) -> MetaDict:
if self.solved_meta is not None:
print('solved_meta')
return self.solved_meta
else:
z = self.sample_z(1)
z = z.squeeze().numpy()
meta = OrderedDict()
meta['z'] = z
return meta
# pylint: disable=unused-argument
def update_meta(
self,
meta: MetaDict,
global_step: int,
time_step: TimeStep,
finetune: bool = False,
replay_loader: tp.Optional[ReplayBuffer] = None
) -> MetaDict:
if global_step % self.cfg.update_z_every_step == 0 and np.random.rand() < self.cfg.update_z_proba:
return self.init_meta()
return meta
def act(self, obs, meta, step, eval_mode) -> tp.Any:
obs = torch.as_tensor(obs, device=self.cfg.device, dtype=torch.float32).unsqueeze(0) # type: ignore
h = self.encoder(obs)
z = torch.as_tensor(meta['z'], device=self.cfg.device).unsqueeze(0) # type: ignore
F1, F2 = self.forward_net(h, z)
Q1, Q2 = [torch.einsum('sda, sd -> sa', Fi, z) for Fi in [F1, F2]]
Q = torch.min(Q1, Q2)
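# greedy action w.r.t. the conservative Q; during exploration, fully random for the first
# num_expl_steps steps, then epsilon-greedy with probability expl_eps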
action = Q.max(1)[1]
if not eval_mode:
if step < self.cfg.num_expl_steps:
action = torch.randint_like(action, self.action_dim)
else:
action = torch.randint_like(action, self.action_dim) \
if np.random.rand() < self.cfg.expl_eps else action
return action.item()
def update_fb(
self,
obs: torch.Tensor,
action: torch.Tensor,
discount: torch.Tensor,
next_obs: torch.Tensor,
next_goal: torch.Tensor,
z: torch.Tensor,
step: int
) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
# compute target successor measure
with torch.no_grad():
# compute greedy action
target_F1, target_F2 = self.forward_target_net(next_obs, z)
next_Q1, next_Q2 = [torch.einsum('sda, sd -> sa', Fi, z) for Fi in [target_F1, target_F2]]
next_Q = torch.min(next_Q1, next_Q2)
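# target policy: either a Boltzmann distribution over action-values (soft backup) or the
# greedy argmax action (hard backup); in both cases the target forward embeddings are
# reduced from (batch, z_dim, action_dim) to (batch, z_dim)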
if self.cfg.boltzmann:
pi = F.softmax(next_Q / self.cfg.temp, dim=-1)
target_F1, target_F2 = [torch.einsum("sa, sda -> sd", pi, Fi) for Fi in [target_F1, target_F2]] # batch x z_dim
next_Q = torch.einsum("sa, sa -> s", pi, next_Q)
else:
next_action = next_Q.max(1)[1]
next_idx = next_action[:, None].repeat(1, self.cfg.z_dim)[:, :, None]
target_F1, target_F2 = [Fi.gather(-1, next_idx).squeeze() for Fi in [target_F1, target_F2]] # batch x z_dim
next_Q = next_Q.max(1)[0]
target_B = self.backward_target_net(next_goal) # batch x z_dim
target_M1, target_M2 = [torch.einsum('sd, td -> st', Fi, target_B) \
for Fi in [target_F1, target_F2]] # batch x batch
target_M = torch.min(target_M1, target_M2)
# compute FB loss
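# M(s, a, s') ~= F(s, a, z)^T B(s') approximates the successor measure: the diagonal
# (own next state) is pushed up while off-diagonal entries follow the Bellman residual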
idxs = action.repeat(1, self.cfg.z_dim)[:, :, None]
F1, F2 = [Fi.gather(-1, idxs).squeeze() for Fi in self.forward_net(obs, z)]
B = self.backward_net(next_goal)
M1 = torch.einsum('sd, td -> st', F1, B) # batch x batch
M2 = torch.einsum('sd, td -> st', F2, B) # batch x batch
I = torch.eye(*M1.size(), device=M1.device)
off_diag = ~I.bool()
fb_offdiag: tp.Any = 0.5 * sum((M - discount * target_M)[off_diag].pow(2).mean() for M in [M1, M2])
fb_diag: tp.Any = -sum(M.diag().mean() for M in [M1, M2])
fb_loss = fb_offdiag + fb_diag
# Q LOSS
if self.cfg.q_loss:
with torch.no_grad():
# next_Q1, nextQ2 = [torch.einsum('sd, sd -> s', target_Fi, z) for target_Fi in [target_F1, target_F2]]
# next_Q = torch.min(next_Q1, nextQ2)
cov = torch.matmul(B.T, B) / B.shape[0]
inv_cov = torch.linalg.pinv(cov)
implicit_reward = (torch.matmul(B, inv_cov) * z).sum(dim=1) # batch_size
target_Q = implicit_reward.detach() + discount.squeeze(1) * next_Q # batch_size
Q1, Q2 = [torch.einsum('sd, sd -> s', Fi, z) for Fi in [F1, F2]]
q_loss = F.mse_loss(Q1, target_Q) + F.mse_loss(Q2, target_Q)
fb_loss += self.cfg.q_loss_coef * q_loss
# ORTHONORMALITY LOSS FOR BACKWARD EMBEDDING
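# encourages B B^T ~ I on the batch, i.e. keeps the backward embeddings spread out and roughly orthonormal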
Cov = torch.matmul(B, B.T)
orth_loss_diag = - 2 * Cov.diag().mean()
orth_loss_offdiag = Cov[off_diag].pow(2).mean()
orth_loss = orth_loss_offdiag + orth_loss_diag
fb_loss += self.cfg.ortho_coef * orth_loss
# Cov = torch.cov(B.T) # Vicreg loss
# var_loss = F.relu(1 - Cov.diag().clamp(1e-4, 1).sqrt()).mean() # eps avoids inf. sqrt gradient at 0
# cov_loss = 2 * torch.triu(Cov, diagonal=1).pow(2).mean() # 2x upper triangular part
# orth_loss = var_loss + cov_loss
# fb_loss += self.cfg.ortho_coef * orth_loss
if self.cfg.use_tb or self.cfg.use_wandb or self.cfg.use_hiplog:
metrics['target_M'] = target_M.mean().item()
metrics['M1'] = M1.mean().item()
metrics['F1'] = F1.mean().item()
metrics['B'] = B.mean().item()
metrics['B_norm'] = torch.norm(B, dim=-1).mean().item()
metrics['z_norm'] = torch.norm(z, dim=-1).mean().item()
metrics['fb_loss'] = fb_loss.item()
metrics['fb_diag'] = fb_diag.item()
metrics['fb_offdiag'] = fb_offdiag.item()
if self.cfg.q_loss:
metrics['q_loss'] = q_loss.item()
metrics['orth_loss'] = orth_loss.item()
metrics['orth_loss_diag'] = orth_loss_diag.item()
metrics['orth_loss_offdiag'] = orth_loss_offdiag.item()
if self.cfg.q_loss:
metrics['q_loss'] = q_loss.item()
eye_diff = torch.matmul(B.T, B) / B.shape[0] - torch.eye(B.shape[1], device=B.device)
metrics['orth_linf'] = torch.max(torch.abs(eye_diff)).item()
metrics['orth_l2'] = eye_diff.norm().item() / math.sqrt(B.shape[1])
if isinstance(self.fb_opt, torch.optim.Adam):
metrics["fb_opt_lr"] = self.fb_opt.param_groups[0]["lr"]
# optimize FB
if self.encoder_opt is not None:
self.encoder_opt.zero_grad(set_to_none=True)
self.fb_opt.zero_grad(set_to_none=True)
fb_loss.backward()
self.fb_opt.step()
if self.encoder_opt is not None:
self.encoder_opt.step()
return metrics
def aug_and_encode(self, obs: torch.Tensor) -> torch.Tensor:
obs = self.aug(obs)
return self.encoder(obs)
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if step % self.cfg.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
# pdb.set_trace()
obs = batch.obs
action = batch.action.type(torch.int64)
discount = batch.discount
next_obs = next_goal = batch.next_obs
if self.cfg.goal_space is not None:
assert batch.next_goal is not None
next_goal = batch.next_goal
# if len(batch.meta) == 1 and batch.meta[0].shape[-1] == self.cfg.z_dim:
# z = batch.meta[0]
# invalid = torch.linalg.norm(z, dim=1) < 1e-15
# if sum(invalid):
# z[invalid, :] = self.sample_z(sum(invalid)).to(self.cfg.device)
# else:
z = self.sample_z(self.cfg.batch_size, device=self.cfg.device)
if z.shape[-1] != self.cfg.z_dim:
raise RuntimeError("There's something wrong with the logic here")
# obs = self.aug_and_encode(batch.obs)
# next_obs = self.aug_and_encode(batch.next_obs)
# if not self.cfg.update_encoder:
# obs = obs.detach()
# next_obs = next_obs.detach()
backward_input = batch.obs
future_goal = batch.future_obs
if self.cfg.goal_space is not None:
assert batch.goal is not None
backward_input = batch.goal
future_goal = batch.future_goal
perm = torch.randperm(self.cfg.batch_size)
backward_input = backward_input[perm]
if self.cfg.mix_ratio > 0:
mix_idxs: tp.Any = np.where(np.random.uniform(size=self.cfg.batch_size) < self.cfg.mix_ratio)[0]
if not self.cfg.rand_weight:
with torch.no_grad():
mix_z = self.backward_net(backward_input[mix_idxs]).detach()
else:
# generate random weight
weight = torch.rand(size=(mix_idxs.shape[0], self.cfg.batch_size)).to(self.cfg.device)
weight = F.normalize(weight, dim=1)
uniform_rdv = torch.rand(mix_idxs.shape[0], 1).to(self.cfg.device)
weight = uniform_rdv * weight
with torch.no_grad():
mix_z = torch.matmul(weight, self.backward_net(backward_input).detach())
if self.cfg.norm_z:
mix_z = math.sqrt(self.cfg.z_dim) * F.normalize(mix_z, dim=1)
z[mix_idxs] = mix_z
# hindsight replay
if self.cfg.future_ratio > 0:
assert future_goal is not None
future_idxs = np.where(np.random.uniform(size=self.cfg.batch_size) < self.cfg.future_ratio)
z[future_idxs] = self.backward_net(future_goal[future_idxs]).detach()
metrics.update(self.update_fb(obs=obs, action=action, discount=discount,
next_obs=next_obs, next_goal=next_goal, z=z, step=step))
# update critic target
utils.soft_update_params(self.forward_net, self.forward_target_net,
self.cfg.fb_target_tau)
utils.soft_update_params(self.backward_net, self.backward_target_net,
self.cfg.fb_target_tau)
return metrics
|
controllable_agent-main
|
url_benchmark/agent/discrete_fb.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import dataclasses
import typing as tp
import torch
from torch import nn
from hydra.core.config_store import ConfigStore
import omegaconf
from url_benchmark import utils
from .ddpg import DDPGAgent
from .ddpg import DDPGAgentConfig as _BaseConfig
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from typing import Any, Dict, Tuple
@dataclasses.dataclass
class ICMAPTAgentConfig(_BaseConfig):
_target_: str = "url_benchmark.agent.icm_apt.ICMAPTAgent"
name: str = "icm_apt"
update_encoder: bool = omegaconf.II("update_encoder")
icm_rep_dim: int = 512
icm_scale: float = 1.0
knn_rms: bool = False
knn_k: int = 12
knn_avg: bool = True
knn_clip: float = 0.0
cs = ConfigStore.instance()
cs.store(group="agent", name="icm_apt", node=ICMAPTAgentConfig)
class ICM(nn.Module):
"""
Same as ICM, with a trunk to save memory for KNN
"""
def __init__(self, obs_dim, action_dim, hidden_dim, icm_rep_dim) -> None:
super().__init__()
self.trunk = nn.Sequential(nn.Linear(obs_dim, icm_rep_dim),
nn.LayerNorm(icm_rep_dim), nn.Tanh())
self.forward_net = nn.Sequential(
nn.Linear(icm_rep_dim + action_dim, hidden_dim), nn.ReLU(),
nn.Linear(hidden_dim, icm_rep_dim))
self.backward_net = nn.Sequential(
nn.Linear(2 * icm_rep_dim, hidden_dim), nn.ReLU(),
nn.Linear(hidden_dim, action_dim), nn.Tanh())
self.apply(utils.weight_init)
def forward(self, obs, action, next_obs) -> Tuple[Any, Any]:
assert obs.shape[0] == next_obs.shape[0]
assert obs.shape[0] == action.shape[0]
obs = self.trunk(obs)
next_obs = self.trunk(next_obs)
next_obs_hat = self.forward_net(torch.cat([obs, action], dim=-1))
action_hat = self.backward_net(torch.cat([obs, next_obs], dim=-1))
forward_error = torch.norm(next_obs - next_obs_hat,
dim=-1,
p=2,
keepdim=True)
backward_error = torch.norm(action - action_hat,
dim=-1,
p=2,
keepdim=True)
return forward_error, backward_error
def get_rep(self, obs, action) -> Any:
rep = self.trunk(obs)
return rep
class ICMAPTAgent(DDPGAgent):
def __init__(self, **kwargs) -> None:
cfg = ICMAPTAgentConfig(**kwargs)
super().__init__(**kwargs)
self.cfg = cfg # override base ddpg cfg type
self.icm = ICM(self.obs_dim, self.action_dim, self.hidden_dim,
cfg.icm_rep_dim).to(self.device)
# optimizers
self.icm_opt = torch.optim.Adam(self.icm.parameters(), lr=self.lr)
self.icm.train()
# particle-based entropy
rms = utils.RMS(self.device)
self.pbe = utils.PBE(rms, cfg.knn_clip, cfg.knn_k, cfg.knn_avg, cfg.knn_rms,
self.device)
def update_icm(self, obs, action, next_obs, step) -> Dict[str, Any]:
metrics: tp.Dict[str, float] = {}
forward_error, backward_error = self.icm(obs, action, next_obs)
loss = forward_error.mean() + backward_error.mean()
self.icm_opt.zero_grad()
if self.encoder_opt is not None:
self.encoder_opt.zero_grad(set_to_none=True)
loss.backward()
self.icm_opt.step()
if self.encoder_opt is not None:
self.encoder_opt.step()
if self.use_tb or self.use_wandb:
metrics['icm_loss'] = loss.item()
return metrics
def compute_intr_reward(self, obs, action, next_obs, step) -> Any:
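# APT-style intrinsic reward: a particle-based entropy estimate (k-NN distances) computed
# on the ICM trunk representation of the observation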
rep = self.icm.get_rep(obs, action)
reward = self.pbe(rep)
reward = reward.reshape(-1, 1)
return reward
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if step % self.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size)
obs, action, extr_reward, discount, next_obs = batch.to(self.device).unpack()
# augment and encode
obs = self.aug_and_encode(obs)
with torch.no_grad():
next_obs = self.aug_and_encode(next_obs)
if self.reward_free:
metrics.update(self.update_icm(obs, action, next_obs, step))
with torch.no_grad():
intr_reward = self.compute_intr_reward(obs, action, next_obs,
step)
reward = intr_reward
else:
reward = extr_reward
if self.use_tb or self.use_wandb:
metrics['extr_reward'] = extr_reward.mean().item()
metrics['intr_reward'] = intr_reward.mean().item()
metrics['batch_reward'] = reward.mean().item()
if not self.update_encoder:
obs = obs.detach()
next_obs = next_obs.detach()
# update critic
metrics.update(
self.update_critic(obs.detach(), action, reward, discount,
next_obs.detach(), step))
# update actor
metrics.update(self.update_actor(obs.detach(), step))
# update critic target
utils.soft_update_params(self.critic, self.critic_target,
self.critic_target_tau)
return metrics
|
controllable_agent-main
|
url_benchmark/agent/icm_apt.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=unused-import
import pdb
import copy
import math
import logging
import dataclasses
from collections import OrderedDict
import typing as tp
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from hydra.core.config_store import ConfigStore
import omegaconf
from dm_env import specs
from url_benchmark import utils
# from url_benchmark import replay_buffer as rb
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from url_benchmark.dmc import TimeStep
from url_benchmark import goals as _goals
from .ddpg import MetaDict
from .fb_modules import IdentityMap
from .ddpg import Encoder
from .fb_modules import Actor, DiagGaussianActor, ForwardMap, BackwardMap, OnlineCov
logger = logging.getLogger(__name__)
@dataclasses.dataclass
class FBDDPGAgentConfig:
# @package agent
_target_: str = "url_benchmark.agent.fb_ddpg.FBDDPGAgent"
name: str = "fb_ddpg"
# reward_free: ${reward_free}
obs_type: str = omegaconf.MISSING # to be specified later
obs_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
action_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
device: str = omegaconf.II("device") # ${device}
lr: float = 1e-4
lr_coef: float = 1
fb_target_tau: float = 0.01 # 0.001-0.01
update_every_steps: int = 2
use_tb: bool = omegaconf.II("use_tb") # ${use_tb}
use_wandb: bool = omegaconf.II("use_wandb") # ${use_wandb}
use_hiplog: bool = omegaconf.II("use_hiplog") # ${use_hiplog}
num_expl_steps: int = omegaconf.MISSING # ??? # to be specified later
num_inference_steps: int = 5120
hidden_dim: int = 1024 # 128, 2048
backward_hidden_dim: int = 526 # 512
feature_dim: int = 512 # 128, 1024
z_dim: int = 50 # 100
stddev_schedule: str = "0.2" # "linear(1,0.2,200000)" #
stddev_clip: float = 0.3 # 1
update_z_every_step: int = 300
update_z_proba: float = 1.0
nstep: int = 1
batch_size: int = 1024 # 512
init_fb: bool = True
update_encoder: bool = omegaconf.II("update_encoder") # ${update_encoder}
goal_space: tp.Optional[str] = omegaconf.II("goal_space")
ortho_coef: float = 1.0 # 0.01-10
log_std_bounds: tp.Tuple[float, float] = (-5, 2) # param for DiagGaussianActor
temp: float = 1 # temperature for DiagGaussianActor
boltzmann: bool = False # set to true for DiagGaussianActor
debug: bool = False
future_ratio: float = 0.0
mix_ratio: float = 0.5 # 0-1
rand_weight: bool = False # True, False
preprocess: bool = True
norm_z: bool = True
q_loss: bool = False
q_loss_coef: float = 0.01
additional_metric: bool = False
add_trunk: bool = False
cs = ConfigStore.instance()
cs.store(group="agent", name="fb_ddpg", node=FBDDPGAgentConfig)
class FBDDPGAgent:
# pylint: disable=unused-argument
def __init__(self,
**kwargs: tp.Any
):
cfg = FBDDPGAgentConfig(**kwargs)
self.cfg = cfg
assert len(cfg.action_shape) == 1
self.action_dim = cfg.action_shape[0]
self.solved_meta: tp.Any = None
# models
if cfg.obs_type == 'pixels':
self.aug: nn.Module = utils.RandomShiftsAug(pad=4)
self.encoder: nn.Module = Encoder(cfg.obs_shape).to(cfg.device)
self.obs_dim = self.encoder.repr_dim
else:
self.aug = nn.Identity()
self.encoder = nn.Identity()
self.obs_dim = cfg.obs_shape[0]
if cfg.feature_dim < self.obs_dim:
logger.warning(f"feature_dim {cfg.feature_dim} should not be smaller that obs_dim {self.obs_dim}")
goal_dim = self.obs_dim
if cfg.goal_space is not None:
goal_dim = _goals.get_goal_space_dim(cfg.goal_space)
if cfg.z_dim < goal_dim:
logger.warning(f"z_dim {cfg.z_dim} should not be smaller that goal_dim {goal_dim}")
# create the network
if self.cfg.boltzmann:
self.actor: nn.Module = DiagGaussianActor(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.hidden_dim, cfg.log_std_bounds).to(cfg.device)
else:
self.actor = Actor(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
self.forward_net = ForwardMap(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
if cfg.debug:
self.backward_net: nn.Module = IdentityMap().to(cfg.device)
self.backward_target_net: nn.Module = IdentityMap().to(cfg.device)
else:
self.backward_net = BackwardMap(goal_dim, cfg.z_dim, cfg.backward_hidden_dim, norm_z=cfg.norm_z).to(cfg.device)
self.backward_target_net = BackwardMap(goal_dim,
cfg.z_dim, cfg.backward_hidden_dim, norm_z=cfg.norm_z).to(cfg.device)
# build up the target network
self.forward_target_net = ForwardMap(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
# load the weights into the target networks
self.forward_target_net.load_state_dict(self.forward_net.state_dict())
self.backward_target_net.load_state_dict(self.backward_net.state_dict())
# optimizers
self.encoder_opt: tp.Optional[torch.optim.Adam] = None
if cfg.obs_type == 'pixels':
self.encoder_opt = torch.optim.Adam(self.encoder.parameters(), lr=cfg.lr)
self.actor_opt = torch.optim.Adam(self.actor.parameters(), lr=cfg.lr)
# params = [p for net in [self.forward_net, self.backward_net] for p in net.parameters()]
# self.fb_opt = torch.optim.Adam(params, lr=cfg.lr)
self.fb_opt = torch.optim.Adam([{'params': self.forward_net.parameters()}, # type: ignore
{'params': self.backward_net.parameters(), 'lr': cfg.lr_coef * cfg.lr}],
lr=cfg.lr)
self.train()
self.forward_target_net.train()
self.backward_target_net.train()
self.actor_success: tp.List[float] = [] # only for debugging, can be removed eventually
# self.inv_cov = torch.eye(self.cfg.z_dim, dtype=torch.float32, device=self.cfg.device)
# self.online_cov = OnlineCov(mom=0.99, dim=self.cfg.z_dim).to(self.cfg.device)
# self.online_cov.train()
def train(self, training: bool = True) -> None:
self.training = training
for net in [self.encoder, self.actor, self.forward_net, self.backward_net]:
net.train(training)
def init_from(self, other) -> None:
# copy parameters over
names = ["encoder", "actor"]
if self.cfg.init_fb:
names += ["forward_net", "backward_net", "backward_target_net", "forward_target_net"]
for name in names:
utils.hard_update_params(getattr(other, name), getattr(self, name))
for key, val in self.__dict__.items():
if isinstance(val, torch.optim.Optimizer):
val.load_state_dict(copy.deepcopy(getattr(other, key).state_dict()))
def get_goal_meta(self, goal_array: np.ndarray) -> MetaDict:
desired_goal = torch.tensor(goal_array).unsqueeze(0).to(self.cfg.device)
with torch.no_grad():
z = self.backward_net(desired_goal)
if self.cfg.norm_z:
z = math.sqrt(self.cfg.z_dim) * F.normalize(z, dim=1)
z = z.squeeze(0).cpu().numpy()
meta = OrderedDict()
meta['z'] = z
return meta
def infer_meta(self, replay_loader: ReplayBuffer) -> MetaDict:
obs_list, reward_list = [], []
batch_size = 0
while batch_size < self.cfg.num_inference_steps:
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
obs_list.append(batch.next_goal if self.cfg.goal_space is not None else batch.next_obs)
reward_list.append(batch.reward)
batch_size += batch.next_obs.size(0)
obs, reward = torch.cat(obs_list, 0), torch.cat(reward_list, 0) # type: ignore
obs, reward = obs[:self.cfg.num_inference_steps], reward[:self.cfg.num_inference_steps]
return self.infer_meta_from_obs_and_rewards(obs, reward)
def infer_meta_from_obs_and_rewards(self, obs: torch.Tensor, reward: torch.Tensor) -> MetaDict:
print('max reward: ', reward.max().cpu().item())
print('99 percentile: ', torch.quantile(reward, 0.99).cpu().item())
print('median reward: ', reward.median().cpu().item())
print('min reward: ', reward.min().cpu().item())
print('mean reward: ', reward.mean().cpu().item())
print('num reward: ', reward.shape[0])
# filter out small reward
# pdb.set_trace()
# idx = torch.where(reward >= torch.quantile(reward, 0.99))[0]
# obs = obs[idx]
# reward = reward[idx]
with torch.no_grad():
B = self.backward_net(obs)
z = torch.matmul(reward.T, B) / reward.shape[0]
if self.cfg.norm_z:
z = math.sqrt(self.cfg.z_dim) * F.normalize(z, dim=1)
meta = OrderedDict()
meta['z'] = z.squeeze().cpu().numpy()
# self.solved_meta = meta
return meta
def sample_z(self, size, device: str = "cpu"):
gaussian_rdv = torch.randn((size, self.cfg.z_dim), dtype=torch.float32, device=device)
gaussian_rdv = F.normalize(gaussian_rdv, dim=1)
if self.cfg.norm_z:
z = math.sqrt(self.cfg.z_dim) * gaussian_rdv
else:
uniform_rdv = torch.rand((size, self.cfg.z_dim), dtype=torch.float32, device=device)
z = np.sqrt(self.cfg.z_dim) * uniform_rdv * gaussian_rdv
return z
def init_meta(self) -> MetaDict:
if self.solved_meta is not None:
print('solved_meta')
return self.solved_meta
else:
z = self.sample_z(1)
z = z.squeeze().numpy()
meta = OrderedDict()
meta['z'] = z
return meta
# pylint: disable=unused-argument
def update_meta(
self,
meta: MetaDict,
global_step: int,
time_step: TimeStep,
finetune: bool = False,
replay_loader: tp.Optional[ReplayBuffer] = None
) -> MetaDict:
if global_step % self.cfg.update_z_every_step == 0 and np.random.rand() < self.cfg.update_z_proba:
return self.init_meta()
return meta
def act(self, obs, meta, step, eval_mode) -> tp.Any:
obs = torch.as_tensor(obs, device=self.cfg.device, dtype=torch.float32).unsqueeze(0) # type: ignore
h = self.encoder(obs)
z = torch.as_tensor(meta['z'], device=self.cfg.device).unsqueeze(0) # type: ignore
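# the policy is conditioned on (obs, z): a DiagGaussianActor when boltzmann is set,
# otherwise an actor with an externally scheduled exploration stddev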
if self.cfg.boltzmann:
dist = self.actor(h, z)
else:
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(h, z, stddev)
if eval_mode:
action = dist.mean
if self.cfg.additional_metric:
# the following is doing extra computation only used for metrics,
# it should be deactivated eventually
F_mean_s = self.forward_net(obs, z, action)
# F_samp_s = self.forward_net(obs, z, dist.sample())
F_rand_s = self.forward_net(obs, z, torch.zeros_like(action).uniform_(-1.0, 1.0))
Qs = [torch.min(*(torch.einsum('sd, sd -> s', F, z) for F in Fs)) for Fs in [F_mean_s, F_rand_s]]
self.actor_success = (Qs[0] > Qs[1]).cpu().numpy().tolist()
else:
action = dist.sample()
if step < self.cfg.num_expl_steps:
action.uniform_(-1.0, 1.0)
return action.cpu().numpy()[0]
def compute_z_correl(self, time_step: TimeStep, meta: MetaDict) -> float:
goal = time_step.goal if self.cfg.goal_space is not None else time_step.observation # type: ignore
with torch.no_grad():
zs = [torch.Tensor(x).unsqueeze(0).float().to(self.cfg.device) for x in [goal, meta["z"]]]
zs[0] = self.backward_net(zs[0])
zs = [F.normalize(z, 1) for z in zs]
return torch.matmul(zs[0], zs[1].T).item()
def update_fb(
self,
obs: torch.Tensor,
action: torch.Tensor,
discount: torch.Tensor,
next_obs: torch.Tensor,
next_goal: torch.Tensor,
z: torch.Tensor,
step: int
) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
# compute target successor measure
with torch.no_grad():
if self.cfg.boltzmann:
dist = self.actor(next_obs, z)
next_action = dist.sample()
else:
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(next_obs, z, stddev)
next_action = dist.sample(clip=self.cfg.stddev_clip)
target_F1, target_F2 = self.forward_target_net(next_obs, z, next_action) # batch x z_dim
target_B = self.backward_target_net(next_goal) # batch x z_dim
target_M1 = torch.einsum('sd, td -> st', target_F1, target_B) # batch x batch
target_M2 = torch.einsum('sd, td -> st', target_F2, target_B) # batch x batch
target_M = torch.min(target_M1, target_M2)
# compute FB loss
F1, F2 = self.forward_net(obs, z, action)
B = self.backward_net(next_goal)
M1 = torch.einsum('sd, td -> st', F1, B) # batch x batch
M2 = torch.einsum('sd, td -> st', F2, B) # batch x batch
I = torch.eye(*M1.size(), device=M1.device)
off_diag = ~I.bool()
fb_offdiag: tp.Any = 0.5 * sum((M - discount * target_M)[off_diag].pow(2).mean() for M in [M1, M2])
fb_diag: tp.Any = -sum(M.diag().mean() for M in [M1, M2])
fb_loss = fb_offdiag + fb_diag
# Q LOSS
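# optional auxiliary TD loss on Q(s, a, z) = <F, z>, using the implicit reward
# r(s') ~= B(s')^T Cov(B)^{-1} z recovered from the backward embeddings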
if self.cfg.q_loss:
with torch.no_grad():
next_Q1, next_Q2 = [torch.einsum('sd, sd -> s', target_Fi, z) for target_Fi in [target_F1, target_F2]]
next_Q = torch.min(next_Q1, next_Q2)
cov = torch.matmul(B.T, B) / B.shape[0]
inv_cov = torch.inverse(cov)
implicit_reward = (torch.matmul(B, inv_cov) * z).sum(dim=1) # batch_size
target_Q = implicit_reward.detach() + discount.squeeze(1) * next_Q # batch_size
Q1, Q2 = [torch.einsum('sd, sd -> s', Fi, z) for Fi in [F1, F2]]
q_loss = F.mse_loss(Q1, target_Q) + F.mse_loss(Q2, target_Q)
fb_loss += self.cfg.q_loss_coef * q_loss
# ORTHONORMALITY LOSS FOR BACKWARD EMBEDDING
Cov = torch.matmul(B, B.T)
orth_loss_diag = - 2 * Cov.diag().mean()
orth_loss_offdiag = Cov[off_diag].pow(2).mean()
orth_loss = orth_loss_offdiag + orth_loss_diag
fb_loss += self.cfg.ortho_coef * orth_loss
# Cov = torch.cov(B.T) # Vicreg loss
# var_loss = F.relu(1 - Cov.diag().clamp(1e-4, 1).sqrt()).mean() # eps avoids inf. sqrt gradient at 0
# cov_loss = 2 * torch.triu(Cov, diagonal=1).pow(2).mean() # 2x upper triangular part
# orth_loss = var_loss + cov_loss
# fb_loss += self.cfg.ortho_coef * orth_loss
if self.cfg.use_tb or self.cfg.use_wandb or self.cfg.use_hiplog:
metrics['target_M'] = target_M.mean().item()
metrics['M1'] = M1.mean().item()
metrics['F1'] = F1.mean().item()
metrics['B'] = B.mean().item()
metrics['B_norm'] = torch.norm(B, dim=-1).mean().item()
metrics['z_norm'] = torch.norm(z, dim=-1).mean().item()
metrics['fb_loss'] = fb_loss.item()
metrics['fb_diag'] = fb_diag.item()
metrics['fb_offdiag'] = fb_offdiag.item()
if self.cfg.q_loss:
metrics['q_loss'] = q_loss.item()
metrics['orth_loss'] = orth_loss.item()
metrics['orth_loss_diag'] = orth_loss_diag.item()
metrics['orth_loss_offdiag'] = orth_loss_offdiag.item()
if self.cfg.q_loss:
metrics['q_loss'] = q_loss.item()
eye_diff = torch.matmul(B.T, B) / B.shape[0] - torch.eye(B.shape[1], device=B.device)
metrics['orth_linf'] = torch.max(torch.abs(eye_diff)).item()
metrics['orth_l2'] = eye_diff.norm().item() / math.sqrt(B.shape[1])
if isinstance(self.fb_opt, torch.optim.Adam):
metrics["fb_opt_lr"] = self.fb_opt.param_groups[0]["lr"]
# optimize FB
if self.encoder_opt is not None:
self.encoder_opt.zero_grad(set_to_none=True)
self.fb_opt.zero_grad(set_to_none=True)
fb_loss.backward()
self.fb_opt.step()
if self.encoder_opt is not None:
self.encoder_opt.step()
return metrics
def update_actor(self, obs: torch.Tensor, z: torch.Tensor, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if self.cfg.boltzmann:
dist = self.actor(obs, z)
action = dist.rsample()
else:
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(obs, z, stddev)
action = dist.sample(clip=self.cfg.stddev_clip)
log_prob = dist.log_prob(action).sum(-1, keepdim=True)
F1, F2 = self.forward_net(obs, z, action)
Q1 = torch.einsum('sd, sd -> s', F1, z)
Q2 = torch.einsum('sd, sd -> s', F2, z)
if self.cfg.additional_metric:
q1_success = Q1 > Q2
Q = torch.min(Q1, Q2)
actor_loss = (self.cfg.temp * log_prob - Q).mean() if self.cfg.boltzmann else -Q.mean()
# optimize actor
self.actor_opt.zero_grad(set_to_none=True)
actor_loss.backward()
self.actor_opt.step()
if self.cfg.use_tb or self.cfg.use_wandb:
metrics['actor_loss'] = actor_loss.item()
metrics['q'] = Q.mean().item()
if self.cfg.additional_metric:
metrics['q1_success'] = q1_success.float().mean().item()
metrics['actor_logprob'] = log_prob.mean().item()
# metrics['actor_ent'] = dist.entropy().sum(dim=-1).mean().item()
return metrics
def aug_and_encode(self, obs: torch.Tensor) -> torch.Tensor:
obs = self.aug(obs)
return self.encoder(obs)
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if step % self.cfg.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
# pdb.set_trace()
obs = batch.obs
action = batch.action
discount = batch.discount
next_obs = next_goal = batch.next_obs
if self.cfg.goal_space is not None:
assert batch.next_goal is not None
next_goal = batch.next_goal
# if len(batch.meta) == 1 and batch.meta[0].shape[-1] == self.cfg.z_dim:
# z = batch.meta[0]
# invalid = torch.linalg.norm(z, dim=1) < 1e-15
# if sum(invalid):
# z[invalid, :] = self.sample_z(sum(invalid)).to(self.cfg.device)
# else:
z = self.sample_z(self.cfg.batch_size, device=self.cfg.device)
if z.shape[-1] != self.cfg.z_dim:
raise RuntimeError("There's something wrong with the logic here")
# obs = self.aug_and_encode(batch.obs)
# next_obs = self.aug_and_encode(batch.next_obs)
# if not self.cfg.update_encoder:
# obs = obs.detach()
# next_obs = next_obs.detach()
backward_input = batch.obs
future_goal = batch.future_obs
if self.cfg.goal_space is not None:
assert batch.goal is not None
backward_input = batch.goal
future_goal = batch.future_goal
perm = torch.randperm(self.cfg.batch_size)
backward_input = backward_input[perm]
if self.cfg.mix_ratio > 0:
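# with probability mix_ratio, replace the Gaussian z by a goal-derived z: either the backward
# embedding of a (permuted) batch state, or a random nonnegative mixture of batch embeddings
# when rand_weight is set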
mix_idxs: tp.Any = np.where(np.random.uniform(size=self.cfg.batch_size) < self.cfg.mix_ratio)[0]
if not self.cfg.rand_weight:
with torch.no_grad():
mix_z = self.backward_net(backward_input[mix_idxs]).detach()
else:
# generate random weight
weight = torch.rand(size=(mix_idxs.shape[0], self.cfg.batch_size)).to(self.cfg.device)
weight = F.normalize(weight, dim=1)
uniform_rdv = torch.rand(mix_idxs.shape[0], 1).to(self.cfg.device)
weight = uniform_rdv * weight
with torch.no_grad():
mix_z = torch.matmul(weight, self.backward_net(backward_input).detach())
if self.cfg.norm_z:
mix_z = math.sqrt(self.cfg.z_dim) * F.normalize(mix_z, dim=1)
z[mix_idxs] = mix_z
# hindsight replay
if self.cfg.future_ratio > 0:
assert future_goal is not None
future_idxs = np.where(np.random.uniform(size=self.cfg.batch_size) < self.cfg.future_ratio)
z[future_idxs] = self.backward_net(future_goal[future_idxs]).detach()
metrics.update(self.update_fb(obs=obs, action=action, discount=discount,
next_obs=next_obs, next_goal=next_goal, z=z, step=step))
# update actor
metrics.update(self.update_actor(obs, z, step))
# update critic target
utils.soft_update_params(self.forward_net, self.forward_target_net,
self.cfg.fb_target_tau)
utils.soft_update_params(self.backward_net, self.backward_target_net,
self.cfg.fb_target_tau)
# update inv cov
# if step % self.cfg.update_cov_every_step == 0:
# logger.info("update online cov")
# obs_list = list()
# batch_size = 0
# while batch_size < 10000:
# batch = next(replay_loader)
# batch = batch.to(self.cfg.device)
# obs_list.append(batch.next_goal if self.cfg.goal_space is not None else batch.next_obs)
# batch_size += batch.next_obs.size(0)
# obs = torch.cat(obs_list, 0)
# with torch.no_grad():
# B = self.backward_net(obs)
# self.inv_cov = torch.inverse(self.online_cov(B))
return metrics
# def update(self, replay_loader: tp.Iterator[rb.EpisodeBatch], step: int) -> tp.Dict[str, float]:
# metrics: tp.Dict[str, float] = {}
#
# if step % self.cfg.update_every_steps != 0:
# return metrics
#
# for _ in range(self.cfg.num_fb_updates):
# batch = next(replay_loader)
# batch = batch.to(self.cfg.device)
# if self.cfg.mix_ratio > 0:
# assert self.cfg.batch_size % 3 == 0
# mini_batch_size = self.cfg.batch_size // 3
# else:
# assert self.cfg.batch_size % 2 == 0
# mini_batch_size = self.cfg.batch_size // 2
# idxs = list(range(mini_batch_size))
# idxs_prime = list(range(mini_batch_size, 2 * mini_batch_size))
#
# # pdb.set_trace()
# obs = batch.obs[idxs]
# action = batch.action[idxs]
# discount = batch.discount[idxs]
# next_obs = next_goal = batch.next_obs[idxs]
# if self.cfg.goal_space is not None:
# assert batch.next_goal is not None
# next_goal = batch.next_goal[idxs]
# if len(batch.meta) == 1 and batch.meta[0].shape[-1] == self.cfg.z_dim:
# z = batch.meta[0][idxs]
# invalid = torch.linalg.norm(z, dim=1) < 1e-15
# if sum(invalid):
# z[invalid, :] = self.sample_z(sum(invalid)).to(self.cfg.device)
# else:
# z = self.sample_z(mini_batch_size).to(self.cfg.device)
# if not z.shape[-1] == self.cfg.z_dim:
# raise RuntimeError("There's something wrong with the logic here")
# # obs = self.aug_and_encode(batch.obs)
# # next_obs = self.aug_and_encode(batch.next_obs)
# # if not self.cfg.update_encoder:
# # obs = obs.detach()
# # next_obs = next_obs.detach()
#
# backward_input = batch.obs
# future_goal = batch.future_obs
# if self.cfg.goal_space is not None:
# assert batch.goal is not None
# backward_input = batch.goal
# future_goal = batch.future_goal
#
# # goal = backward_input[idxs]
# goal_prime = backward_input[idxs_prime]
#
# if self.cfg.mix_ratio > 0:
# mix_idxs: tp.Any = np.where(np.random.uniform(size=mini_batch_size) < self.cfg.mix_ratio)[0]
# part = backward_input[2 * mini_batch_size:]
# if not self.cfg.rand_weight:
# mix_z = self.backward_net(part[mix_idxs]).detach()
# else:
# # generate random weight
# weight = torch.rand(size=(mix_idxs.shape[0], mini_batch_size)).to(self.cfg.device)
# weight = F.normalize(weight, dim=1)
# uniform_rdv = torch.rand(mix_idxs.shape[0], 1).to(self.cfg.device)
# weight = uniform_rdv * weight
# mix_z = torch.matmul(weight, self.backward_net(part).detach())
# if self.cfg.norm_z:
# mix_z = math.sqrt(self.cfg.z_dim) * F.normalize(mix_z, dim=1)
# z[mix_idxs] = mix_z
#
# # hindsight replay
# if self.cfg.future_ratio > 0:
# assert future_goal is not None
# future_idxs = np.where(np.random.uniform(size=mini_batch_size) < self.cfg.future_ratio)
# future_goal = future_goal[idxs][future_idxs]
# z[future_idxs] = self.backward_net(future_goal).detach()
# goal_prime[future_idxs] = future_goal
# metrics.update(self.update_fb(obs=obs, action=action, discount=discount,
# next_obs=next_obs, next_goal=next_goal, goal_prime=goal_prime, z=z, step=step))
#
# # update actor
# metrics.update(self.update_actor(obs, z, step))
#
# # update critic target
# utils.soft_update_params(self.forward_net, self.forward_target_net,
# self.cfg.fb_target_tau)
# utils.soft_update_params(self.backward_net, self.backward_target_net,
# self.cfg.fb_target_tau)
#
# return metrics
# def update_fb(
# self,
# obs: torch.Tensor,
# action: torch.Tensor,
# discount: torch.Tensor,
# next_obs: torch.Tensor,
# next_goal: torch.Tensor,
# goal_prime: torch.Tensor,
# z: torch.Tensor,
# step: int
# ) -> tp.Dict[str, float]:
# metrics: tp.Dict[str, float] = {}
# # compute target successor measure
# with torch.no_grad():
# if self.cfg.boltzmann:
# dist = self.actor(next_obs, z)
# next_action = dist.sample()
# else:
# stddev = utils.schedule(self.cfg.stddev_schedule, step)
# dist = self.actor(next_obs, z, stddev)
# next_action = dist.sample(clip=self.cfg.stddev_clip)
# target_F1, target_F2 = self.forward_target_net(next_obs, z, next_action) # batch x z_dim
# target_B = self.backward_target_net(goal_prime) # batch x z_dim
# target_M1 = torch.einsum('sd, td -> st', target_F1, target_B) # batch x batch
# target_M2 = torch.einsum('sd, td -> st', target_F2, target_B) # batch x batch
# target_M = torch.min(target_M1, target_M2)
#
# # compute FB loss
# F1, F2 = self.forward_net(obs, z, action)
# B = self.backward_net(next_goal)
# B_prime = self.backward_net(goal_prime)
# M1_diag = torch.einsum('sd, sd -> s', F1, B) # batch
# M2_diag = torch.einsum('sd, sd -> s', F2, B) # batch
# M1 = torch.einsum('sd, td -> st', F1, B_prime) # batch x batch
# M2 = torch.einsum('sd, td -> st', F2, B_prime) # batch x batch
# fb_loss = 0.5 * (M1 - discount * target_M).pow(2).mean() - M1_diag.mean()
# fb_loss += 0.5 * (M2 - discount * target_M).pow(2).mean() - M2_diag.mean()
#
# # ORTHONORMALITY LOSS FOR BACKWARD EMBEDDING
#
# B_B_prime = torch.matmul(B, B_prime.T)
# B_diag = torch.einsum('sd, sd -> s', B, B)
# B_prime_diag = torch.einsum('sd, sd -> s', B_prime, B_prime)
# orth_loss = B_B_prime.pow(2).mean() - (B_diag.mean() + B_prime_diag.mean())
# fb_loss += self.cfg.ortho_coef * orth_loss
#
# if self.cfg.use_tb or self.cfg.use_wandb or self.cfg.use_hiplog:
# metrics['target_M'] = target_M.mean().item()
# metrics['M1'] = M1.mean().item()
# metrics['F1'] = F1.mean().item()
# metrics['B'] = B.mean().item()
# metrics['B_norm'] = torch.norm(B, dim=-1).mean().item()
# metrics['z_norm'] = torch.norm(z, dim=-1).mean().item()
# metrics['fb_loss'] = fb_loss.item()
# metrics['orth_loss'] = orth_loss.item()
# eye_diff = torch.matmul(B.T, B) / B.shape[0] - torch.eye(B.shape[1], device=B.device)
# metrics['orth_linf'] = torch.max(torch.abs(eye_diff)).item()
# metrics['orth_l2'] = eye_diff.norm().item() / math.sqrt(B.shape[1])
# if isinstance(self.fb_opt, torch.optim.Adam):
# metrics["fb_opt_lr"] = self.fb_opt.param_groups[0]["lr"]
# if self.cfg.goal_space in ["simplified_walker", "simplified_quadruped"]:
# metrics['max_velocity'] = goal_prime[:, -1].max().item()
#
# # optimize FB
# if self.encoder_opt is not None:
# self.encoder_opt.zero_grad(set_to_none=True)
# self.fb_opt.zero_grad(set_to_none=True)
# fb_loss.backward()
# self.fb_opt.step()
# if self.encoder_opt is not None:
# self.encoder_opt.step()
# return metrics
|
controllable_agent-main
|
url_benchmark/agent/fb_ddpg.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import inspect
import dataclasses
from types import ModuleType
import numpy as np
import torch
from url_benchmark import replay_buffer as rb
from url_benchmark import agent as agents
from . import fb_ddpg
from . import fb_modules
def get_cfg() -> fb_ddpg.FBDDPGAgentConfig:
# hopefully this can get simpler soon
return fb_ddpg.FBDDPGAgentConfig(
obs_shape=(4,), action_shape=(3,), obs_type="state", device="cpu", num_expl_steps=1, goal_space=None
)
def test_agent_init() -> None:
cfg = get_cfg()
agent = fb_ddpg.FBDDPGAgent(**dataclasses.asdict(cfg))
b = 12
shapes = dict(obs=(b, 4), next_obs=(b, 4), action=(b, 4), reward=(b,), discount=(b,))
iterator = (rb.EpisodeBatch(**{x: np.random.rand(*y).astype(np.float32)
for x, y in shapes.items()}) for _ in range(100)) # type: ignore
meta = agent.init_meta()
with torch.no_grad():
action = agent.act(next(iterator).obs[0], meta, 0, eval_mode=False)
assert action.shape == (3,)
def test_agents_config() -> None:
cfgs = []
for module in agents.__dict__.values():
if isinstance(module, ModuleType):
for obj in module.__dict__.values():
if inspect.isclass(obj) and issubclass(obj, agents.DDPGAgentConfig):
if obj not in cfgs:
cfgs.append(obj)
assert len(cfgs) >= 3
for cfg in cfgs:
# check that target and name have been updated to match the algo
assert cfg.name.replace("_", "") in cfg.__name__.lower()
assert cfg.name in cfg._target_
def test_multiinputs() -> None:
m, n = [10, 12]
x, y = (torch.rand([16, z]) for z in [m, n])
mip = fb_modules.MultinputNet([m, n], [100, 100, 32])
out = mip(x, y)
assert out.shape == (16, 32)
|
controllable_agent-main
|
url_benchmark/agent/test_agent.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
from collections import OrderedDict
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from url_benchmark import utils
from url_benchmark.dmc import TimeStep
from .ddpg import DDPGAgent, MetaDict
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from typing import Any, Dict, NoReturn, Tuple
"""
Reimplementation of https://github.com/RLAgent/state-marginal-matching:
- Removed redundant forward passes
- No updating p_z
- Added finetuning procedure from what's described in DIAYN
- VAE encodes and decodes from the encoding from DDPG when n > 1
as the paper does not make it clear how to include skills with pixel input
- When n=1, obs_type=pixel, remove the False from line 144
to input pixels into the vae
- TODO: when using pixel-based vae (n=1), gpu may run out of memory.
"""
class VAE(nn.Module):
def __init__(self, obs_dim, z_dim, code_dim, vae_beta, device) -> None:
super().__init__()
self.z_dim = z_dim
self.code_dim = code_dim
self.make_networks(obs_dim, z_dim, code_dim)
self.beta = vae_beta
self.apply(utils.weight_init)
self.device = device
def make_networks(self, obs_dim, z_dim, code_dim) -> None:
self.enc = nn.Sequential(nn.Linear(obs_dim + z_dim, 150), nn.ReLU(),
nn.Linear(150, 150), nn.ReLU())
self.enc_mu = nn.Linear(150, code_dim)
self.enc_logvar = nn.Linear(150, code_dim)
self.dec = nn.Sequential(nn.Linear(code_dim, 150), nn.ReLU(),
nn.Linear(150, 150), nn.ReLU(),
nn.Linear(150, obs_dim + z_dim))
def encode(self, obs_z) -> Tuple[Any, Any, Any]:
enc_features = self.enc(obs_z)
mu = self.enc_mu(enc_features)
logvar = self.enc_logvar(enc_features)
stds = (0.5 * logvar).exp()
return mu, logvar, stds
def forward(self, obs_z, epsilon) -> Tuple[Any, Tuple[Any, Any, Any]]:
mu, logvar, stds = self.encode(obs_z)
code = epsilon * stds + mu
obs_distr_params = self.dec(code)
return obs_distr_params, (mu, logvar, stds)
def loss(self, obs_z) -> Tuple[Any, Any]:
epsilon = torch.randn([obs_z.shape[0], self.code_dim]).to(self.device)
# pylint: disable=unused-variable
obs_distr_params, (mu, logvar, stds) = self(obs_z, epsilon)
kle = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(),
dim=1).mean()
log_prob = F.mse_loss(obs_z, obs_distr_params, reduction='none')
loss = self.beta * kle + log_prob.mean()
return loss, log_prob.sum(list(range(1, len(log_prob.shape)))).view(
log_prob.shape[0], 1)
class PVae(VAE):
def make_networks(self, obs_shape, z_dim, code_dim) -> None:
self.enc = nn.Sequential(nn.Conv2d(obs_shape[0], 32, 3, stride=2),
nn.ReLU(), nn.Conv2d(32, 32, 3, stride=1),
nn.ReLU(), nn.Conv2d(32, 32, 3, stride=1),
nn.ReLU(), nn.Conv2d(32, 32, 3, stride=1),
nn.ReLU(), nn.Flatten(),
nn.Linear(32 * 35 * 35, 150), nn.ReLU())
self.enc_mu = nn.Linear(150, code_dim)
self.enc_logvar = nn.Linear(150, code_dim)
self.dec = nn.Sequential(
nn.Linear(code_dim, 32 * 35 * 35), nn.ReLU(),
nn.Unflatten(dim=1, unflattened_size=(32, 35, 35)),
nn.ConvTranspose2d(32, 32, 3, stride=1), nn.ReLU(),
nn.ConvTranspose2d(32, 32, 3, stride=1), nn.ReLU(),
nn.ConvTranspose2d(32, 32, 3, stride=1), nn.ReLU(),
nn.ConvTranspose2d(32, 32, 3, stride=1), nn.ReLU(),
nn.ConvTranspose2d(32, 32, 3, stride=2), nn.ReLU(),
nn.Conv2d(32, obs_shape[0], 4, stride=1))
class SMM(nn.Module):
def __init__(self, obs_dim, z_dim, hidden_dim, vae_beta, device) -> None:
super().__init__()
self.z_dim = z_dim
self.z_pred_net = nn.Sequential(nn.Linear(obs_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, z_dim))
self.vae = VAE(obs_dim=obs_dim,
z_dim=z_dim,
code_dim=128,
vae_beta=vae_beta,
device=device)
self.apply(utils.weight_init)
def predict_logits(self, obs) -> Any:
z_pred_logits = self.z_pred_net(obs)
return z_pred_logits
def loss(self, logits, z) -> Any:
z_labels = torch.argmax(z, 1)
return nn.CrossEntropyLoss(reduction='none')(logits, z_labels)
class PSMM(nn.Module):
# pylint: disable=unused-argument
def __init__(self, obs_shape, z_dim, hidden_dim, vae_beta, device) -> None:
super().__init__()
self.z_dim = z_dim
self.vae = PVae(obs_dim=obs_shape,
z_dim=z_dim,
code_dim=128,
vae_beta=vae_beta,
device=device)
self.apply(utils.weight_init)
# discriminator not needed when n=1, as z is degenerate
def predict_logits(self, obs) -> NoReturn:
raise NotImplementedError
def loss(self, logits, z) -> NoReturn:
raise NotImplementedError
class SMMAgent(DDPGAgent):
def __init__(self, z_dim, sp_lr, vae_lr, vae_beta, state_ent_coef,
latent_ent_coef, latent_cond_ent_coef, update_encoder,
**kwargs) -> None:
self.z_dim = z_dim
self.state_ent_coef = state_ent_coef
self.latent_ent_coef = latent_ent_coef
self.latent_cond_ent_coef = latent_cond_ent_coef
self.update_encoder = update_encoder
kwargs["meta_dim"] = self.z_dim
super().__init__(**kwargs)
# self.obs_dim is now the real obs_dim (or repr_dim) + z_dim
self.smm = SMM(self.obs_dim - z_dim,
z_dim,
hidden_dim=kwargs['hidden_dim'],
vae_beta=vae_beta,
device=kwargs['device']).to(kwargs['device'])
self.pred_optimizer = torch.optim.Adam(
self.smm.z_pred_net.parameters(), lr=sp_lr)
self.vae_optimizer = torch.optim.Adam(self.smm.vae.parameters(),
lr=vae_lr)
self.smm.train()
# fine tuning SMM agent
self.ft_returns = np.zeros(z_dim, dtype=np.float32)
self.ft_not_finished = [True for z in range(z_dim)]
def init_meta(self) -> tp.Dict[str, np.ndarray]:
z = np.zeros(self.z_dim, dtype=np.float32)
z[np.random.choice(self.z_dim)] = 1.0
meta = OrderedDict()
meta['z'] = z
return meta
# pylint: disable=unused-argument
def update_meta(
self,
meta: MetaDict,
global_step: int,
time_step: TimeStep,
finetune: bool = False,
replay_loader: tp.Optional[ReplayBuffer] = None
) -> MetaDict:
# during fine-tuning, find the best skill and fine-tune that one only.
if self.reward_free:
return self.update_meta_ft(meta, global_step, time_step)
# during training, change to randomly sampled z at the end of the episode
if time_step.last():
return self.init_meta()
return meta
def update_meta_ft(self, meta: MetaDict, global_step, time_step) -> MetaDict:
z_ind: tp.Any = meta['z'].argmax()
if any(self.ft_not_finished):
self.ft_returns[z_ind] += time_step.reward
if time_step.last():
if not any(self.ft_not_finished):
# choose the best
new_z_ind: int = self.ft_returns.argmax() # type: ignore
else:
# or the next z to try
self.ft_not_finished[z_ind] = False
not_tried_z: int = sum(self.ft_not_finished)
# uniformly sample from the remaining unused z
for i in range(self.z_dim):
if self.ft_not_finished[i]:
if np.random.random() < 1 / not_tried_z:
new_z_ind = i
break
not_tried_z -= 1
new_z = np.zeros(self.z_dim, dtype=np.float32)
new_z[new_z_ind] = 1.0
meta['z'] = new_z # type: ignore
return meta
def update_vae(self, obs_z) -> Tuple[Dict[str, Any], Any]:
metrics: tp.Dict[str, float] = {}
loss, h_s_z = self.smm.vae.loss(obs_z)
self.vae_optimizer.zero_grad()
if self.encoder_opt is not None:
self.encoder_opt.zero_grad(set_to_none=True)
loss.backward()
self.vae_optimizer.step()
if self.encoder_opt is not None:
self.encoder_opt.step()
metrics['loss_vae'] = loss.cpu().item()
return metrics, h_s_z
def update_pred(self, obs, z) -> Tuple[Dict[str, Any], Any]:
metrics: tp.Dict[str, float] = {}
logits = self.smm.predict_logits(obs)
h_z_s = self.smm.loss(logits, z).unsqueeze(-1)
loss = h_z_s.mean()
self.pred_optimizer.zero_grad()
loss.backward()
self.pred_optimizer.step()
metrics['loss_pred'] = loss.cpu().item()
return metrics, h_z_s
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if step % self.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size).to(self.device)
obs, action, extr_reward, discount, next_obs = batch.unpack()
z = batch.meta["z"]
obs = self.aug_and_encode(obs)
with torch.no_grad():
next_obs = self.aug_and_encode(next_obs)
obs_z = torch.cat([obs, z], dim=1) # do not learn encoder in the VAE
next_obs_z = torch.cat([next_obs, z], dim=1)
if self.reward_free:
vae_metrics, h_s_z = self.update_vae(obs_z)
pred_metrics, h_z_s = self.update_pred(obs.detach(), z)
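# SMM intrinsic reward combines three terms: a state-entropy term from the VAE reconstruction
# error (h_s_z), a constant latent-entropy term log(z_dim) for the one-hot skill, and a
# conditional latent-entropy term from the skill discriminator (h_z_s)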
h_z = np.log(self.z_dim) # One-hot z encoding
h_z *= torch.ones_like(extr_reward).to(self.device)
pred_log_ratios = self.state_ent_coef * h_s_z.detach(
) # p^*(s) is ignored, as state space dimension is inaccessible from pixel input
intr_reward = pred_log_ratios + self.latent_ent_coef * h_z + self.latent_cond_ent_coef * h_z_s.detach(
)
reward = intr_reward
else:
reward = extr_reward
if self.use_tb or self.use_wandb:
metrics.update(vae_metrics)
metrics.update(pred_metrics)
metrics['intr_reward'] = intr_reward.mean().item()
metrics['extr_reward'] = extr_reward.mean().item()
metrics['batch_reward'] = reward.mean().item()
if not self.update_encoder:
obs_z = obs_z.detach()
next_obs_z = next_obs_z.detach()
# update critic
metrics.update(
self.update_critic(obs_z.detach(), action, reward, discount,
next_obs_z.detach(), step))
# update actor
metrics.update(self.update_actor(obs_z.detach(), step))
# update critic target
utils.soft_update_params(self.critic, self.critic_target,
self.critic_target_tau)
return metrics
|
controllable_agent-main
|
url_benchmark/agent/smm.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
import torch
from torch import nn
from url_benchmark import utils
from .ddpg import DDPGAgent
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from typing import Any, Dict, Tuple
class ICM(nn.Module):
def __init__(self, obs_dim, action_dim, hidden_dim) -> None:
super().__init__()
self.forward_net = nn.Sequential(
nn.Linear(obs_dim + action_dim, hidden_dim), nn.ReLU(),
nn.Linear(hidden_dim, obs_dim))
self.backward_net = nn.Sequential(nn.Linear(2 * obs_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, action_dim),
nn.Tanh())
self.apply(utils.weight_init)
def forward(self, obs, action, next_obs) -> Tuple[Any, Any]:
assert obs.shape[0] == next_obs.shape[0]
assert obs.shape[0] == action.shape[0]
next_obs_hat = self.forward_net(torch.cat([obs, action], dim=-1))
action_hat = self.backward_net(torch.cat([obs, next_obs], dim=-1))
forward_error = torch.norm(next_obs - next_obs_hat,
dim=-1,
p=2,
keepdim=True)
backward_error = torch.norm(action - action_hat,
dim=-1,
p=2,
keepdim=True)
return forward_error, backward_error
class ICMAgent(DDPGAgent):
def __init__(self, icm_scale, update_encoder, **kwargs) -> None:
super().__init__(**kwargs)
self.icm_scale = icm_scale
self.update_encoder = update_encoder
self.icm = ICM(self.obs_dim, self.action_dim,
self.hidden_dim).to(self.device)
# optimizers
self.icm_opt = torch.optim.Adam(self.icm.parameters(), lr=self.lr)
self.icm.train()
# pylint: disable=unused-argument
def update_icm(self, obs, action, next_obs, step) -> Dict[str, Any]:
metrics: tp.Dict[str, float] = {}
forward_error, backward_error = self.icm(obs, action, next_obs)
loss = forward_error.mean() + backward_error.mean()
self.icm_opt.zero_grad(set_to_none=True)
if self.encoder_opt is not None:
self.encoder_opt.zero_grad(set_to_none=True)
loss.backward()
self.icm_opt.step()
if self.encoder_opt is not None:
self.encoder_opt.step()
if self.use_tb or self.use_wandb:
metrics['icm_loss'] = loss.item()
return metrics
# pylint: disable=unused-argument
def compute_intr_reward(self, obs, action, next_obs, step) -> Any:
forward_error, _ = self.icm(obs, action, next_obs)
reward = forward_error * self.icm_scale
reward = torch.log(reward + 1.0)
return reward
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if step % self.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size)
obs, action, extr_reward, discount, next_obs = batch.to(self.device).unpack()
# augment and encode
obs = self.aug_and_encode(obs)
with torch.no_grad():
next_obs = self.aug_and_encode(next_obs)
if self.reward_free:
metrics.update(self.update_icm(obs, action, next_obs, step))
with torch.no_grad():
intr_reward = self.compute_intr_reward(obs, action, next_obs,
step)
if self.use_tb or self.use_wandb:
metrics['intr_reward'] = intr_reward.mean().item()
reward = intr_reward
else:
reward = extr_reward
if self.use_tb or self.use_wandb:
metrics['extr_reward'] = extr_reward.mean().item()
metrics['batch_reward'] = reward.mean().item()
if not self.update_encoder:
obs = obs.detach()
next_obs = next_obs.detach()
# update critic
metrics.update(
self.update_critic(obs.detach(), action, reward, discount,
next_obs.detach(), step))
# update actor
metrics.update(self.update_actor(obs.detach(), step))
# update critic target
utils.soft_update_params(self.critic, self.critic_target,
self.critic_target_tau)
return metrics
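# --- Illustrative sketch (not part of the original icm.py) ----------------
# compute_intr_reward turns the forward-model prediction error into a
# curiosity bonus via log(1 + icm_scale * error). Standalone version with a
# freshly initialised forward model and made-up dimensions:
import torch
from torch import nn

obs_dim, action_dim, hidden_dim, icm_scale = 6, 2, 32, 1.0  # assumed sizes
forward_net = nn.Sequential(nn.Linear(obs_dim + action_dim, hidden_dim), nn.ReLU(),
                            nn.Linear(hidden_dim, obs_dim))
obs, action, next_obs = torch.randn(5, obs_dim), torch.randn(5, action_dim), torch.randn(5, obs_dim)
next_obs_hat = forward_net(torch.cat([obs, action], dim=-1))
forward_error = torch.norm(next_obs - next_obs_hat, dim=-1, p=2, keepdim=True)
intr_reward = torch.log(1.0 + icm_scale * forward_error)  # shape (5, 1), always >= 0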
|
controllable_agent-main
|
url_benchmark/agent/icm.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pdb # pylint: disable=unused-import
import typing as tp
from collections import OrderedDict
import dataclasses
import logging
import numpy as np
import torch
from torch import nn
from hydra.core.config_store import ConfigStore
import omegaconf
from url_benchmark.dmc import TimeStep
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from url_benchmark import utils
from .fb_modules import mlp, Actor
import url_benchmark.goals as _goals
logger = logging.getLogger(__name__)
# MetaDict = tp.Mapping[str, tp.Union[np.ndarray, torch.Tensor]]
MetaDict = tp.Mapping[str, np.ndarray]
@dataclasses.dataclass
class GoalSMConfig:
# @package agent
_target_: str = "url_benchmark.agent.goal_sm.GoalSMAgent"
name: str = "goal_sm"
reward_free: bool = omegaconf.II("reward_free")
custom_reward: tp.Optional[str] = omegaconf.II("custom_reward")
obs_type: str = omegaconf.MISSING # to be specified later
obs_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
action_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
device: str = omegaconf.II("device") # ${device}
lr: float = 1e-4
critic_target_tau: float = 0.01
update_every_steps: float = 2
use_tb: bool = omegaconf.II("use_tb") # ${use_tb}
use_wandb: bool = omegaconf.II("use_wandb") # ${use_wandb}
use_hiplog: bool = omegaconf.II("use_hiplog") # ${use_hiplog}
num_expl_steps: int = omegaconf.MISSING
hidden_dim: int = 1024
feature_dim: int = 512
stddev_schedule: str = "0.2" # "linear(1,0.2,200000)"
stddev_clip: float = 0.3 # 1.0
nstep: int = 1
batch_size: int = 1024 # 256 for pixels
init_critic: bool = True
goal_space: tp.Optional[str] = omegaconf.II("goal_space")
future_ratio: float = 0
preprocess: bool = False
add_trunk: bool = False
update_meta_every_step: int = 500
cs = ConfigStore.instance()
cs.store(group="agent", name="goal_sm", node=GoalSMConfig)
class Critic(nn.Module):
""" forward representation class"""
def __init__(self, obs_dim, z_dim, action_dim, feature_dim, hidden_dim,
preprocess=False, add_trunk=True) -> None:
super().__init__()
self.obs_dim = obs_dim
self.z_dim = z_dim
self.action_dim = action_dim
self.preprocess = preprocess
if self.preprocess:
self.obs_action_net = mlp(self.obs_dim + self.action_dim, hidden_dim, "ntanh", feature_dim, "irelu")
self.obs_z_net = mlp(self.obs_dim + self.z_dim, hidden_dim, "ntanh", feature_dim, "irelu")
if not add_trunk:
self.trunk: nn.Module = nn.Identity()
feature_dim = 2 * feature_dim
else:
self.trunk = mlp(2 * feature_dim, hidden_dim, "irelu")
feature_dim = hidden_dim
else:
self.trunk = mlp(self.obs_dim + self.z_dim + self.action_dim, hidden_dim, "ntanh",
hidden_dim, "irelu",
hidden_dim, "irelu")
feature_dim = hidden_dim
seq = [feature_dim, hidden_dim, "irelu", 1]
self.F1 = mlp(*seq)
self.F2 = mlp(*seq)
self.apply(utils.weight_init)
def forward(self, obs, z, action):
assert z.shape[-1] == self.z_dim
if self.preprocess:
obs_action = self.obs_action_net(torch.cat([obs, action], dim=-1))
obs_z = self.obs_z_net(torch.cat([obs, z], dim=-1))
h = torch.cat([obs_action, obs_z], dim=-1)
else:
h = torch.cat([obs, z, action], dim=-1)
if hasattr(self, "trunk"):
h = self.trunk(h)
F1 = self.F1(h)
F2 = self.F2(h)
return F1, F2
class GoalSMAgent:
# pylint: disable=unused-argument
def __init__(self,
**kwargs: tp.Any
):
cfg = GoalSMConfig(**kwargs)
self.cfg = cfg
assert len(cfg.action_shape) == 1
self.action_dim = cfg.action_shape[0]
self.solved_meta: tp.Any = None
self.obs_dim = cfg.obs_shape[0]
if cfg.feature_dim < self.obs_dim:
logger.warning(f"feature_dim {cfg.feature_dim} should not be smaller that obs_dim {self.obs_dim}")
self.goal_dim = 0
if cfg.goal_space is not None:
if cfg.goal_space == "quad_pos_speed":
self.goal_dim = 7 # ugly hack
else:
g = next(iter(_goals.goals.funcs[cfg.goal_space].values()))()
assert len(g.shape) == 1
self.goal_dim = len(g)
self.actor = Actor(self.obs_dim, self.goal_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
self.critic: nn.Module = Critic(self.obs_dim, self.goal_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
self.critic_target: nn.Module = Critic(self.obs_dim, self.goal_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
self.critic_target.load_state_dict(self.critic.state_dict())
# optimizers
self.actor_opt = torch.optim.Adam(self.actor.parameters(), lr=cfg.lr)
self.critic_opt = torch.optim.Adam(self.critic.parameters(), lr=cfg.lr)
self.train()
self.critic_target.train()
def train(self, training: bool = True) -> None:
self.training = training
self.actor.train(training)
self.critic.train(training)
def init_from(self, other) -> None:
# copy parameters over
utils.hard_update_params(other.actor, self.actor)
utils.hard_update_params(other.critic, self.critic)
def init_meta(self, replay_loader: tp.Optional[ReplayBuffer] = None) -> MetaDict:
if replay_loader is not None:
batch = replay_loader.sample(self.cfg.batch_size)
assert batch.next_goal is not None
g = batch.next_goal[0]
else:
g = np.zeros((self.goal_dim,), dtype=np.float32)
meta = OrderedDict()
meta['g'] = g
return meta
# pylint: disable=unused-argument
def update_meta(
self,
meta: MetaDict,
global_step: int,
time_step: TimeStep,
finetune: bool = False,
replay_loader: tp.Optional[ReplayBuffer] = None
) -> MetaDict:
if global_step % self.cfg.update_meta_every_step == 0 and global_step > 1000: # skip first trajectory
return self.init_meta(replay_loader)
return meta
def get_goal_meta(self, goal_array: np.ndarray) -> MetaDict:
meta = OrderedDict()
meta['g'] = goal_array
return meta
def infer_meta(self, replay_loader: ReplayBuffer) -> MetaDict:
# Not used, only for compatibility with pretrain.eval !!!
batch = replay_loader.sample(self.cfg.batch_size)
assert batch.next_goal is not None
g = batch.next_goal[0]
return self.get_goal_meta(g)
def act(self, obs, meta, step, eval_mode) -> np.ndarray:
device = torch.device(self.cfg.device)
obs = torch.as_tensor(obs, device=device).unsqueeze(0)
goals = []
for value in meta.values():
value = torch.as_tensor(value, device=device).unsqueeze(0)
goals.append(value)
goal = torch.cat(goals, dim=-1)
#assert obs.shape[-1] == self.obs_shape[-1]
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(obs, goal, stddev)
if eval_mode:
action = dist.mean
else:
action = dist.sample(clip=None)
if step < self.cfg.num_expl_steps:
action.uniform_(-1.0, 1.0)
return action.cpu().numpy()[0]
@tp.no_type_check # TODO remove
def update_critic(self,
obs: torch.Tensor,
desired_goal: torch.Tensor,
action: torch.Tensor,
discount: torch.Tensor,
next_obs: torch.Tensor,
achieved_goal: torch.Tensor,
step: int) -> tp.Dict[str, float]:
metrics = {}
with torch.no_grad():
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(next_obs, desired_goal, stddev)
next_action = dist.sample(clip=self.cfg.stddev_clip)
target_Q1, target_Q2 = self.critic_target(next_obs, desired_goal, next_action)
target_Q = torch.min(target_Q1, target_Q2)
Q1, Q2 = self.critic(obs, desired_goal, action)
Q1_diag, Q2_diag = self.critic(obs, achieved_goal, action)
loss_offdiag: tp.Any = 0.5 * sum((Q - discount * target_Q).pow(2).mean() for Q in [Q1, Q2])
loss_diag: tp.Any = -sum(Q.diag().mean() for Q in [Q1_diag, Q2_diag])
critic_loss = loss_offdiag + loss_diag
if self.cfg.use_tb or self.cfg.use_wandb:
metrics['critic_target_q'] = target_Q.mean().item()
metrics['critic_q1'] = Q1.mean().item()
metrics['critic_q2'] = Q2.mean().item()
metrics['critic_loss'] = critic_loss.item()
metrics['stdev'] = stddev
# optimize critic
self.critic_opt.zero_grad(set_to_none=True)
critic_loss.backward()
self.critic_opt.step()
return metrics
@tp.no_type_check # TODO remove
def update_actor(self,
obs: torch.Tensor,
goal: torch.Tensor,
step: int) -> tp.Dict[str, float]:
metrics = {}
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(obs, goal, stddev)
action = dist.sample(clip=self.cfg.stddev_clip)
log_prob = dist.log_prob(action).sum(-1, keepdim=True)
Q1, Q2 = self.critic(obs, goal, action)
Q = torch.min(Q1, Q2)
actor_loss = -Q.mean()
# optimize actor
self.actor_opt.zero_grad(set_to_none=True)
actor_loss.backward()
self.actor_opt.step()
if self.cfg.use_tb or self.cfg.use_wandb:
metrics['actor_loss'] = actor_loss.item()
metrics['actor_logprob'] = log_prob.mean().item()
metrics['actor_ent'] = dist.entropy().sum(dim=-1).mean().item()
return metrics
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
#import ipdb; ipdb.set_trace()
if step % self.cfg.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
achieved_goal = batch.next_goal
future_goal = batch.future_obs
if self.cfg.goal_space:
future_goal = batch.future_goal
obs = batch.obs
action = batch.action
discount = batch.discount
next_obs = batch.next_obs
desired_goal = batch.meta["g"]
# sample goal from replay
# new_batch = next(replay_loader)
# new_batch = new_batch.to(self.cfg.device)
# desired_goal = new_batch.next_goal # type: ignore
# perm = torch.randperm(self.cfg.batch_size)
# desired_goal = achieved_goal[perm]
if self.cfg.future_ratio > 0:
assert future_goal is not None
future_idxs = np.where(np.random.uniform(size=self.cfg.batch_size) < self.cfg.future_ratio)[0]
desired_goal[future_idxs] = future_goal[future_idxs] # type: ignore
# update critic
metrics.update(
self.update_critic(obs=obs, desired_goal=desired_goal, action=action,
discount=discount, next_obs=next_obs, achieved_goal=achieved_goal, step=step))
# update actor
metrics.update(self.update_actor(obs=obs, goal=desired_goal, step=step))
# update critic target
utils.soft_update_params(self.critic, self.critic_target,
self.cfg.critic_target_tau)
return metrics
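# --- Illustrative sketch (not part of the original goal_sm.py) ------------
# update() relabels a fraction `future_ratio` of the desired goals with goals
# actually reached later in the same trajectory (HER-style relabelling). The
# standalone snippet below shows the same indexing on dummy arrays; the sizes
# and ratio are arbitrary.
import numpy as np

batch_size, goal_dim, future_ratio = 6, 3, 0.5  # assumed values
desired_goal = np.zeros((batch_size, goal_dim), dtype=np.float32)
future_goal = np.ones((batch_size, goal_dim), dtype=np.float32)
future_idxs = np.where(np.random.uniform(size=batch_size) < future_ratio)[0]
desired_goal[future_idxs] = future_goal[future_idxs]  # relabel only the sampled indices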
|
controllable_agent-main
|
url_benchmark/agent/goal_sm.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pdb # pylint: disable=unused-import
import typing as tp
import dataclasses
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from collections import OrderedDict
from url_benchmark import utils
from hydra.core.config_store import ConfigStore
import omegaconf
from url_benchmark.dmc import TimeStep
from .ddpg import DDPGAgent, MetaDict, DDPGAgentConfig
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from typing import Any, Dict, Tuple
# TODO(HL): how to include GPI for continuous domain?
@dataclasses.dataclass
class APSAgentConfig(DDPGAgentConfig):
_target_: str = "url_benchmark.agent.aps.APSAgent"
name: str = "aps"
update_encoder: bool = omegaconf.II("update_encoder")
sf_dim: int = 10
update_task_every_step: int = 5
knn_rms: bool = True
knn_k: int = 12
knn_avg: bool = True
knn_clip: float = 0.0001
num_init_steps: int = 4096 # set to ${num_train_frames} to disable finetuning of the policy parameters
lstsq_batch_size: int = 4096
num_inference_steps: int = 10000
cs = ConfigStore.instance()
cs.store(group="agent", name="aps", node=APSAgentConfig)
class CriticSF(nn.Module):
def __init__(self, obs_type, obs_dim, action_dim, feature_dim, hidden_dim,
sf_dim) -> None:
super().__init__()
self.obs_type = obs_type
if obs_type == 'pixels':
# for pixels actions will be added after trunk
self.trunk = nn.Sequential(nn.Linear(obs_dim, feature_dim),
nn.LayerNorm(feature_dim), nn.Tanh())
trunk_dim = feature_dim + action_dim
else:
# for states actions come in the beginning
self.trunk = nn.Sequential(
nn.Linear(obs_dim + action_dim, hidden_dim),
nn.LayerNorm(hidden_dim), nn.Tanh())
trunk_dim = hidden_dim
def make_q():
q_layers = []
q_layers += [
nn.Linear(trunk_dim, hidden_dim),
nn.ReLU(inplace=True)
]
if obs_type == 'pixels':
q_layers += [
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(inplace=True)
]
q_layers += [nn.Linear(hidden_dim, sf_dim)]
return nn.Sequential(*q_layers)
self.Q1 = make_q()
self.Q2 = make_q()
self.apply(utils.weight_init)
def forward(self, obs, action, task) -> Tuple[Any, Any]:
inpt = obs if self.obs_type == 'pixels' else torch.cat([obs, action], dim=-1)
h = self.trunk(inpt)
h = torch.cat([h, action], dim=-1) if self.obs_type == 'pixels' else h
q1 = self.Q1(h)
q2 = self.Q2(h)
q1 = torch.einsum("bi,bi->b", task, q1).reshape(-1, 1)
q2 = torch.einsum("bi,bi->b", task, q2).reshape(-1, 1)
return q1, q2
class APS(nn.Module):
def __init__(self, obs_dim, sf_dim, hidden_dim) -> None:
super().__init__()
self.state_feat_net = nn.Sequential(nn.Linear(obs_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, sf_dim))
self.apply(utils.weight_init)
def forward(self, obs, norm=True) -> Any:
state_feat = self.state_feat_net(obs)
state_feat = F.normalize(state_feat, dim=-1) if norm else state_feat
return state_feat
class APSAgent(DDPGAgent):
def __init__(self, **kwargs: tp.Any) -> None:
cfg = APSAgentConfig(**kwargs)
# create actor and critic
# increase obs shape to include task dim (through meta_dim)
super().__init__(**kwargs, meta_dim=cfg.sf_dim)
self.cfg: APSAgentConfig = cfg # override base ddpg cfg type
# overwrite critic with critic sf
self.critic = CriticSF(cfg.obs_type, self.obs_dim, self.action_dim,
self.feature_dim, self.hidden_dim,
self.sf_dim).to(self.device)
self.critic_target = CriticSF(self.obs_type, self.obs_dim,
self.action_dim, self.feature_dim,
self.hidden_dim,
self.sf_dim).to(self.device)
self.critic_target.load_state_dict(self.critic.state_dict())
self.critic_opt = torch.optim.Adam(self.critic.parameters(),
lr=self.lr)
self.aps = APS(self.obs_dim - self.sf_dim, self.sf_dim,
kwargs['hidden_dim']).to(kwargs['device'])
# particle-based entropy
rms = utils.RMS(self.device)
self.pbe = utils.PBE(rms, cfg.knn_clip, cfg.knn_k, cfg.knn_avg, cfg.knn_rms,
cfg.device)
# optimizers
self.aps_opt = torch.optim.Adam(self.aps.parameters(), lr=self.lr)
self.train()
self.critic_target.train()
self.aps.train()
def init_meta(self) -> tp.Dict[str, np.ndarray]:
if self.solved_meta is not None:
return self.solved_meta
task = torch.randn(self.sf_dim)
task = task / torch.norm(task)
task_array = task.cpu().numpy()
meta = OrderedDict()
meta['task'] = task_array
return meta
# pylint: disable=unused-argument
def update_meta(
self,
meta: MetaDict,
global_step: int,
time_step: TimeStep,
finetune: bool = False,
replay_loader: tp.Optional[ReplayBuffer] = None
) -> MetaDict:
if global_step % self.update_task_every_step == 0:
return self.init_meta()
return meta
def update_aps(self, task, next_obs, step) -> Dict[str, Any]:
metrics: tp.Dict[str, float] = {}
loss = self.compute_aps_loss(next_obs, task)
self.aps_opt.zero_grad(set_to_none=True)
if self.encoder_opt is not None:
self.encoder_opt.zero_grad(set_to_none=True)
loss.backward()
self.aps_opt.step()
if self.encoder_opt is not None:
self.encoder_opt.step()
if self.use_tb or self.use_wandb:
metrics['aps_loss'] = loss.item()
return metrics
def compute_intr_reward(self, task, next_obs, step) -> Tuple[Any, Any]:
# maxent reward
with torch.no_grad():
rep = self.aps(next_obs, norm=False)
reward = self.pbe(rep)
intr_ent_reward = reward.reshape(-1, 1)
# successor feature reward
rep = rep / torch.norm(rep, dim=1, keepdim=True)
intr_sf_reward = torch.einsum("bi,bi->b", task, rep).reshape(-1, 1)
return intr_ent_reward, intr_sf_reward
def compute_aps_loss(self, next_obs, task) -> Any:
"""MLE loss"""
loss = -torch.einsum("bi,bi->b", task, self.aps(next_obs)).mean()
return loss
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if step % self.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size).to(self.device)
obs, action, extr_reward, discount, next_obs = batch.unpack()
task = batch.meta["task"]
# augment and encode
obs = self.aug_and_encode(obs)
next_obs = self.aug_and_encode(next_obs)
if self.reward_free:
# freeze successor features at finetuning phase
metrics.update(self.update_aps(task, next_obs, step))
with torch.no_grad():
intr_ent_reward, intr_sf_reward = self.compute_intr_reward(
task, next_obs, step)
intr_reward = intr_ent_reward + intr_sf_reward
if self.use_tb or self.use_wandb:
metrics['intr_reward'] = intr_reward.mean().item()
metrics['intr_ent_reward'] = intr_ent_reward.mean().item()
metrics['intr_sf_reward'] = intr_sf_reward.mean().item()
reward = intr_reward
else:
reward = extr_reward
if self.use_tb or self.use_wandb:
metrics['extr_reward'] = extr_reward.mean().item()
metrics['batch_reward'] = reward.mean().item()
if not self.update_encoder:
obs = obs.detach()
next_obs = next_obs.detach()
# extend observations with task
obs = torch.cat([obs, task], dim=1)
next_obs = torch.cat([next_obs, task], dim=1)
# update critic
metrics.update(
self.update_critic(obs.detach(), action, reward, discount,
next_obs.detach(), task, step))
# update actor
metrics.update(self.update_actor(obs.detach(), task, step))
# update critic target
utils.soft_update_params(self.critic, self.critic_target,
self.critic_target_tau)
return metrics
@torch.no_grad()
def regress_meta(self, replay_loader, step):
obs, reward = [], []
batch_size = 0
while batch_size < self.lstsq_batch_size:
batch = replay_loader.sample(self.cfg.batch_size)
batch_obs, _, batch_reward, *_ = utils.to_torch(batch, self.device)
obs.append(batch_obs)
reward.append(batch_reward)
batch_size += batch_obs.size(0)
obs, reward = torch.cat(obs, 0), torch.cat(reward, 0)
obs = self.aug_and_encode(obs)
rep = self.aps(obs)
task = torch.linalg.lstsq(reward, rep)[0][:rep.size(1), :][0]
task = task / torch.norm(task)
task = task.cpu().numpy()
meta = OrderedDict()
meta['task'] = task
# save for evaluation
self.solved_meta = meta
return meta
@torch.no_grad()
def infer_meta(self, replay_loader: ReplayBuffer) -> MetaDict:
obs_list, reward_list = [], []
batch_size = 0
while batch_size < self.cfg.num_inference_steps:
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
obs_list.append(batch.next_obs)
reward_list.append(batch.reward)
batch_size += batch.next_obs.size(0)
obs, reward = torch.cat(obs_list, 0), torch.cat(reward_list, 0) # type: ignore
obs, reward = obs[:self.cfg.num_inference_steps], reward[:self.cfg.num_inference_steps]
return self.infer_meta_from_obs_and_rewards(obs, reward)
@torch.no_grad()
def infer_meta_from_obs_and_rewards(self, obs: torch.Tensor, reward: torch.Tensor) -> MetaDict:
print('max reward: ', reward.max().cpu().item())
print('99 percentile: ', torch.quantile(reward, 0.99).cpu().item())
print('median reward: ', reward.median().cpu().item())
print('min reward: ', reward.min().cpu().item())
print('mean reward: ', reward.mean().cpu().item())
print('num reward: ', reward.shape[0])
rep = self.aps(obs)
# task = torch.linalg.lstsq(reward, rep)[0][:rep.size(1), :][0]
task = torch.linalg.lstsq(rep, reward)[0].squeeze()
task = task / torch.norm(task)
task = task.cpu().numpy()
meta = OrderedDict()
meta['task'] = task
# self.solved_meta = meta
return meta
def update_critic(self, obs, action, reward, discount, next_obs, task,
step) -> Dict[str, Any]:
"""diff is critic takes task as input"""
metrics: tp.Dict[str, float] = {}
with torch.no_grad():
stddev = utils.schedule(self.stddev_schedule, step)
dist = self.actor(next_obs, stddev)
next_action = dist.sample(clip=self.stddev_clip)
target_Q1, target_Q2 = self.critic_target(next_obs, next_action,
task)
target_V = torch.min(target_Q1, target_Q2)
target_Q = reward + (discount * target_V)
Q1, Q2 = self.critic(obs, action, task)
critic_loss = F.mse_loss(Q1, target_Q) + F.mse_loss(Q2, target_Q)
if self.use_tb or self.use_wandb:
metrics['critic_target_q'] = target_Q.mean().item()
metrics['critic_q1'] = Q1.mean().item()
metrics['critic_q2'] = Q2.mean().item()
metrics['critic_loss'] = critic_loss.item()
# optimize critic
self.critic_opt.zero_grad(set_to_none=True)
critic_loss.backward()
self.critic_opt.step()
return metrics
def update_actor(self, obs, task, step) -> Dict[str, Any]:
"""diff is critic takes task as input"""
metrics: tp.Dict[str, float] = {}
stddev = utils.schedule(self.stddev_schedule, step)
dist = self.actor(obs, stddev)
action = dist.sample(clip=self.stddev_clip)
log_prob = dist.log_prob(action).sum(-1, keepdim=True)
Q1, Q2 = self.critic(obs, action, task)
Q = torch.min(Q1, Q2)
actor_loss = -Q.mean()
# optimize actor
self.actor_opt.zero_grad(set_to_none=True)
actor_loss.backward()
self.actor_opt.step()
if self.use_tb or self.use_wandb:
metrics['actor_loss'] = actor_loss.item()
metrics['actor_logprob'] = log_prob.mean().item()
metrics['actor_ent'] = dist.entropy().sum(dim=-1).mean().item()
return metrics
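# --- Illustrative sketch (not part of the original aps.py) ----------------
# infer_meta_from_obs_and_rewards solves reward ~= aps(obs) @ task by least
# squares and renormalises the solution. Standalone version on synthetic data
# (random features stand in for the learned successor features):
import torch
import torch.nn.functional as F

n, sf_dim = 256, 10
phi = torch.randn(n, sf_dim)                        # stands in for self.aps(obs)
true_task = F.normalize(torch.randn(sf_dim), dim=0)
reward = phi @ true_task.unsqueeze(1)               # noiseless rewards for the demo
task = torch.linalg.lstsq(phi, reward).solution.squeeze()
task = task / torch.norm(task)
assert torch.allclose(task, true_task, atol=1e-3)   # recovers the task direction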
|
controllable_agent-main
|
url_benchmark/agent/aps.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .ddpg import DDPGAgent as DDPGAgent
from .ddpg import DDPGAgentConfig as DDPGAgentConfig
from .fb_ddpg import FBDDPGAgent as FBDDPGAgent
from .aps import APSAgent as APSAgent
from .ddpg import MetaDict as MetaDict
# register agents for hydra
from .sf import SFAgent
from .goal_td3 import GoalTD3Agent
from .discrete_sf import DiscreteSFAgent
from .rnd import RNDAgent
from .diayn import DIAYNAgent
from .aps import APSAgent
from .proto import ProtoAgent
from .icm_apt import ICMAPTAgent
from .sf_svd import SFSVDAgent
from .new_aps import NEWAPSAgent
from .goal_sm import GoalSMAgent
from .max_ent import MaxEntAgent
from .uvf import UVFAgent
from .discrete_fb import DiscreteFBAgent
|
controllable_agent-main
|
url_benchmark/agent/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=unused-import
import pdb
import typing as tp
import torch
from url_benchmark import utils
from .ddpg import DDPGAgent
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
import dataclasses
from url_benchmark.agent.ddpg import DDPGAgentConfig
from hydra.core.config_store import ConfigStore
import omegaconf
@dataclasses.dataclass
class MaxEntAgentConfig(DDPGAgentConfig):
_target_: str = "url_benchmark.agent.max_ent.MaxEntAgent"
name: str = "max_ent"
knn_rms: bool = True
knn_k: int = 12
knn_avg: bool = True
knn_clip: float = 0.0001
goal_space: tp.Optional[str] = omegaconf.II("goal_space")
cs = ConfigStore.instance()
cs.store(group="agent", name="max_ent", node=MaxEntAgentConfig)
class MaxEntAgent(DDPGAgent):
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
cfg = MaxEntAgentConfig(**kwargs)
self.cfg = cfg
# particle-based entropy
rms = utils.RMS(self.cfg.device)
self.pbe = utils.PBE(rms, self.cfg.knn_clip, self.cfg.knn_k, self.cfg.knn_avg, cfg.knn_rms,
self.cfg.device)
def compute_intr_reward(self, goal: torch.Tensor, step: int) -> torch.Tensor:
reward = self.pbe(goal)
intr_ent_reward = reward.reshape(-1, 1)
return intr_ent_reward
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if step % self.cfg.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
obs = batch.obs
action = batch.action
discount = batch.discount
next_goal = next_obs = batch.next_obs
if self.cfg.goal_space is not None: # type: ignore
assert batch.next_goal is not None
next_goal = batch.next_goal
with torch.no_grad():
reward = self.compute_intr_reward(goal=next_goal, step=step)
if self.use_tb or self.use_wandb:
metrics['intr_reward'] = reward.mean().item()
# update critic
metrics.update(
self.update_critic(obs, action, reward, discount,
next_obs, step))
# update actor
metrics.update(self.update_actor(obs, step))
# update critic target
utils.soft_update_params(self.critic, self.critic_target,
self.critic_target_tau)
return metrics
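# --- Illustrative sketch (not part of the original max_ent.py) ------------
# The particle-based entropy bonus (utils.PBE) rewards goals whose k-th
# nearest neighbour in the batch is far away. A minimal kNN proxy that
# ignores the running-mean-std rescaling and clipping options of the real
# implementation (assumed behaviour, for intuition only):
import torch

knn_k = 3
goal = torch.randn(32, 4)                                       # batch of goal vectors
dists = torch.cdist(goal, goal)                                 # pairwise Euclidean distances
knn_dist = dists.topk(knn_k + 1, largest=False).values[:, -1]   # k-th neighbour (skip self-distance)
intr_reward = torch.log(1.0 + knn_dist).reshape(-1, 1)          # larger when states are more spread out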
|
controllable_agent-main
|
url_benchmark/agent/max_ent.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
import torch
from torch import nn
from url_benchmark import utils
from .ddpg import DDPGAgent
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from typing import Any, Dict
class Disagreement(nn.Module):
def __init__(self, obs_dim, action_dim, hidden_dim, n_models=5) -> None:
super().__init__()
self.ensemble = nn.ModuleList([
nn.Sequential(nn.Linear(obs_dim + action_dim, hidden_dim),
nn.ReLU(), nn.Linear(hidden_dim, obs_dim))
for _ in range(n_models)
])
def forward(self, obs, action, next_obs) -> Any:
#import ipdb; ipdb.set_trace()
assert obs.shape[0] == next_obs.shape[0]
assert obs.shape[0] == action.shape[0]
errors = []
for model in self.ensemble:
next_obs_hat = model(torch.cat([obs, action], dim=-1))
model_error = torch.norm(next_obs - next_obs_hat,
dim=-1,
p=2,
keepdim=True)
errors.append(model_error)
return torch.cat(errors, dim=1)
def get_disagreement(self, obs, action, next_obs) -> Any:
assert obs.shape[0] == next_obs.shape[0]
assert obs.shape[0] == action.shape[0]
preds = []
for model in self.ensemble:
next_obs_hat = model(torch.cat([obs, action], dim=-1))
preds.append(next_obs_hat)
preds_tensor = torch.stack(preds, dim=0)
return torch.var(preds_tensor, dim=0).mean(dim=-1)
class DisagreementAgent(DDPGAgent):
def __init__(self, update_encoder, **kwargs) -> None:
super().__init__(**kwargs)
self.update_encoder = update_encoder
self.disagreement = Disagreement(self.obs_dim, self.action_dim,
self.hidden_dim).to(self.device)
# optimizers
self.disagreement_opt = torch.optim.Adam(
self.disagreement.parameters(), lr=self.lr)
self.disagreement.train()
def update_disagreement(self, obs, action, next_obs, step) -> Dict[str, Any]:
metrics: tp.Dict[str, float] = {}
error = self.disagreement(obs, action, next_obs)
loss = error.mean()
self.disagreement_opt.zero_grad(set_to_none=True)
if self.encoder_opt is not None:
self.encoder_opt.zero_grad(set_to_none=True)
loss.backward()
self.disagreement_opt.step()
if self.encoder_opt is not None:
self.encoder_opt.step()
if self.use_tb or self.use_wandb:
metrics['disagreement_loss'] = loss.item()
return metrics
def compute_intr_reward(self, obs, action, next_obs, step) -> Any:
reward = self.disagreement.get_disagreement(obs, action,
next_obs).unsqueeze(1)
return reward
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if step % self.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size)
obs, action, extr_reward, discount, next_obs = batch.to(self.device).unpack()
# augment and encode
obs = self.aug_and_encode(obs)
with torch.no_grad():
next_obs = self.aug_and_encode(next_obs)
if self.reward_free:
metrics.update(
self.update_disagreement(obs, action, next_obs, step))
with torch.no_grad():
intr_reward = self.compute_intr_reward(obs, action, next_obs,
step)
if self.use_tb or self.use_wandb:
metrics['intr_reward'] = intr_reward.mean().item()
reward = intr_reward
else:
reward = extr_reward
if self.use_tb or self.use_wandb:
metrics['extr_reward'] = extr_reward.mean().item()
metrics['batch_reward'] = reward.mean().item()
if not self.update_encoder:
obs = obs.detach()
next_obs = next_obs.detach()
# update critic
metrics.update(
self.update_critic(obs.detach(), action, reward, discount,
next_obs.detach(), step))
# update actor
metrics.update(self.update_actor(obs.detach(), step))
# update critic target
utils.soft_update_params(self.critic, self.critic_target,
self.critic_target_tau)
return metrics
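# --- Illustrative sketch (not part of the original disagreement.py) -------
# The intrinsic reward is the variance of next-state predictions across the
# ensemble, averaged over state dimensions (see get_disagreement above).
# Standalone version with an untrained two-model ensemble on dummy data:
import torch
from torch import nn

obs_dim, action_dim, hidden_dim = 4, 2, 16  # assumed sizes
ensemble = nn.ModuleList([
    nn.Sequential(nn.Linear(obs_dim + action_dim, hidden_dim), nn.ReLU(),
                  nn.Linear(hidden_dim, obs_dim))
    for _ in range(2)
])
obs, action = torch.randn(8, obs_dim), torch.randn(8, action_dim)
preds = torch.stack([model(torch.cat([obs, action], dim=-1)) for model in ensemble], dim=0)
intr_reward = torch.var(preds, dim=0).mean(dim=-1, keepdim=True)  # high where the models disagree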
|
controllable_agent-main
|
url_benchmark/agent/disagreement.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pdb # pylint: disable=unused-import
import typing as tp
from typing import Any, Dict
import copy
import dataclasses
import torch
from torch import nn
from hydra.core.config_store import ConfigStore
import omegaconf
from url_benchmark import utils
from .ddpg import DDPGAgent, DDPGAgentConfig
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
import url_benchmark.goals as _goals
@dataclasses.dataclass
class RNDAgentConfig(DDPGAgentConfig):
_target_: str = "url_benchmark.agent.rnd.RNDAgent"
name: str = "rnd"
rnd_rep_dim: int = 512
rnd_scale: float = 1.0
update_encoder: bool = omegaconf.II("update_encoder")
goal_space: tp.Optional[str] = omegaconf.II("goal_space")
cs = ConfigStore.instance()
cs.store(group="agent", name="rnd", node=RNDAgentConfig)
class RND(nn.Module):
def __init__(self,
obs_dim,
hidden_dim,
rnd_rep_dim,
encoder,
aug,
obs_shape,
obs_type,
clip_val=5.) -> None:
super().__init__()
self.clip_val = clip_val
self.aug = aug
if obs_type == "pixels":
self.normalize_obs: nn.Module = nn.BatchNorm2d(obs_shape[0], affine=False)
else:
self.normalize_obs = nn.BatchNorm1d(obs_shape[0], affine=False)
self.predictor = nn.Sequential(encoder, nn.Linear(obs_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, rnd_rep_dim))
self.target = nn.Sequential(copy.deepcopy(encoder),
nn.Linear(obs_dim, hidden_dim), nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, rnd_rep_dim))
for param in self.target.parameters():
param.requires_grad = False
self.apply(utils.weight_init)
def forward(self, obs) -> Any:
obs = self.aug(obs)
obs = self.normalize_obs(obs)
obs = torch.clamp(obs, -self.clip_val, self.clip_val)
prediction, target = self.predictor(obs), self.target(obs)
prediction_error = torch.square(target.detach() - prediction).mean(
dim=-1, keepdim=True)
return prediction_error
class RNDAgent(DDPGAgent):
def __init__(self, **kwargs: tp.Any) -> None:
super().__init__(**kwargs)
cfg = RNDAgentConfig(**kwargs)
self.cfg = cfg
goal_dim = self.obs_dim
if self.cfg.goal_space is not None:
goal_dim = _goals.get_goal_space_dim(self.cfg.goal_space)
self.rnd = RND(goal_dim, cfg.hidden_dim, cfg.rnd_rep_dim,
self.encoder, self.aug, (goal_dim, ),
cfg.obs_type).to(self.device)
self.intrinsic_reward_rms = utils.RMS(device=self.device)
# optimizers
self.rnd_opt = torch.optim.Adam(self.rnd.parameters(), lr=self.lr)
self.rnd.train()
# pylint: disable=unused-argument
def update_rnd(self, obs, step) -> Dict[str, Any]:
metrics: tp.Dict[str, float] = {}
prediction_error = self.rnd(obs)
loss = prediction_error.mean()
self.rnd_opt.zero_grad(set_to_none=True)
if self.encoder_opt is not None:
self.encoder_opt.zero_grad(set_to_none=True)
loss.backward()
self.rnd_opt.step()
if self.encoder_opt is not None:
self.encoder_opt.step()
if self.use_tb or self.use_wandb:
metrics['rnd_loss'] = loss.item()
return metrics
def compute_intr_reward(self, obs, step) -> Any:
prediction_error = self.rnd(obs)
_, intr_reward_var = self.intrinsic_reward_rms(prediction_error)
reward = self.rnd_scale * prediction_error / (
torch.sqrt(intr_reward_var) + 1e-8)
return reward
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if step % self.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
goal = obs = batch.obs
if self.cfg.goal_space is not None: # type: ignore
assert batch.goal is not None
goal = batch.goal
action = batch.action
extr_reward = batch.reward
discount = batch.discount
next_obs = batch.next_obs
# update RND first
if self.reward_free:
# note: one difference is that the RND module is updated off policy
metrics.update(self.update_rnd(goal, step))
with torch.no_grad():
intr_reward = self.compute_intr_reward(goal, step)
if self.use_tb or self.use_wandb:
metrics['intr_reward'] = intr_reward.mean().item()
reward = intr_reward
else:
reward = extr_reward
# augment and encode
obs = self.aug_and_encode(obs)
with torch.no_grad():
next_obs = self.aug_and_encode(next_obs)
if self.use_tb or self.use_wandb:
metrics['extr_reward'] = extr_reward.mean().item()
metrics['batch_reward'] = reward.mean().item()
metrics['pred_error_mean'] = self.intrinsic_reward_rms.M.item()
metrics['pred_error_std'] = torch.sqrt(self.intrinsic_reward_rms.S).item()
if not self.update_encoder:
obs = obs.detach()
next_obs = next_obs.detach()
# update critic
metrics.update(
self.update_critic(obs.detach(), action, reward, discount,
next_obs.detach(), step))
# update actor
metrics.update(self.update_actor(obs.detach(), step))
# update critic target
utils.soft_update_params(self.critic, self.critic_target,
self.critic_target_tau)
return metrics
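# --- Illustrative sketch (not part of the original rnd.py) ----------------
# RND rewards observations where a trained predictor fails to match a frozen,
# randomly initialised target network. Stripped-down version without the
# encoder, augmentation, batch norm and running-variance normalisation used
# by the agent above:
import torch
from torch import nn

obs_dim, hidden_dim, rep_dim = 6, 32, 8  # assumed sizes

def make_net() -> nn.Module:
    return nn.Sequential(nn.Linear(obs_dim, hidden_dim), nn.ReLU(), nn.Linear(hidden_dim, rep_dim))

predictor, target = make_net(), make_net()  # independently initialised
for p in target.parameters():               # the target stays fixed
    p.requires_grad = False
obs = torch.randn(16, obs_dim)
pred_error = torch.square(target(obs).detach() - predictor(obs)).mean(dim=-1, keepdim=True)
intr_reward = pred_error                    # the agent additionally divides by a running std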
|
controllable_agent-main
|
url_benchmark/agent/rnd.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
import dataclasses
from typing import Any, Tuple
from collections import OrderedDict
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from hydra.core.config_store import ConfigStore
import omegaconf
from url_benchmark.dmc import TimeStep
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from url_benchmark import utils
from .fb_modules import mlp
# MetaDict = tp.Mapping[str, tp.Union[np.ndarray, torch.Tensor]]
MetaDict = tp.Mapping[str, np.ndarray]
@dataclasses.dataclass
class DDPGAgentConfig:
_target_: str = "url_benchmark.agent.ddpg.DDPGAgent"
name: str = "ddpg"
reward_free: bool = omegaconf.II("reward_free")
obs_type: str = omegaconf.MISSING # to be specified later
obs_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
action_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
device: str = omegaconf.II("device")
lr: float = 1e-4
critic_target_tau: float = 0.01
update_every_steps: int = 2
use_tb: bool = omegaconf.II("use_tb")
use_wandb: bool = omegaconf.II("use_wandb")
num_expl_steps: int = omegaconf.MISSING # to be specified later
hidden_dim: int = 1024
feature_dim: int = 50
stddev_schedule: float = 0.2
stddev_clip: float = 0.3
nstep: int = 3
batch_size: int = 1024 # 256 for pixels
init_critic: bool = True
# update_encoder: ${update_encoder} # not in the config
cs = ConfigStore.instance()
cs.store(group="agent", name="ddpg", node=DDPGAgentConfig)
class Encoder(nn.Module):
def __init__(self, obs_shape) -> None:
super().__init__()
assert len(obs_shape) == 3
self.repr_dim = 32 * 35 * 35
self.convnet = nn.Sequential(nn.Conv2d(obs_shape[0], 32, 3, stride=2),
nn.ReLU(), nn.Conv2d(32, 32, 3, stride=1),
nn.ReLU(), nn.Conv2d(32, 32, 3, stride=1),
nn.ReLU(), nn.Conv2d(32, 32, 3, stride=1),
nn.ReLU())
self.apply(utils.weight_init)
def forward(self, obs) -> Any:
obs = obs / 255.0 - 0.5
h = self.convnet(obs)
h = h.view(h.shape[0], -1)
return h
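# --- Illustrative sketch (not part of the original ddpg.py) ---------------
# repr_dim = 32 * 35 * 35 corresponds to 84x84 input frames: the stride-2
# 3x3 conv maps 84 -> 41, and each stride-1 3x3 conv removes two pixels
# (41 -> 39 -> 37 -> 35), with 32 output channels. Standalone shape check
# with an assumed 9-channel observation (three stacked RGB frames):
import torch
from torch import nn

convnet = nn.Sequential(nn.Conv2d(9, 32, 3, stride=2), nn.ReLU(),
                        nn.Conv2d(32, 32, 3, stride=1), nn.ReLU(),
                        nn.Conv2d(32, 32, 3, stride=1), nn.ReLU(),
                        nn.Conv2d(32, 32, 3, stride=1), nn.ReLU())
h = convnet(torch.zeros(1, 9, 84, 84))
assert h.flatten(1).shape[-1] == 32 * 35 * 35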
class Actor(nn.Module):
def __init__(self, obs_type, obs_dim, action_dim, feature_dim, hidden_dim) -> None:
super().__init__()
feature_dim = feature_dim if obs_type == 'pixels' else hidden_dim
self.trunk = nn.Sequential(nn.Linear(obs_dim, feature_dim),
nn.LayerNorm(feature_dim), nn.Tanh())
policy_layers = []
policy_layers += [
nn.Linear(feature_dim, hidden_dim),
nn.ReLU(inplace=True)
]
# add additional hidden layer for pixels
if obs_type == 'pixels':
policy_layers += [
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(inplace=True)
]
policy_layers += [nn.Linear(hidden_dim, action_dim)]
self.policy = nn.Sequential(*policy_layers)
self.apply(utils.weight_init)
def forward(self, obs, std) -> utils.TruncatedNormal:
h = self.trunk(obs)
mu = self.policy(h)
mu = torch.tanh(mu)
std = torch.ones_like(mu) * std
dist = utils.TruncatedNormal(mu, std)
return dist
class Critic(nn.Module):
def __init__(self, obs_type, obs_dim, action_dim, feature_dim, hidden_dim) -> None:
super().__init__()
self.obs_type = obs_type
if obs_type == 'pixels':
# for pixels actions will be added after trunk
self.trunk = nn.Sequential(nn.Linear(obs_dim, feature_dim),
nn.LayerNorm(feature_dim), nn.Tanh())
trunk_dim = feature_dim + action_dim
else:
# for states actions come in the beginning
self.trunk = nn.Sequential(
nn.Linear(obs_dim + action_dim, hidden_dim),
nn.LayerNorm(hidden_dim), nn.Tanh())
trunk_dim = hidden_dim
def make_q():
q_layers = []
q_layers += [
nn.Linear(trunk_dim, hidden_dim),
nn.ReLU(inplace=True)
]
if obs_type == 'pixels':
q_layers += [
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(inplace=True)
]
q_layers += [nn.Linear(hidden_dim, 1)]
return nn.Sequential(*q_layers)
self.Q1 = make_q()
self.Q2 = make_q()
self.apply(utils.weight_init)
def forward(self, obs, action) -> Tuple[Any, Any]:
inpt = obs if self.obs_type == 'pixels' else torch.cat([obs, action], dim=-1)
h = self.trunk(inpt)
h = torch.cat([h, action], dim=-1) if self.obs_type == 'pixels' else h
q1 = self.Q1(h)
q2 = self.Q2(h)
return q1, q2
class DDPGAgent:
# pylint: disable=unused-argument
def __init__(self, meta_dim: int = 0, **kwargs: tp.Any) -> None:
if self.__class__.__name__.startswith(("DIAYN", "APS", "RND", "Proto", "ICMAPT", "MaxEnt")): # HACK
cfg_fields = {field.name for field in dataclasses.fields(DDPGAgentConfig)}
# those have their own config, so let's curate the fields
# others will need to be ported in time
kwargs = {x: y for x, y in kwargs.items() if x in cfg_fields}
cfg = DDPGAgentConfig(**kwargs)
self.cfg = cfg
self.action_dim = cfg.action_shape[0]
self.solved_meta = None
# self.update_encoder = update_encoder # used in subclasses
# models
if cfg.obs_type == 'pixels':
self.aug: tp.Union[utils.RandomShiftsAug, nn.Identity] = utils.RandomShiftsAug(pad=4)
self.encoder: tp.Union[Encoder, nn.Identity] = Encoder(cfg.obs_shape).to(cfg.device)
self.obs_dim = self.encoder.repr_dim + meta_dim
else:
self.aug = nn.Identity()
self.encoder = nn.Identity()
self.obs_dim = cfg.obs_shape[0] + meta_dim
self.actor = Actor(cfg.obs_type, self.obs_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim).to(cfg.device)
self.critic: nn.Module = Critic(cfg.obs_type, self.obs_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim).to(cfg.device)
self.critic_target: nn.Module = Critic(cfg.obs_type, self.obs_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim).to(cfg.device)
self.critic_target.load_state_dict(self.critic.state_dict())
# optimizers
self.encoder_opt: tp.Optional[torch.optim.Adam] = None
if cfg.obs_type == 'pixels':
self.encoder_opt = torch.optim.Adam(self.encoder.parameters(), lr=cfg.lr)
self.actor_opt = torch.optim.Adam(self.actor.parameters(), lr=cfg.lr)
self.critic_opt = torch.optim.Adam(self.critic.parameters(), lr=cfg.lr)
self.reward_model: tp.Optional[torch.nn.Module] = None
self.reward_opt: tp.Optional[torch.optim.Adam] = None
if self.reward_free:
self.reward_model = mlp(self.obs_dim, cfg.hidden_dim, "ntanh", cfg.hidden_dim, # type: ignore
"relu", cfg.hidden_dim, "relu", 1).to(cfg.device) # type: ignore
self.reward_opt = torch.optim.Adam(self.reward_model.parameters(), lr=1e-3)
self.train()
self.critic_target.train()
def __getattr__(self, name: str) -> tp.Any:
# LEGACY: allow accessing the config directly as attribute
# to avoid having to rewrite everything at once
# cost: less type safety
if "cfg" in self.__dict__:
return getattr(self.cfg, name)
raise AttributeError
def train(self, training: bool = True) -> None:
self.training = training
self.encoder.train(training)
self.actor.train(training)
self.critic.train(training)
def init_from(self, other) -> None:
# copy parameters over
utils.hard_update_params(other.encoder, self.encoder)
utils.hard_update_params(other.actor, self.actor)
if self.init_critic:
utils.hard_update_params(other.critic.trunk, self.critic.trunk)
def init_meta(self) -> MetaDict:
return OrderedDict()
# pylint: disable=unused-argument
def update_meta(
self,
meta: MetaDict,
global_step: int,
time_step: TimeStep,
finetune: bool = False,
replay_loader: tp.Optional[ReplayBuffer] = None
) -> MetaDict:
return meta
def act(self, obs, meta, step, eval_mode) -> np.ndarray:
obs = torch.as_tensor(obs, device=self.device).unsqueeze(0)
h = self.encoder(obs)
inputs = [h]
for value in meta.values():
value = torch.as_tensor(value, device=self.device).unsqueeze(0)
inputs.append(value)
inpt = torch.cat(inputs, dim=-1)
#assert obs.shape[-1] == self.obs_shape[-1]
stddev = utils.schedule(self.stddev_schedule, step)
dist = self.actor(inpt, stddev)
if eval_mode:
action = dist.mean
else:
action = dist.sample(clip=None)
if step < self.num_expl_steps:
action.uniform_(-1.0, 1.0)
return action.cpu().numpy()[0]
def train_reward(self, replay_loader: ReplayBuffer) -> None:
obs_list, reward_list = [], []
batch_size = 0
num_inference_steps = 10000
while batch_size < num_inference_steps:
batch = replay_loader.sample(self.cfg.batch_size)
obs, action, reward, discount, next_obs = batch.to(self.device).unpack()
del obs, action, discount
obs_list.append(next_obs)
reward_list.append(reward)
batch_size += next_obs.size(0)
obs, reward = torch.cat(obs_list, 0), torch.cat(reward_list, 0) # type: ignore
obs, reward = obs[: num_inference_steps], reward[: num_inference_steps]
print('max reward: ', reward.max().cpu().item())
print('99 percentile: ', torch.quantile(reward, 0.99).cpu().item())
print('median reward: ', reward.median().cpu().item())
print('min reward: ', reward.min().cpu().item())
print('mean reward: ', reward.mean().cpu().item())
print('num reward: ', reward.shape[0])
assert self.reward_model is not None
for i in range(2000):
reward_loss = (self.reward_model(obs) - reward).pow(2).mean()
assert self.reward_opt is not None
self.reward_opt.zero_grad(set_to_none=True)
reward_loss.backward()
self.reward_opt.step()
print(f"iteration: {i}, reward_loss: {reward_loss.item()}")
# compute test loss on freshly sampled batches
obs_list, reward_list, batch_size = [], [], 0
while batch_size < num_inference_steps:
batch = replay_loader.sample(self.cfg.batch_size)
obs, action, reward, discount, next_obs = batch.to(self.device).unpack()
del obs, action, discount
obs_list.append(next_obs)
reward_list.append(reward)
batch_size += next_obs.size(0)
obs, reward = torch.cat(obs_list, 0), torch.cat(reward_list, 0) # type: ignore
obs, reward = obs[: num_inference_steps], reward[: num_inference_steps]
test_loss = (self.reward_model(obs) - reward).pow(2).mean()
print(f"Test Loss: {test_loss.item()}")
@tp.no_type_check # TODO remove
def update_critic(self, obs, action, reward, discount, next_obs, step) -> tp.Dict[str, float]:
metrics = {}
with torch.no_grad():
stddev = utils.schedule(self.stddev_schedule, step)
dist = self.actor(next_obs, stddev)
next_action = dist.sample(clip=self.stddev_clip)
target_Q1, target_Q2 = self.critic_target(next_obs, next_action)
target_V = torch.min(target_Q1, target_Q2)
target_Q = reward + (discount * target_V)
Q1, Q2 = self.critic(obs, action)
critic_loss = F.mse_loss(Q1, target_Q) + F.mse_loss(Q2, target_Q)
if self.use_tb or self.use_wandb:
metrics['critic_target_q'] = target_Q.mean().item()
metrics['critic_q1'] = Q1.mean().item()
metrics['critic_q2'] = Q2.mean().item()
metrics['critic_loss'] = critic_loss.item()
# optimize critic
if self.encoder_opt is not None:
self.encoder_opt.zero_grad(set_to_none=True)
self.critic_opt.zero_grad(set_to_none=True)
critic_loss.backward()
self.critic_opt.step()
if self.encoder_opt is not None:
self.encoder_opt.step()
return metrics
@tp.no_type_check # TODO remove
def update_actor(self, obs, step) -> tp.Dict[str, float]:
metrics = {}
stddev = utils.schedule(self.stddev_schedule, step)
dist = self.actor(obs, stddev)
action = dist.sample(clip=self.stddev_clip)
log_prob = dist.log_prob(action).sum(-1, keepdim=True)
Q1, Q2 = self.critic(obs, action)
Q = torch.min(Q1, Q2)
actor_loss = -Q.mean()
# optimize actor
self.actor_opt.zero_grad(set_to_none=True)
actor_loss.backward()
self.actor_opt.step()
if self.use_tb or self.use_wandb:
metrics['actor_loss'] = actor_loss.item()
metrics['actor_logprob'] = log_prob.mean().item()
metrics['actor_ent'] = dist.entropy().sum(dim=-1).mean().item()
return metrics
def aug_and_encode(self, obs) -> Any:
obs = self.aug(obs)
return self.encoder(obs)
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
#import ipdb; ipdb.set_trace()
if step % self.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size)
obs, action, reward, discount, next_obs, *_ = batch.to(self.device).unpack()
if self.reward_free:
del reward
assert self.reward_model is not None
reward = self.reward_model(next_obs)
# augment and encode
obs = self.aug_and_encode(obs)
with torch.no_grad():
next_obs = self.aug_and_encode(next_obs)
if self.use_tb or self.use_wandb:
metrics['batch_reward'] = reward.mean().item()
# update critic
metrics.update(
self.update_critic(obs, action, reward, discount, next_obs, step))
# update actor
metrics.update(self.update_actor(obs.detach(), step))
# update critic target
utils.soft_update_params(self.critic, self.critic_target,
self.critic_target_tau)
return metrics
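# --- Illustrative sketch (not part of the original ddpg.py) ---------------
# utils.soft_update_params performs the Polyak averaging used at the end of
# update(): target <- tau * online + (1 - tau) * target. A minimal standalone
# equivalent of that helper, assuming the standard formulation:
import torch
from torch import nn

tau = 0.01
online, target = nn.Linear(4, 4), nn.Linear(4, 4)
with torch.no_grad():
    for p_online, p_target in zip(online.parameters(), target.parameters()):
        p_target.data.copy_(tau * p_online.data + (1 - tau) * p_target.data)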
|
controllable_agent-main
|
url_benchmark/agent/ddpg.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=unused-import
import pdb
import copy
import math
import logging
import dataclasses
from collections import OrderedDict
import typing as tp
from pathlib import Path
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from hydra.core.config_store import ConfigStore
import omegaconf
from url_benchmark import utils
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from .ddpg import MetaDict
from .fb_modules import IdentityMap
from .ddpg import Encoder
from .fb_modules import Actor, DiagGaussianActor, ForwardMap, BackwardMap, mlp, OnlineCov
from dm_env import specs
from url_benchmark.dmc import TimeStep
from url_benchmark import goals as _goals
logger = logging.getLogger(__name__)
@dataclasses.dataclass
class SFSVDAgentConfig:
# @package agent
_target_: str = "url_benchmark.agent.sf_svd.SFSVDAgent"
name: str = "sf_svd"
# reward_free: ${reward_free}
obs_type: str = omegaconf.MISSING # to be specified later
obs_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
action_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
device: str = omegaconf.II("device") # ${device}
lr: float = 1e-4
lr_coef: float = 5
sf_target_tau: float = 0.01 # 0.001-0.01
update_every_steps: int = 2
use_tb: bool = omegaconf.II("use_tb") # ${use_tb}
use_wandb: bool = omegaconf.II("use_wandb") # ${use_wandb}
use_hiplog: bool = omegaconf.II("use_hiplog") # ${use_hiplog}
num_expl_steps: int = omegaconf.MISSING # to be specified later
num_inference_steps: int = 5120
hidden_dim: int = 1024 # 128, 2048
backward_hidden_dim: int = 512 # 128, 2048
feature_dim: int = 512 # 128, 1024
z_dim: int = 100 # 30-200
stddev_schedule: str = "0.2" # "linear(1,0.2,200000)" # 0, 0.1, 0.2
stddev_clip: float = 0.3 # 1
update_z_every_step: int = 100
nstep: int = 1
batch_size: int = 1024
init_sf: bool = True
update_encoder: bool = omegaconf.II("update_encoder") # ${update_encoder}
goal_space: tp.Optional[str] = omegaconf.II("goal_space")
# ortho_coef: float = 0.1 # 0.01-10
log_std_bounds: tp.Tuple[float, float] = (-5, 2) # param for DiagGaussianActor
temp: float = 1 # temperature for DiagGaussianActor
boltzmann: bool = False # set to true for DiagGaussianActor
debug: bool = False
preprocess: bool = True
num_sf_updates: int = 1
feature_learner: str = "p"
mix_ratio: float = 0.0
q_loss: bool = True
update_cov_every_step: int = 1000
add_trunk: bool = False
cs = ConfigStore.instance()
cs.store(group="agent", name="sf_svd", node=SFSVDAgentConfig)
class SVDLearner(nn.Module):
def __init__(self, obs_dim, action_dim, z_dim, hidden_dim) -> None:
super().__init__()
self.feature_net = mlp(obs_dim + action_dim, hidden_dim, "ntanh", hidden_dim, "relu", z_dim, "L2")
self.mu_net = mlp(obs_dim, hidden_dim, "ntanh", hidden_dim, "relu", z_dim)
self.apply(utils.weight_init)
def forward(self, obs: torch.Tensor, action: torch.Tensor, next_obs: torch.Tensor, future_obs: torch.Tensor):
del future_obs
phi = self.feature_net(torch.cat([obs, action], dim=1))
mu = self.mu_net(next_obs)
P = torch.einsum("sd, td -> st", phi, mu)
I = torch.eye(*P.size(), device=P.device)
off_diag = ~I.bool()
loss = - 2 * P.diag().mean() + P[off_diag].pow(2).mean()
# orthonormality loss
Cov = torch.matmul(phi, phi.T)
I = torch.eye(*Cov.size(), device=Cov.device)
off_diag = ~I.bool()
orth_loss_diag = - 2 * Cov.diag().mean()
orth_loss_offdiag = Cov[off_diag].pow(2).mean()
orth_loss = orth_loss_offdiag + orth_loss_diag
loss += orth_loss
return loss
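# --- Illustrative sketch (not part of the original sf_svd.py) -------------
# The SVDLearner loss pushes P = phi @ mu.T towards the identity over the
# batch (large diagonal, small off-diagonal) and applies the same penalty to
# the feature covariance phi @ phi.T. Standalone version of the arithmetic
# on random tensors (batch of 16, feature dimension 8):
import torch

phi = torch.randn(16, 8)     # features of (obs, action) pairs
mu = torch.randn(16, 8)      # embeddings of next observations
P = phi @ mu.T
off_diag = ~torch.eye(P.shape[0], dtype=torch.bool)
svd_loss = -2 * P.diag().mean() + P[off_diag].pow(2).mean()
cov = phi @ phi.T
orth_loss = -2 * cov.diag().mean() + cov[off_diag].pow(2).mean()
loss = svd_loss + orth_loss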
class SFSVDAgent:
# pylint: disable=unused-argument
def __init__(self,
**kwargs: tp.Any
):
cfg = SFSVDAgentConfig(**kwargs)
self.cfg = cfg
assert len(cfg.action_shape) == 1
self.action_dim = cfg.action_shape[0]
self.solved_meta: tp.Any = None
# models
if cfg.obs_type == 'pixels':
self.aug: nn.Module = utils.RandomShiftsAug(pad=4)
self.encoder: nn.Module = Encoder(cfg.obs_shape).to(cfg.device)
self.obs_dim = self.encoder.repr_dim
else:
self.aug = nn.Identity()
self.encoder = nn.Identity()
self.obs_dim = cfg.obs_shape[0]
if cfg.feature_dim < self.obs_dim:
logger.warning(f"feature_dim {cfg.feature_dim} should not be smaller that obs_dim {self.obs_dim}")
goal_dim = self.obs_dim
if cfg.goal_space is not None:
g = next(iter(_goals.goals.funcs[cfg.goal_space].values()))()
assert len(g.shape) == 1
goal_dim = len(g)
if cfg.z_dim < goal_dim:
logger.warning(f"z_dim {cfg.z_dim} should not be smaller that goal_dim {goal_dim}")
# create the network
if self.cfg.boltzmann:
self.actor: nn.Module = DiagGaussianActor(cfg.obs_type, self.obs_dim, cfg.z_dim, self.action_dim,
cfg.hidden_dim, cfg.log_std_bounds).to(cfg.device)
else:
self.actor = Actor(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=cfg.add_trunk).to(cfg.device)
self.successor_net = ForwardMap(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=cfg.add_trunk).to(cfg.device)
# build up the target network
self.successor_target_net = ForwardMap(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=cfg.add_trunk).to(cfg.device)
self.feature_learner = SVDLearner(goal_dim, self.action_dim, cfg.z_dim, cfg.backward_hidden_dim).to(cfg.device)
# if cfg.debug:
# self.feature_learner: nn.Module = IdentityMap().to(cfg.device)
# self.feature_net = BackwardMap(cfg.obs_type, goal_dim, cfg.z_dim, cfg.backward_hidden_dim).to(cfg.device)
# load the weights into the target networks
self.successor_target_net.load_state_dict(self.successor_net.state_dict())
# optimizers
self.encoder_opt: tp.Optional[torch.optim.Adam] = None
if cfg.obs_type == 'pixels':
self.encoder_opt = torch.optim.Adam(self.encoder.parameters(), lr=cfg.lr)
self.actor_opt = torch.optim.Adam(self.actor.parameters(), lr=cfg.lr)
self.sf_opt = torch.optim.Adam(self.successor_net.parameters(), lr=cfg.lr)
self.phi_opt: tp.Optional[torch.optim.Adam] = None
self.phi_opt = torch.optim.Adam(self.feature_learner.parameters(), lr=cfg.lr_coef * cfg.lr)
self.train()
self.successor_target_net.train()
self.inv_cov = torch.eye(self.cfg.z_dim, dtype=torch.float32, device=self.cfg.device)
# self.online_cov = OnlineCov(mom=0.99, dim=self.cfg.z_dim).to(self.cfg.device)
# self.online_cov.train()
def train(self, training: bool = True) -> None:
self.training = training
for net in [self.encoder, self.actor, self.successor_net]:
net.train(training)
if self.phi_opt is not None:
self.feature_learner.train()
def init_from(self, other) -> None:
# copy parameters over
names = ["encoder", "actor"]
if self.cfg.init_sf:
names += ["successor_net", "feature_learner", "successor_target_net"]
for name in names:
utils.hard_update_params(getattr(other, name), getattr(self, name))
for key, val in self.__dict__.items():
if isinstance(val, torch.optim.Optimizer):
val.load_state_dict(copy.deepcopy(getattr(other, key).state_dict()))
def precompute_cov(self, replay_loader: ReplayBuffer) -> None:
print("computing Cov of phi to be used at inference")
obs_list = []
action_list = []
batch_size = 0
while batch_size < self.cfg.num_inference_steps:
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
obs = batch.goal if self.cfg.goal_space is not None else batch.obs
if obs is None:
raise ValueError("Obs should never be None")
obs_list.append(obs)
action_list.append(batch.action)
batch_size += batch.next_obs.size(0)
obs = torch.cat(obs_list, 0)
action = torch.cat(action_list, 0)
self.inv_cov = self._compute_cov(torch.cat([obs, action], dim=1))
# with torch.no_grad():
# phi = self.feature_learner.feature_net(obs)
# cov = torch.matmul(phi.T, phi) / phi.shape[0]
# self.inv_cov = torch.linalg.pinv(cov)
def _compute_cov(self, goal: torch.Tensor) -> torch.Tensor:
# compute inverse of cov of phi
with torch.no_grad():
phi = self.feature_learner.feature_net(goal)
cov = torch.matmul(phi.T, phi) / phi.shape[0]
inv_cov = torch.inverse(cov)
return inv_cov
def get_goal_meta(self, goal_array: np.ndarray) -> MetaDict:
# assert self.cfg.feature_learner in ["FB"]
desired_goal = torch.tensor(goal_array).unsqueeze(0).to(self.cfg.device)
dummy_action = torch.zeros((1, self.action_dim), dtype=torch.float32).to(self.cfg.device)
with torch.no_grad():
z = self.feature_learner.feature_net(torch.cat([desired_goal, dummy_action], dim=1))
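        # whiten the goal feature with the precomputed inverse covariance, so z matches the reward-regression solution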
z = torch.matmul(z, self.inv_cov) # 1 x z_dim
z = math.sqrt(self.cfg.z_dim) * F.normalize(z, dim=1)
z = z.squeeze(0).cpu().numpy()
meta = OrderedDict()
meta['z'] = z
return meta
def infer_meta(self, replay_loader: ReplayBuffer) -> MetaDict:
obs_list, reward_list, action_list = [], [], []
batch_size = 0
while batch_size < self.cfg.num_inference_steps:
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
obs_list.append(batch.goal if self.cfg.goal_space is not None else batch.obs)
action_list.append(batch.action)
reward_list.append(batch.reward)
batch_size += batch.next_obs.size(0)
obs, reward, action = torch.cat(obs_list, 0), torch.cat(reward_list, 0), torch.cat(action_list, 0) # type: ignore
obs, reward, action = obs[:self.cfg.num_inference_steps], reward[:self.cfg.num_inference_steps], action[:self.cfg.num_inference_steps]
return self.infer_meta_from_obs_action_and_rewards(obs, action, reward)
def infer_meta_from_obs_action_and_rewards(self, obs: torch.Tensor, action: torch.Tensor, reward: torch.Tensor) -> MetaDict:
print('max reward: ', reward.max().cpu().item())
print('99 percentile: ', torch.quantile(reward, 0.99).cpu().item())
print('median reward: ', reward.median().cpu().item())
print('min reward: ', reward.min().cpu().item())
print('mean reward: ', reward.mean().cpu().item())
print('num reward: ', reward.shape[0])
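        # infer the task vector by regressing reward on features: z = argmin_z ||phi(s, a) z - r||^2 (least squares)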
with torch.no_grad():
phi = self.feature_learner.feature_net(torch.cat([obs, action], dim=1))
z = torch.linalg.lstsq(phi, reward).solution # z_dim x 1
        z = math.sqrt(self.cfg.z_dim) * F.normalize(z, dim=0)  # careful with the dimension: z is z_dim x 1, normalize along dim=0
meta = OrderedDict()
meta['z'] = z.squeeze().cpu().numpy()
# self.solved_meta = meta
return meta
def sample_z(self, size):
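        # draw a Gaussian vector and rescale it to the sphere of radius sqrt(z_dim)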
gaussian_rdv = torch.randn((size, self.cfg.z_dim), dtype=torch.float32)
z = math.sqrt(self.cfg.z_dim) * F.normalize(gaussian_rdv, dim=1)
return z
def init_meta(self) -> MetaDict:
if self.solved_meta is not None:
print('solved_meta')
return self.solved_meta
else:
z = self.sample_z(1)
z = z.squeeze().numpy()
meta = OrderedDict()
meta['z'] = z
return meta
# pylint: disable=unused-argument
def update_meta(
self,
meta: MetaDict,
global_step: int,
time_step: TimeStep,
finetune: bool = False,
replay_loader: tp.Optional[ReplayBuffer] = None
) -> MetaDict:
if global_step % self.cfg.update_z_every_step == 0:
return self.init_meta()
return meta
def act(self, obs, meta, step, eval_mode) -> tp.Any:
obs = torch.as_tensor(obs, device=self.cfg.device).unsqueeze(0) # type: ignore
h = self.encoder(obs)
z = torch.as_tensor(meta['z'], device=self.cfg.device).unsqueeze(0) # type: ignore
if self.cfg.boltzmann:
dist = self.actor(h, z)
else:
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(h, z, stddev)
if eval_mode:
action = dist.mean
else:
action = dist.sample()
if step < self.cfg.num_expl_steps:
action.uniform_(-1.0, 1.0)
return action.cpu().numpy()[0]
def update_sf(
self,
obs: torch.Tensor,
goal: torch.Tensor,
action: torch.Tensor,
discount: torch.Tensor,
next_obs: torch.Tensor,
next_goal: torch.Tensor,
future_goal: tp.Optional[torch.Tensor],
z: torch.Tensor,
step: int
) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
# compute target successor measure
with torch.no_grad():
if self.cfg.boltzmann:
dist = self.actor(next_obs, z)
next_action = dist.sample()
else:
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(next_obs, z, stddev)
next_action = dist.sample(clip=self.cfg.stddev_clip)
next_F1, next_F2 = self.successor_target_net(next_obs, z, next_action) # batch x z_dim
target_phi = self.feature_learner.feature_net(torch.cat([goal, action], dim=1)).detach() # batch x z_dim
next_Q1, next_Q2 = [torch.einsum('sd, sd -> s', next_Fi, z) for next_Fi in [next_F1, next_F2]]
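            # clipped double estimate: keep the F head with the smaller Q-value to limit overestimation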
next_F = torch.where((next_Q1 < next_Q2).reshape(-1, 1), next_F1, next_F2)
target_F = target_phi + discount * next_F
F1, F2 = self.successor_net(obs, z, action)
if not self.cfg.q_loss:
# compute SF loss
sf_loss = F.mse_loss(F1, target_F) + F.mse_loss(F2, target_F)
else:
# alternative loss
Q1, Q2 = [torch.einsum('sd, sd -> s', Fi, z) for Fi in [F1, F2]]
target_Q = torch.einsum('sd, sd -> s', target_F, z)
sf_loss = F.mse_loss(Q1, target_Q) + F.mse_loss(Q2, target_Q)
sf_loss /= self.cfg.z_dim
# compute feature loss
phi_loss = self.feature_learner(obs=goal, action=action, next_obs=next_goal, future_obs=future_goal)
if self.cfg.use_tb or self.cfg.use_wandb or self.cfg.use_hiplog:
metrics['target_F'] = target_F.mean().item()
metrics['F1'] = F1.mean().item()
metrics['phi'] = target_phi.mean().item()
metrics['phi_norm'] = torch.norm(target_phi, dim=-1).mean().item()
metrics['z_norm'] = torch.norm(z, dim=-1).mean().item()
metrics['sf_loss'] = sf_loss.item()
if phi_loss is not None:
metrics['phi_loss'] = phi_loss.item()
if isinstance(self.sf_opt, torch.optim.Adam):
metrics["sf_opt_lr"] = self.sf_opt.param_groups[0]["lr"]
# if self.cfg.goal_space in ["simplified_walker", "simplified_quadruped"]:
# metrics['max_velocity'] = goal_prime[:, -1].max().item()
# optimize SF
if self.encoder_opt is not None:
self.encoder_opt.zero_grad(set_to_none=True)
self.sf_opt.zero_grad(set_to_none=True)
sf_loss.backward()
self.sf_opt.step()
if self.encoder_opt is not None:
self.encoder_opt.step()
# optimise phi
if self.phi_opt is not None:
self.phi_opt.zero_grad(set_to_none=True)
phi_loss.backward()
self.phi_opt.step()
return metrics
def update_actor(self, obs: torch.Tensor, z: torch.Tensor, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if self.cfg.boltzmann:
dist = self.actor(obs, z)
action = dist.rsample()
else:
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(obs, z, stddev)
action = dist.sample(clip=self.cfg.stddev_clip)
log_prob = dist.log_prob(action).sum(-1, keepdim=True)
F1, F2 = self.successor_net(obs, z, action)
Q1 = torch.einsum('sd, sd -> s', F1, z)
Q2 = torch.einsum('sd, sd -> s', F2, z)
Q = torch.min(Q1, Q2)
actor_loss = (self.cfg.temp * log_prob - Q).mean() if self.cfg.boltzmann else -Q.mean()
# optimize actor
self.actor_opt.zero_grad(set_to_none=True)
actor_loss.backward()
self.actor_opt.step()
if self.cfg.use_tb or self.cfg.use_wandb:
metrics['actor_loss'] = actor_loss.item()
metrics['actor_logprob'] = log_prob.mean().item()
# metrics['actor_ent'] = dist.entropy().sum(dim=-1).mean().item()
return metrics
def aug_and_encode(self, obs: torch.Tensor) -> torch.Tensor:
obs = self.aug(obs)
return self.encoder(obs)
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if step % self.cfg.update_every_steps != 0:
return metrics
goal_list = []
for _ in range(self.cfg.num_sf_updates):
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
obs = goal = batch.obs
action = batch.action
discount = batch.discount
next_obs = next_goal = batch.next_obs
future_goal = batch.future_obs
if self.cfg.goal_space:
assert batch.goal is not None
assert batch.next_goal is not None
goal = batch.goal
next_goal = batch.next_goal
future_goal = batch.future_goal
goal_list.append(next_goal)
z = self.sample_z(self.cfg.batch_size).to(self.cfg.device)
if not z.shape[-1] == self.cfg.z_dim:
raise RuntimeError("There's something wrong with the logic here")
if self.cfg.mix_ratio > 0:
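                # goal relabeling: with probability mix_ratio, replace z by the whitened feature of another next_goal in the batch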
perm = torch.randperm(self.cfg.batch_size)
desired_goal = next_goal[perm]
dummy_action = torch.zeros((desired_goal.shape[0], self.action_dim), dtype=torch.float32).to(self.cfg.device)
with torch.no_grad():
phi = self.feature_learner.feature_net(torch.cat([desired_goal, dummy_action], dim=1))
# compute inverse of cov of phi
cov = torch.matmul(phi.T, phi) / phi.shape[0]
inv_cov = torch.inverse(cov)
mix_idxs: tp.Any = np.where(np.random.uniform(size=self.cfg.batch_size) < self.cfg.mix_ratio)[0]
with torch.no_grad():
new_z = phi[mix_idxs]
new_z = torch.matmul(new_z, inv_cov) # batch_size x z_dim
new_z = math.sqrt(self.cfg.z_dim) * F.normalize(new_z, dim=1)
z[mix_idxs] = new_z
metrics.update(self.update_sf(obs=obs, goal=goal, action=action, discount=discount,
next_obs=next_obs, next_goal=next_goal, future_goal=future_goal,
z=z, step=step))
# update actor
metrics.update(self.update_actor(obs, z, step))
# update critic target
utils.soft_update_params(self.successor_net, self.successor_target_net,
self.cfg.sf_target_tau)
# update inv cov
# if step % self.cfg.update_cov_every_step == 0:
# logger.info("update online cov")
# obs_list = list()
# batch_size = 0
# while batch_size < 10000:
# batch = next(replay_loader)
# batch = batch.to(self.cfg.device)
# obs_list.append(batch.next_goal if self.cfg.goal_space is not None else batch.next_obs)
# batch_size += batch.next_obs.size(0)
# obs = torch.cat(obs_list, 0)
# with torch.no_grad():
# phi = self.feature_learner.feature_net(obs)
# self.inv_cov = torch.inverse(self.online_cov(phi))
return metrics
|
controllable_agent-main
|
url_benchmark/agent/sf_svd.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pdb # pylint: disable=unused-import
import typing as tp
from collections import OrderedDict
import dataclasses
import logging
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from hydra.core.config_store import ConfigStore
import omegaconf
from url_benchmark.dmc import TimeStep
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from url_benchmark import utils
from .fb_modules import mlp, Actor
from url_benchmark import goals as _goals
from pathlib import Path
logger = logging.getLogger(__name__)
# MetaDict = tp.Mapping[str, tp.Union[np.ndarray, torch.Tensor]]
MetaDict = tp.Mapping[str, np.ndarray]
@dataclasses.dataclass
class GoalTD3AgentConfig:
# @package agent
_target_: str = "url_benchmark.agent.goal_td3.GoalTD3Agent"
name: str = "goal_td3"
reward_free: bool = omegaconf.II("reward_free")
custom_reward: tp.Optional[str] = omegaconf.II("custom_reward")
obs_type: str = omegaconf.MISSING # to be specified later
obs_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
action_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
device: str = omegaconf.II("device") # ${device}
lr: float = 1e-4
critic_target_tau: float = 0.01
    update_every_steps: int = 2
use_tb: bool = omegaconf.II("use_tb") # ${use_tb}
use_wandb: bool = omegaconf.II("use_wandb") # ${use_wandb}
    use_hiplog: bool = omegaconf.II("use_hiplog") # ${use_hiplog}
num_expl_steps: int = omegaconf.MISSING
hidden_dim: int = 1024
feature_dim: int = 512
stddev_schedule: str = "0.2" # "linear(1,0.2,200000)"
stddev_clip: float = 0.3 # 1.0
nstep: int = 1
batch_size: int = 1024 # 256 for pixels
init_critic: bool = True
goal_space: tp.Optional[str] = omegaconf.II("goal_space")
fb_reward: bool = False
future_ratio: float = 0
preprocess: bool = False
add_trunk: bool = False
supervised: bool = True
cs = ConfigStore.instance()
cs.store(group="agent", name="goal_td3", node=GoalTD3AgentConfig)
class Critic(nn.Module):
""" forward representation class"""
def __init__(self, obs_dim, z_dim, action_dim, feature_dim, hidden_dim,
preprocess=False, add_trunk=True) -> None:
super().__init__()
self.obs_dim = obs_dim
self.action_dim = action_dim
self.z_dim = z_dim
self.preprocess = preprocess
if self.preprocess:
self.obs_action_net = mlp(self.obs_dim + self.action_dim, hidden_dim, "ntanh", feature_dim, "irelu")
self.obs_z_net = mlp(self.obs_dim + self.z_dim, hidden_dim, "ntanh", feature_dim, "irelu")
if not add_trunk:
self.trunk: nn.Module = nn.Identity()
feature_dim = 2 * feature_dim
else:
self.trunk = mlp(2 * feature_dim, hidden_dim, "irelu")
feature_dim = hidden_dim
else:
self.trunk = mlp(self.obs_dim + self.z_dim + self.action_dim, hidden_dim, "ntanh")
feature_dim = hidden_dim
seq = [feature_dim, hidden_dim, "irelu", self.z_dim]
self.F1 = mlp(*seq)
self.F2 = mlp(*seq)
self.apply(utils.weight_init)
def forward(self, obs, z, action):
assert z.shape[-1] == self.z_dim
if self.preprocess:
obs_action = self.obs_action_net(torch.cat([obs, action], dim=-1))
obs_z = self.obs_z_net(torch.cat([obs, z], dim=-1))
h = torch.cat([obs_action, obs_z], dim=-1)
else:
h = torch.cat([obs, z, action], dim=-1)
if hasattr(self, "trunk"):
h = self.trunk(h)
F1 = self.F1(h)
F2 = self.F2(h)
return F1, F2
class GoalTD3Agent:
# pylint: disable=unused-argument
def __init__(self,
**kwargs: tp.Any
):
cfg = GoalTD3AgentConfig(**kwargs)
self.cfg = cfg
assert len(cfg.action_shape) == 1
self.action_dim = cfg.action_shape[0]
self.solved_meta: tp.Any = None
self.obs_dim = cfg.obs_shape[0]
if cfg.feature_dim < self.obs_dim:
logger.warning(f"feature_dim {cfg.feature_dim} should not be smaller that obs_dim {self.obs_dim}")
self.goal_dim = 0
if cfg.goal_space is not None:
if cfg.goal_space == "quad_pos_speed":
self.goal_dim = 7 # ugly hack
else:
g = next(iter(_goals.goals.funcs[cfg.goal_space].values()))()
assert len(g.shape) == 1
self.goal_dim = len(g)
if self.cfg.fb_reward:
# FB
pt = Path("/checkpoint/atouati/ca/2022-09-09_proto_maze/results/fb_ddpg_5e-05/9/models/snapshot_1000000.pt")
# pt = Path("/private/home/atouati/controllable_agent/url_benchmark/exp_sweep/2022.08.03/"
# "161531_fb_ddpg_point_mass_maze_reach_top_right_offline/1/models/snapshot_1000000.pt")
# Contr
# pt = Path("/private/home/atouati/controllable_agent/url_benchmark/exp_paper/"
# "2022.08.22_point_mass_maze_reach_top_right/100239_sf_contrastive/0/models/snapshot_1000000.pt")
# Lap
# pt = Path("/private/home/atouati/controllable_agent/url_benchmark/exp_paper/"
# "2022.08.23_point_mass_maze_reach_top_right/072210_sf_lap/1/models/snapshot_1000000.pt")
# pt = Path("/private/home/atouati/controllable_agent/url_benchmark/exp_paper/"
# "2022.08.25_point_mass_maze_reach_top_right/161812_new_aps/0/models/snapshot_2000000.pt")
print(f"loading {pt.resolve()}")
with pt.open("rb") as f:
payload = torch.load(f)
fb_agent = payload["agent"]
if hasattr(fb_agent, "feature_learner"):
self.feature_net = fb_agent.feature_learner.feature_net
else:
self.feature_net = fb_agent.backward_net
self.feature_net.eval()
self.goal_dim = fb_agent.cfg.z_dim
if "replay_loader" in payload.keys():
self.precompute_cov(payload["replay_loader"])
self.actor = Actor(self.obs_dim, self.goal_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
self.critic: nn.Module = Critic(self.obs_dim, self.goal_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
self.critic_target: nn.Module = Critic(self.obs_dim, self.goal_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
self.critic_target.load_state_dict(self.critic.state_dict())
# optimizers
self.actor_opt = torch.optim.Adam(self.actor.parameters(), lr=cfg.lr)
self.critic_opt = torch.optim.Adam(self.critic.parameters(), lr=cfg.lr)
self.reward_model: tp.Optional[torch.nn.Module] = None
self.reward_opt: tp.Optional[torch.optim.Adam] = None
if cfg.reward_free:
self.reward_model = mlp(self.obs_dim, cfg.hidden_dim, "ntanh", cfg.hidden_dim, # type: ignore
"relu", cfg.hidden_dim, "relu", 1).to(cfg.device) # type: ignore
self.reward_opt = torch.optim.Adam(self.reward_model.parameters(), lr=1e-3)
self.train()
self.critic_target.train()
def train(self, training: bool = True) -> None:
self.training = training
self.actor.train(training)
self.critic.train(training)
def precompute_cov(self, replay_loader: ReplayBuffer) -> None:
if not self.cfg.fb_reward:
return None
logger.info("computing Cov of phi to be used at inference")
obs_list: tp.List[torch.Tensor] = []
batch_size = 0
while batch_size < 100000:
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
obs_list.append(batch.next_goal if self.cfg.goal_space is not None else batch.next_obs) # type: ignore
batch_size += batch.next_obs.size(0)
obs = torch.cat(obs_list, 0)
with torch.no_grad():
phi = self.feature_net(obs)
cov = torch.matmul(phi.T, phi) / phi.shape[0]
self.inv_cov = torch.linalg.pinv(cov)
def init_from(self, other) -> None:
# copy parameters over
utils.hard_update_params(other.actor, self.actor)
utils.hard_update_params(other.critic, self.critic)
def init_meta(self, custom_reward: tp.Optional[_goals.BaseReward] = None) -> MetaDict:
if isinstance(custom_reward, _goals.MazeMultiGoal):
idx = np.random.choice(len(custom_reward.goals))
desired_goal = custom_reward.goals[idx]
meta = OrderedDict()
meta["g"] = desired_goal
return meta
else:
return OrderedDict()
# pylint: disable=unused-argument
def update_meta(
self,
meta: MetaDict,
global_step: int,
time_step: TimeStep,
finetune: bool = False,
replay_loader: tp.Optional[ReplayBuffer] = None
) -> MetaDict:
return meta
def get_goal_meta(self, goal_array: np.ndarray) -> MetaDict:
meta = OrderedDict()
meta['g'] = goal_array
return meta
def act(self, obs, meta, step, eval_mode) -> np.ndarray:
device = torch.device(self.cfg.device)
obs = torch.as_tensor(obs, device=device).unsqueeze(0)
goals = []
for value in meta.values():
value = torch.as_tensor(value, device=device).unsqueeze(0)
if self.cfg.fb_reward:
with torch.no_grad():
goals.append(torch.matmul(self.feature_net(value), self.inv_cov))
else:
goals.append(value)
goal = torch.cat(goals, dim=-1)
#assert obs.shape[-1] == self.obs_shape[-1]
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(obs, goal, stddev)
if eval_mode:
action = dist.mean
else:
action = dist.sample(clip=None)
if step < self.cfg.num_expl_steps:
action.uniform_(-1.0, 1.0)
return action.cpu().numpy()[0]
def train_reward(self, replay_loader: ReplayBuffer) -> None:
obs_list, reward_list = [], []
batch_size = 0
num_inference_steps = 10000
while batch_size < num_inference_steps:
batch = replay_loader.sample(self.cfg.batch_size)
obs, action, reward, discount, next_obs = batch.to(self.cfg.device).unpack()
del obs, action, discount
obs_list.append(next_obs)
reward_list.append(reward)
batch_size += next_obs.size(0)
obs, reward = torch.cat(obs_list, 0), torch.cat(reward_list, 0) # type: ignore
obs, reward = obs[: num_inference_steps], reward[: num_inference_steps]
print('max reward: ', reward.max().cpu().item())
print('99 percentile: ', torch.quantile(reward, 0.99).cpu().item())
print('median reward: ', reward.median().cpu().item())
print('min reward: ', reward.min().cpu().item())
print('mean reward: ', reward.mean().cpu().item())
print('num reward: ', reward.shape[0])
assert self.reward_model is not None
for i in range(2000):
reward_loss = (self.reward_model(obs) - reward).pow(2).mean()
assert self.reward_opt is not None
self.reward_opt.zero_grad(set_to_none=True)
reward_loss.backward()
self.reward_opt.step()
print(f"iteration: {i}, reward_loss: {reward_loss.item()}")
        # compute test loss on freshly sampled batches (reset the buffers so training data is not reused)
        obs_list, reward_list = [], []
        batch_size = 0
        while batch_size < num_inference_steps:
batch = replay_loader.sample(self.cfg.batch_size)
obs, action, reward, discount, next_obs = batch.to(self.cfg.device).unpack()
del obs, action, discount
obs_list.append(next_obs)
reward_list.append(reward)
batch_size += next_obs.size(0)
obs, reward = torch.cat(obs_list, 0), torch.cat(reward_list, 0) # type: ignore
obs, reward = obs[: num_inference_steps], reward[: num_inference_steps]
test_loss = (self.reward_model(obs) - reward).pow(2).mean()
print(f"Test Loss: {test_loss.item()}")
@tp.no_type_check # TODO remove
def update_critic(self,
obs: torch.Tensor,
goal: torch.Tensor,
action: torch.Tensor,
reward: torch.Tensor,
discount: torch.Tensor,
next_obs: torch.Tensor,
step: int) -> tp.Dict[str, float]:
metrics = {}
with torch.no_grad():
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(next_obs, goal, stddev)
next_action = dist.sample(clip=self.cfg.stddev_clip)
target_Q1, target_Q2 = self.critic_target(next_obs, goal, next_action)
target_V = torch.min(target_Q1, target_Q2)
target_Q = reward + (discount * target_V)
Q1, Q2 = self.critic(obs, goal, action)
critic_loss = F.mse_loss(Q1, target_Q) + F.mse_loss(Q2, target_Q)
if self.cfg.use_tb or self.cfg.use_wandb:
metrics['critic_target_q'] = target_Q.mean().item()
metrics['critic_q1'] = Q1.mean().item()
metrics['critic_q2'] = Q2.mean().item()
metrics['critic_loss'] = critic_loss.item()
metrics['stdev'] = stddev
# optimize critic
self.critic_opt.zero_grad(set_to_none=True)
critic_loss.backward()
self.critic_opt.step()
return metrics
@tp.no_type_check # TODO remove
def update_actor(self,
obs: torch.Tensor,
goal: torch.Tensor,
step: int) -> tp.Dict[str, float]:
metrics = {}
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(obs, goal, stddev)
action = dist.sample(clip=self.cfg.stddev_clip)
log_prob = dist.log_prob(action).sum(-1, keepdim=True)
Q1, Q2 = self.critic(obs, goal, action)
Q = torch.min(Q1, Q2)
actor_loss = -Q.mean()
# optimize actor
self.actor_opt.zero_grad(set_to_none=True)
actor_loss.backward()
self.actor_opt.step()
if self.cfg.use_tb or self.cfg.use_wandb:
metrics['actor_loss'] = actor_loss.item()
metrics['actor_logprob'] = log_prob.mean().item()
metrics['actor_ent'] = dist.entropy().sum(dim=-1).mean().item()
return metrics
def update(self, replay_loader: ReplayBuffer, step: int,
custom_reward: tp.Optional[_goals.BaseReward] = None) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
#import ipdb; ipdb.set_trace()
if step % self.cfg.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
achieved_goal = batch.next_goal
future_goal = batch.future_obs
if self.cfg.goal_space:
future_goal = batch.future_goal
obs = batch.obs
action = batch.action
discount = batch.discount
reward = batch.reward
next_obs = batch.next_obs
if self.cfg.reward_free:
del reward
assert self.reward_model is not None
reward = self.reward_model(next_obs)
desired_goal: torch.Tensor = torch.tensor([], dtype=torch.float32, device=self.cfg.device)
device = torch.device(self.cfg.device)
if isinstance(custom_reward, _goals.MazeMultiGoal):
del reward
if self.cfg.supervised:
# sample uniform goal
idx = np.random.choice(len(custom_reward.goals), size=self.cfg.batch_size)
desired_goal = custom_reward.goals[idx]
# convert to tensor
desired_goal = torch.as_tensor(desired_goal, device=device)
else:
# sample goal from replay
new_batch = replay_loader.sample(self.cfg.batch_size)
new_batch = new_batch.to(self.cfg.device)
desired_goal = new_batch.next_goal # type: ignore
# perm = torch.randperm(self.cfg.batch_size)
# desired_goal = achieved_goal[perm]
if self.cfg.future_ratio > 0:
assert future_goal is not None
future_idxs = np.where(np.random.uniform(size=self.cfg.batch_size) < self.cfg.future_ratio)[0]
desired_goal[future_idxs] = future_goal[future_idxs] # type: ignore
if self.cfg.fb_reward:
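                # FB-style reward: r ~= phi(achieved_goal)^T Cov^-1 phi(desired_goal), a whitened feature dot product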
# reward = (self.feature_net(achieved_goals) *
# torch.matmul(self.feature_net(desired_goals), self.inv_cov)).sum(dim=1, keepdims=True)
with torch.no_grad():
desired_goal = torch.matmul(self.feature_net(desired_goal), self.inv_cov)
reward = (self.feature_net(achieved_goal) * desired_goal).sum(dim=1, keepdims=True)
else:
reward, _ = custom_reward.from_goal(achieved_goal.cpu().numpy(), desired_goal.cpu().numpy()) # type: ignore
reward = torch.as_tensor(reward, device=device).unsqueeze(1) # type: ignore
# # augment obs
# obs = torch.cat([obs, desired_goal], dim=-1)
# next_obs = torch.cat([next_obs, desired_goal], dim=-1)
if self.cfg.use_tb or self.cfg.use_wandb:
metrics['batch_reward'] = reward.mean().item()
# update critic
metrics.update(
self.update_critic(obs=obs, goal=desired_goal, action=action, reward=reward,
discount=discount, next_obs=next_obs, step=step))
# update actor
metrics.update(self.update_actor(obs=obs, goal=desired_goal, step=step))
# update critic target
utils.soft_update_params(self.critic, self.critic_target,
self.cfg.critic_target_tau)
return metrics
|
controllable_agent-main
|
url_benchmark/agent/goal_td3.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
import dataclasses
from copy import deepcopy
import torch
from torch import nn
import torch.nn.functional as F
from torch import distributions as pyd
from torch import jit
from hydra.core.config_store import ConfigStore
import omegaconf
from url_benchmark import utils
from .ddpg import DDPGAgent
from .ddpg import DDPGAgentConfig as _BaseConfig
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from typing import Any, Dict, TypeVar
_T = TypeVar('_T')
@jit.script
def sinkhorn_knopp(Q):
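    # Sinkhorn-Knopp: normalize scores into an (approximately) doubly-stochastic assignment matrix in a few iterations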
Q -= Q.max()
Q = torch.exp(Q).T
Q /= Q.sum()
r = torch.ones(Q.shape[0], device=Q.device) / Q.shape[0]
c = torch.ones(Q.shape[1], device=Q.device) / Q.shape[1]
for _ in range(3):
u = Q.sum(dim=1)
u = r / u
Q *= u.unsqueeze(dim=1)
Q *= (c / Q.sum(dim=0)).unsqueeze(dim=0)
Q = Q / Q.sum(dim=0, keepdim=True)
return Q.T
class Projector(nn.Module):
def __init__(self, pred_dim, proj_dim) -> None:
super().__init__()
self.trunk = nn.Sequential(nn.Linear(pred_dim, proj_dim), nn.ReLU(),
nn.Linear(proj_dim, pred_dim))
self.apply(utils.weight_init)
def forward(self, x) -> Any:
return self.trunk(x)
@dataclasses.dataclass
class ProtoAgentConfig(_BaseConfig):
_target_: str = "url_benchmark.agent.proto.ProtoAgent"
name: str = "proto"
update_encoder: bool = omegaconf.II("update_encoder")
pred_dim: int = 128
proj_dim: int = 512
num_protos: int = 512
tau: float = 0.1
topk: int = 3
queue_size: int = 2048
encoder_target_tau: float = 0.05
cs = ConfigStore.instance()
cs.store(group="agent", name="proto", node=ProtoAgentConfig)
class ProtoAgent(DDPGAgent):
def __init__(self, **kwargs) -> None:
cfg = ProtoAgentConfig(**kwargs)
super().__init__(**kwargs)
self.cfg = cfg # override base ddpg cfg type
# models
self.encoder_target = deepcopy(self.encoder)
self.predictor = nn.Linear(self.obs_dim, cfg.pred_dim).to(self.device)
self.predictor.apply(utils.weight_init)
self.predictor_target = deepcopy(self.predictor)
self.projector = Projector(cfg.pred_dim, cfg.proj_dim).to(self.device)
self.projector.apply(utils.weight_init)
# prototypes
self.protos = nn.Linear(cfg.pred_dim, cfg.num_protos,
bias=False).to(self.device)
self.protos.apply(utils.weight_init)
# candidate queue
self.queue = torch.zeros(cfg.queue_size, cfg.pred_dim, device=self.device)
self.queue_ptr = 0
# optimizers
self.proto_opt = torch.optim.Adam(utils.chain(
self.encoder.parameters(), self.predictor.parameters(),
self.projector.parameters(), self.protos.parameters()),
lr=self.lr)
self.predictor.train()
self.projector.train()
self.protos.train()
def init_from(self, other) -> None:
# copy parameters over
utils.hard_update_params(other.encoder, self.encoder)
utils.hard_update_params(other.actor, self.actor)
utils.hard_update_params(other.predictor, self.predictor)
utils.hard_update_params(other.projector, self.projector)
utils.hard_update_params(other.protos, self.protos)
if self.init_critic:
utils.hard_update_params(other.critic, self.critic)
def normalize_protos(self) -> None:
C = self.protos.weight.data.clone()
C = F.normalize(C, dim=1, p=2)
self.protos.weight.data.copy_(C)
# pylint: disable=unused-argument
def compute_intr_reward(self, obs, step) -> Any:
self.normalize_protos()
# find a candidate for each prototype
with torch.no_grad():
z = self.encoder(obs)
z = self.predictor(z)
z = F.normalize(z, dim=1, p=2)
scores = self.protos(z).T
prob = F.softmax(scores, dim=1)
candidates = pyd.Categorical(prob).sample()
# enqueue candidates
ptr = self.queue_ptr
self.queue[ptr:ptr + self.num_protos] = z[candidates]
self.queue_ptr = (ptr + self.num_protos) % self.queue.shape[0]
# compute distances between the batch and the queue of candidates
z_to_q = torch.norm(z[:, None, :] - self.queue[None, :, :], dim=2, p=2)
all_dists, _ = torch.topk(z_to_q, self.topk, dim=1, largest=False)
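        # intrinsic reward: distance to the k-th nearest queued candidate (particle-based entropy estimate)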
dist = all_dists[:, -1:]
reward = dist
return reward
# pylint: disable=unused-argument
def update_proto(self, obs, next_obs, step) -> Dict[str, Any]:
metrics: tp.Dict[str, float] = {}
# normalize prototypes
self.normalize_protos()
# online network
s = self.encoder(obs)
s = self.predictor(s)
s = self.projector(s)
s = F.normalize(s, dim=1, p=2)
scores_s = self.protos(s)
log_p_s = F.log_softmax(scores_s / self.tau, dim=1)
# target network
with torch.no_grad():
t = self.encoder_target(next_obs)
t = self.predictor_target(t)
t = F.normalize(t, dim=1, p=2)
scores_t = self.protos(t)
q_t = sinkhorn_knopp(scores_t / self.tau)
# loss
loss = -(q_t * log_p_s).sum(dim=1).mean()
if self.use_tb or self.use_wandb:
metrics['repr_loss'] = loss.item()
self.proto_opt.zero_grad(set_to_none=True)
loss.backward()
self.proto_opt.step()
return metrics
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if step % self.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size)
obs, action, extr_reward, discount, next_obs = batch.to(self.device).unpack()
# augment and encode
with torch.no_grad():
obs = self.aug(obs)
next_obs = self.aug(next_obs)
if self.reward_free:
metrics.update(self.update_proto(obs, next_obs, step))
with torch.no_grad():
intr_reward = self.compute_intr_reward(next_obs, step)
if self.use_tb or self.use_wandb:
metrics['intr_reward'] = intr_reward.mean().item()
reward = intr_reward
else:
reward = extr_reward
if self.use_tb or self.use_wandb:
metrics['extr_reward'] = extr_reward.mean().item()
metrics['batch_reward'] = reward.mean().item()
obs = self.encoder(obs)
next_obs = self.encoder(next_obs)
if not self.update_encoder:
obs = obs.detach()
next_obs = next_obs.detach()
# update critic
metrics.update(
self.update_critic(obs.detach(), action, reward, discount,
next_obs.detach(), step))
# update actor
metrics.update(self.update_actor(obs.detach(), step))
# update critic target
utils.soft_update_params(self.encoder, self.encoder_target,
self.encoder_target_tau)
utils.soft_update_params(self.predictor, self.predictor_target,
self.encoder_target_tau)
utils.soft_update_params(self.critic, self.critic_target,
self.critic_target_tau)
return metrics
|
controllable_agent-main
|
url_benchmark/agent/proto.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pdb # pylint: disable=unused-import
import math
import typing as tp
import torch
from torch import nn
import torch.nn.functional as F
from url_benchmark import utils
class OnlineCov(nn.Module):
def __init__(self, mom: float, dim: int) -> None:
super().__init__()
self.mom = mom # momentum
self.count = torch.nn.Parameter(torch.LongTensor([0]), requires_grad=False)
self.cov: tp.Any = torch.nn.Parameter(torch.zeros((dim, dim), dtype=torch.float32), requires_grad=False)
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.training:
self.count += 1 # type: ignore
self.cov.data *= self.mom
self.cov.data += (1 - self.mom) * torch.matmul(x.T, x) / x.shape[0]
count = self.count.item()
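        # de-bias the exponential moving average (same correction as in Adam): divide by 1 - mom**count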
cov = self.cov / (1 - self.mom**count)
return cov
class _L2(nn.Module):
def __init__(self, dim) -> None:
super().__init__()
self.dim = dim
def forward(self, x):
y = math.sqrt(self.dim) * F.normalize(x, dim=1)
return y
def _nl(name: str, dim: int) -> tp.List[nn.Module]:
"""Returns a non-linearity given name and dimension"""
if name == "irelu":
return [nn.ReLU(inplace=True)]
if name == "relu":
return [nn.ReLU()]
if name == "ntanh":
return [nn.LayerNorm(dim), nn.Tanh()]
if name == "layernorm":
return [nn.LayerNorm(dim)]
if name == "tanh":
return [nn.Tanh()]
if name == "L2":
return [_L2(dim)]
raise ValueError(f"Unknown non-linearity {name}")
def mlp(*layers: tp.Sequence[tp.Union[int, str]]) -> nn.Sequential:
"""Provides a sequence of linear layers and non-linearities
providing a sequence of dimension for the neurons, or name of
the non-linearities
Eg: mlp(10, 12, "relu", 15) returns:
Sequential(Linear(10, 12), ReLU(), Linear(12, 15))
"""
assert len(layers) >= 2
sequence: tp.List[nn.Module] = []
assert isinstance(layers[0], int), "First input must provide the dimension"
prev_dim: int = layers[0]
for layer in layers[1:]:
if isinstance(layer, str):
sequence.extend(_nl(layer, prev_dim))
else:
assert isinstance(layer, int)
sequence.append(nn.Linear(prev_dim, layer))
prev_dim = layer
return nn.Sequential(*sequence)
class Actor(nn.Module):
def __init__(self, obs_dim, z_dim, action_dim, feature_dim, hidden_dim,
preprocess=False, add_trunk=True) -> None:
super().__init__()
self.obs_dim = obs_dim
self.z_dim = z_dim
self.action_dim = action_dim
self.preprocess = preprocess
if self.preprocess:
self.obs_net = mlp(self.obs_dim, hidden_dim, "ntanh", feature_dim, "irelu")
self.obs_z_net = mlp(self.obs_dim + self.z_dim, hidden_dim, "ntanh", feature_dim, "irelu")
if not add_trunk:
self.trunk: nn.Module = nn.Identity()
feature_dim = 2 * feature_dim
else:
self.trunk = mlp(2 * feature_dim, hidden_dim, "irelu")
feature_dim = hidden_dim
else:
self.trunk = mlp(self.obs_dim + self.z_dim, hidden_dim, "ntanh",
hidden_dim, "irelu",
hidden_dim, "irelu")
feature_dim = hidden_dim
self.policy = mlp(feature_dim, hidden_dim, "irelu", self.action_dim)
self.apply(utils.weight_init)
# initialize the last layer by zero
# self.policy[-1].weight.data.fill_(0.0)
def forward(self, obs, z, std):
assert z.shape[-1] == self.z_dim
if self.preprocess:
obs_z = self.obs_z_net(torch.cat([obs, z], dim=-1))
obs = self.obs_net(obs)
h = torch.cat([obs, obs_z], dim=-1)
else:
h = torch.cat([obs, z], dim=-1)
if hasattr(self, "trunk"):
h = self.trunk(h)
mu = self.policy(h)
mu = torch.tanh(mu)
std = torch.ones_like(mu) * std
dist = utils.TruncatedNormal(mu, std)
return dist
class DiagGaussianActor(nn.Module):
def __init__(self, obs_dim, z_dim, action_dim, hidden_dim, log_std_bounds,
preprocess=False) -> None:
super().__init__()
self.z_dim = z_dim
self.log_std_bounds = log_std_bounds
self.preprocess = preprocess
feature_dim = obs_dim + z_dim
self.policy = mlp(feature_dim, hidden_dim, "ntanh", hidden_dim, "relu", 2 * action_dim)
self.apply(utils.weight_init)
def forward(self, obs, z):
assert z.shape[-1] == self.z_dim
h = torch.cat([obs, z], dim=-1)
mu, log_std = self.policy(h).chunk(2, dim=-1)
# constrain log_std inside [log_std_min, log_std_max]
log_std = torch.tanh(log_std)
log_std_min, log_std_max = self.log_std_bounds
log_std = log_std_min + 0.5 * (log_std_max - log_std_min) * (log_std + 1)
std = log_std.exp()
dist = utils.SquashedNormal(mu, std)
return dist
class ForwardMap(nn.Module):
""" forward representation class"""
def __init__(self, obs_dim, z_dim, action_dim, feature_dim, hidden_dim,
preprocess=False, add_trunk=True) -> None:
super().__init__()
self.obs_dim = obs_dim
self.z_dim = z_dim
self.action_dim = action_dim
self.preprocess = preprocess
if self.preprocess:
self.obs_action_net = mlp(self.obs_dim + self.action_dim, hidden_dim, "ntanh", feature_dim, "irelu")
self.obs_z_net = mlp(self.obs_dim + self.z_dim, hidden_dim, "ntanh", feature_dim, "irelu")
if not add_trunk:
self.trunk: nn.Module = nn.Identity()
feature_dim = 2 * feature_dim
else:
self.trunk = mlp(2 * feature_dim, hidden_dim, "irelu")
feature_dim = hidden_dim
else:
self.trunk = mlp(self.obs_dim + self.z_dim + self.action_dim, hidden_dim, "ntanh",
hidden_dim, "irelu",
hidden_dim, "irelu")
feature_dim = hidden_dim
seq = [feature_dim, hidden_dim, "irelu", self.z_dim]
self.F1 = mlp(*seq)
self.F2 = mlp(*seq)
self.apply(utils.weight_init)
def forward(self, obs, z, action):
assert z.shape[-1] == self.z_dim
if self.preprocess:
obs_action = self.obs_action_net(torch.cat([obs, action], dim=-1))
obs_z = self.obs_z_net(torch.cat([obs, z], dim=-1))
h = torch.cat([obs_action, obs_z], dim=-1)
else:
h = torch.cat([obs, z, action], dim=-1)
if hasattr(self, "trunk"):
h = self.trunk(h)
F1 = self.F1(h)
F2 = self.F2(h)
return F1, F2
class IdentityMap(nn.Module):
def __init__(self) -> None:
super().__init__()
self.B = nn.Identity()
def forward(self, obs):
return self.B(obs)
class BackwardMap(nn.Module):
""" backward representation class"""
def __init__(self, obs_dim, z_dim, hidden_dim, norm_z: bool = True) -> None:
super().__init__()
self.obs_dim = obs_dim
self.z_dim = z_dim
self.norm_z = norm_z
self.B = mlp(self.obs_dim, hidden_dim, "ntanh", hidden_dim, "relu", self.z_dim)
self.apply(utils.weight_init)
def forward(self, obs):
if not hasattr(self, "norm_z"): # backward compatiblity
self.norm_z = True
B = self.B(obs)
if self.norm_z:
B = math.sqrt(self.z_dim) * F.normalize(B, dim=1)
return B
class MultinputNet(nn.Module):
"""Network with multiple inputs"""
def __init__(self, input_dims: tp.Sequence[int], sequence_dims: tp.Sequence[int]) -> None:
super().__init__()
input_dims = list(input_dims)
sequence_dims = list(sequence_dims)
dim0 = sequence_dims[0]
self.innets = nn.ModuleList([mlp(indim, dim0, "relu", dim0, "layernorm") for indim in input_dims]) # type: ignore
sequence: tp.List[tp.Union[str, int]] = [dim0]
for dim in sequence_dims[1:]:
sequence.extend(["relu", dim])
self.outnet = mlp(*sequence) # type: ignore
def forward(self, *tensors: torch.Tensor) -> torch.Tensor:
assert len(tensors) == len(self.innets)
out = sum(net(x) for net, x in zip(self.innets, tensors)) / len(self.innets)
        return self.outnet(out) # type: ignore
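# Example (sketch): mlp(10, 256, "ntanh", 256, "irelu", 4) builds
# Sequential(Linear(10, 256), LayerNorm(256), Tanh(), Linear(256, 256), ReLU(inplace=True), Linear(256, 4)).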
|
controllable_agent-main
|
url_benchmark/agent/fb_modules.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import dataclasses
import typing as tp
from typing import Any, Dict, Tuple
import math
from collections import OrderedDict
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from hydra.core.config_store import ConfigStore
import omegaconf
from url_benchmark import utils
from url_benchmark.dmc import TimeStep
from .ddpg import DDPGAgent, MetaDict, DDPGAgentConfig
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
@dataclasses.dataclass
class DIAYNAgentConfig(DDPGAgentConfig):
_target_: str = "url_benchmark.agent.diayn.DIAYNAgent"
name: str = "diayn"
update_encoder: bool = omegaconf.II("update_encoder")
skill_dim: int = 16
diayn_scale: float = 1.0
update_skill_every_step: int = 50
cs = ConfigStore.instance()
cs.store(group="agent", name="diayn", node=DIAYNAgentConfig)
class DIAYN(nn.Module):
def __init__(self, obs_dim: int, skill_dim: int, hidden_dim: int) -> None:
super().__init__()
self.skill_pred_net = nn.Sequential(nn.Linear(obs_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, skill_dim))
self.apply(utils.weight_init)
def forward(self, obs) -> Any:
skill_pred = self.skill_pred_net(obs)
return skill_pred
class DIAYNAgent(DDPGAgent):
def __init__(self, **kwargs) -> None:
cfg = DIAYNAgentConfig(**kwargs)
# create actor and critic
# increase obs shape to include skill dim (through meta_dim)
super().__init__(**kwargs, meta_dim=cfg.skill_dim)
self.cfg = cfg # override base ddpg cfg type
# create diayn
self.diayn = DIAYN(self.obs_dim - self.skill_dim, self.skill_dim,
kwargs['hidden_dim']).to(kwargs['device'])
# loss criterion
self.diayn_criterion = nn.CrossEntropyLoss()
# optimizers
self.diayn_opt = torch.optim.Adam(self.diayn.parameters(), lr=self.lr)
self.diayn.train()
def init_meta(self) -> tp.Dict[str, np.ndarray]:
skill = np.zeros(self.skill_dim, dtype=np.float32)
skill[np.random.choice(self.skill_dim)] = 1.0
meta = OrderedDict()
meta['skill'] = skill
return meta
# pylint: disable=unused-argument
def update_meta(
self,
meta: MetaDict,
global_step: int,
time_step: TimeStep,
finetune: bool = False,
replay_loader: tp.Optional[ReplayBuffer] = None
) -> MetaDict:
if global_step % self.update_skill_every_step == 0:
return self.init_meta()
return meta
def update_diayn(self, skill, next_obs, step) -> Dict[str, Any]:
metrics: tp.Dict[str, float] = {}
loss, df_accuracy = self.compute_diayn_loss(next_obs, skill)
self.diayn_opt.zero_grad()
if self.encoder_opt is not None:
self.encoder_opt.zero_grad(set_to_none=True)
loss.backward()
self.diayn_opt.step()
if self.encoder_opt is not None:
self.encoder_opt.step()
if self.use_tb or self.use_wandb:
metrics['diayn_loss'] = loss.item()
metrics['diayn_acc'] = df_accuracy
return metrics
def compute_intr_reward(self, skill, next_obs, step) -> Any:
z_hat = torch.argmax(skill, dim=1)
d_pred = self.diayn(next_obs)
d_pred_log_softmax = F.log_softmax(d_pred, dim=1)
# TODO pred_z unused, is that normal?
# _, pred_z = torch.max(d_pred_log_softmax, dim=1, keepdim=True)
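        # DIAYN intrinsic reward: log q(skill | s') - log p(skill), with a uniform prior p(skill) = 1 / skill_dim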
reward = d_pred_log_softmax[torch.arange(d_pred.shape[0]),
z_hat] - math.log(1 / self.skill_dim)
reward = reward.reshape(-1, 1)
return reward * self.diayn_scale
def compute_diayn_loss(self, next_state, skill) -> Tuple[Any, Any]:
"""
DF Loss
"""
z_hat = torch.argmax(skill, dim=1)
d_pred = self.diayn(next_state)
d_pred_log_softmax = F.log_softmax(d_pred, dim=1)
_, pred_z = torch.max(d_pred_log_softmax, dim=1, keepdim=True)
d_loss = self.diayn_criterion(d_pred, z_hat)
        df_accuracy = torch.eq(z_hat, pred_z.squeeze(1)).float().mean()
return d_loss, df_accuracy
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if step % self.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size).to(self.device)
obs, action, extr_reward, discount, next_obs = batch.unpack()
skill = batch.meta["skill"]
# augment and encode
obs = self.aug_and_encode(obs)
next_obs = self.aug_and_encode(next_obs)
if self.reward_free:
metrics.update(self.update_diayn(skill, next_obs, step))
with torch.no_grad():
intr_reward = self.compute_intr_reward(skill, next_obs, step)
if self.use_tb or self.use_wandb:
metrics['intr_reward'] = intr_reward.mean().item()
reward = intr_reward
else:
reward = extr_reward
if self.use_tb or self.use_wandb:
metrics['extr_reward'] = extr_reward.mean().item()
metrics['batch_reward'] = reward.mean().item()
if not self.update_encoder:
obs = obs.detach()
next_obs = next_obs.detach()
# extend observations with skill
obs = torch.cat([obs, skill], dim=1)
next_obs = torch.cat([next_obs, skill], dim=1)
# update critic
metrics.update(
self.update_critic(obs.detach(), action, reward, discount,
next_obs.detach(), step))
# update actor
metrics.update(self.update_actor(obs.detach(), step))
# update critic target
utils.soft_update_params(self.critic, self.critic_target,
self.critic_target_tau)
return metrics
|
controllable_agent-main
|
url_benchmark/agent/diayn.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=unused-import
import pdb
import copy
import math
import logging
import dataclasses
from collections import OrderedDict
import typing as tp
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from hydra.core.config_store import ConfigStore
import omegaconf
from dm_env import specs
from url_benchmark import utils
# from url_benchmark import replay_buffer as rb
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from url_benchmark.dmc import TimeStep
from url_benchmark import goals as _goals
from .ddpg import MetaDict
from .fb_modules import IdentityMap
from .ddpg import Encoder
from .fb_modules import Actor, DiagGaussianActor, ForwardMap, BackwardMap, OnlineCov
logger = logging.getLogger(__name__)
@dataclasses.dataclass
class UVFAgentConfig:
# @package agent
_target_: str = "url_benchmark.agent.uvf.UVFAgent"
name: str = "uvf"
# reward_free: ${reward_free}
obs_type: str = omegaconf.MISSING # to be specified later
obs_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
action_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
device: str = omegaconf.II("device") # ${device}
lr: float = 1e-4
lr_coef: float = 1
fb_target_tau: float = 0.01 # 0.001-0.01
update_every_steps: int = 2
use_tb: bool = omegaconf.II("use_tb") # ${use_tb}
use_wandb: bool = omegaconf.II("use_wandb") # ${use_wandb}
    use_hiplog: bool = omegaconf.II("use_hiplog") # ${use_hiplog}
    num_expl_steps: int = omegaconf.MISSING # to be specified later
num_inference_steps: int = 5120
hidden_dim: int = 1024 # 128, 2048
backward_hidden_dim: int = 512 # 128, 2048
feature_dim: int = 512 # 128, 1024
z_dim: int = 100 # 30-200
stddev_schedule: str = "0.2" # "linear(1,0.2,200000)" #
stddev_clip: float = 0.3 # 1
update_z_every_step: int = 300
update_z_proba: float = 1.0
nstep: int = 1
    batch_size: int = 512 # multiple of 3, 500-5000
init_fb: bool = True
update_encoder: bool = omegaconf.II("update_encoder") # ${update_encoder}
goal_space: tp.Optional[str] = omegaconf.II("goal_space")
ortho_coef: float = 1.0 # 0.01-10
log_std_bounds: tp.Tuple[float, float] = (-5, 2) # param for DiagGaussianActor
temp: float = 1 # temperature for DiagGaussianActor
boltzmann: bool = False # set to true for DiagGaussianActor
debug: bool = False
future_ratio: float = 0.0
mix_ratio: float = 0.5 # 0-1
rand_weight: bool = False # True, False
preprocess: bool = True
norm_z: bool = True
q_loss: bool = False
additional_metric: bool = False
add_trunk: bool = False
cs = ConfigStore.instance()
cs.store(group="agent", name="uvf", node=UVFAgentConfig)
class UVFAgent:
# pylint: disable=unused-argument
def __init__(self,
**kwargs: tp.Any
):
cfg = UVFAgentConfig(**kwargs)
self.cfg = cfg
assert len(cfg.action_shape) == 1
self.action_dim = cfg.action_shape[0]
self.solved_meta: tp.Any = None
# models
if cfg.obs_type == 'pixels':
self.aug: nn.Module = utils.RandomShiftsAug(pad=4)
self.encoder: nn.Module = Encoder(cfg.obs_shape).to(cfg.device)
self.obs_dim = self.encoder.repr_dim
else:
self.aug = nn.Identity()
self.encoder = nn.Identity()
self.obs_dim = cfg.obs_shape[0]
if cfg.feature_dim < self.obs_dim:
logger.warning(f"feature_dim {cfg.feature_dim} should not be smaller that obs_dim {self.obs_dim}")
goal_dim = self.obs_dim
if cfg.goal_space is not None:
goal_dim = _goals.get_goal_space_dim(cfg.goal_space)
if cfg.z_dim < goal_dim:
logger.warning(f"z_dim {cfg.z_dim} should not be smaller that goal_dim {goal_dim}")
# create the network
if self.cfg.boltzmann:
self.actor: nn.Module = DiagGaussianActor(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.hidden_dim, cfg.log_std_bounds).to(cfg.device)
else:
self.actor = Actor(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
self.forward_net = ForwardMap(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
if cfg.debug:
self.backward_net: nn.Module = IdentityMap().to(cfg.device)
# self.backward_target_net: nn.Module = IdentityMap().to(cfg.device)
else:
self.backward_net = BackwardMap(goal_dim, cfg.z_dim, cfg.backward_hidden_dim, norm_z=cfg.norm_z).to(cfg.device)
# self.backward_target_net = BackwardMap(goal_dim,
# cfg.z_dim, cfg.backward_hidden_dim, norm_z=cfg.norm_z).to(cfg.device)
# build up the target network
self.forward_target_net = ForwardMap(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
# load the weights into the target networks
self.forward_target_net.load_state_dict(self.forward_net.state_dict())
# self.backward_target_net.load_state_dict(self.backward_net.state_dict())
# optimizers
self.encoder_opt: tp.Optional[torch.optim.Adam] = None
if cfg.obs_type == 'pixels':
self.encoder_opt = torch.optim.Adam(self.encoder.parameters(), lr=cfg.lr)
self.actor_opt = torch.optim.Adam(self.actor.parameters(), lr=cfg.lr)
# params = [p for net in [self.forward_net, self.backward_net] for p in net.parameters()]
# self.fb_opt = torch.optim.Adam(params, lr=cfg.lr)
self.fb_opt = torch.optim.Adam([{'params': self.forward_net.parameters()}, # type: ignore
{'params': self.backward_net.parameters(), 'lr': cfg.lr_coef * cfg.lr}],
lr=cfg.lr)
self.train()
self.forward_target_net.train()
# self.backward_target_net.train()
self.actor_success: tp.List[float] = [] # only for debugging, can be removed eventually
# self.inv_cov = torch.eye(self.cfg.z_dim, dtype=torch.float32, device=self.cfg.device)
# self.online_cov = OnlineCov(mom=0.99, dim=self.cfg.z_dim).to(self.cfg.device)
# self.online_cov.train()
def train(self, training: bool = True) -> None:
self.training = training
for net in [self.encoder, self.actor, self.forward_net, self.backward_net]:
net.train(training)
def init_from(self, other) -> None:
# copy parameters over
names = ["encoder", "actor"]
if self.cfg.init_fb:
names += ["forward_net", "backward_net", "forward_target_net"] # + ["backward_target_net"]
for name in names:
utils.hard_update_params(getattr(other, name), getattr(self, name))
for key, val in self.__dict__.items():
if isinstance(val, torch.optim.Optimizer):
val.load_state_dict(copy.deepcopy(getattr(other, key).state_dict()))
def get_goal_meta(self, goal_array: np.ndarray) -> MetaDict:
desired_goal = torch.tensor(goal_array).unsqueeze(0).to(self.cfg.device)
with torch.no_grad():
z = self.backward_net(desired_goal)
# if self.cfg.norm_z:
# z = math.sqrt(self.cfg.z_dim) * F.normalize(z, dim=1)
z = z.squeeze(0).cpu().numpy()
meta = OrderedDict()
meta['z'] = z
return meta
def infer_meta(self, replay_loader: ReplayBuffer) -> MetaDict:
obs_list, reward_list = [], []
batch_size = 0
while batch_size < self.cfg.num_inference_steps:
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
obs_list.append(batch.next_goal if self.cfg.goal_space is not None else batch.next_obs)
reward_list.append(batch.reward)
batch_size += batch.next_obs.size(0)
obs, reward = torch.cat(obs_list, 0), torch.cat(reward_list, 0) # type: ignore
obs, reward = obs[:self.cfg.num_inference_steps], reward[:self.cfg.num_inference_steps]
return self.infer_meta_from_obs_and_rewards(obs, reward)
def infer_meta_from_obs_and_rewards(self, obs: torch.Tensor, reward: torch.Tensor) -> MetaDict:
print('max reward: ', reward.max().cpu().item())
print('99 percentile: ', torch.quantile(reward, 0.99).cpu().item())
print('median reward: ', reward.median().cpu().item())
print('min reward: ', reward.min().cpu().item())
print('mean reward: ', reward.mean().cpu().item())
print('num reward: ', reward.shape[0])
# filter out small reward
# pdb.set_trace()
# idx = torch.where(reward >= torch.quantile(reward, 0.99))[0]
# obs = obs[idx]
# reward = reward[idx]
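        # z is the reward-weighted average of backward embeddings, z ~= E[r(s') B(s')], optionally rescaled to the sphere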
with torch.no_grad():
B = self.backward_net(obs)
z = torch.matmul(reward.T, B) / reward.shape[0]
if self.cfg.norm_z:
z = math.sqrt(self.cfg.z_dim) * F.normalize(z, dim=1)
meta = OrderedDict()
meta['z'] = z.squeeze().cpu().numpy()
# self.solved_meta = meta
return meta
def init_meta(self, replay_loader: tp.Optional[ReplayBuffer] = None) -> MetaDict:
if replay_loader is not None:
batch = replay_loader.sample(self.cfg.batch_size)
assert batch.next_goal is not None
g = batch.next_goal[0]
meta = self.get_goal_meta(g)
else:
z = np.zeros((self.cfg.z_dim,), dtype=np.float32)
meta = OrderedDict()
meta['z'] = z
return meta
# pylint: disable=unused-argument
def update_meta(
self,
meta: MetaDict,
global_step: int,
time_step: TimeStep,
finetune: bool = False,
replay_loader: tp.Optional[ReplayBuffer] = None
) -> MetaDict:
if global_step % self.cfg.update_z_every_step == 0 and np.random.rand() < self.cfg.update_z_proba:
return self.init_meta()
return meta
def act(self, obs, meta, step, eval_mode) -> tp.Any:
obs = torch.as_tensor(obs, device=self.cfg.device).unsqueeze(0) # type: ignore
h = self.encoder(obs)
z = torch.as_tensor(meta['z'], device=self.cfg.device).unsqueeze(0) # type: ignore
if self.cfg.boltzmann:
dist = self.actor(h, z)
else:
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(h, z, stddev)
if eval_mode:
action = dist.mean
else:
action = dist.sample()
if step < self.cfg.num_expl_steps:
action.uniform_(-1.0, 1.0)
return action.cpu().numpy()[0]
def update_fb(
self,
obs: torch.Tensor,
action: torch.Tensor,
discount: torch.Tensor,
next_obs: torch.Tensor,
next_goal: torch.Tensor,
desired_goal: torch.Tensor,
step: int
) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
# Q LOSS
epsilon = 1e-6
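        # sparse goal-reaching reward: 1 when next_goal lies within epsilon of desired_goal, else 0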
z = self.backward_net(desired_goal)
reward = (torch.norm(next_goal - desired_goal, dim=1, keepdim=False) < epsilon).float() # batch_size
with torch.no_grad():
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(next_obs, z, stddev)
next_action = dist.sample(clip=self.cfg.stddev_clip)
target_F1, target_F2 = self.forward_target_net(next_obs, z, next_action) # batch x z_dim
            next_Q1, next_Q2 = [torch.einsum('sd, sd -> s', target_Fi, z) for target_Fi in [target_F1, target_F2]]
            next_Q = torch.min(next_Q1, next_Q2)
target_Q = reward + discount.squeeze(1) * next_Q # batch_size
target_Q = target_Q.detach()
F1, F2 = self.forward_net(obs, z, action)
Q1, Q2 = [torch.einsum('sd, sd -> s', Fi, z) for Fi in [F1, F2]]
fb_loss = F.mse_loss(Q1, target_Q) + F.mse_loss(Q2, target_Q)
if self.cfg.use_tb or self.cfg.use_wandb or self.cfg.use_hiplog:
metrics['z_norm'] = torch.norm(z, dim=-1).mean().item()
metrics['fb_loss'] = fb_loss.item()
if isinstance(self.fb_opt, torch.optim.Adam):
metrics["fb_opt_lr"] = self.fb_opt.param_groups[0]["lr"]
# optimize FB
if self.encoder_opt is not None:
self.encoder_opt.zero_grad(set_to_none=True)
self.fb_opt.zero_grad(set_to_none=True)
fb_loss.backward()
self.fb_opt.step()
if self.encoder_opt is not None:
self.encoder_opt.step()
return metrics
def update_actor(self, obs: torch.Tensor, desired_goal: torch.Tensor, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
z = self.backward_net(desired_goal)
if self.cfg.boltzmann:
dist = self.actor(obs, z)
action = dist.rsample()
else:
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(obs, z, stddev)
action = dist.sample(clip=self.cfg.stddev_clip)
log_prob = dist.log_prob(action).sum(-1, keepdim=True)
F1, F2 = self.forward_net(obs, z, action)
Q1 = torch.einsum('sd, sd -> s', F1, z)
Q2 = torch.einsum('sd, sd -> s', F2, z)
Q = torch.min(Q1, Q2)
actor_loss = (self.cfg.temp * log_prob - Q).mean() if self.cfg.boltzmann else -Q.mean()
# optimize actor
self.actor_opt.zero_grad(set_to_none=True)
actor_loss.backward()
self.actor_opt.step()
if self.cfg.use_tb or self.cfg.use_wandb:
metrics['actor_loss'] = actor_loss.item()
metrics['q'] = Q.mean().item()
metrics['actor_logprob'] = log_prob.mean().item()
return metrics
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if step % self.cfg.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
# pdb.set_trace()
obs = batch.obs
action = batch.action
discount = batch.discount
next_obs = next_goal = batch.next_obs
if self.cfg.goal_space is not None:
assert batch.next_goal is not None
next_goal = batch.next_goal
# second_batch = replay_loader.sample(self.cfg.batch_size)
# second_batch = second_batch.to(self.cfg.device)
#
# desired_goal = second_batch.next_obs
# if self.cfg.goal_space is not None:
# assert second_batch.next_goal is not None
# desired_goal = second_batch.next_goal
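        # relabel goals within the batch: each transition is trained towards another transition's achieved next_goal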
perm = torch.randperm(self.cfg.batch_size)
desired_goal = next_goal[perm]
if self.cfg.mix_ratio > 0:
mix_idxs: tp.Any = np.where(np.random.uniform(size=self.cfg.batch_size) < self.cfg.mix_ratio)[0]
desired_goal[mix_idxs] = next_goal[mix_idxs]
metrics.update(self.update_fb(obs=obs, action=action, discount=discount,
next_obs=next_obs, next_goal=next_goal, desired_goal=desired_goal, step=step))
# update actor
metrics.update(self.update_actor(obs, desired_goal, step))
# update critic target
utils.soft_update_params(self.forward_net, self.forward_target_net,
self.cfg.fb_target_tau)
# utils.soft_update_params(self.backward_net, self.backward_target_net,
# self.cfg.fb_target_tau)
return metrics
|
controllable_agent-main
|
url_benchmark/agent/uvf.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Copyright 2019 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Quadruped Domain."""
import collections
import typing as tp
from typing import Any
import os
from dm_control import mujoco
from dm_control.mujoco.wrapper import mjbindings
from dm_control.rl import control
from dm_control.suite import base
from dm_control.suite import common
from dm_control.utils import containers
from dm_control.utils import rewards
from dm_control.utils import xml_tools
from lxml import etree
import numpy as np
from scipy import ndimage
enums = mjbindings.enums
mjlib = mjbindings.mjlib
_DEFAULT_TIME_LIMIT = 20
_CONTROL_TIMESTEP = .02
# Horizontal speeds above which the move reward is 1.
_RUN_SPEED = 5
_WALK_SPEED = 0.5
_JUMP_HEIGHT = 1.0
# Constants related to terrain generation.
_HEIGHTFIELD_ID = 0
_TERRAIN_SMOOTHNESS = 0.15 # 0.0: maximally bumpy; 1.0: completely smooth.
_TERRAIN_BUMP_SCALE = 2 # Spatial scale of terrain bumps (in meters).
# Named model elements.
_TOES = ['toe_front_left', 'toe_back_left', 'toe_back_right', 'toe_front_right']
_WALLS = ['wall_px', 'wall_py', 'wall_nx', 'wall_ny']
SUITE = containers.TaggedTasks()
def make(task,
task_kwargs=None,
environment_kwargs=None,
visualize_reward: bool = False):
task_kwargs = task_kwargs or {}
if environment_kwargs is not None:
task_kwargs = task_kwargs.copy()
task_kwargs['environment_kwargs'] = environment_kwargs
env = SUITE[task](**task_kwargs)
env.task.visualize_reward = visualize_reward
return env
# REMOVED since resources is undefined
# def get_model_and_assets() -> Tuple[Any, Any]:
# """Returns a tuple containing the model XML string and a dict of assets."""
# root_dir = os.path.dirname(os.path.dirname(__file__))
# xml = resources.GetResource(
# os.path.join(root_dir, 'custom_dmc_tasks', 'quadruped.xml'))
# return xml, common.ASSETS
def make_model(floor_size=None, terrain: bool = False, rangefinders: bool = False,
walls_and_ball: bool = False):
"""Returns the model XML string."""
root_dir = os.path.dirname(os.path.dirname(__file__))
xml_string = common.read_model(os.path.join(root_dir, 'custom_dmc_tasks', 'quadruped.xml'))
parser = etree.XMLParser(remove_blank_text=True)
mjcf = etree.XML(xml_string, parser)
# Set floor size.
if floor_size is not None:
floor_geom = mjcf.find('.//geom[@name=\'floor\']')
floor_geom.attrib['size'] = f'{floor_size} {floor_size} .5'
# Remove walls, ball and target.
if not walls_and_ball:
for wall in _WALLS:
wall_geom = xml_tools.find_element(mjcf, 'geom', wall)
wall_geom.getparent().remove(wall_geom)
# Remove ball.
ball_body = xml_tools.find_element(mjcf, 'body', 'ball')
ball_body.getparent().remove(ball_body)
# Remove target.
target_site = xml_tools.find_element(mjcf, 'site', 'target')
target_site.getparent().remove(target_site)
# Remove terrain.
if not terrain:
terrain_geom = xml_tools.find_element(mjcf, 'geom', 'terrain')
terrain_geom.getparent().remove(terrain_geom)
# Remove rangefinders if they're not used, as range computations can be
# expensive, especially in a scene with heightfields.
if not rangefinders:
rangefinder_sensors = mjcf.findall('.//rangefinder')
for rf in rangefinder_sensors:
rf.getparent().remove(rf)
return etree.tostring(mjcf, pretty_print=True)
@SUITE.add()
def stand(time_limit: int = _DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns the Walk task."""
xml_string = make_model(floor_size=_DEFAULT_TIME_LIMIT * _WALK_SPEED)
physics = Physics.from_xml_string(xml_string, common.ASSETS)
task = Stand(random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics, task, time_limit=time_limit,
control_timestep=_CONTROL_TIMESTEP,
**environment_kwargs)
@SUITE.add()
def jump(time_limit: int = _DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns the Walk task."""
xml_string = make_model(floor_size=_DEFAULT_TIME_LIMIT * _WALK_SPEED)
physics = Physics.from_xml_string(xml_string, common.ASSETS)
task = Jump(desired_height=_JUMP_HEIGHT, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics, task, time_limit=time_limit,
control_timestep=_CONTROL_TIMESTEP,
**environment_kwargs)
@SUITE.add()
def roll(time_limit: int = _DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns the Walk task."""
xml_string = make_model(floor_size=_DEFAULT_TIME_LIMIT * _WALK_SPEED)
physics = Physics.from_xml_string(xml_string, common.ASSETS)
task = Roll(desired_speed=_WALK_SPEED, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics, task, time_limit=time_limit,
control_timestep=_CONTROL_TIMESTEP,
**environment_kwargs)
@SUITE.add()
def roll_fast(time_limit: int = _DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns the Walk task."""
xml_string = make_model(floor_size=_DEFAULT_TIME_LIMIT * _WALK_SPEED)
physics = Physics.from_xml_string(xml_string, common.ASSETS)
task = Roll(desired_speed=_RUN_SPEED, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics, task, time_limit=time_limit,
control_timestep=_CONTROL_TIMESTEP,
**environment_kwargs)
@SUITE.add()
def escape(time_limit: int = _DEFAULT_TIME_LIMIT, random=None,
environment_kwargs=None):
"""Returns the Escape task."""
xml_string = make_model(floor_size=40, terrain=True, rangefinders=True)
physics = Physics.from_xml_string(xml_string, common.ASSETS)
task = Escape(random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics, task, time_limit=time_limit,
control_timestep=_CONTROL_TIMESTEP,
**environment_kwargs)
@SUITE.add()
def fetch(time_limit: int = _DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns the Fetch task."""
xml_string = make_model(walls_and_ball=True)
physics = Physics.from_xml_string(xml_string, common.ASSETS)
task = Fetch(random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics, task, time_limit=time_limit,
control_timestep=_CONTROL_TIMESTEP,
**environment_kwargs)
# pylint: disable=attribute-defined-outside-init
class Physics(mujoco.Physics):
"""Physics simulation with additional features for the Quadruped domain."""
def _reload_from_data(self, data) -> None:
super()._reload_from_data(data)
# Clear cached sensor names when the physics is reloaded.
self._sensor_types_to_names: tp.Dict[tp.Tuple[tp.Any, ...], tp.List[str]] = {}
self._hinge_names: tp.List[str] = []
def _get_sensor_names(self, *sensor_types) -> Any:
try:
sensor_names = self._sensor_types_to_names[sensor_types]
except KeyError:
[sensor_ids] = np.where(np.in1d(self.model.sensor_type, sensor_types))
sensor_names = [self.model.id2name(s_id, 'sensor') for s_id in sensor_ids]
self._sensor_types_to_names[sensor_types] = sensor_names
return sensor_names
def torso_upright(self) -> np.ndarray:
"""Returns the dot-product of the torso z-axis and the global z-axis."""
return np.asarray(self.named.data.xmat['torso', 'zz'])
def torso_velocity(self) -> Any:
"""Returns the velocity of the torso, in the local frame."""
return self.named.data.sensordata['velocimeter'].copy()
def com_height(self) -> Any:
return self.named.data.sensordata['center_of_mass'].copy()[2]
def egocentric_state(self) -> Any:
"""Returns the state without global orientation or position."""
if not self._hinge_names:
[hinge_ids] = np.nonzero(self.model.jnt_type ==
enums.mjtJoint.mjJNT_HINGE)
self._hinge_names = [self.model.id2name(j_id, 'joint')
for j_id in hinge_ids]
return np.hstack((self.named.data.qpos[self._hinge_names],
self.named.data.qvel[self._hinge_names],
self.data.act))
def toe_positions(self) -> Any:
"""Returns toe positions in egocentric frame."""
torso_frame = self.named.data.xmat['torso'].reshape(3, 3)
torso_pos = self.named.data.xpos['torso']
torso_to_toe = self.named.data.xpos[_TOES] - torso_pos
return torso_to_toe.dot(torso_frame)
def force_torque(self) -> Any:
"""Returns scaled force/torque sensor readings at the toes."""
force_torque_sensors = self._get_sensor_names(enums.mjtSensor.mjSENS_FORCE,
enums.mjtSensor.mjSENS_TORQUE)
return np.arcsinh(self.named.data.sensordata[force_torque_sensors])
def imu(self) -> Any:
"""Returns IMU-like sensor readings."""
imu_sensors = self._get_sensor_names(enums.mjtSensor.mjSENS_GYRO,
enums.mjtSensor.mjSENS_ACCELEROMETER)
return self.named.data.sensordata[imu_sensors]
def rangefinder(self) -> Any:
"""Returns scaled rangefinder sensor readings."""
rf_sensors = self._get_sensor_names(enums.mjtSensor.mjSENS_RANGEFINDER)
rf_readings = self.named.data.sensordata[rf_sensors]
no_intersection = -1.0
return np.where(rf_readings == no_intersection, 1.0, np.tanh(rf_readings))
def origin_distance(self) -> np.ndarray:
"""Returns the distance from the origin to the workspace."""
return np.asarray(np.linalg.norm(self.named.data.site_xpos['workspace']))
def origin(self) -> Any:
"""Returns origin position in the torso frame."""
torso_frame = self.named.data.xmat['torso'].reshape(3, 3)
torso_pos = self.named.data.xpos['torso']
return -torso_pos.dot(torso_frame)
def ball_state(self) -> Any:
"""Returns ball position and velocity relative to the torso frame."""
data = self.named.data
torso_frame = data.xmat['torso'].reshape(3, 3)
ball_rel_pos = data.xpos['ball'] - data.xpos['torso']
ball_rel_vel = data.qvel['ball_root'][:3] - data.qvel['root'][:3]
ball_rot_vel = data.qvel['ball_root'][3:]
ball_state = np.vstack((ball_rel_pos, ball_rel_vel, ball_rot_vel))
return ball_state.dot(torso_frame).ravel()
def target_position(self) -> Any:
"""Returns target position in torso frame."""
torso_frame = self.named.data.xmat['torso'].reshape(3, 3)
torso_pos = self.named.data.xpos['torso']
torso_to_target = self.named.data.site_xpos['target'] - torso_pos
return torso_to_target.dot(torso_frame)
def ball_to_target_distance(self) -> Any:
"""Returns horizontal distance from the ball to the target."""
ball_to_target = (self.named.data.site_xpos['target'] -
self.named.data.xpos['ball'])
return np.linalg.norm(ball_to_target[:2])
def self_to_ball_distance(self) -> Any:
"""Returns horizontal distance from the quadruped workspace to the ball."""
self_to_ball = (self.named.data.site_xpos['workspace']
- self.named.data.xpos['ball'])
return np.linalg.norm(self_to_ball[:2])
def _find_non_contacting_height(physics, orientation, x_pos: float = 0.0, y_pos: float = 0.0) -> None:
"""Find a height with no contacts given a body orientation.
Args:
physics: An instance of `Physics`.
orientation: A quaternion.
x_pos: A float. Position along global x-axis.
y_pos: A float. Position along global y-axis.
Raises:
RuntimeError: If a non-contacting configuration has not been found after
10,000 attempts.
"""
z_pos = 0.0 # Start embedded in the floor.
num_contacts = 1
num_attempts = 0
# Move up in 1cm increments until no contacts.
while num_contacts > 0:
try:
with physics.reset_context():
physics.named.data.qpos['root'][:3] = x_pos, y_pos, z_pos
physics.named.data.qpos['root'][3:] = orientation
except control.PhysicsError:
# We may encounter a PhysicsError here due to filling the contact
# buffer, in which case we simply increment the height and continue.
pass
num_contacts = physics.data.ncon
z_pos += 0.01
num_attempts += 1
if num_attempts > 10000:
raise RuntimeError('Failed to find a non-contacting configuration.')
def _common_observations(physics) -> tp.Dict[str, Any]:
"""Returns the observations common to all tasks."""
obs = collections.OrderedDict()
obs['egocentric_state'] = physics.egocentric_state()
obs['torso_velocity'] = physics.torso_velocity()
obs['torso_upright'] = physics.torso_upright()
obs['imu'] = physics.imu()
obs['force_torque'] = physics.force_torque()
return obs
def _upright_reward(physics, deviation_angle: int = 0):
"""Returns a reward proportional to how upright the torso is.
Args:
physics: an instance of `Physics`.
deviation_angle: A float, in degrees. The reward is 0 when the torso is
exactly upside-down and 1 when the torso's z-axis is less than
`deviation_angle` away from the global z-axis.
"""
deviation = np.cos(np.deg2rad(deviation_angle))
return rewards.tolerance(
physics.torso_upright(),
bounds=(deviation, float('inf')),
sigmoid='linear',
margin=1 + deviation,
value_at_margin=0)
class Move(base.Task):
"""A quadruped task solved by moving forward at a designated speed."""
def __init__(self, desired_speed, random=None) -> None:
"""Initializes an instance of `Move`.
Args:
desired_speed: A float. If this value is zero, reward is given simply
for standing upright. Otherwise this specifies the horizontal velocity
at which the velocity-dependent reward component is maximized.
random: Optional, either a `numpy.random.RandomState` instance, an
integer seed for creating a new `RandomState`, or None to select a seed
automatically (default).
"""
self._desired_speed = desired_speed
super().__init__(random=random)
def initialize_episode(self, physics) -> None:
"""Sets the state of the environment at the start of each episode.
Args:
physics: An instance of `Physics`.
"""
# Initial configuration.
orientation = self.random.randn(4)
orientation /= np.linalg.norm(orientation)
_find_non_contacting_height(physics, orientation)
super().initialize_episode(physics)
def get_observation(self, physics) -> tp.Dict[str, Any]:
"""Returns an observation to the agent."""
return _common_observations(physics)
def get_reward(self, physics) -> Any:
"""Returns a reward to the agent."""
# Move reward term.
move_reward = rewards.tolerance(
physics.torso_velocity()[0],
bounds=(self._desired_speed, float('inf')),
margin=self._desired_speed,
value_at_margin=0.5,
sigmoid='linear')
return _upright_reward(physics) * move_reward
class Stand(base.Task):
"""A quadruped task solved by moving forward at a designated speed."""
def __init__(self, random=None) -> None:
"""Initializes an instance of `Move`.
Args:
desired_speed: A float. If this value is zero, reward is given simply
for standing upright. Otherwise this specifies the horizontal velocity
at which the velocity-dependent reward component is maximized.
random: Optional, either a `numpy.random.RandomState` instance, an
integer seed for creating a new `RandomState`, or None to select a seed
automatically (default).
"""
super().__init__(random=random)
def initialize_episode(self, physics) -> None:
"""Sets the state of the environment at the start of each episode.
Args:
physics: An instance of `Physics`.
"""
# Initial configuration.
orientation = self.random.randn(4)
orientation /= np.linalg.norm(orientation)
_find_non_contacting_height(physics, orientation)
super().initialize_episode(physics)
def get_observation(self, physics) -> tp.Dict[str, Any]:
"""Returns an observation to the agent."""
return _common_observations(physics)
def get_reward(self, physics) -> Any:
"""Returns a reward to the agent."""
return _upright_reward(physics)
class Jump(base.Task):
"""A quadruped task solved by moving forward at a designated speed."""
def __init__(self, desired_height, random=None) -> None:
"""Initializes an instance of `Move`.
Args:
desired_speed: A float. If this value is zero, reward is given simply
for standing upright. Otherwise this specifies the horizontal velocity
at which the velocity-dependent reward component is maximized.
random: Optional, either a `numpy.random.RandomState` instance, an
integer seed for creating a new `RandomState`, or None to select a seed
automatically (default).
"""
self._desired_height = desired_height
super().__init__(random=random)
def initialize_episode(self, physics) -> None:
"""Sets the state of the environment at the start of each episode.
Args:
physics: An instance of `Physics`.
"""
# Initial configuration.
orientation = self.random.randn(4)
orientation /= np.linalg.norm(orientation)
_find_non_contacting_height(physics, orientation)
super().initialize_episode(physics)
def get_observation(self, physics) -> tp.Dict[str, Any]:
"""Returns an observation to the agent."""
return _common_observations(physics)
def get_reward(self, physics) -> Any:
"""Returns a reward to the agent."""
# Move reward term.
jump_up = rewards.tolerance(
physics.com_height(),
bounds=(self._desired_height, float('inf')),
margin=self._desired_height,
value_at_margin=0.5,
sigmoid='linear')
return _upright_reward(physics) * jump_up
class Roll(base.Task):
"""A quadruped task solved by moving forward at a designated speed."""
def __init__(self, desired_speed, random=None) -> None:
"""Initializes an instance of `Move`.
Args:
desired_speed: A float. If this value is zero, reward is given simply
for standing upright. Otherwise this specifies the horizontal velocity
at which the velocity-dependent reward component is maximized.
random: Optional, either a `numpy.random.RandomState` instance, an
integer seed for creating a new `RandomState`, or None to select a seed
automatically (default).
"""
self._desired_speed = desired_speed
super().__init__(random=random)
def initialize_episode(self, physics) -> None:
"""Sets the state of the environment at the start of each episode.
Args:
physics: An instance of `Physics`.
"""
# Initial configuration.
orientation = self.random.randn(4)
orientation /= np.linalg.norm(orientation)
_find_non_contacting_height(physics, orientation)
super().initialize_episode(physics)
def get_observation(self, physics) -> tp.Dict[str, Any]:
"""Returns an observation to the agent."""
return _common_observations(physics)
def get_reward(self, physics) -> Any:
"""Returns a reward to the agent."""
# Move reward term.
move_reward = rewards.tolerance(
np.linalg.norm(physics.torso_velocity()),
bounds=(self._desired_speed, float('inf')),
margin=self._desired_speed,
value_at_margin=0.5,
sigmoid='linear')
return _upright_reward(physics) * move_reward
class Escape(base.Task):
"""A quadruped task solved by escaping a bowl-shaped terrain."""
def initialize_episode(self, physics) -> None:
"""Sets the state of the environment at the start of each episode.
Args:
physics: An instance of `Physics`.
"""
# Get heightfield resolution, assert that it is square.
res = physics.model.hfield_nrow[_HEIGHTFIELD_ID]
assert res == physics.model.hfield_ncol[_HEIGHTFIELD_ID]
# Sinusoidal bowl shape.
row_grid, col_grid = np.ogrid[-1:1:res * 1j, -1:1:res * 1j]
radius = np.clip(np.sqrt(col_grid**2 + row_grid**2), .04, 1)
bowl_shape = .5 - np.cos(2 * np.pi * radius) / 2
# Random smooth bumps.
terrain_size = 2 * physics.model.hfield_size[_HEIGHTFIELD_ID, 0]
bump_res = int(terrain_size / _TERRAIN_BUMP_SCALE)
bumps = self.random.uniform(_TERRAIN_SMOOTHNESS, 1, (bump_res, bump_res))
smooth_bumps = ndimage.zoom(bumps, res / float(bump_res))
# Terrain is elementwise product.
terrain = bowl_shape * smooth_bumps
start_idx = physics.model.hfield_adr[_HEIGHTFIELD_ID]
physics.model.hfield_data[start_idx:start_idx + res**2] = terrain.ravel()
super().initialize_episode(physics)
# If we have a rendering context, we need to re-upload the modified
# heightfield data.
if physics.contexts:
with physics.contexts.gl.make_current() as ctx:
ctx.call(mjlib.mjr_uploadHField,
physics.model.ptr,
physics.contexts.mujoco.ptr,
_HEIGHTFIELD_ID)
# Initial configuration.
orientation = self.random.randn(4)
orientation /= np.linalg.norm(orientation)
_find_non_contacting_height(physics, orientation)
def get_observation(self, physics) -> tp.Dict[str, Any]:
"""Returns an observation to the agent."""
obs = _common_observations(physics)
obs['origin'] = physics.origin()
obs['rangefinder'] = physics.rangefinder()
return obs
def get_reward(self, physics) -> Any:
"""Returns a reward to the agent."""
# Escape reward term.
terrain_size = physics.model.hfield_size[_HEIGHTFIELD_ID, 0]
escape_reward = rewards.tolerance(
physics.origin_distance(),
bounds=(terrain_size, float('inf')),
margin=terrain_size,
value_at_margin=0,
sigmoid='linear')
return _upright_reward(physics, deviation_angle=20) * escape_reward
class Fetch(base.Task):
"""A quadruped task solved by bringing a ball to the origin."""
def initialize_episode(self, physics) -> None:
"""Sets the state of the environment at the start of each episode.
Args:
physics: An instance of `Physics`.
"""
# Initial configuration, random azimuth and horizontal position.
azimuth = self.random.uniform(0, 2 * np.pi)
orientation = np.array((np.cos(azimuth / 2), 0, 0, np.sin(azimuth / 2)))
spawn_radius = 0.9 * physics.named.model.geom_size['floor', 0]
x_pos, y_pos = self.random.uniform(-spawn_radius, spawn_radius, size=(2,))
_find_non_contacting_height(physics, orientation, x_pos, y_pos)
# Initial ball state.
physics.named.data.qpos['ball_root'][:2] = self.random.uniform(
-spawn_radius, spawn_radius, size=(2,))
physics.named.data.qpos['ball_root'][2] = 2
physics.named.data.qvel['ball_root'][:2] = 5 * self.random.randn(2)
super().initialize_episode(physics)
def get_observation(self, physics) -> tp.Dict[str, Any]:
"""Returns an observation to the agent."""
obs = _common_observations(physics)
obs['ball_state'] = physics.ball_state()
obs['target_position'] = physics.target_position()
return obs
def get_reward(self, physics) -> Any:
"""Returns a reward to the agent."""
# Reward for moving close to the ball.
arena_radius = physics.named.model.geom_size['floor', 0] * np.sqrt(2)
workspace_radius = physics.named.model.site_size['workspace', 0]
ball_radius = physics.named.model.geom_size['ball', 0]
reach_reward = rewards.tolerance(
physics.self_to_ball_distance(),
bounds=(0, workspace_radius + ball_radius),
sigmoid='linear',
margin=arena_radius, value_at_margin=0)
# Reward for bringing the ball to the target.
target_radius = physics.named.model.site_size['target', 0]
fetch_reward = rewards.tolerance(
physics.ball_to_target_distance(),
bounds=(0, target_radius),
sigmoid='linear',
margin=arena_radius, value_at_margin=0)
reach_then_fetch = reach_reward * (0.5 + 0.5 * fetch_reward)
return _upright_reward(physics) * reach_then_fetch
|
controllable_agent-main
|
url_benchmark/custom_dmc_tasks/quadruped.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Point-mass domain."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from dm_control import mujoco
from dm_control.rl import control
from dm_control.suite import base
from dm_control.suite import common
from dm_control.suite.utils import randomizers
from dm_control.utils import containers
from dm_control.utils import rewards
from dm_control.utils import io as resources
from dm_env import specs
import numpy as np
import os
_DEFAULT_TIME_LIMIT = 20
SUITE = containers.TaggedTasks()
TASKS = [('reach_top_left', np.array([-0.15, 0.15])),
('reach_top_right', np.array([0.15, 0.15])),
('reach_bottom_left', np.array([-0.15, -0.15])),
('reach_bottom_right', np.array([0.15, -0.15]))]
def make(task,
task_kwargs=None,
environment_kwargs=None,
visualize_reward=False):
task_kwargs = task_kwargs or {}
if environment_kwargs is not None:
task_kwargs = task_kwargs.copy()
task_kwargs['environment_kwargs'] = environment_kwargs
env = SUITE[task](**task_kwargs)
env.task.visualize_reward = visualize_reward
return env
def get_model_and_assets(task):
"""Returns a tuple containing the model XML string and a dict of assets."""
root_dir = os.path.dirname(os.path.dirname(__file__))
xml = resources.GetResource(
os.path.join(root_dir, 'custom_dmc_tasks', f'point_mass_maze_{task}.xml'))
return xml, common.ASSETS
@SUITE.add('benchmarking')
def multi_goal(time_limit=_DEFAULT_TIME_LIMIT,
random=None,
environment_kwargs=None):
"""Returns the Run task."""
physics = Physics.from_xml_string(*get_model_and_assets('multi_goal'))
task = MultiTaskPointMassMaze(target_id=0, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
**environment_kwargs)
@SUITE.add('benchmarking')
def reach_top_left(time_limit=_DEFAULT_TIME_LIMIT,
random=None,
environment_kwargs=None):
"""Returns the Run task."""
physics = Physics.from_xml_string(*get_model_and_assets('reach_top_left'))
task = MultiTaskPointMassMaze(target_id=0, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
**environment_kwargs)
@SUITE.add('benchmarking')
def reach_top_right(time_limit=_DEFAULT_TIME_LIMIT,
random=None,
environment_kwargs=None):
"""Returns the Run task."""
physics = Physics.from_xml_string(*get_model_and_assets('reach_top_right'))
task = MultiTaskPointMassMaze(target_id=1, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
**environment_kwargs)
@SUITE.add('benchmarking')
def reach_bottom_left(time_limit=_DEFAULT_TIME_LIMIT,
random=None,
environment_kwargs=None):
"""Returns the Run task."""
physics = Physics.from_xml_string(*get_model_and_assets('reach_bottom_left'))
task = MultiTaskPointMassMaze(target_id=2, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
**environment_kwargs)
@SUITE.add('benchmarking')
def reach_bottom_right(time_limit=_DEFAULT_TIME_LIMIT,
random=None,
environment_kwargs=None):
"""Returns the Run task."""
physics = Physics.from_xml_string(*get_model_and_assets('reach_bottom_right'))
task = MultiTaskPointMassMaze(target_id=3, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
**environment_kwargs)
class Physics(mujoco.Physics):
"""physics for the point_mass domain."""
def mass_to_target_dist(self, target):
"""Returns the distance from mass to the target."""
d = target - self.named.data.geom_xpos['pointmass'][:2]
return np.linalg.norm(d)
class MultiTaskPointMassMaze(base.Task):
"""A point_mass `Task` to reach target with smooth reward."""
def __init__(self, target_id, random=None) -> None:
"""Initialize an instance of `PointMassMaze`.
Args:
randomize_gains: A `bool`, whether to randomize the actuator gains.
random: Optional, either a `numpy.random.RandomState` instance, an
integer seed for creating a new `RandomState`, or None to select a seed
automatically (default).
"""
self._target = TASKS[target_id][1]
super().__init__(random=random)
def initialize_episode(self, physics):
"""Sets the state of the environment at the start of each episode.
        The point mass starts at a random position in the top-left region of
        the maze and the target site is moved to the selected goal location.
Args:
physics: An instance of `mujoco.Physics`.
"""
randomizers.randomize_limited_and_rotational_joints(
physics, self.random)
        # Use the task's seeded RNG (rather than the global numpy RNG) so episodes
        # are reproducible for a given seed.
        physics.data.qpos[0] = self.random.uniform(-0.29, -0.15)
        physics.data.qpos[1] = self.random.uniform(0.15, 0.29)
# import ipdb; ipdb.set_trace()
physics.named.data.geom_xpos['target'][:2] = self._target
super().initialize_episode(physics)
def get_observation(self, physics):
"""Returns an observation of the state."""
obs = collections.OrderedDict()
obs['position'] = physics.position()
obs['velocity'] = physics.velocity()
return obs
def get_reward_spec(self):
return specs.Array(shape=(1,), dtype=np.float32, name='reward')
def get_reward(self, physics):
"""Returns a reward to the agent."""
target_size = .015
control_reward = rewards.tolerance(physics.control(), margin=1,
value_at_margin=0,
sigmoid='quadratic').mean()
small_control = (control_reward + 4) / 5
near_target = rewards.tolerance(physics.mass_to_target_dist(self._target),
bounds=(0, target_size), margin=target_size)
reward = near_target * small_control
return reward
|
controllable_agent-main
|
url_benchmark/custom_dmc_tasks/point_mass_maze.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Copyright 2019 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A task where the goal is to move the hand close to a target prop or site."""
import collections
from dm_control import composer
from dm_control.composer import initializers
from dm_control.composer.variation import distributions
from dm_control.entities import props
from dm_control.manipulation.shared import arenas
from dm_control.manipulation.shared import cameras
from dm_control.manipulation.shared import constants
from dm_control.manipulation.shared import observations
from dm_control.manipulation.shared import robots
from dm_control.manipulation.shared import workspaces
from dm_control.utils import rewards
from dm_env import specs
import numpy as np
_ReachWorkspace = collections.namedtuple(
'_ReachWorkspace', ['target_bbox', 'tcp_bbox', 'arm_offset'])
# Ensures that the props are not touching the table before settling.
_PROP_Z_OFFSET = 0.001
_DUPLO_WORKSPACE = _ReachWorkspace(
target_bbox=workspaces.BoundingBox(lower=(-0.1, -0.1, _PROP_Z_OFFSET),
upper=(0.1, 0.1, _PROP_Z_OFFSET)),
tcp_bbox=workspaces.BoundingBox(lower=(-0.1, -0.1, 0.2),
upper=(0.1, 0.1, 0.4)),
arm_offset=robots.ARM_OFFSET)
_SITE_WORKSPACE = _ReachWorkspace(
target_bbox=workspaces.BoundingBox(lower=(-0.2, -0.2, 0.02),
upper=(0.2, 0.2, 0.4)),
tcp_bbox=workspaces.BoundingBox(lower=(-0.2, -0.2, 0.02),
upper=(0.2, 0.2, 0.4)),
arm_offset=robots.ARM_OFFSET)
_TARGET_RADIUS = 0.05
_TIME_LIMIT = 10.
TASKS = [('reach_top_left', np.array([-0.09, 0.09, _PROP_Z_OFFSET])),
('reach_top_right', np.array([0.09, 0.09, _PROP_Z_OFFSET])),
('reach_bottom_left', np.array([-0.09, -0.09, _PROP_Z_OFFSET])),
('reach_bottom_right', np.array([0.09, -0.09, _PROP_Z_OFFSET]))]
def make(task_id, obs_type, seed):
obs_settings = observations.VISION if obs_type == 'pixels' else observations.PERFECT_FEATURES
task = _reach(task_id, obs_settings=obs_settings, use_site=True)
return composer.Environment(task,
time_limit=_TIME_LIMIT,
random_state=seed)
class MultiTaskReach(composer.Task):
"""Bring the hand close to a target prop or site."""
def __init__(self, task_id, arena, arm, hand, prop, obs_settings,
workspace, control_timestep):
"""Initializes a new `Reach` task.
        Args:
          task_id: A string, either one of the task names in `TASKS` or
            'reach_multitask' to use all of the targets.
          arena: `composer.Entity` instance.
arm: `robot_base.RobotArm` instance.
hand: `robot_base.RobotHand` instance.
prop: `composer.Entity` instance specifying the prop to reach to, or None
in which case the target is a fixed site whose position is specified by
the workspace.
obs_settings: `observations.ObservationSettings` instance.
workspace: `_ReachWorkspace` specifying the placement of the prop and TCP.
control_timestep: Float specifying the control timestep in seconds.
"""
self._arena = arena
self._arm = arm
self._hand = hand
self._arm.attach(self._hand)
self._arena.attach_offset(self._arm, offset=workspace.arm_offset)
self.control_timestep = control_timestep
self._tcp_initializer = initializers.ToolCenterPointInitializer(
self._hand,
self._arm,
position=distributions.Uniform(*workspace.tcp_bbox),
quaternion=workspaces.DOWN_QUATERNION)
# Add custom camera observable.
self._task_observables = cameras.add_camera_observables(
arena, obs_settings, cameras.FRONT_CLOSE)
if task_id == 'reach_multitask':
self._targets = [target for (_, target) in TASKS]
else:
self._targets = [
target for (task, target) in TASKS if task == task_id
]
assert len(self._targets) > 0
        #target_pos_distribution = distributions.Uniform(*TASKS[task_id])
        # The `prop` branch below needs this variable defined; sampling uniformly
        # within the workspace's target bbox (as for the TCP above) is an assumption,
        # since the original left it undefined.
        target_pos_distribution = distributions.Uniform(*workspace.target_bbox)
self._prop = prop
if prop:
# The prop itself is used to visualize the target location.
self._make_target_site(parent_entity=prop, visible=False)
self._target = self._arena.add_free_entity(prop)
self._prop_placer = initializers.PropPlacer(
props=[prop],
position=target_pos_distribution,
quaternion=workspaces.uniform_z_rotation,
settle_physics=True)
else:
if len(self._targets) == 1:
self._target = self._make_target_site(parent_entity=arena,
visible=True)
#obs = observable.MJCFFeature('pos', self._target)
# obs.configure(**obs_settings.prop_pose._asdict())
#self._task_observables['target_position'] = obs
# Add sites for visualizing the prop and target bounding boxes.
workspaces.add_bbox_site(body=self.root_entity.mjcf_model.worldbody,
lower=workspace.tcp_bbox.lower,
upper=workspace.tcp_bbox.upper,
rgba=constants.GREEN,
name='tcp_spawn_area')
workspaces.add_bbox_site(body=self.root_entity.mjcf_model.worldbody,
lower=workspace.target_bbox.lower,
upper=workspace.target_bbox.upper,
rgba=constants.BLUE,
name='target_spawn_area')
def _make_target_site(self, parent_entity, visible):
return workspaces.add_target_site(
body=parent_entity.mjcf_model.worldbody,
radius=_TARGET_RADIUS,
visible=visible,
rgba=constants.RED,
name='target_site')
@property
def root_entity(self):
return self._arena
@property
def arm(self):
return self._arm
@property
def hand(self):
return self._hand
def get_reward_spec(self):
n = len(self._targets)
return specs.Array(shape=(n,), dtype=np.float32, name='reward')
@property
def task_observables(self):
return self._task_observables
def get_reward(self, physics):
hand_pos = physics.bind(self._hand.tool_center_point).xpos
rews = []
for target_pos in self._targets:
distance = np.linalg.norm(hand_pos - target_pos)
reward = rewards.tolerance(distance,
bounds=(0, _TARGET_RADIUS),
margin=_TARGET_RADIUS)
rews.append(reward)
rews = np.array(rews).astype(np.float32)
if len(self._targets) == 1:
return rews[0]
return rews
def initialize_episode(self, physics, random_state):
self._hand.set_grasp(physics, close_factors=random_state.uniform())
self._tcp_initializer(physics, random_state)
if self._prop:
self._prop_placer(physics, random_state)
else:
if len(self._targets) == 1:
physics.bind(self._target).pos = self._targets[0]
def _reach(task_id, obs_settings, use_site):
"""Configure and instantiate a `Reach` task.
Args:
obs_settings: An `observations.ObservationSettings` instance.
use_site: Boolean, if True then the target will be a fixed site, otherwise
it will be a moveable Duplo brick.
Returns:
An instance of `reach.Reach`.
"""
arena = arenas.Standard()
arm = robots.make_arm(obs_settings=obs_settings)
hand = robots.make_hand(obs_settings=obs_settings)
if use_site:
workspace = _SITE_WORKSPACE
prop = None
else:
workspace = _DUPLO_WORKSPACE
prop = props.Duplo(observable_options=observations.make_options(
obs_settings, observations.FREEPROP_OBSERVABLES))
task = MultiTaskReach(task_id,
arena=arena,
arm=arm,
hand=hand,
prop=prop,
obs_settings=obs_settings,
workspace=workspace,
control_timestep=constants.CONTROL_TIMESTEP)
return task
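# The sketch below is an added illustration (not part of the original file): it
# shows how the `make` entry point above might be driven. The function name, task
# name, observation type string, seed, and step count are arbitrary example values.
def _example_rollout(task_id: str = 'reach_top_left', num_steps: int = 5) -> None:
    """Illustrative only: build a reach environment and run a few zero-action steps."""
    env = make(task_id, obs_type='states', seed=0)
    timestep = env.reset()
    spec = env.action_spec()
    zero_action = np.zeros(spec.shape, dtype=spec.dtype)
    for _ in range(num_steps):
        timestep = env.step(zero_action)
        # `timestep.reward` is the scalar (or per-target vector) reward computed
        # in `MultiTaskReach.get_reward`.
        print(timestep.reward)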
|
controllable_agent-main
|
url_benchmark/custom_dmc_tasks/jaco.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Planar Walker Domain."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from typing import Any, Tuple
import typing as tp
import os
from dm_control import mujoco
from dm_control.rl import control
from dm_control.suite import base
from dm_control.suite import common
from dm_control.suite.utils import randomizers
from dm_control.utils import containers
from dm_control.utils import rewards
from dm_control.utils import io as resources
_CONTROL_TIMESTEP: float
_DEFAULT_TIME_LIMIT: int
_RUN_SPEED: int
_SPIN_SPEED: int
_STAND_HEIGHT: float
_WALK_SPEED: int
# from dm_control import suite # TODO useless?
_DEFAULT_TIME_LIMIT = 25
_CONTROL_TIMESTEP = .025
# Minimal height of torso over foot above which stand reward is 1.
_STAND_HEIGHT = 1.2
# Horizontal speeds (meters/second) above which move reward is 1.
_WALK_SPEED = 1
_RUN_SPEED = 8
_SPIN_SPEED = 5
SUITE = containers.TaggedTasks()
def make(task,
task_kwargs=None,
environment_kwargs=None,
visualize_reward: bool = False):
task_kwargs = task_kwargs or {}
if environment_kwargs is not None:
task_kwargs = task_kwargs.copy()
task_kwargs['environment_kwargs'] = environment_kwargs
env = SUITE[task](**task_kwargs)
env.task.visualize_reward = visualize_reward
return env
def get_model_and_assets() -> Tuple[Any, Any]:
"""Returns a tuple containing the model XML string and a dict of assets."""
root_dir = os.path.dirname(os.path.dirname(__file__))
xml = resources.GetResource(os.path.join(root_dir, 'custom_dmc_tasks',
'walker.xml'))
return xml, common.ASSETS
@SUITE.add('benchmarking')
def flip(time_limit: int = _DEFAULT_TIME_LIMIT,
random=None,
environment_kwargs=None):
"""Returns the Run task."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = PlanarWalker(move_speed=_RUN_SPEED,
forward=True,
flip=True,
random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
control_timestep=_CONTROL_TIMESTEP,
**environment_kwargs)
class Physics(mujoco.Physics):
"""Physics simulation with additional features for the Walker domain."""
def torso_upright(self) -> Any:
"""Returns projection from z-axes of torso to the z-axes of world."""
return self.named.data.xmat['torso', 'zz']
def torso_height(self) -> Any:
"""Returns the height of the torso."""
return self.named.data.xpos['torso', 'z']
def horizontal_velocity(self) -> Any:
"""Returns the horizontal velocity of the center-of-mass."""
return self.named.data.sensordata['torso_subtreelinvel'][0]
def orientations(self) -> Any:
"""Returns planar orientations of all bodies."""
return self.named.data.xmat[1:, ['xx', 'xz']].ravel()
def angmomentum(self) -> Any:
"""Returns the angular momentum of torso of the Cheetah about Y axis."""
return self.named.data.subtree_angmom['torso'][1]
class PlanarWalker(base.Task):
"""A planar walker task."""
def __init__(self, move_speed, forward=True, flip=False, random=None) -> None:
"""Initializes an instance of `PlanarWalker`.
Args:
move_speed: A float. If this value is zero, reward is given simply for
standing up. Otherwise this specifies a target horizontal velocity for
the walking task.
random: Optional, either a `numpy.random.RandomState` instance, an
integer seed for creating a new `RandomState`, or None to select a seed
automatically (default).
"""
self._move_speed = move_speed
self._forward = 1 if forward else -1
self._flip = flip
super(PlanarWalker, self).__init__(random=random)
def initialize_episode(self, physics) -> None:
"""Sets the state of the environment at the start of each episode.
In 'standing' mode, use initial orientation and small velocities.
In 'random' mode, randomize joint angles and let fall to the floor.
Args:
physics: An instance of `Physics`.
"""
randomizers.randomize_limited_and_rotational_joints(
physics, self.random)
super(PlanarWalker, self).initialize_episode(physics)
def get_observation(self, physics) -> tp.Dict[str, Any]:
"""Returns an observation of body orientations, height and velocites."""
obs = collections.OrderedDict()
obs['orientations'] = physics.orientations()
obs['height'] = physics.torso_height()
obs['velocity'] = physics.velocity()
return obs
def get_reward(self, physics) -> Any:
"""Returns a reward to the agent."""
standing = rewards.tolerance(physics.torso_height(),
bounds=(_STAND_HEIGHT, float('inf')),
margin=_STAND_HEIGHT / 2)
upright = (1 + physics.torso_upright()) / 2
stand_reward = (3 * standing + upright) / 4
if self._flip:
move_reward = rewards.tolerance(self._forward *
physics.angmomentum(),
bounds=(_SPIN_SPEED, float('inf')),
margin=_SPIN_SPEED,
value_at_margin=0,
sigmoid='linear')
else:
move_reward = rewards.tolerance(
self._forward * physics.horizontal_velocity(),
bounds=(self._move_speed, float('inf')),
margin=self._move_speed / 2,
value_at_margin=0.5,
sigmoid='linear')
return stand_reward * (5 * move_reward + 1) / 6
|
controllable_agent-main
|
url_benchmark/custom_dmc_tasks/walker.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
from . import cheetah
from . import walker
from . import hopper
from . import quadruped
from . import jaco
from . import point_mass_maze
def make(domain, task,
task_kwargs=None,
environment_kwargs=None,
visualize_reward: bool = False):
if domain == 'cheetah':
return cheetah.make(task,
task_kwargs=task_kwargs,
environment_kwargs=environment_kwargs,
visualize_reward=visualize_reward)
elif domain == 'walker':
return walker.make(task,
task_kwargs=task_kwargs,
environment_kwargs=environment_kwargs,
visualize_reward=visualize_reward)
elif domain == 'hopper':
return hopper.make(task,
task_kwargs=task_kwargs,
environment_kwargs=environment_kwargs,
visualize_reward=visualize_reward)
elif domain == 'quadruped':
return quadruped.make(task,
task_kwargs=task_kwargs,
environment_kwargs=environment_kwargs,
visualize_reward=visualize_reward)
elif domain == 'point_mass_maze':
return point_mass_maze.make(task,
task_kwargs=task_kwargs,
environment_kwargs=environment_kwargs,
visualize_reward=visualize_reward)
else:
        raise ValueError(f'domain {domain!r} not found')
def make_jaco(task, obs_type, seed) -> tp.Any:
return jaco.make(task, obs_type, seed)
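# The sketch below is an added illustration (not part of the original module): it
# shows how the dispatcher above might be used, assuming dm_control and the
# bundled XML assets are available. The function name and domain/task defaults
# are example values only.
def _example_episode(domain: str = 'walker', task: str = 'flip') -> float:
    """Illustrative only: run a zero-action episode and return the summed reward."""
    import numpy as np  # local import to keep the example self-contained
    env = make(domain, task)
    timestep = env.reset()
    spec = env.action_spec()
    total_reward = 0.0
    while not timestep.last():
        timestep = env.step(np.zeros(spec.shape, dtype=spec.dtype))
        total_reward += float(timestep.reward or 0.0)
    return total_reward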
|
controllable_agent-main
|
url_benchmark/custom_dmc_tasks/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Cheetah Domain."""
import collections
import os
import typing as tp
from typing import Any, Tuple
from dm_control import mujoco
from dm_control.rl import control
from dm_control.suite import base
from dm_control.suite import common
from dm_control.utils import containers
from dm_control.utils import rewards
from dm_control.utils import io as resources
_DEFAULT_TIME_LIMIT: int
_RUN_SPEED: int
_SPIN_SPEED: int
# How long the simulation will run, in seconds.
_DEFAULT_TIME_LIMIT = 10
# Running speed above which reward is 1.
_RUN_SPEED = 10
_WALK_SPEED = 2
_SPIN_SPEED = 5
SUITE = containers.TaggedTasks()
def make(task,
task_kwargs=None,
environment_kwargs=None,
visualize_reward: bool = False):
task_kwargs = task_kwargs or {}
if environment_kwargs is not None:
task_kwargs = task_kwargs.copy()
task_kwargs['environment_kwargs'] = environment_kwargs
env = SUITE[task](**task_kwargs)
env.task.visualize_reward = visualize_reward
return env
def get_model_and_assets() -> Tuple[Any, Any]:
"""Returns a tuple containing the model XML string and a dict of assets."""
root_dir = os.path.dirname(os.path.dirname(__file__))
xml = resources.GetResource(
os.path.join(root_dir, 'custom_dmc_tasks', 'cheetah.xml'))
return xml, common.ASSETS
@SUITE.add('benchmarking')
def walk(time_limit: int = _DEFAULT_TIME_LIMIT,
random=None,
environment_kwargs=None):
"""Returns the run task."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Cheetah(move_speed=_WALK_SPEED, forward=True, flip=False, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
**environment_kwargs)
@SUITE.add('benchmarking')
def walk_backward(time_limit: int = _DEFAULT_TIME_LIMIT,
random=None,
environment_kwargs=None):
"""Returns the run task."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Cheetah(move_speed=_WALK_SPEED, forward=False, flip=False, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
**environment_kwargs)
@SUITE.add('benchmarking')
def run_backward(time_limit: int = _DEFAULT_TIME_LIMIT,
random=None,
environment_kwargs=None):
"""Returns the run task."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Cheetah(forward=False, flip=False, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
**environment_kwargs)
@SUITE.add('benchmarking')
def flip(time_limit: int = _DEFAULT_TIME_LIMIT,
random=None,
environment_kwargs=None):
"""Returns the run task."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Cheetah(move_speed=_WALK_SPEED, forward=True, flip=True, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
**environment_kwargs)
@SUITE.add('benchmarking')
def flip_backward(time_limit: int = _DEFAULT_TIME_LIMIT,
random=None,
environment_kwargs=None):
"""Returns the run task."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Cheetah(move_speed=_WALK_SPEED, forward=False, flip=True, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
**environment_kwargs)
class Physics(mujoco.Physics):
"""Physics simulation with additional features for the Cheetah domain."""
def speed(self) -> Any:
"""Returns the horizontal speed of the Cheetah."""
return self.named.data.sensordata['torso_subtreelinvel'][0]
def angmomentum(self) -> Any:
"""Returns the angular momentum of torso of the Cheetah about Y axis."""
return self.named.data.subtree_angmom['torso'][1]
class Cheetah(base.Task):
"""A `Task` to train a running Cheetah."""
def __init__(self, move_speed=_RUN_SPEED, forward=True, flip=False, random=None) -> None:
self._move_speed = move_speed
self._forward = 1 if forward else -1
self._flip = flip
super(Cheetah, self).__init__(random=random)
self._timeout_progress = 0
def initialize_episode(self, physics) -> None:
"""Sets the state of the environment at the start of each episode."""
# The indexing below assumes that all joints have a single DOF.
assert physics.model.nq == physics.model.njnt
is_limited = physics.model.jnt_limited == 1
lower, upper = physics.model.jnt_range[is_limited].T
physics.data.qpos[is_limited] = self.random.uniform(lower, upper)
# Stabilize the model before the actual simulation.
for _ in range(200):
physics.step()
physics.data.time = 0
self._timeout_progress = 0
super().initialize_episode(physics)
def get_observation(self, physics) -> tp.Dict[str, Any]:
"""Returns an observation of the state, ignoring horizontal position."""
obs = collections.OrderedDict()
# Ignores horizontal position to maintain translational invariance.
obs['position'] = physics.data.qpos[1:].copy()
obs['velocity'] = physics.velocity()
return obs
def get_reward(self, physics) -> Any:
"""Returns a reward to the agent."""
if self._flip:
reward = rewards.tolerance(self._forward * physics.angmomentum(),
bounds=(_SPIN_SPEED, float('inf')),
margin=_SPIN_SPEED,
value_at_margin=0,
sigmoid='linear')
else:
reward = rewards.tolerance(self._forward * physics.speed(),
bounds=(self._move_speed, float('inf')),
margin=self._move_speed,
value_at_margin=0,
sigmoid='linear')
return reward
|
controllable_agent-main
|
url_benchmark/custom_dmc_tasks/cheetah.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Hopper domain."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import typing as tp
from typing import Any, Tuple
import numpy as np
from dm_control import mujoco
from dm_control.rl import control
from dm_control.suite import base
from dm_control.suite import common
from dm_control.suite.utils import randomizers
from dm_control.utils import containers
from dm_control.utils import rewards
from dm_control.utils import io as resources
_CONTROL_TIMESTEP: float
_DEFAULT_TIME_LIMIT: int
_HOP_SPEED: int
_SPIN_SPEED: int
_STAND_HEIGHT: float
SUITE = containers.TaggedTasks()
_CONTROL_TIMESTEP = .02 # (Seconds)
# Default duration of an episode, in seconds.
_DEFAULT_TIME_LIMIT = 20
# Minimal height of torso over foot above which stand reward is 1.
_STAND_HEIGHT = 0.6
# Hopping speed above which hop reward is 1.
_HOP_SPEED = 2
_SPIN_SPEED = 5
def make(task,
task_kwargs=None,
environment_kwargs=None,
visualize_reward: bool = False):
task_kwargs = task_kwargs or {}
if environment_kwargs is not None:
task_kwargs = task_kwargs.copy()
task_kwargs['environment_kwargs'] = environment_kwargs
env = SUITE[task](**task_kwargs)
env.task.visualize_reward = visualize_reward
return env
def get_model_and_assets() -> Tuple[Any, Any]:
"""Returns a tuple containing the model XML string and a dict of assets."""
root_dir = os.path.dirname(os.path.dirname(__file__))
xml = resources.GetResource(
os.path.join(root_dir, 'custom_dmc_tasks', 'hopper.xml'))
return xml, common.ASSETS
@SUITE.add('benchmarking')
def hop_backward(time_limit: int = _DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns a Hopper that strives to hop forward."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Hopper(hopping=True, forward=False, flip=False, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
control_timestep=_CONTROL_TIMESTEP,
**environment_kwargs)
@SUITE.add('benchmarking')
def flip(time_limit: int = _DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns a Hopper that strives to hop forward."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Hopper(hopping=True, forward=True, flip=True, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
control_timestep=_CONTROL_TIMESTEP,
**environment_kwargs)
@SUITE.add('benchmarking')
def flip_backward(time_limit: int = _DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns a Hopper that strives to hop forward."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Hopper(hopping=True, forward=False, flip=True, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
control_timestep=_CONTROL_TIMESTEP,
**environment_kwargs)
class Physics(mujoco.Physics):
"""Physics simulation with additional features for the Hopper domain."""
def height(self) -> Any:
"""Returns height of torso with respect to foot."""
return (self.named.data.xipos['torso', 'z'] -
self.named.data.xipos['foot', 'z'])
def speed(self) -> Any:
"""Returns horizontal speed of the Hopper."""
return self.named.data.sensordata['torso_subtreelinvel'][0]
def touch(self) -> Any:
"""Returns the signals from two foot touch sensors."""
return np.log1p(self.named.data.sensordata[['touch_toe',
'touch_heel']])
def angmomentum(self) -> Any:
"""Returns the angular momentum of torso of the Cheetah about Y axis."""
return self.named.data.subtree_angmom['torso'][1]
class Hopper(base.Task):
"""A Hopper's `Task` to train a standing and a jumping Hopper."""
def __init__(self, hopping, forward=True, flip=False, random=None) -> None:
"""Initialize an instance of `Hopper`.
Args:
          hopping: Boolean, if True the task is to hop, otherwise it is to
            balance upright.
          forward: Boolean, whether to hop/flip forward (True) or backward (False).
          flip: Boolean, if True the reward is based on angular momentum (flipping)
            rather than horizontal speed.
random: Optional, either a `numpy.random.RandomState` instance, an
integer seed for creating a new `RandomState`, or None to select a seed
automatically (default).
"""
self._hopping = hopping
self._forward = 1 if forward else -1
self._flip = flip
self._timeout_progress = 0
super(Hopper, self).__init__(random=random)
def initialize_episode(self, physics) -> None:
"""Sets the state of the environment at the start of each episode."""
randomizers.randomize_limited_and_rotational_joints(
physics, self.random)
self._timeout_progress = 0
super(Hopper, self).initialize_episode(physics)
def get_observation(self, physics) -> tp.Dict[str, Any]:
"""Returns an observation of positions, velocities and touch sensors."""
obs = collections.OrderedDict()
# Ignores horizontal position to maintain translational invariance:
obs['position'] = physics.data.qpos[1:].copy()
obs['velocity'] = physics.velocity()
obs['touch'] = physics.touch()
return obs
def get_reward(self, physics) -> Any:
"""Returns a reward applicable to the performed task."""
standing = rewards.tolerance(physics.height(), (_STAND_HEIGHT, 2))
assert self._hopping
if self._flip:
hopping = rewards.tolerance(self._forward * physics.angmomentum(),
bounds=(_SPIN_SPEED, float('inf')),
margin=_SPIN_SPEED,
value_at_margin=0,
sigmoid='linear')
else:
hopping = rewards.tolerance(self._forward * physics.speed(),
bounds=(_HOP_SPEED, float('inf')),
margin=_HOP_SPEED / 2,
value_at_margin=0.5,
sigmoid='linear')
return standing * hopping
|
controllable_agent-main
|
url_benchmark/custom_dmc_tasks/hopper.py
|
"""
Trigger the conda-forge.github.io Travis job to restart.
"""
import argparse
import os
import requests
import six
import conda_smithy.ci_register
def rebuild_travis(repo_slug):
headers = conda_smithy.ci_register.travis_headers()
# If we don't specify the API version, we get a 404.
# Also fix the accepted content type.
headers["Accept"] = "application/json"
headers["Travis-API-Version"] = "3"
# Trigger a build on `master`.
encoded_slug = six.moves.urllib.parse.quote(repo_slug, safe='')
url = 'https://api.travis-ci.org/repo/{}/requests'.format(encoded_slug)
response = requests.post(
url,
json={
"request": {
"branch": "master",
"message": "Triggering build from staged-recipes",
}
},
headers=headers
)
if response.status_code != 201:
print(response.content)
response.raise_for_status()
def main(argv):
parser = argparse.ArgumentParser(description="Trigger Travis CI build.")
parser.add_argument("slug", type=str, help="repo to trigger build for")
args = parser.parse_args(argv[1:])
rebuild_travis(args.slug)
return 0
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
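# Example invocation (added illustration; the slug below is only a placeholder
# and must name an existing Travis-enabled repository):
#
#     python .travis_scripts/trigger_travis_build.py conda-forge/conda-forge.github.io
#
# The call assumes Travis credentials are already configured for conda-smithy,
# since `conda_smithy.ci_register.travis_headers()` is used for authentication.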
|
staged-recipes-master
|
.travis_scripts/trigger_travis_build.py
|
#!/usr/bin/env python
"""
Convert all recipes into feedstocks.
This script is to be run in a TravisCI context, with all secret environment variables defined (BINSTAR_TOKEN, GH_TOKEN)
Such as:
export GH_TOKEN=$(cat ~/.conda-smithy/github.token)
"""
from __future__ import print_function
from conda_build.metadata import MetaData
from contextlib import contextmanager
from datetime import datetime
from github import Github, GithubException
import os.path
import shutil
import subprocess
import sys
import tempfile
import traceback
# Enable DEBUG to run the diagnostics, without actually creating new feedstocks.
DEBUG = False
recipe_directory_name = 'recipes'
def list_recipes():
if os.path.isdir(recipe_directory_name):
recipes = os.listdir(recipe_directory_name)
else:
recipes = []
for recipe_dir in recipes:
# We don't list the "example" feedstock. It is an example, and is there
# to be helpful.
if recipe_dir.startswith('example'):
continue
path = os.path.abspath(os.path.join(recipe_directory_name, recipe_dir))
yield path, MetaData(path).name()
@contextmanager
def tmp_dir(*args, **kwargs):
temp_dir = tempfile.mkdtemp(*args, **kwargs)
try:
yield temp_dir
finally:
shutil.rmtree(temp_dir)
def repo_exists(gh, organization, name):
# Use the organization provided.
org = gh.get_organization(organization)
try:
org.get_repo(name)
return True
except GithubException as e:
if e.status == 404:
return False
raise
def print_rate_limiting_info(gh):
# Compute some info about our GitHub API Rate Limit.
# Note that it doesn't count against our limit to
# get this info. So, we should be doing this regularly
# to better know when it is going to run out. Also,
# this will help us better understand where we are
# spending it and how to better optimize it.
# Get GitHub API Rate Limit usage and total
gh_api_remaining = gh.get_rate_limit().rate.remaining
gh_api_total = gh.get_rate_limit().rate.limit
# Compute time until GitHub API Rate Limit reset
gh_api_reset_time = gh.get_rate_limit().rate.reset
gh_api_reset_time -= datetime.utcnow()
print("")
print("GitHub API Rate Limit Info:")
print("---------------------------")
print("Currently remaining {remaining} out of {total}.".format(remaining=gh_api_remaining, total=gh_api_total))
print("Will reset in {time}.".format(time=gh_api_reset_time))
print("")
if __name__ == '__main__':
exit_code = 0
is_merged_pr = (os.environ.get('TRAVIS_BRANCH') == 'master' and os.environ.get('TRAVIS_PULL_REQUEST') == 'false')
smithy_conf = os.path.expanduser('~/.conda-smithy')
if not os.path.exists(smithy_conf):
os.mkdir(smithy_conf)
def write_token(name, token):
with open(os.path.join(smithy_conf, name + '.token'), 'w') as fh:
fh.write(token)
if 'APPVEYOR_TOKEN' in os.environ:
write_token('appveyor', os.environ['APPVEYOR_TOKEN'])
if 'CIRCLE_TOKEN' in os.environ:
write_token('circle', os.environ['CIRCLE_TOKEN'])
gh = None
if 'GH_TOKEN' in os.environ:
write_token('github', os.environ['GH_TOKEN'])
gh = Github(os.environ['GH_TOKEN'])
# Get our initial rate limit info.
print_rate_limiting_info(gh)
owner_info = ['--organization', 'conda-forge']
print('Calculating the recipes which need to be turned into feedstocks.')
with tmp_dir('__feedstocks') as feedstocks_dir:
feedstock_dirs = []
for recipe_dir, name in list_recipes():
feedstock_dir = os.path.join(feedstocks_dir, name + '-feedstock')
print('Making feedstock for {}'.format(name))
try:
subprocess.check_call(['conda', 'smithy', 'init', recipe_dir,
'--feedstock-directory', feedstock_dir])
except subprocess.CalledProcessError:
traceback.print_exception(*sys.exc_info())
continue
if not is_merged_pr:
# We just want to check that conda-smithy is doing its thing without having any metadata issues.
continue
feedstock_dirs.append([feedstock_dir, name, recipe_dir])
subprocess.check_call(['git', 'remote', 'add', 'upstream_with_token',
'https://conda-forge-manager:{}@github.com/conda-forge/{}-feedstock'.format(os.environ['GH_TOKEN'],
name)],
cwd=feedstock_dir)
# Sometimes we already have the feedstock created. We need to deal with that case.
if repo_exists(gh, 'conda-forge', name + '-feedstock'):
subprocess.check_call(['git', 'fetch', 'upstream_with_token'], cwd=feedstock_dir)
subprocess.check_call(['git', 'branch', '-m', 'master', 'old'], cwd=feedstock_dir)
try:
subprocess.check_call(['git', 'checkout', '-b', 'master', 'upstream_with_token/master'], cwd=feedstock_dir)
except subprocess.CalledProcessError:
# Sometimes, we have a repo, but there are no commits on it! Just catch that case.
                    subprocess.check_call(['git', 'checkout', '-b', 'master'], cwd=feedstock_dir)
subprocess.check_call(['conda', 'smithy', 'register-github', feedstock_dir] + owner_info + ['--extra-admin-users', 'cf-blacksmithy'])
# Break the previous loop to allow the TravisCI registering to take place only once per function call.
# Without this, intermittent failures to synch the TravisCI repos ensue.
# Hang on to any CI registration errors that occur and raise them at the end.
for num, (feedstock_dir, name, recipe_dir) in enumerate(feedstock_dirs):
if num >= 20:
exit_code = 1
break
# Try to register each feedstock with CI.
# However sometimes their APIs have issues for whatever reason.
# In order to bank our progress, we note the error and handle it.
# After going through all the recipes and removing the converted ones,
# we fail the build so that people are aware that things did not clear.
try:
subprocess.check_call(['conda', 'smithy', 'register-ci', '--feedstock_directory', feedstock_dir] + owner_info)
except subprocess.CalledProcessError:
exit_code = 1
traceback.print_exception(*sys.exc_info())
continue
subprocess.check_call(['conda', 'smithy', 'rerender'], cwd=feedstock_dir)
subprocess.check_call(['git', 'commit', '-am', "Re-render the feedstock after CI registration."], cwd=feedstock_dir)
for i in range(5):
try:
# Capture the output, as it may contain the GH_TOKEN.
out = subprocess.check_output(['git', 'push', 'upstream_with_token', 'HEAD:master'], cwd=feedstock_dir,
stderr=subprocess.STDOUT)
break
except subprocess.CalledProcessError:
pass
# Likely another job has already pushed to this repo.
# Place our changes on top of theirs and try again.
out = subprocess.check_output(['git', 'fetch', 'upstream_with_token', 'master'], cwd=feedstock_dir,
stderr=subprocess.STDOUT)
try:
subprocess.check_call(['git', 'rebase', 'upstream_with_token/master', 'master'], cwd=feedstock_dir)
except subprocess.CalledProcessError:
# Handle rebase failure by choosing the changes in `master`.
subprocess.check_call(['git', 'checkout', 'master', '--', '.'], cwd=feedstock_dir)
subprocess.check_call(['git', 'rebase', '--continue'], cwd=feedstock_dir)
# Remove this recipe from the repo.
if is_merged_pr:
subprocess.check_call(['git', 'rm', '-rf', recipe_dir])
# Update status based on the remote.
subprocess.check_call(['git', 'stash', '--keep-index', '--include-untracked'])
subprocess.check_call(['git', 'fetch'])
subprocess.check_call(['git', 'rebase', '--autostash'])
subprocess.check_call(['git', 'add', '.'])
try:
subprocess.check_call(['git', 'stash', 'pop'])
except subprocess.CalledProcessError:
# In case there was nothing to stash.
# Finish quietly.
pass
# Parse `git status --porcelain` to handle some merge conflicts and generate the removed recipe list.
changed_files = subprocess.check_output(['git', 'status', '--porcelain', recipe_directory_name],
universal_newlines=True)
changed_files = changed_files.splitlines()
# Add all files from AU conflicts. They are new files that we weren't tracking previously.
# Adding them resolves the conflict and doesn't actually add anything to the index.
new_file_conflicts = filter(lambda _: _.startswith("AU "), changed_files)
new_file_conflicts = map(lambda _ : _.replace("AU", "", 1).lstrip(), new_file_conflicts)
for each_new_file in new_file_conflicts:
subprocess.check_call(['git', 'add', each_new_file])
# Generate a fresh listing of recipes removed.
#
# * Each line we get back is a change to a file in the recipe directory.
# * We narrow the list down to recipes that are staged for deletion (ignores examples).
# * Then we clean up the list so that it only has the recipe names.
removed_recipes = filter(lambda _: _.startswith("D "), changed_files)
removed_recipes = map(lambda _ : _.replace("D", "", 1).lstrip(), removed_recipes)
removed_recipes = map(lambda _ : os.path.relpath(_, recipe_directory_name), removed_recipes)
removed_recipes = map(lambda _ : _.split(os.path.sep)[0], removed_recipes)
removed_recipes = sorted(set(removed_recipes))
# Commit any removed packages.
subprocess.check_call(['git', 'status'])
if removed_recipes:
msg = ('Removed recipe{s} ({}) after converting into feedstock{s}.'
''.format(', '.join(removed_recipes),
s=('s' if len(removed_recipes) > 1 else '')))
msg += ' [ci skip]'
if is_merged_pr:
# Capture the output, as it may contain the GH_TOKEN.
out = subprocess.check_output(['git', 'remote', 'add', 'upstream_with_token',
'https://conda-forge-manager:{}@github.com/conda-forge/staged-recipes'.format(os.environ['GH_TOKEN'])],
stderr=subprocess.STDOUT)
subprocess.check_call(['git', 'commit', '-m', msg])
# Capture the output, as it may contain the GH_TOKEN.
branch = os.environ.get('TRAVIS_BRANCH')
out = subprocess.check_output(['git', 'push', 'upstream_with_token', 'HEAD:%s' % branch],
stderr=subprocess.STDOUT)
else:
print('Would git commit, with the following message: \n {}'.format(msg))
if gh:
# Get our final rate limit info.
print_rate_limiting_info(gh)
sys.exit(exit_code)
|
staged-recipes-master
|
.travis_scripts/create_feedstocks.py
|
#!/usr/bin/env python
"""
Copyright (c) 2016, Continuum Analytics, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Continuum Analytics nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import print_function, division
import logging
import os
import pkg_resources
import re
import subprocess
import networkx as nx
from conda_build import api, conda_interface
from conda_build.metadata import find_recipe, MetaData
from conda_build.utils import HashableDict
log = logging.getLogger(__file__)
CONDA_BUILD_CACHE = os.environ.get("CONDA_BUILD_CACHE")
hash_length = api.Config().hash_length
def package_key(metadata, worker_label, run='build'):
# get the build string from whatever conda-build makes of the configuration
used_loop_vars = metadata.get_used_loop_vars()
build_vars = '-'.join([k + '_' + str(metadata.config.variant[k]) for k in used_loop_vars
if k != 'target_platform'])
# kind of a special case. Target platform determines a lot of output behavior, but may not be
# explicitly listed in the recipe.
tp = metadata.config.variant.get('target_platform')
if tp and tp != metadata.config.subdir and 'target_platform' not in build_vars:
build_vars += '-target_' + tp
key = [metadata.name(), metadata.version()]
if build_vars:
key.append(build_vars)
key.extend(['on', worker_label])
key = "-".join(key)
if run == 'test':
key = '-'.join(('c3itest', key))
return key
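# Illustrative example (hypothetical metadata): for a recipe named "mypkg" at
# version "1.0" whose only used loop variable is python=2.7, built on the
# worker labelled "linux-64", the key would be
#   "mypkg-1.0-python_2.7-on-linux-64"
# and, with run='test',
#   "c3itest-mypkg-1.0-python_2.7-on-linux-64".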
def _git_changed_files(git_rev, stop_rev=None, git_root=''):
if not git_root:
git_root = os.getcwd()
if stop_rev:
git_rev = "{0}..{1}".format(git_rev, stop_rev)
output = subprocess.check_output(['git', 'diff-tree', '--no-commit-id',
'--name-only', '-r', git_rev],
cwd=git_root)
files = output.decode().splitlines()
return files
def _get_base_folders(base_dir, changed_files):
recipe_dirs = []
for f in changed_files:
# only consider files that come from folders
if '/' in f:
f = f.split('/')[0]
try:
find_recipe(os.path.join(base_dir, f))
recipe_dirs.append(f)
except IOError:
pass
return recipe_dirs
def git_changed_submodules(git_rev='HEAD@{1}', stop_rev=None, git_root='.'):
if stop_rev is not None:
git_rev = "{0}..{1}".format(git_rev, stop_rev)
diff_script = pkg_resources.resource_filename('conda_concourse_ci', 'diff-script.sh')
diff = subprocess.check_output(['bash', diff_script, git_rev],
cwd=git_root, universal_newlines=True)
submodule_changed_files = [line.split() for line in diff.splitlines()]
submodules_with_recipe_changes = []
for submodule in submodule_changed_files:
for file in submodule:
if 'recipe/' in file and submodule[0] not in submodules_with_recipe_changes:
submodules_with_recipe_changes.append(submodule[0])
return submodules_with_recipe_changes
def git_new_submodules(git_rev='HEAD@{1}', stop_rev=None, git_root='.'):
if stop_rev is not None:
git_rev = "{0}..{1}".format(git_rev, stop_rev)
new_submodule_script = pkg_resources.resource_filename('conda_concourse_ci',
'new-submodule-script.sh')
diff = subprocess.check_output(['bash', new_submodule_script, git_rev],
cwd=git_root, universal_newlines=True)
return diff.splitlines()
def git_renamed_folders(git_rev='HEAD@{1}', stop_rev=None, git_root='.'):
if stop_rev is not None:
git_rev = "{0}..{1}".format(git_rev, stop_rev)
rename_script = pkg_resources.resource_filename('conda_concourse_ci',
'rename-script.sh')
renamed_files = subprocess.check_output(['bash', rename_script], cwd=git_root,
universal_newlines=True).splitlines()
return renamed_files
def git_changed_recipes(git_rev='HEAD@{1}', stop_rev=None, git_root='.'):
"""
Get the list of files changed in a git revision and return a list of
package directories that have been modified.
git_rev: if stop_rev is not provided, this represents the changes
introduced by the given git rev. It is equivalent to
git_rev=SOME_REV@{1} and stop_rev=SOME_REV
stop_rev: when provided, this is the end of a range of revisions to
consider. git_rev becomes the start revision. Note that the
start revision is *one before* the actual start of examining
commits for changes. In other words:
git_rev=SOME_REV@{1} and stop_rev=SOME_REV => only SOME_REV
git_rev=SOME_REV@{2} and stop_rev=SOME_REV => two commits, SOME_REV and the
one before it
"""
changed_files = _git_changed_files(git_rev, stop_rev, git_root)
recipe_dirs = _get_base_folders(git_root, changed_files)
changed_submodules = git_changed_submodules(git_rev, stop_rev, git_root)
new_submodules = git_new_submodules(git_rev, stop_rev, git_root)
renamed_folders = git_renamed_folders(git_rev, stop_rev, git_root)
return recipe_dirs + changed_submodules + new_submodules + renamed_folders
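# Illustrative usage (hypothetical revisions), following the range semantics
# described in the docstring above:
#   git_changed_recipes('HEAD@{1}', stop_rev='HEAD')   # recipes touched by the last commit
#   git_changed_recipes('HEAD@{2}', stop_rev='HEAD')   # recipes touched by the last two commits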
def _deps_to_version_dict(deps):
d = {}
for x in deps:
x = x.strip().split()
if len(x) == 3:
d[x[0]] = (x[1], x[2])
elif len(x) == 2:
d[x[0]] = (x[1], 'any')
else:
d[x[0]] = ('any', 'any')
return d
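# Illustrative example:
#   _deps_to_version_dict(['numpy 1.11 py27_0', 'python >=2.7', 'zlib'])
# returns
#   {'numpy': ('1.11', 'py27_0'), 'python': ('>=2.7', 'any'), 'zlib': ('any', 'any')}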
def get_build_deps(meta):
build_reqs = meta.get_value('requirements/build')
if not build_reqs:
build_reqs = []
return _deps_to_version_dict(build_reqs)
def get_run_test_deps(meta):
run_reqs = meta.get_value('requirements/run')
if not run_reqs:
run_reqs = []
test_reqs = meta.get_value('test/requires')
if not test_reqs:
test_reqs = []
return _deps_to_version_dict(run_reqs + test_reqs)
_rendered_recipes = {}
@conda_interface.memoized
def _get_or_render_metadata(meta_file_or_recipe_dir, worker, finalize, config=None):
global _rendered_recipes
platform = worker['platform']
arch = str(worker['arch'])
if (meta_file_or_recipe_dir, platform, arch) not in _rendered_recipes:
print("rendering {0} for {1}".format(meta_file_or_recipe_dir, worker['label']))
_rendered_recipes[(meta_file_or_recipe_dir, platform, arch)] = \
api.render(meta_file_or_recipe_dir, platform=platform, arch=arch,
verbose=False, permit_undefined_jinja=True,
bypass_env_check=True, config=config, finalize=finalize)
return _rendered_recipes[(meta_file_or_recipe_dir, platform, arch)]
def add_recipe_to_graph(recipe_dir, graph, run, worker, conda_resolve,
recipes_dir=None, config=None, finalize=False):
try:
rendered = _get_or_render_metadata(recipe_dir, worker, config=config, finalize=finalize)
except (IOError, SystemExit):
log.warn('invalid recipe dir: %s - skipping', recipe_dir)
return None
name = None
for (metadata, _, _) in rendered:
name = package_key(metadata, worker['label'], run)
if metadata.skip():
continue
if name not in graph.nodes():
graph.add_node(name, meta=metadata, worker=worker)
add_dependency_nodes_and_edges(name, graph, run, worker, conda_resolve,
recipes_dir=recipes_dir, finalize=finalize)
# # add the test equivalent at the same time. This is so that expanding can find it.
# if run == 'build':
# add_recipe_to_graph(recipe_dir, graph, 'test', worker, conda_resolve,
# recipes_dir=recipes_dir)
# test_key = package_key(metadata, worker['label'])
# graph.add_edge(test_key, name)
# upload_key = package_key(metadata, worker['label'])
# graph.add_node(upload_key, meta=metadata, worker=worker)
# graph.add_edge(upload_key, test_key)
return name
def match_peer_job(target_matchspec, other_m, this_m=None):
"""target_matchspec comes from the recipe. target_variant is the variant from the recipe whose
deps we are matching. m is the peer job, which must satisfy conda and also have matching keys
for any keys that are shared between target_variant and m.config.variant"""
match_dict = {'name': other_m.name(),
'version': other_m.version(),
'build': _fix_any(other_m.build_id(), other_m.config), }
if conda_interface.conda_43:
match_dict = conda_interface.Dist(name=match_dict['name'],
dist_name='-'.join((match_dict['name'],
match_dict['version'],
match_dict['build'])),
version=match_dict['version'],
build_string=match_dict['build'],
build_number=int(other_m.build_number() or 0),
channel=None)
matchspec_matches = target_matchspec.match(match_dict)
variant_matches = True
if this_m:
other_m_used_vars = other_m.get_used_loop_vars()
for v in this_m.get_used_loop_vars():
if v in other_m_used_vars:
variant_matches &= this_m.config.variant[v] == other_m.config.variant[v]
return matchspec_matches and variant_matches
def add_intradependencies(graph):
"""ensure that downstream packages wait for upstream build/test (not use existing
available packages)"""
for node in graph.nodes():
if 'meta' not in graph.node[node]:
continue
# get build dependencies
m = graph.node[node]['meta']
# this is pretty hard. Realistically, we would want to know
# what the build and host platforms are on the build machine.
# However, all we know right now is what machine we're actually
# on (the one calculating the graph).
deps = set(m.ms_depends('build') + m.ms_depends('host') + m.ms_depends('run') +
[conda_interface.MatchSpec(dep) for dep in
m.meta.get('test', {}).get('requires', [])])
for dep in deps:
name_matches = (n for n in graph.nodes() if graph.node[n]['meta'].name() == dep.name)
for matching_node in name_matches:
# are any of these build dependencies also nodes in our graph?
if (match_peer_job(conda_interface.MatchSpec(dep),
graph.node[matching_node]['meta'],
m) and
(node, matching_node) not in graph.edges()):
# add edges if they don't already exist
graph.add_edge(node, matching_node)
def collapse_subpackage_nodes(graph):
"""Collapse all subpackage nodes into their parent recipe node
We get one node per output, but a given recipe can have multiple outputs. It's important
for dependency ordering in the graph that the outputs exist independently, but once those
dependencies are established, we need to collapse subpackages down to a single job for the
top-level recipe."""
# group nodes by their recipe path first, then within those groups by their variant
node_groups = {}
for node in graph.nodes():
if 'meta' in graph.node[node]:
meta = graph.node[node]['meta']
meta_path = meta.meta_path or meta.meta['extra']['parent_recipe']['path']
master = False
master_meta = MetaData(meta_path, config=meta.config)
if master_meta.name() == meta.name():
master = True
group = node_groups.get(meta_path, {})
subgroup = group.get(HashableDict(meta.config.variant), {})
if master:
if 'master' in subgroup:
raise ValueError("tried to set more than one node in a group as master")
subgroup['master'] = node
else:
sps = subgroup.get('subpackages', [])
sps.append(node)
subgroup['subpackages'] = sps
group[HashableDict(meta.config.variant)] = subgroup
node_groups[meta_path] = group
for recipe_path, group in node_groups.items():
for variant, subgroup in group.items():
# if no node is the top-level recipe (only outputs, no top-level output), need to obtain
# package/name from recipe given by common recipe path.
subpackages = subgroup.get('subpackages')
if 'master' not in subgroup:
sp0 = graph.node[subpackages[0]]
master_meta = MetaData(recipe_path, config=sp0['meta'].config)
worker = sp0['worker']
master_key = package_key(master_meta, worker['label'])
graph.add_node(master_key, meta=master_meta, worker=worker)
master = graph.node[master_key]
else:
master = subgroup['master']
master_key = package_key(graph.node[master]['meta'],
graph.node[master]['worker']['label'])
# fold in dependencies for all of the other subpackages within a group. This is just
# the intersection of the edges between all nodes. Store this on the "master" node.
if subpackages:
remap_edges = [edge for edge in graph.edges() if edge[1] in subpackages]
for edge in remap_edges:
graph.add_edge(edge[0], master_key)
graph.remove_edge(*edge)
# remove nodes that have been folded into master nodes
for subnode in subpackages:
graph.remove_node(subnode)
def construct_graph(recipes_dir, worker, run, conda_resolve, folders=(),
git_rev=None, stop_rev=None, matrix_base_dir=None,
config=None, finalize=False):
'''
Construct a directed graph of dependencies from a directory of recipes
run: whether to use build or run/test requirements for the graph. Avoids cycles.
       values: 'build' or 'test'. Only 'build' matters; any other value is
       treated as run/test.
'''
matrix_base_dir = matrix_base_dir or recipes_dir
if not os.path.isabs(recipes_dir):
recipes_dir = os.path.normpath(os.path.join(os.getcwd(), recipes_dir))
assert os.path.isdir(recipes_dir)
if not folders:
if not git_rev:
git_rev = 'HEAD'
folders = git_changed_recipes(git_rev, stop_rev=stop_rev,
git_root=recipes_dir)
graph = nx.DiGraph()
for folder in folders:
recipe_dir = os.path.join(recipes_dir, folder)
if not os.path.isdir(recipe_dir):
raise ValueError("Specified folder {} does not exist".format(recipe_dir))
add_recipe_to_graph(recipe_dir, graph, run, worker, conda_resolve,
recipes_dir, config=config, finalize=finalize)
add_intradependencies(graph)
collapse_subpackage_nodes(graph)
return graph
def _fix_any(value, config):
value = re.sub('any(?:h[0-9a-f]{%d})?' % config.hash_length, '', value)
return value
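# Illustrative example: with config.hash_length == 7,
#   _fix_any('any', config)          -> ''
#   _fix_any('anyh1a2b3c4', config)  -> ''
#   _fix_any('1.2.3', config)        -> '1.2.3' (unchanged)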
@conda_interface.memoized
def _installable(name, version, build_string, config, conda_resolve):
"""Can Conda install the package we need?"""
ms = conda_interface.MatchSpec(" ".join([name, _fix_any(version, config),
_fix_any(build_string, config)]))
installable = conda_resolve.find_matches(ms)
if not installable:
log.warn("Dependency {name}, version {ver} is not installable from your "
"channels: {channels} with subdir {subdir}. Seeing if we can build it..."
.format(name=name, ver=version, channels=config.channel_urls,
subdir=config.host_subdir))
return installable
def _buildable(name, version, recipes_dir, worker, config, finalize):
"""Does the recipe that we have available produce the package we need?"""
possible_dirs = os.listdir(recipes_dir)
packagename_re = re.compile(r'%s(?:\-[0-9]+[\.0-9\_\-a-zA-Z]*)?$' % name)
likely_dirs = (dirname for dirname in possible_dirs if
(os.path.isdir(os.path.join(recipes_dir, dirname)) and
packagename_re.match(dirname)))
metadata_tuples = [m for path in likely_dirs
for (m, _, _) in _get_or_render_metadata(os.path.join(recipes_dir,
path), worker, finalize=finalize)]
# this is our target match
ms = conda_interface.MatchSpec(" ".join([name, _fix_any(version, config)]))
available = False
for m in metadata_tuples:
available = match_peer_job(ms, m)
if available:
break
return m.meta_path if available else False
def add_dependency_nodes_and_edges(node, graph, run, worker, conda_resolve, recipes_dir=None,
finalize=False):
'''add build nodes for any upstream deps that are not yet installable
changes graph in place.
'''
metadata = graph.node[node]['meta']
# for plain test runs, ignore build reqs.
deps = get_run_test_deps(metadata)
recipes_dir = recipes_dir or os.getcwd()
# cross: need to distinguish between build_subdir (build reqs) and host_subdir
if run == 'build':
deps.update(get_build_deps(metadata))
for dep, (version, build_str) in deps.items():
# we don't need worker info in _installable because it is already part of conda_resolve
if not _installable(dep, version, build_str, metadata.config, conda_resolve):
recipe_dir = _buildable(dep, version, recipes_dir, worker, metadata.config,
finalize=finalize)
if not recipe_dir:
continue
# raise ValueError("Dependency {} is not installable, and recipe (if "
# " available) can't produce desired version ({})."
# .format(dep, version))
dep_name = add_recipe_to_graph(recipe_dir, graph, 'build', worker,
conda_resolve, recipes_dir, finalize=finalize)
if not dep_name:
raise ValueError("Tried to build recipe {0} as dependency, which is skipped "
"in meta.yaml".format(recipe_dir))
graph.add_edge(node, dep_name)
def expand_run_upstream(graph, conda_resolve, worker, run, steps=0, max_downstream=5,
recipes_dir=None, matrix_base_dir=None):
pass
def expand_run(graph, conda_resolve, worker, run, steps=0, max_downstream=5,
recipes_dir=None, matrix_base_dir=None, finalize=False):
"""Apply the build label to any nodes that need (re)building or testing.
"need rebuilding" means both packages that our target package depends on,
but are not yet built, as well as packages that depend on our target
package. For the latter, you can specify how many dependencies deep (steps)
to follow that chain, since it can be quite large.
If steps is -1, all downstream dependencies are rebuilt or retested
"""
downstream = 0
initial_nodes = len(graph.nodes())
# for build, we get test automatically. Give people the max_downstream in terms
# of packages, not tasks
# if run == 'build':
# max_downstream *= 2
def expand_step(task_graph, full_graph, downstream):
for node in task_graph.nodes():
for predecessor in full_graph.predecessors(node):
if max_downstream < 0 or (downstream - initial_nodes) < max_downstream:
add_recipe_to_graph(
os.path.dirname(full_graph.node[predecessor]['meta'].meta_path),
task_graph, run=run, worker=worker, conda_resolve=conda_resolve,
recipes_dir=recipes_dir, finalize=finalize)
downstream += 1
return len(graph.nodes())
# starting from our initial collection of dirty nodes, trace the tree down to packages
# that depend on the dirty nodes. These packages may need to be rebuilt, or perhaps
# just tested. The 'run' argument determines which.
if steps != 0:
if not recipes_dir:
raise ValueError("recipes_dir is necessary if steps != 0. "
"Please pass it as an argument.")
# here we need to fully populate a graph that has the right build or run/test deps.
# We don't create this elsewhere because it is unnecessary and costly.
# get all immediate subdirectories
other_top_dirs = [d for d in os.listdir(recipes_dir)
if os.path.isdir(os.path.join(recipes_dir, d)) and
not d.startswith('.')]
recipe_dirs = []
for recipe_dir in other_top_dirs:
try:
find_recipe(os.path.join(recipes_dir, recipe_dir))
recipe_dirs.append(recipe_dir)
except IOError:
pass
# constructing the graph for build will automatically also include the test deps
full_graph = construct_graph(recipes_dir, worker, 'build', folders=recipe_dirs,
matrix_base_dir=matrix_base_dir, conda_resolve=conda_resolve)
if steps >= 0:
for step in range(steps):
downstream = expand_step(graph, full_graph, downstream)
else:
while True:
nodes = graph.nodes()
downstream = expand_step(graph, full_graph, downstream)
if nodes == graph.nodes():
break
def order_build(graph):
'''
    Returns a topological ordering of the graph's packages, reversed so that
    dependencies come before the packages that depend on them. Cyclical
    test/build dependencies are reordered first; any remaining cycle raises
    ValueError.
'''
reorder_cyclical_test_dependencies(graph)
try:
order = list(nx.topological_sort(graph))
order.reverse()
except nx.exception.NetworkXUnfeasible:
raise ValueError("Cycles detected in graph: %s", nx.find_cycle(graph,
orientation='reverse'))
return order
def reorder_cyclical_test_dependencies(graph):
"""By default, we make things that depend on earlier outputs for build wait for tests of
the earlier thing to pass. However, circular dependencies spread across run/test and
build/host can make this approach incorrect. For example:
A <-- B : B depends on A at build time
B <-- A : A depends on B at run time. We can build A before B, but we cannot test A until B
is built.
To resolve this, we must reorder the graph edges:
build A <-- test A <--> build B <-- test B
must become:
build A <-- build B <-- test A <-- test B
"""
# find all test nodes with edges to build nodes
test_nodes = [node for node in graph.nodes() if node.startswith('test-')]
edges_from_test_to_build = [edge for edge in graph.edges() if edge[0] in test_nodes and
edge[1].startswith('build-')]
# find any of their inverses. Entries here are of the form (test-A, build-B)
circular_deps = [edge for edge in edges_from_test_to_build
if (edge[1], edge[0]) in graph.edges()]
for (testA, buildB) in circular_deps:
# remove build B dependence on test A
graph.remove_edge(testA, buildB)
# remove test B dependence on build B
testB = buildB.replace('build-', 'test-', 1)
graph.remove_edge(buildB, testB)
# Add test B dependence on test A
graph.add_edge(testA, testB)
# make sure that test A still depends on build B
assert (buildB, testA) in graph.edges()
# graph is modified in place. No return necessary.
|
staged-recipes-master
|
.ci_support/compute_build_graph.py
|
import conda_build.conda_interface
import networkx as nx
import conda_build.api
from compute_build_graph import construct_graph
import argparse
import os
from collections import OrderedDict
import sys
def get_host_platform():
from sys import platform
if platform == "linux" or platform == "linux2":
return "linux"
elif platform == "darwin":
return "osx"
elif platform == "win32":
return "win"
def build_all(recipes_dir, arch):
folders = os.listdir(recipes_dir)
if not folders:
print("Found no recipes to build")
return
channel_urls = ['local', 'conda-forge', 'defaults']
# ensure that noarch path exists and is indexed for newer conda (4.4+)
noarch_path = os.path.join(sys.exec_prefix, 'conda-bld', 'noarch')
try:
os.makedirs(noarch_path)
    except OSError:
        # The directory may already exist; that's fine.
        pass
conda_build.api.update_index(noarch_path)
index = conda_build.conda_interface.get_index(channel_urls=channel_urls)
conda_resolve = conda_build.conda_interface.Resolve(index)
exclusive_config_file = os.path.join(conda_build.conda_interface.root_dir,
'conda_build_config.yaml')
platform = get_host_platform()
script_dir = os.path.dirname(os.path.realpath(__file__))
variant_config_files = []
variant_config_file = os.path.join(script_dir, '{}{}.yaml'.format(
platform, arch))
if os.path.exists(variant_config_file):
variant_config_files.append(variant_config_file)
config = conda_build.api.Config(
variant_config_files=variant_config_files, arch=arch,
exclusive_config_file=exclusive_config_file, channel_urls=channel_urls)
worker = {'platform': platform, 'arch': arch,
'label': '{}-{}'.format(platform, arch)}
G = construct_graph(recipes_dir, worker=worker, run='build',
conda_resolve=conda_resolve, folders=folders,
config=config, finalize=False)
order = list(nx.topological_sort(G))
order.reverse()
print('Computed that there are {} distributions to build from {} recipes'
.format(len(order), len(folders)))
if not order:
print('Nothing to do')
return
print("Resolved dependencies, will be built in the following order:")
print(' '+'\n '.join(order))
d = OrderedDict()
for node in order:
d[G.node[node]['meta'].meta_path] = 1
conda_build.api.build(list(d.keys()), config=config)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('recipes_dir', default=os.getcwd(),
help='Directory where the recipes are')
parser.add_argument('--arch', default='64',
help='target architecture (64 or 32)')
args = parser.parse_args()
build_all(args.recipes_dir, args.arch)
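# Illustrative usage (hypothetical paths): build every recipe under ./recipes
# for 64-bit:
#   python build_all.py ./recipes --arch 64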
|
staged-recipes-master
|
.ci_support/build_all.py
|
# ==============================================================================
#
# run.py
#
# usage:
#
# $ python run.py path/to/solver.prototxt path/to/machine_list.txt machines_per_batch CPU|1GPU|4GPU single_fc|many_fc (map_fcc_to_cc=)1|0 base_dir snapshot_input_dir(=None) snapshot_input_iter(=None)
#
# ==============================================================================
import sys
import os
import re # Regex
import subprocess
import datum_pb2
import lmdb
import datetime
# ==============================================================================
# Parameters
# ==============================================================================
# ------------------------------------------------------------------------------
# SSH parameters
# ------------------------------------------------------------------------------
# User name to log in to each machine with
user = 'root'
# Extra commands to run following ssh
# This should include:
# - cd into correct directory
#   - path commands in .bashrc (ssh does not source .bashrc so its load library
# commands may need to be included here, see the example below)
extra_cmd = 'cd /home/software/Omnivore/; export PATH=$PATH:/usr/local/cuda-7.0/bin; export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-7.0/lib64;'
# These 3 are meaningless if using FCC
FCCM_and_CM_to_CC = False
CM_only_to_CC = True # no effect if FCCM_and_CM_to_CC = True (that takes precedence)
# If this next one is true then it will put 2 CC per machine (note: assumes no FCC)
# More complex patterns exist, e.g. if we have many m/g and separate FCC
# we will have some CC with FCC and some without, so only those without can
# take a CC, etc. For now though just assume this next option implies FCCM.
double_subscribe_cc = False  # True
# This is meaningless unless using FCC
CM_and_FCM_together = True
# Set this if running on a single machine (i.e. for simulations, otherwise just use CcT)
all_on_single_machine = False
# ------------------------------------------------------------------------------
# Script parameters
# ------------------------------------------------------------------------------
# Set to true after lmdb has been generated once (saves time)
# Warning: When changing the number of machines, need to reset this to True
skip_lmdb_generation = False
# Run with GPUs. If neither is True, uses CPU only.
# This is for both conv compute and fc compute/model servers
# For the fc compute server (case when single_FC_server = False), set num_gpu_per_fcc
# These will eventually not be bools but rather ints selected by the optimizer
# (for now, only support 0, 1, or 4 GPUs). Set both to false to use CPU only.
use_4_gpu = True # Takes precedence if both this and use_1_gpu are True
use_1_gpu = False
# If using > 1 GPU, do model parallelism on FC
# This applies regardless of whether fc compute and model are one server or
# separate servers
multi_gpu_model_parallelism = True  # Will default to True later or be selected by the optimizer, e.g. turn off if fcc/fcm
# FC Compute / Model Server Parameters
single_FC_server = True
# The remaining parameters are only valid if single_FC_server is False
# For FC, the scheduler may choose to have a single fc compute + model server ("fccm",
# or the case when single_FC_server = True), or to have one or more fc compute ("fcc")
# servers as well as a separate fc model ("fcm") server.
# - If there is a single server for both fc model and fc compute (single_FC_server = True),
# then this server will use a number of gpus decided by use_4_gpu and use_1_gpu above
# (only 0, 1 or 4 GPUs per server supported). Then the parameters below are not used.
# - If there are separate servers for fc compute and fc model (single_FC_server = False),
# then for now fc model will be its own machine and fc computes will be spread across
# the remaining machines. However, it is possible to have many fc compute (fcc) on 1
# machine, if and only if that machine has multiple GPUs. E.g. if we have 4 GPUs/machine,
# there are 3 cases:
# - 1 fc compute server on that machine, using either 1 GPU, 4 GPUs or CPU only
# - 2 fc compute servers on that machine, using up to 2 GPUs each (or each can use 1 GPU)
# - 4 fc compute servers on that machine, each using exactly 1 GPU (none will use CPU)
# Now, use_1_gpu and use_4_gpu will be IGNORED for FC, and applied only to conv. The params
# below take precedence for FC.
num_fcc_per_machine = 1
# If CPU machines only, just set this to 0 and it will be ignored. Recall this parameter
# only applies to multiple FCC. It might make sense to just set this to be whatever the
# conv machines will use (e.g. CPU, GPU, 4GPU) because the conv machines will probably
# be idle while the FCC is executing, i.e. we can map the FCC to those machines, unless
# there is pipelining. For now this makes it more generic because e.g. each FCC can use
# 1 GPU so we can map 4 to 1 4GPU machine, etc.
num_gpu_per_fcc_machine = 0
# SHADJIS TODO: For now we configure the servers by args and options above, but a future change could be by config file (created externally and passed in).
# Then internally this script reads that file and creates the mapping without having to calculate / decide the mapping.
#
# The cases it should handle are:
# - default case: 1 CC per machine, + 2 machines (FCCM, CM)
# - FCCM_and_CM_to_CC (FCCM to 1 CC, CM to another)
# - CM_only_to_CC (FCCM alone, CM to a CC)
# - single_FC_server (default true, if false, then will have 1 FCC per group, but on separate machines)
# - map_fcc_to_cc (default false, but if true puts FCC on CC)
# - CM_and_FCM_together (default false, if true will put CM and FCM together. If FCM = FCCM, later will put those together too, but for now only works when single_FC_server = 0)
# - num_fcc_per_machine
# - num_gpu_per_fcc_machine
# - double_subscribe_cc
# - maybe even multi-machine model parallelism for FC
#
# Examples:
# master FCC, FCC, FCC, FCC, FCM, CM # Here it is 4 groups of size 1, each FC is 1 GPU, then once we get FCC model grads, we pull them out and send them to FCM
# node001 CC
# node002 CC
# node003 CC
# node004 CC
#
# master FCC, FCC, FCC, FCC, FCM, CM # Here it is 8 groups of size 1, each FC is 1 GPU, then once we get FCC model grads, we pull them out and send them to FCM
# node001 CC, CC
# node002 CC, CC
# node003 CC, CC
# node004 CC, CC
#
# master FCCM, CM
# node001 CC
# node002 CC
# node003 CC
# node004 CC
#
# master FCCM
# node001 CC
# node002 CC
# node003 CC
# node004 CC, CM
#
# master FCM, CM
# node001 CC,FCC
# node002 CC,FCC
# node003 CC,FCC
# node004 CC,FCC
#
# master CC,FCC,FCM
# node001 CC,FCC,CM
# node002 CC,FCC
# node003 CC,FCC
#
# ==============================================================================
# Description
# ==============================================================================
"""
--------------------------------------------------------------------------------
Input:
--------------------------------------------------------------------------------
- List of machines
--------------------------------------------------------------------------------
Task:
--------------------------------------------------------------------------------
- Create input protobuf files (solver and train_val) for each server
- Read in a machine list so we know how many conv compute servers to create
- Parse the solver prototxt to get the name of the train prototxt file
- Parse the train prototxt file for lmdb names
- Parse the network train prototxt again, now to split into 2 networks
- Create new solver and network prototxt files for each server
- Partition the lmdb for each server
- Create config files for each server
- Run ssh commands on each server
--------------------------------------------------------------------------------
Scheduler tradeoffs:
--------------------------------------------------------------------------------
- Machine types are conv model, conv compute, fc compute, fc model, and also
combinations: conv compute/model (unimplemented), fc compute/model,
model server (unimplemented) and compute server (unimplemented)
- Decide which to use and how many of each
- Number of servers to allocate to a single machine (e.g. allocate two conv
compute to a single machine to hide stalled time waiting for fc to return
gradients), and also how many GPUs per server (e.g. scheduler may decide
given a number of machines to create 2 fc compute servers, one per machine,
or to create 2 but on the same machine, each using 2 GPUs, and use the extra
machine for another conv compute, or to create a single FC compute/model server
to minimize communication)
- Data parallelism: Decide how many machines to use within a batch
- How to use each box (CPU, GPU, both, use each GPU as a different server, or use
4 GPU model/data parallelism, see also below)
- Model parallelism: Decide how many machines to use to partition models,
or how many GPUs on a single machine (model parallelism across
machines unimplemented)
- Precision (lossy, lossless, e.g. given AVX2, lossy compression makes sense)
- Hogwild vs. model averaging vs. other methods (when using many model servers),
also must choose isolation levels
Inputs:
- Throughput per box (not simply black box however, since a single machine with
4 GPUs can be seen as a single box or as 4 slower boxes)
- Network speed
--------------------------------------------------------------------------------
Details:
--------------------------------------------------------------------------------
For example, a machine list could be:
master
node015
node019
The servers will then need the following information in the .cfg file:
conv model server (running on node015)
name = "Example ConvModelServer on 5555";
bind = "tcp://*:5555";
type = "ConvModelServer";
solver_file = "protos/solver.conv_model_server.prototxt";
output_model_file = "conv_model.bin";
conv compute server (running on node019)
name = "Example ConvComputeServer on 5555";
conv_bind = "tcp://node015:5555";
fc_bind = "tcp://master:5556";
type = "ConvComputeServer";
solver_file = "protos/solver.conv_compute_server.prototxt";
fc server (running on master)
name = "Example FCComputeModelServer on 5556";
bind = "tcp://*:5556";
type = "FCComputeModelServer";
solver_file = "protos/solver.fc_model_server.prototxt";
output_model_file = "fc_model.bin";
"""
# ==============================================================================
# Parse arguments
# ==============================================================================
EXPECTED_NUM_ARGS = 10
if len(sys.argv) not in [EXPECTED_NUM_ARGS,EXPECTED_NUM_ARGS+1]:
# SHADJIS TODO: map_fcc_to_cc is only used if many_fc is set.
# Eventually there will also be an option to map fcm and cm to the same machine,
# either by making two separate servers and assigning them to one machine or using
# an AllModelServer (and then also an AllComputeServer can be used)
# SHADJIS TODO: These can be in a config file if there are too many params
# SHADJIS TODO: Or even better, rather than a bool for every case (fcc + cc on same machine,
# etc.), for now we can read in a machine list file which has cols for each machine, e.g.
# if we have:
#
# master cc0.0 fcc0
# node001 cc0.1
# node002 cc1.0 fcc1
# node003 cc1.1
# node003 fcm
# node003 cm
#
# Or even simpler: see comments @ top
#
# then it is clear how machines should be assigned. The scheduler will then eventually
# generate this file (e.g. a JSON format which specifies each server, its machine,
# its GPUs, etc.)
print 'Usage: >>> python run.py path/to/solver.prototxt path/to/machine_list.txt machines_per_batch CPU|1GPU|4GPU single_fc|many_fc (map_fcc_to_cc=)1|0 base_dir snapshot_input_dir(="none" to ignore) snapshot_input_iter(="none" to ignore)'
sys.exit(0)
# Check that the distributed cct binary exists before running this script
if not os.path.exists('./dcct'):
print 'Please first run \'make -j\''
sys.exit(0)
# We will also need one of the util files, so check here if it exists
if not os.path.exists('./tools/size_util/size_util'):
print 'Please cd into tools/size_util and \'make -j\''
sys.exit(0)
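# Illustrative invocation (hypothetical paths; matches the usage string above):
#   python run.py protos/solver.prototxt machines.txt 2 4GPU single_fc 0 /path/to/base_dir none none
# Append a trailing 's' to reuse previously generated lmdb partitions
# (see skip_lmdb_generation above).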
solver_file = sys.argv[1]
machine_list_file = sys.argv[2]
machines_per_batch = int(sys.argv[3]) # SHADJIS TODO: Eventually optimizer will select this
node_hw = sys.argv[4]
if sys.argv[5] == 'single_fc':
single_FC_server = True
del num_fcc_per_machine # These are unused
del num_gpu_per_fcc_machine
else:
assert sys.argv[5] == 'many_fc'
print 'Using 1 fc compute server per conv compute group'
single_FC_server = False
if sys.argv[6] == '1':
# If we want to map the fcc to the same machine as cc we need to replace the
# port with a local port.
map_fcc_to_cc = True
print 'Assigning fcc servers to same machine as (some) cc servers'
else:
assert sys.argv[6] == '0'
map_fcc_to_cc = False
base_dir = sys.argv[7]
if base_dir[-1] != '/':
base_dir += '/'
snapshot_input_dir = sys.argv[8]
if snapshot_input_dir == "none":
snapshot_input_dir = None
elif snapshot_input_dir[-1] != '/':
snapshot_input_dir += '/'
snapshot_input_iter = sys.argv[9]
if snapshot_input_iter == "none":
snapshot_input_iter = None
if len(sys.argv) == EXPECTED_NUM_ARGS+1 and sys.argv[EXPECTED_NUM_ARGS] == 's':
skip_lmdb_generation = True
if node_hw == 'CPU':
use_4_gpu = False
use_1_gpu = False
elif node_hw == '1GPU':
use_4_gpu = False
use_1_gpu = True
elif node_hw == '4GPU':
use_4_gpu = True
use_1_gpu = False
else:
assert False
# assert skip_lmdb_generation
# ==============================================================================
# Create input protobuf files (solver and train_val) for each server
# ==============================================================================
# ------------------------------------------------------------------------------
# Read in a machine list so we know how many conv compute servers to create
# ------------------------------------------------------------------------------
# Note: For now, the first line must be the current machine, so there must be at least 1 machine
machine_list = []
f = open(machine_list_file)
for line in f:
if line.strip():
machine_list.append(line.strip())
f.close()
assert len(machine_list) >= 1
# Allocate each server to machines
# This depends on how many FC servers we have
if single_FC_server:
# The FC server is always the first machine
fc_server_machine = machine_list[0]
# SHADJIS TODO: This now assumes that given e.g. N machines, the best way to use them is to
# allocate 1 machine to FCCM, 1 to CM, and the remaining N-2 are CC. In general this probably
# makes sense as N is large, but for small N e.g. 2 machines should handle this better. Even
# on e.g. 8 machines which is not too small, it makes sense to be able to map CM and FCCM to
# same machine, or CM to a CC and FCCM to another CC, or CM and FCCM and CC all on 1 machine.
# The optimizer can decide this layout later and pass in a config file (as mentioned above)
# which lists, for each machine, which servers map to it. For now, I will just set a bool at
# the top of this file which if true will put FCCM on a CC and CM on another CC. Then we can
# handle more general cases later (along with single_fc|many_fc, (map_fcc_to_cc=)1|0 , etc.)
# Conv Model Server:
# If there is 1 machine, it must be CM
# If 2 or more machines, now we can choose, e.g. (CM,CC) and (FCCM,CC) or (CM,FCCM,CC) and (CC),
# etc. Having (CM, FCCM), (CC) would thrash less but would be certainly slower than 1 machine
if len(machine_list) == 1:
conv_model_server_machine = machine_list[0]
else:
# SHADJIS TODO: Optimizer should decide if this goes on server 0 (with FCCM and CC) or
# server 1 (alone with a CC). For now hard-code this one.
conv_model_server_machine = machine_list[1]
# Conv Compute Server:
if len(machine_list) == 1: # There is only 1 machine, so CC goes here
conv_compute_server_machines = [machine_list[0]]
num_conv_compute_servers = 1
elif len(machine_list) == 2:
conv_compute_server_machines = machine_list # Put a CC on each
num_conv_compute_servers = 2
else:
# Calculate the number of conv compute servers
# In the default case, the number of CC servers is #machines - 2
# However, we may choose to make it #machines or #machines-1, and map
# CM or FC or both to CC. In the future, #CC can also be > #machines,
# i.e. pipelining
# SHADJIS TODO: Can also map e.g. a CM to a CC but separate FCCM, etc.
if FCCM_and_CM_to_CC:
num_machines_reserved = 0
elif CM_only_to_CC: # In this case we have N-1 CC machines
num_machines_reserved = 1
else:
num_machines_reserved = 2
num_machines_left = len(machine_list) - num_machines_reserved
# Make sure it divides the number of machines per batch
num_conv_compute_servers = (num_machines_left/machines_per_batch) * machines_per_batch
conv_compute_server_machines = machine_list[num_machines_reserved : num_machines_reserved + num_conv_compute_servers]
# SHADJIS TODO: If there are leftovers because num_conv_compute_servers < len(machine_list),
# the CM and FCCM can go there, but for now ignore that case
# For now this will just take the # of CC from before and multiply by 2
# Later more complicated things can be done
# So #groups is just double what it was going to be before
if double_subscribe_cc:
num_conv_compute_servers *= 2
# Note how this is happening: if we have 8 machines and group size 4, we put
# G1 on 0-3, G2 on 4-7, G3 on 0-3, and G4 on 4-7
# Specifically, we do not put G1 on 0-1 (2 CC/machine), since then while FC is processing
# that group, it will be idle
conv_compute_server_machines = conv_compute_server_machines * 2
# Now determine the number of groups
num_groups = num_conv_compute_servers / machines_per_batch
assert num_groups > 0
assert num_conv_compute_servers % machines_per_batch == 0
else:
# For now do not support FCC and double CC
# - if 1 m/g, NO help from pipelining
#   - if 2 m/g or more, then we can cleverly put extra CC and FCC on un-utilized CC machines, but will IGNORE THIS CASE for now
assert not double_subscribe_cc
# Determine how to allocate machines
num_machines = len(machine_list)
# For now let's assert 4 or more machines:
# 1. On a single machine it doesn't make sense to have multiple fc compute servers, since
# they cannot run at the same time so might as well just use a single one
# 2. SHADJIS TODO: In the case of 2 machines, each machine can have a conv compute and an FC,
# so that data does not need to be copied. However since there is only a single conv model
# and fc model server, we need to allocate these. The scheduler will do it later so for
# now I will just exit in this case
# 3. SHADJIS TODO: In the case of 3 machines, it is again possible to have intelligent machine
# allocation, but for now I will ignore this case as well
assert num_machines > 3
# Above are special-cases. It may be beneficial to have multiple fc compute servers when the
# number of machines < 4, but for now we will assume 4 or more machines and that if there are
# fewer machines then there will be a single fc compute and model server (since it is less
# likely to be the bottleneck if there are few conv compute machines)
# For now, if there are 4 or more machines, and FC model / computation are on different machines,
# then currently machine allocations will be: 1 conv model, 1 fc model, and a number of conv compute
# and fc compute specified at the top of the file.
# In the future it will be possible to have other configurations, e.g. multiple groups allocated
# to one fc compute server, but for now 1 group per fc will be used.
# SHADJIS TODO: do config file later, once we know what is fast
if CM_and_FCM_together:
# The FC model server can be the first machine, and conv model on second
fc_model_server_machine = machine_list[0]
conv_model_server_machine = machine_list[0]
num_machines_reserved = 1
else:
# The FC model server can be the first machine, and conv model on second
fc_model_server_machine = machine_list[0]
conv_model_server_machine = machine_list[1]
num_machines_reserved = 2
num_machines_left = num_machines - num_machines_reserved
# If num_gpu_per_fcc > 1, then that fcc server will use model
# parallelism iff multi_gpu_model_parallelism = True
num_gpu_per_fcc = num_gpu_per_fcc_machine / num_fcc_per_machine
# Now assign machines to the fcc and conv compute servers
# Special case: fcc and cc on same server
if map_fcc_to_cc:
num_groups = num_machines_left / machines_per_batch
num_fc_compute_servers = num_groups
assert num_groups > 0
# These FC compute servers will be on the same machines and cc servers, so assign those first
conv_compute_server_machines = machine_list[num_machines_reserved : num_machines_reserved + num_groups*machines_per_batch]
# Now assign FC as a subset of these machines. If group size is 1, it will be the same
# Otherwise, it will be strided by machines_per_batch
fc_compute_server_machines = conv_compute_server_machines[::machines_per_batch]
num_conv_compute_servers = len(conv_compute_server_machines)
assert num_fcc_per_machine == 1 # Makes sense, since it also has a CC on it
# Default case
else:
# Now we have the following:
# - num_machines_left (after allocating 1 to conv model and 1 to fc model)
# - machines_per_batch (i.e. # machines / conv compute group),
# - num fc compute / machine,
# Now we can calculate how many parallel batches (AKA groups) there will be:
# num_groups = num_machines_left / (num_machines/group)
# = num_machines_left / (num_machines/cc_group + num_machines/fcc_server),
# where #machines/fcc_server = 1/num_fcc_per_machine <= 1
num_groups = int( num_machines_left / (machines_per_batch + 1./num_fcc_per_machine) )
# Next we must allocate a number of fc compute servers equal to the number of groups.
num_fc_compute_servers = num_groups
assert num_groups > 0
num_machines_for_fc_compute_servers = ( num_fc_compute_servers + num_fcc_per_machine - 1 )/ num_fcc_per_machine # Round up
# Allocate machines for these fc compute servers
fc_compute_server_machines = []
current_machine = num_machines_reserved # Since we allocated some to the model servers
servers_on_current_machine = 0
for i in range(num_fc_compute_servers):
fc_compute_server_machines.append(machine_list[current_machine])
servers_on_current_machine += 1
if servers_on_current_machine == num_fcc_per_machine:
servers_on_current_machine = 0
current_machine += 1
# Make sure we assigned all the machines that we had allocated for fcc servers (maybe the last one is
# not 100% full so check for that case as well)
assert (current_machine == num_machines_reserved + num_machines_for_fc_compute_servers) or (current_machine == num_machines_reserved + num_machines_for_fc_compute_servers - 1)
current_machine = num_machines_reserved + num_machines_for_fc_compute_servers
# Now, the remaining number of machines must be able to fit the conv compute servers
    # Edit: I think this can never happen since we fit the # FCC to the # machines, but it is good to assert anyway
num_machines_for_conv_compute_servers = num_groups * machines_per_batch
if num_machines_for_conv_compute_servers + current_machine > num_machines:
print 'Error: your configuration requires more machines than provided (' + \
str(num_machines) + ' provided, ' + str(num_machines_for_conv_compute_servers + current_machine) + ' needed)'
assert False
conv_compute_server_machines = machine_list[current_machine : current_machine + num_machines_for_conv_compute_servers]
num_conv_compute_servers = len(conv_compute_server_machines)
# Now we have the following set:
# - num_groups
# - fc_model_server_machine (as opposed to fc_server_machine for the case of single fc server)
# - conv_model_server_machine
# - fc_compute_server_machines, num_fc_compute_servers, num_gpu_per_fcc
# - conv_compute_server_machines and num_conv_compute_servers, and use_1_gpu or use_4_gpu
# The rest of the file will use these variables
print 'Number of machines = ' + str(len(machine_list))
print 'Number of groups = ' + str(num_groups)
# ------------------------------------------------------------------------------
# Find ports for the machines above
# ------------------------------------------------------------------------------
# Now we need to create 2 ports per group (one to listen and one to broadcast)
# SHADJIS TODO: We should get a list of free ports both on the conv model server
# machine and the fc server machine. For now, I am only getting a list of free
# ports on the current machine (running this script) and re-using for the machines
# running these servers, which is wrong. If this causes errors, can log in to
# each machine and run a script (get_n_free_ports.py), then use those ports.
import socket
# SHADJIS TODO: ssh into conv model / fc server machines and run this there
# We need at least num_groups * 2 unique ports. In fact, this is enough because we can
# re-use these port numbers across machines (to be sure, we should ssh into each and generate
# the ports). But if we ever want to merge the model servers into one machine we need
# more ports. For now we will just re-use the ports.
# Update: In order to map FCCM and CM to the same machine we can make separate ports
# (this is not needed for FCCM to go on a CC or for CM to go on a CC, just when FCCM/CM
# are on same machine)
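# Port usage sketch (numbers hypothetical): with num_groups = 2 we allocate 4 cm_ports and 4 fc_ports;
# in the config files written later, the model server for group i uses cm_ports[2*i] / fc_ports[2*i]
# as its broadcast port and cm_ports[2*i + 1] / fc_ports[2*i + 1] as its listen port.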
cm_ports = []
while len(cm_ports) != num_groups*2:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 0))
addr = s.getsockname()
# E.g. parse 40582 from ('0.0.0.0', 40582) SHADJIS TODO: Support other formats
if str(addr[1]) not in cm_ports:
cm_ports.append(str(addr[1]))
s.close()
fc_ports = [] # Also used for fcc, since that is what cc binds to
while len(fc_ports) != num_groups*2:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 0))
addr = s.getsockname()
# E.g. parse 40582 from ('0.0.0.0', 40582) SHADJIS TODO: Support other formats
if str(addr[1]) not in fc_ports and str(addr[1]) not in cm_ports:
fc_ports.append(str(addr[1]))
s.close()
if not single_FC_server:
fcm_ports = [] # Also used for fccm
while len(fcm_ports) != num_groups*2:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 0))
addr = s.getsockname()
# E.g. parse 40582 from ('0.0.0.0', 40582) SHADJIS TODO: Support other formats
if str(addr[1]) not in fc_ports and str(addr[1]) not in cm_ports and str(addr[1]) not in fcm_ports:
fcm_ports.append(str(addr[1]))
s.close()
# ------------------------------------------------------------------------------
# Parse the solver prototxt to get the name of the train prototxt file
# ------------------------------------------------------------------------------
# From this we need to parse the network prototxt so we can create new
# sub-networks. We also need to store the solver as a string so we can
# create new copies of the solver that point to each network prototxt.
f = open(solver_file)
solver_file_lines = []
train_proto = None
for line in f:
# The line should look something like:
# net: "protos/train_test.prototxt"
match = re.search(r'net\s*:\s*"\s*(\S+)\s*"', line, flags=re.IGNORECASE)
if match:
train_proto = match.group(1)
solver_file_lines.append(line.replace(train_proto, '__TRAIN_PROTO__'))
else:
solver_file_lines.append(line)
assert train_proto
solver_file_str = ''.join(solver_file_lines)
f.close()
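# For example, a solver line like
#   net: "protos/train_test.prototxt"
# yields train_proto = 'protos/train_test.prototxt', and the stored copy of that line becomes
#   net: "__TRAIN_PROTO__"
# so a different train prototxt path can be substituted for each server below.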
# ------------------------------------------------------------------------------
# Parse the train prototxt file for lmdb names
# ------------------------------------------------------------------------------
# First parse the file to obtain the lmdb names
# Also while parsing, count the number of FC layers. This is only used for
# multi-GPU model parallelism
num_fc_layers_total = 0
f = open(train_proto)
lmdb_databases = []
for line in f:
# E.g. a line like
# source: "/home/ubuntu/train_data"
match = re.search(r'source\s*:\s*"\s*(\S+)\s*"', line, flags=re.IGNORECASE)
if match:
lmdb_databases.append(match.group(1))
# Also check if this is an fc layer
match = re.search(r'type\s*:\s*"\s*(\S+)\s*"', line, flags=re.IGNORECASE)
if match:
type = match.group(1)
if 'INNERPRODUCT' in type.upper():
num_fc_layers_total += 1
assert len(lmdb_databases) in [1,2]
assert num_fc_layers_total > 0 # For softmax
if len(lmdb_databases) == 2:
print 'Warning: For now, validation / test sets are ignored.'
print ' This will be supported soon.'
f.close()
# SHADJIS TODO: Support validation set lmdb
train_lmdb_name = lmdb_databases[0].rstrip('/')
conv_movel_server_train_lmdb_name = train_lmdb_name
fc_server_train_lmdb_name = train_lmdb_name + '_FC'
conv_compute_server_train_lmdb_names = []
skip_main_lmdb_only = False
if num_conv_compute_servers == 1:
conv_compute_server_train_lmdb_names.append(train_lmdb_name)
skip_main_lmdb_only = True
else:
os.system('mkdir -p ' + train_lmdb_name + '_' + str(num_conv_compute_servers) + '_PARTITION')
for i in range(num_conv_compute_servers):
conv_compute_server_train_lmdb_names.append(train_lmdb_name + '_' + str(num_conv_compute_servers) + '_PARTITION/' + str(num_conv_compute_servers) + 'P_p' + str(i))
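# Naming example (path is hypothetical): with 4 conv compute servers and train lmdb '/data/train_lmdb',
# the partitions are /data/train_lmdb_4_PARTITION/4P_p0 .. /data/train_lmdb_4_PARTITION/4P_p3.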
# ------------------------------------------------------------------------------
# Parse the network train prototxt again, now to split into 2 networks
# ------------------------------------------------------------------------------
# Now read through the file again, this time partitioning into conv and fc layers
# Here we want to make 2 networks: first with everything up to first FC, then
# first FC to the end
# Note: in the case of separate fcc and fcm servers, the protos are identical
# except GPU info. However, the GPU info can be complicated because fcm has no
# GPU (this is easy to handle) but fcc's potentially each have different GPUs.
conv_model_server_proto_str = ''
conv_compute_server_proto_strs = ['']*num_conv_compute_servers
if single_FC_server:
fc_server_proto_str = ''
else:
fcm_server_proto_str = ''
fcc_server_proto_strs = ['']*num_fc_compute_servers
# Where we are in the file
data_section = True
conv_section = False
fc_section = False
# Read layer by layer
lines_for_current_layer = []
lines_for_current_layer_no_gpu = []
f = open(train_proto)
num_fc_layers_found = 0
processing_first_fc_layer = False
data_name = ''
for line in f:
# Check if this is the start of a new layer
# SHADJIS TODO: I think proto can skip a line before the curly brace but I'll ignore that for now
if re.search(r'layer\s*\{', line, flags=re.IGNORECASE):
layer_str = ''.join(lines_for_current_layer)
layer_str_no_gpu = ''.join(lines_for_current_layer_no_gpu)
lines_for_current_layer = [line]
lines_for_current_layer_no_gpu = [line]
# This is a new layer. What we do with the old layer depends on
# which section we are in
# Case 1, this is a data layer
if data_section:
# We want to append this layer to all the networks
conv_model_server_proto_str += layer_str_no_gpu # No GPU for now on the conv model server. SHADJIS TODO: Why is this needed for data section? Assert same as no gpu
# For each conv compute network, we need to replace the LMDB
# We also need to reduce the batch size
layer_str_copy = layer_str
match = re.search(r'batch_size\s*:\s*(\d+)', layer_str, flags=re.IGNORECASE)
if match:
batch_size = int(match.group(1))
assert batch_size % machines_per_batch == 0
batch_size_reduced = int(batch_size / machines_per_batch)
layer_str_copy = re.sub(r'batch_size\s*:\s*(\d+)', 'batch_size: ' + str(batch_size_reduced), layer_str_copy, 1, flags=re.IGNORECASE)
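# e.g. (illustrative) a prototxt batch_size of 256 with machines_per_batch = 4 becomes
# 'batch_size: 64' in each conv compute server's copy, so the group still processes 256 images per iteration.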
for i in range(num_conv_compute_servers):
conv_compute_server_proto_strs[i] += layer_str_copy.replace(conv_movel_server_train_lmdb_name, conv_compute_server_train_lmdb_names[i])
# For the FC network, we need to do some replacements and then also replace the LMDB:
# SHADJIS TODO: Use regex to replace the mirror in mirror: true/false, but not others
layer_str = re.sub(r'mirror', '#mirror', layer_str , 1, flags=re.IGNORECASE)
layer_str = re.sub(r'crop_size', '#crop_size', layer_str , 1, flags=re.IGNORECASE)
layer_str = re.sub(r'mean_file', '#mean_file', layer_str , 1, flags=re.IGNORECASE)
layer_str_for_fc = layer_str.replace(conv_movel_server_train_lmdb_name, fc_server_train_lmdb_name)
if single_FC_server:
fc_server_proto_str += layer_str_for_fc
else:
fcm_server_proto_str += layer_str_for_fc
for i in range(num_fc_compute_servers):
fcc_server_proto_strs[i] += layer_str_for_fc
# Case 2, this is a layer in the conv part
elif conv_section:
conv_model_server_proto_str += layer_str_no_gpu
for i in range(num_conv_compute_servers):
conv_compute_server_proto_strs[i] += layer_str
# Case 3, this is a layer in the FC part
elif fc_section:
if single_FC_server:
fc_server_proto_str += layer_str
else:
fcm_server_proto_str += layer_str_no_gpu
# Now we need to substitute in {SUB_GPU_NUM_HERE} for the correct gpu numbers
# Iterate over each server and assign GPUs. Keep in mind that multiple servers
# may be assigned to a single machine
# Start at machine 0 GPU 0 and increment the GPU each time. Reset once we
# reach num_gpu_per_fcc_machine (this moves to a new machine, although the
# machine assignments are not done here, since they were already done above)
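# Example assignment (hypothetical numbers): with num_gpu_per_fcc = 2 and num_gpu_per_fcc_machine = 4,
# FCC server 0 gets GPUs 0,1 and server 1 gets GPUs 2,3 on the same machine; server 2 then wraps
# around and starts again at GPU 0 on the next machine.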
current_gpu = 0
for i in range(num_fc_compute_servers):
layer_str_this_fcc_server = layer_str
for g in range(num_gpu_per_fcc):
layer_str_this_fcc_server = layer_str_this_fcc_server.replace('{SUB_GPU_NUM_HERE}', str(current_gpu), 1)
current_gpu += 1
if current_gpu == num_gpu_per_fcc_machine:
current_gpu = 0
fcc_server_proto_strs[i] += layer_str_this_fcc_server
# Otherwise this is part of a layer
else:
# 3 checks,
# - lines_for_current_layer to make sure we are in a layer (not the name of the network on line 1)
# - not data_name so we only set it once
# - data_section so we get the data name
if lines_for_current_layer and not data_name and data_section:
match = re.search(r'name\s*:\s*"\s*(\S+)\s*"', line, flags=re.IGNORECASE)
if match:
data_name = match.group(1)
if processing_first_fc_layer:
match = re.search(r'bottom\s*:\s*"\s*(\S+)\s*"', line, flags=re.IGNORECASE)
if match:
old_bottom = match.group(1)
assert data_name
line = line.replace(old_bottom, data_name)
processing_first_fc_layer = False
lines_for_current_layer.append(line)
lines_for_current_layer_no_gpu.append(line)
# We can also determine if we moved to a new section of the network
match = re.search(r'type\s*:\s*"\s*(\S+)\s*"', line, flags=re.IGNORECASE)
if match:
# If this is a 'type: "..."' line, the section of the network might have changed
type = match.group(1)
# If it is a convolution layer, assert we are not in the fc section
# and transition to conv section if in data section
if 'CONVOLUTION' in type.upper() or 'POOL' in type.upper():
assert not fc_section
if data_section:
data_section = False
conv_section = True
elif 'INNERPRODUCT' in type.upper():
if num_fc_layers_found == 0:
processing_first_fc_layer = True
num_fc_layers_found += 1
data_section = False
conv_section = False
fc_section = True
# Update proto with GPU information
#
# Only do this once per layer, i.e. we want to do it after this 'type:' line:
#
# type: "ReLU" <----
#
# But not after this 'type:' line
#
# weight_filler {
# type: "gaussian" <----
# std: 0.01
# }
# Note: We assume the FC portion is a straight chain, i.e. we transition to the FC phase at the first FC layer,
# meaning there can be no branching/grouping from the first FC layer onward (FC and on must be a single chain)
if type.upper() in ['INNERPRODUCT', 'RELU', 'DROPOUT', 'POOLING', 'CONVOLUTION', 'LRN', 'BATCHNORM', 'SCALE', 'ELTWISE', 'CONCAT']:
# Conv can use up to 4 GPUs
if conv_section:
if use_4_gpu:
lines_for_current_layer.append(''' gpu_0_batch_proportion: 0.25
gpu_1_batch_proportion: 0.25
gpu_2_batch_proportion: 0.25
gpu_3_batch_proportion: 0.25
''')
elif use_1_gpu:
lines_for_current_layer.append(''' gpu_0_batch_proportion: 1.0
''')
# FC can use up to 1 GPU with model parallelism disabled,
# and all the GPUs with model parallelism enabled
elif fc_section and type.upper() != 'SOFTMAXWITHLOSS' and type.upper() != 'SOFTMAX':
if single_FC_server:
if use_1_gpu or (use_4_gpu and not multi_gpu_model_parallelism):
lines_for_current_layer.append(''' gpu_0_batch_proportion: 1.0
''')
# If 4 GPUs and model parallelism is enabled, the type of parallelism and
# number of GPUs depends on the layer.
# By default, FC layers get 4 GPUs and model parallelism, and non-FC layers
# get 4 GPUs and data parallelism. However the final FC layer may be faster
# on just 1 GPU because:
# - 4 GPUs data parallelism: this is slow because the computation is small
# for the final FC layer so there is little speedup from using
# multiple GPUs, but accumulating the gradients requires
# copying the gradients from each GPU and model back to each GPU
# - 4 GPUs model parallelism: this is fast but requires copying all the
# data to each GPU then back to the host in backward pass to sum gradients
# It turns out that because the computation is fast for the last FC, minimizing copies
# is more important, so using 1 GPU (and keeping gradients on device at all times)
# is fastest. Then to avoid copies to that GPU, it is also fastest if all
# the preceding layers (e.g. ReLU, dropout) also use 1 GPU (1 GPU is batch
# parallelism by default, but 1 GPU batch and 1 GPU depth are equivalent).
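# Example of the resulting assignment (illustrative, for an AlexNet-like net with 3 FC layers):
# fc6 and fc7 get 4-GPU model parallelism (gpu_*_depth_proportion: 0.25 each), the ReLU/dropout right
# after fc6 get 4-GPU data parallelism, while fc8 and the ReLU/dropout after fc7 run on a single GPU
# (gpu_0_batch_proportion: 1.0) so the last FC's gradients stay on one device.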
elif use_4_gpu:
# Now how many GPUs to use depends on the layer
assert multi_gpu_model_parallelism
# If we are past the second-last FC, use 1 GPU
# Note that this conditional branch seems useless (same result in both cases)
# but is not because of batch vs depth proportion
if type.upper() == 'INNERPRODUCT':
assert num_fc_layers_found >= 1
assert num_fc_layers_found <= num_fc_layers_total
if num_fc_layers_found == num_fc_layers_total: # Last FC
lines_for_current_layer.append(''' gpu_0_batch_proportion: 1.0
''')
else:
lines_for_current_layer.append(''' gpu_0_depth_proportion: 0.25
gpu_1_depth_proportion: 0.25
gpu_2_depth_proportion: 0.25
gpu_3_depth_proportion: 0.25
''')
else:
if num_fc_layers_found >= num_fc_layers_total-1: # Right before last FC
lines_for_current_layer.append(''' gpu_0_batch_proportion: 1.0
''')
else:
lines_for_current_layer.append(''' gpu_0_batch_proportion: 0.25
gpu_1_batch_proportion: 0.25
gpu_2_batch_proportion: 0.25
gpu_3_batch_proportion: 0.25
''')
# not single_FC_server
else:
if num_gpu_per_fcc == 1 or (num_gpu_per_fcc > 1 and not multi_gpu_model_parallelism):
lines_for_current_layer.append(''' gpu_{SUB_GPU_NUM_HERE}_batch_proportion: 1.0
''')
# See model parallelism comment in code above
elif num_gpu_per_fcc > 1:
# Now how many GPUs to use depends on the layer
assert multi_gpu_model_parallelism
# If we are past the second-last FC, use 1 GPU
if type.upper() == 'INNERPRODUCT':
assert num_fc_layers_found >= 1
assert num_fc_layers_found <= num_fc_layers_total
if num_fc_layers_found == num_fc_layers_total: # Last FC
lines_for_current_layer.append(''' gpu_{SUB_GPU_NUM_HERE}_batch_proportion: 1.0
''')
elif num_gpu_per_fcc == 2:
lines_for_current_layer.append(''' gpu_{SUB_GPU_NUM_HERE}_depth_proportion: 0.5
gpu_{SUB_GPU_NUM_HERE}_depth_proportion: 0.5
''')
else:
assert num_gpu_per_fcc == 4
lines_for_current_layer.append(''' gpu_{SUB_GPU_NUM_HERE}_depth_proportion: 0.25
gpu_{SUB_GPU_NUM_HERE}_depth_proportion: 0.25
gpu_{SUB_GPU_NUM_HERE}_depth_proportion: 0.25
gpu_{SUB_GPU_NUM_HERE}_depth_proportion: 0.25
''')
else:
if num_fc_layers_found >= num_fc_layers_total-1: # Right before last FC
lines_for_current_layer.append(''' gpu_{SUB_GPU_NUM_HERE}_batch_proportion: 1.0
''')
elif num_gpu_per_fcc == 2:
lines_for_current_layer.append(''' gpu_{SUB_GPU_NUM_HERE}_batch_proportion: 0.5
gpu_{SUB_GPU_NUM_HERE}_batch_proportion: 0.5
''')
else:
assert num_gpu_per_fcc == 4
lines_for_current_layer.append(''' gpu_{SUB_GPU_NUM_HERE}_batch_proportion: 0.25
gpu_{SUB_GPU_NUM_HERE}_batch_proportion: 0.25
gpu_{SUB_GPU_NUM_HERE}_batch_proportion: 0.25
gpu_{SUB_GPU_NUM_HERE}_batch_proportion: 0.25
''')
f.close()
# Handle the final layer as well (the loop above only flushes a layer when the next 'layer {' line is seen)
layer_str = ''.join(lines_for_current_layer)
layer_str_no_gpu = ''.join(lines_for_current_layer_no_gpu)
assert layer_str == layer_str_no_gpu # Last layer (softmax) should not use GPU
assert fc_section
if single_FC_server:
fc_server_proto_str += layer_str
else:
fcm_server_proto_str += layer_str_no_gpu
for i in range(num_fc_compute_servers):
fcc_server_proto_strs[i] += layer_str
# ------------------------------------------------------------------------------
# Create new solver and network prototxt files
# ------------------------------------------------------------------------------
input_file_dir = base_dir + 'server_input_files-' + datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S") # SHADJIS TODO: base_dir can include this later
os.system('mkdir -p ' + input_file_dir)
print 'Writing input prototxt files to ' + input_file_dir + '/'
# First create the solver files for the model servers
if single_FC_server:
server_types = ['conv_model', 'fc_model_compute']
else:
server_types = ['conv_model', 'fc_model']
for server in server_types:
solver_name = input_file_dir + '/solver.' + server + '_server.prototxt'
train_name = input_file_dir + '/train_val.' + server + '_server.prototxt'
# Make a solver for this server
f = open(solver_name, 'w')
print ' Writing ' + solver_name
f.write(solver_file_str.replace('__TRAIN_PROTO__', train_name))
f.close()
# Make a train file for this server
f = open(train_name, 'w')
print ' Writing ' + train_name
if server == 'conv_model':
f.write(conv_model_server_proto_str)
conv_model_server_solver_file = solver_name
elif server == 'fc_model_compute':
assert single_FC_server
f.write(fc_server_proto_str)
fc_server_solver_file = solver_name
else:
assert server == 'fc_model'
assert not single_FC_server
f.write(fcm_server_proto_str)
fcm_server_solver_file = solver_name
f.close()
# Now create the solver files for the conv compute servers
conv_compute_server_solver_files = []
for i in range(num_conv_compute_servers):
solver_name = input_file_dir + '/solver.conv_compute_server.' + str(i) + '.prototxt'
conv_compute_server_solver_files.append(solver_name)
train_name = input_file_dir + '/train_val.conv_compute_server.' + str(i) + '.prototxt'
# Make a solver for this server
f = open(solver_name, 'w')
print ' Writing ' + solver_name
f.write(solver_file_str.replace('__TRAIN_PROTO__', train_name))
f.close()
# Make a train file for this server
f = open(train_name, 'w')
print ' Writing ' + train_name
f.write(conv_compute_server_proto_strs[i])
f.close()
# Now create the solver files for the fc compute servers, if any
if not single_FC_server:
fc_compute_server_solver_files = []
for i in range(num_fc_compute_servers):
solver_name = input_file_dir + '/solver.fc_compute_server.' + str(i) + '.prototxt'
fc_compute_server_solver_files.append(solver_name)
train_name = input_file_dir + '/train_val.fc_compute_server.' + str(i) + '.prototxt'
# Make a solver for this server
f = open(solver_name, 'w')
print ' Writing ' + solver_name
f.write(solver_file_str.replace('__TRAIN_PROTO__', train_name))
f.close()
# Make a train file for this server
f = open(train_name, 'w')
print ' Writing ' + train_name
f.write(fcc_server_proto_strs[i])
f.close()
# ==============================================================================
# Create the LMDBs referenced above
# ==============================================================================
# Recall that above we made:
#
# conv_movel_server_train_lmdb_name (same as default, since unused)
# fc_server_train_lmdb_name
# and
# conv_compute_server_train_lmdb_names for i in range(num_conv_compute_servers)
#
# Now we need to create the fc and conv compute lmdb files by reading the
# conv model server's lmdb. The fc server's lmdb can be empty but needs the right datum size.
# We don't know this size however without loading the network, so we need to
# do that using a C++ utility (see below)
# ------------------------------------------------------------------------------
# First, make the lmdb for each conv compute server
# ------------------------------------------------------------------------------
def open_new_write_lmdb_helper(new_lmdb_name, num_imgs, map_size):
os.system('rm -rf ' + new_lmdb_name)
write_env = lmdb.open(new_lmdb_name, readonly=False, lock=False, map_size=map_size)
write_txn = write_env.begin(write=True)
print ' Writing ' + str(num_imgs) + ' images to ' + new_lmdb_name
return write_env, write_txn
map_size = 1024*1024*1024*1024
if not (skip_lmdb_generation or skip_main_lmdb_only):
# SHADJIS TODO: Skip this if there is only 1 partition, and re-use existing lmdb
# Open the full lmdb that we will read from
read_lmdb_name = conv_movel_server_train_lmdb_name
# First count the number of images
read_env = lmdb.open(read_lmdb_name, readonly=True)
num_images = 0
with read_env.begin() as read_txn:
with read_txn.cursor() as read_cursor:
for key, value in read_cursor:
num_images += 1
read_env.close()
# num_images = 129395
print 'LMDB ' + read_lmdb_name + ' contains ' + str(num_images)
# Now split by the number of conv compute servers
num_images_per_conv_compute_server = [num_images/num_conv_compute_servers]*num_conv_compute_servers
# We also have to add the remainders
num_leftover = num_images%num_conv_compute_servers
for i in range(num_leftover):
num_images_per_conv_compute_server[i] += 1
assert sum(num_images_per_conv_compute_server) == num_images
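# e.g. (illustrative) 10 images over 3 conv compute servers: base split 10/3 = 3 each,
# num_leftover = 1, so the per-server counts become [4, 3, 3].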
# Now create the lmdb for each conv compute server
read_env = lmdb.open(read_lmdb_name, readonly=True)
with read_env.begin() as read_txn:
with read_txn.cursor() as read_cursor:
# Read over each datum again, this time writing to a new lmdb
current_server = 0
img_idx = 0
# Open the LMDB for the first server
write_env, write_txn = open_new_write_lmdb_helper(conv_compute_server_train_lmdb_names[current_server], num_images_per_conv_compute_server[current_server], map_size)
# Read over each image in original lmdb
for key, value in read_cursor:
# Check if we should move to the next server
if img_idx >= num_images_per_conv_compute_server[current_server]:
# We just finished the current server
# First close the current lmdb
write_txn.commit()
write_env.close()
# Increment server count and reset img_idx
current_server += 1
img_idx = 0
# Open new lmdb
write_env, write_txn = open_new_write_lmdb_helper(conv_compute_server_train_lmdb_names[current_server], num_images_per_conv_compute_server[current_server], map_size)
# Write the new datum to the new lmdb
write_txn.put(key, value)
img_idx += 1
# assert we have 1 server lmdb left to write
assert current_server == num_conv_compute_servers-1
assert img_idx == num_images_per_conv_compute_server[current_server]
write_txn.commit()
write_env.close()
read_env.close()
# ------------------------------------------------------------------------------
# Next, make the lmdb for the FC server
# ------------------------------------------------------------------------------
if not skip_lmdb_generation:
# This requires calling a utility which loads the network and prints the size
# of the output of the final conv layer.
util_output_str = subprocess.check_output(['./tools/size_util/size_util', conv_model_server_solver_file])
num_fc_inputs = int(util_output_str.strip().split("\n")[-1].strip())
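# e.g. (numbers illustrative) if the last conv/pool layer outputs 256 feature maps of size 6x6,
# the last line of size_util's output gives num_fc_inputs = 256*6*6 = 9216, and the dummy datum
# written below is 9216 x 1 x 1.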
# Now create a new LMDB with 1 datum that contains the right size
write_env, write_txn = open_new_write_lmdb_helper(fc_server_train_lmdb_name, 1, map_size)
# Create the datum
datum = datum_pb2.Datum()
datum.height = 1
datum.width = 1
datum.channels = num_fc_inputs
# Write back the new datum
write_txn.put('dummy', datum.SerializeToString())
write_txn.commit()
write_env.close()
# ==============================================================================
# Create config files for each server
# ==============================================================================
print 'Generating configuration files for each server'
# Also keep a list of the machine and config file
cmd_params = []
# Get the config file names
if single_FC_server:
fc_server_cfg = input_file_dir + '/fc_server.cfg'
else:
fcm_server_cfg = input_file_dir + '/fc_model_server.cfg'
fcc_server_cfgs = []
for i in range(num_fc_compute_servers):
fcc_server_cfgs.append(input_file_dir + '/fc_compute_server.' + str(i) + '.cfg')
conv_model_server_cfg = input_file_dir + '/conv_model_server.cfg'
conv_compute_server_cfgs = []
for i in range(num_conv_compute_servers):
conv_compute_server_cfgs.append(input_file_dir + '/conv_compute_server.' + str(i) + '.cfg')
# Write config files
if single_FC_server:
# FC server
print ' Writing ' + fc_server_cfg
f = open(fc_server_cfg, 'w')
f.write('''name = "FCComputeModelServer on tcp://''' + fc_server_machine + '''";
type = "FCComputeModelServer";
solver = "''' + fc_server_solver_file + '''";
group_size = ''' + str(machines_per_batch) + ''';
''')
if snapshot_input_dir:
f.write('snapshot_input_dir = "' + snapshot_input_dir + '"' + "\n")
if snapshot_input_iter:
f.write('snapshot_input_iter = ' + snapshot_input_iter + "\n")
f.write('''
ports = (
''')
for i in range(num_groups):
if i != 0:
f.write(',')
f.write('''
{
broadcast = "tcp://*:''' + str(fc_ports[2*i ]) + '''",
listen = "tcp://*:''' + str(fc_ports[2*i + 1]) + '''"
}''')
f.write('''
);
''')
f.close()
cmd_params.append((fc_server_machine, fc_server_cfg))
else:
# FC model server
# Note group_size = 1 since the fc compute servers will be connecting to this
print ' Writing ' + fcm_server_cfg
f = open(fcm_server_cfg, 'w')
f.write('''name = "FCModelServer on tcp://''' + fc_model_server_machine + '''";
type = "FCModelServer";
solver = "''' + fcm_server_solver_file + '''";
group_size = 1;
''')
if snapshot_input_dir:
f.write('snapshot_input_dir = "' + snapshot_input_dir + '"' + "\n")
if snapshot_input_iter:
f.write('snapshot_input_iter = ' + snapshot_input_iter + "\n")
f.write('''
ports = (
''')
for i in range(num_groups):
if i != 0:
f.write(',')
f.write('''
{
broadcast = "tcp://*:''' + str(fcm_ports[2*i ]) + '''",
listen = "tcp://*:''' + str(fcm_ports[2*i + 1]) + '''"
}''')
f.write('''
);
''')
f.close()
cmd_params.append((fc_model_server_machine, fcm_server_cfg))
# Conv model server
print ' Writing ' + conv_model_server_cfg
f = open(conv_model_server_cfg, 'w')
f.write('''name = "ConvModelServer on tcp://''' + conv_model_server_machine + '''";
type = "ConvModelServer";
solver = "''' + conv_model_server_solver_file + '''";
group_size = ''' + str(machines_per_batch) + ''';
''')
if snapshot_input_dir:
f.write('snapshot_input_dir = "' + snapshot_input_dir + '"' + "\n")
if snapshot_input_iter:
f.write('snapshot_input_iter = ' + snapshot_input_iter + "\n")
f.write('''
ports = (
''')
for i in range(num_groups):
if i != 0:
f.write(',')
f.write('''
{
broadcast = "tcp://*:''' + str(cm_ports[2*i ]) + '''",
listen = "tcp://*:''' + str(cm_ports[2*i + 1]) + '''"
}''')
f.write('''
);
''')
f.close()
cmd_params.append((conv_model_server_machine, conv_model_server_cfg))
# Conv compute servers
for i in range(num_conv_compute_servers):
group_of_this_machine = i / machines_per_batch
print ' Writing ' + conv_compute_server_cfgs[i]
f = open(conv_compute_server_cfgs[i], 'w')
# Check which FC machine to bind to (the port is fixed but the machine may change)
# Specifically, if the FC server and conv server are mapped to the same machine,
# then this will change. That can be true if the FC server is an FCCM or an FCC.
if single_FC_server:
if conv_compute_server_machines[i] == fc_server_machine:
fc_bind_machine = '127.0.0.1' # Localhost
else:
fc_bind_machine = fc_server_machine
else:
# Special case: if these are on the same machine, use localhost
if conv_compute_server_machines[i] == fc_compute_server_machines[group_of_this_machine]:
fc_bind_machine = '127.0.0.1' # Localhost
else:
fc_bind_machine = fc_compute_server_machines[group_of_this_machine]
# Check also which CM machine to bind to. This is like above but simpler because
# there is no distinction like FCCM vs FCC for CM, it is always just a CM
if conv_compute_server_machines[i] == conv_model_server_machine:
cm_bind_machine = '127.0.0.1' # Localhost
else:
cm_bind_machine = conv_model_server_machine
f.write('''name = "ConvComputeServer ''' + str(i) + '''";
conv_listen_bind = "tcp://''' + cm_bind_machine + ''':''' + str(cm_ports[2*group_of_this_machine + 1]) + '''";
conv_send_bind = "tcp://''' + cm_bind_machine + ''':''' + str(cm_ports[2*group_of_this_machine ]) + '''";
fc_listen_bind = "tcp://''' + fc_bind_machine + ''':''' + str(fc_ports[2*group_of_this_machine + 1]) + '''";
fc_send_bind = "tcp://''' + fc_bind_machine + ''':''' + str(fc_ports[2*group_of_this_machine ]) + '''";
type = "ConvComputeServer";
solver = "''' + conv_compute_server_solver_files[i] + '''";
group_size = ''' + str(machines_per_batch) + ''';
rank_in_group = ''' + str(i%machines_per_batch) + ''';
''')
f.close()
cmd_params.append((conv_compute_server_machines[i], conv_compute_server_cfgs[i]))
# FC compute servers
if not single_FC_server:
# Note rank in group is always 0 because there is only one fcc per group
for i in range(num_fc_compute_servers):
print ' Writing ' + fcc_server_cfgs[i]
f = open(fcc_server_cfgs[i], 'w')
f.write('''name = "FCComputeServer ''' + str(i) + '''";
conv_listen_bind = "tcp://''' + '*' + ''':''' + str(fc_ports[2*i + 1]) + '''";
conv_send_bind = "tcp://''' + '*' + ''':''' + str(fc_ports[2*i ]) + '''";
fc_listen_bind = "tcp://''' + fc_model_server_machine + ''':''' + str(fcm_ports[2*i + 1]) + '''";
fc_send_bind = "tcp://''' + fc_model_server_machine + ''':''' + str(fcm_ports[2*i ]) + '''";
type = "FCComputeServer";
solver = "''' + fc_compute_server_solver_files[i] + '''";
group_size = ''' + str(machines_per_batch) + ''';
rank_in_group = 0;
''')
f.close()
cmd_params.append((fc_compute_server_machines[i], fcc_server_cfgs[i]))
# ==============================================================================
# Run ssh commands
# ==============================================================================
print '''
Beginning to run commands for each server (commands also written to rerun_experiment.sh)
'''
# Run the commands
f = open('rerun_experiment.sh', 'w')
for cmd_param in cmd_params:
machine = cmd_param[0]
cfg_file = cmd_param[1]
if all_on_single_machine:
cmd = './dcct ' + cfg_file + ' &> ' + cfg_file +'.out &'
else:
cmd = 'ssh ' + user + '@' + machine + ' \'' + extra_cmd + ' ./dcct ' + cfg_file + ' &> ' + cfg_file +'.out\' &'
f.write(cmd + "\n")
print cmd
os.system(cmd)
# SHADJIS TODO: To prevent the FC server (ZMQ SUB) from missing the model from the CM, sleep (find a more permanent solution later).
# Most of the time sleeping 15s only after the model servers is enough, but for 32 machines in 1 group it hangs.
# However, if we also issue a 'sleep 0' after each conv compute server (i.e. effectively doing nothing differently) it works.
# Since this is a hack anyway and depends on e.g. OS scheduling, that is probably not unexpected and probably random.
if 'fc_server' in cmd or 'conv_model_server' in cmd or 'fc_model_server' in cmd:
cmd = 'sleep 15'
elif 'fc_compute_server' in cmd: # SHADJIS TODO: May be able to reduce this delay
cmd = 'sleep 1'
else:
cmd = 'sleep 0' # sleep 0 works too, but not removing the command entirely (for 128, 0 does not work -- some msgs lost)
f.write(cmd + "\n")
print cmd
os.system(cmd)
f.close()
# Also generate a script that can be used to kill all these servers
f = open('kill_servers.sh', 'w')
for cmd_param in cmd_params:
machine = cmd_param[0]
if all_on_single_machine:
f.write('pkill dcct' + "\n")
else:
# f.write('ssh ' + user + '@' + machine + ' \'pkill dcct; fuser -k 5555/tcp; fuser -k 5556/tcp;\' &' + "\n")
f.write('ssh ' + user + '@' + machine + ' \'pkill dcct;\' &' + "\n")
f.close()
print '''
Servers are now running on each machine over ssh.
See the input configuration file for each server to see which machine it is running on.
To stop all servers, run:
$ bash kill_servers.sh
'''
|
Omnivore-master
|
run.py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: datum.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='datum.proto',
package='',
syntax='proto2',
serialized_pb=_b('\n\x0b\x64\x61tum.proto\"\x81\x01\n\x05\x44\x61tum\x12\x10\n\x08\x63hannels\x18\x01 \x01(\x05\x12\x0e\n\x06height\x18\x02 \x01(\x05\x12\r\n\x05width\x18\x03 \x01(\x05\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12\r\n\x05label\x18\x05 \x01(\x05\x12\x12\n\nfloat_data\x18\x06 \x03(\x02\x12\x16\n\x07\x65ncoded\x18\x07 \x01(\x08:\x05\x66\x61lse')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_DATUM = _descriptor.Descriptor(
name='Datum',
full_name='Datum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='channels', full_name='Datum.channels', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='height', full_name='Datum.height', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='width', full_name='Datum.width', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='data', full_name='Datum.data', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='label', full_name='Datum.label', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='float_data', full_name='Datum.float_data', index=5,
number=6, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='encoded', full_name='Datum.encoded', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=16,
serialized_end=145,
)
DESCRIPTOR.message_types_by_name['Datum'] = _DATUM
Datum = _reflection.GeneratedProtocolMessageType('Datum', (_message.Message,), dict(
DESCRIPTOR = _DATUM,
__module__ = 'datum_pb2'
# @@protoc_insertion_point(class_scope:Datum)
))
_sym_db.RegisterMessage(Datum)
# @@protoc_insertion_point(module_scope)
|
Omnivore-master
|
datum_pb2.py
|
#!/usr/bin/python
# ==============================================================================
#
# omnivore.py
#
# usage:
#
# $ python omnivore.py config.txt
#
# ==============================================================================
import os
import sys
import re # Regex
# config file provides all parameters, but here are some extra ones
base_dir = '/home/software/Omnivore/my_run/'
hw_type = 'CPU' #'CPU' '4GPU' 'GPU'
random_seeds_phase_0 = [1] # Seeds matter a lot, but will not search now (save time)
EXTRA_TIME_FIRST = 0
MAKE_LMDB_FIRST = False
FCC = False
# leave these empty to let the system guess
momentum_list_phase_0 = []
LR_list_phase_0 = []
# Epoch duration can be fixed (e.g. 1h) or a multiplier of optimizer time (e.g. 10x)
optimizer_duration = 10000
# optimizer_factor = 10
snap_frequency = 0.07
# Get HE measurements
group_size_to_time = {}
# ==============================================================================
# Description
# ==============================================================================
"""
This reads an input config file and calls run.py for a number of configurations
Enhancements:
- Rather than read in a list of #m/g, read the machine list file like in run.py to figure out the # of conv compute machines which will be used
  (this code exists already in run.py, just copy it here)
- Rather than read times (phase 1 2 3) as input, run for 1 minute at a time, checkpoint at the end of the minute, and then repeat that until there is a clear winner
- Read snapshot: move forward in the dataset (now we restart), this is important if implementing 1 minute at a time change
- Now if momentum goes to 0, we increase # groups. Verify this makes sense, or maybe negative momentum etc. is a better strategy. Also, can force a "phase 2" to try
  momentum of 0.1, 0.2 etc. if momentum 0 is chosen. Finally, understand what LR and momentum to search once we hit m=0 and decrease #groups
Older TODO items: (some of these may be done now)
- Once we finish tuning for G groups, and we are about to start for 2G, can
reduce params to search since we know LR cannot be greater (momentum can
however if LR goes down)
- Change display frequency so it is not too big (else no information by timeout).
Then take average of past few iterations.
- Remove unused input args to run.py, or make a config file
- Handle case where if LR is on boundary, then we also need to run each momentum
at that one. E.g. if best result is LR 0.01 and momentum 0.9, it's possible
that an even better result would be LR 0.1 and momentum 0.6, but we would
never test that in this case. So if it is on boundary, we should run a 2nd
phase which runs all momentum values at that new LR
- Do not choose batch size but adjust batch size in proto slightly to properly
divide # machines in a group
- Choose batch size
- Make dynamic (re-evaluate every few iter)
"""
# ==============================================================================
# Parse arguments
# ==============================================================================
# ------------------------------------------------------------------------------
# Check for right number of args
# ------------------------------------------------------------------------------
expected_num_args = 2
if len(sys.argv) not in [expected_num_args, expected_num_args+1]:
print '''
Usage: >>> python omnivore.py config.txt
Example config.txt:
solver_template = files/solver.prototxt
group_size_list = 1,2,4,8,16,32
time_per_exp_phase1 = 60
time_per_exp_phase2 = 0
time_per_exp_phase3 = 300
'''
sys.exit(0)
# Pass in a D or d at the end to run in debug
DEBUG = False
if len(sys.argv) == expected_num_args+1 and sys.argv[expected_num_args] in ['d','D']:
DEBUG = True
# ------------------------------------------------------------------------------
# Read params
# ------------------------------------------------------------------------------
# Params we need to read
solver_template = ''
machine_list = ''
group_size_list = []
# SHADJIS TODO: Rather than read these times as input, run for 1 minute at a time and
# checkpoint at the end of the minute, and then repeat that until there is a clear winner
time_per_exp_phase1 = 0
time_per_exp_phase2 = 0
time_per_exp_phase3 = 0
# Read the experiment parameters
def parse_val(line, type_to_convert_to=str):
return type_to_convert_to(line.split('=')[-1].strip())
def parse_list(line, type_to_convert_to=str):
parsed_list_str = line.split('=')[-1].strip()
parsed_list_str_type = []
if ',' in parsed_list_str:
for s in parsed_list_str.split(','):
parsed_list_str_type.append(type_to_convert_to(s.strip()))
else:
parsed_list_str_type.append(type_to_convert_to(parsed_list_str))
assert parsed_list_str_type
return parsed_list_str_type
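# e.g. parse_val('time_per_exp_phase1 = 60', int) returns 60 and
# parse_list('group_size_list = 1,2,4,8', int) returns [1, 2, 4, 8].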
config_file = open(sys.argv[1])
for line in config_file:
if 'solver_template' in line:
solver_template = parse_val(line, str)
elif 'machine_list' in line:
machine_list = parse_val(line, str)
elif 'group_size_list' in line:
group_size_list = parse_list(line, int)
elif 'time_per_exp_phase1' in line:
time_per_exp_phase1 = parse_val(line, int)
elif 'time_per_exp_phase2' in line:
time_per_exp_phase2 = parse_val(line, int)
elif 'time_per_exp_phase3' in line:
time_per_exp_phase3 = parse_val(line, int)
config_file.close()
# Make sure we got params we need
assert solver_template
# assert len(group_size_list) == 1 # For now optimize 1 group_size at a time
assert time_per_exp_phase1
# assert time_per_exp_phase2
solver_name = solver_template.split('/')[-1].split('.')[0]
SOLVER_DIR = 'solvers_' + solver_name
os.system('mkdir -p ' + SOLVER_DIR)
# ------------------------------------------------------------------------------
# Optimize over these:
# ------------------------------------------------------------------------------
momentum_list = []
LR_list = []
# Parse some parameters:
# - initial LR, used as a guess for tuning (note: not assuming this is tuned for 1 machine)
# - increment: used to parse log file at the end
# initial_LR = 0
increment = 0
f = open(solver_template)
def parse_proto_val(line, type_to_convert_to):
return type_to_convert_to( line.strip().split(':')[-1].strip() )
for line in f:
#if 'base_lr' in line:
# initial_LR = parse_proto_val(line, float)
if 'display' in line:
increment = parse_proto_val(line, int)
f.close()
# assert initial_LR and initial_LR > 0
assert increment
# ------------------------------------------------------------------------------
# For now these ones will be hard-coded
# ------------------------------------------------------------------------------
if FCC:
fc_type = 'many_fc'
map_fcc_to_cc = '1'
else:
fc_type = 'single_fc' # 'single_fc' or 'many_fc'
map_fcc_to_cc = '0'
script_name_suffix = ''
script_name = 'run' + script_name_suffix + '.py'
# max_parallel_runs = 52
# Future options:
# - delay 1s between cc launch
# - 1 core or all cores?
# + run with LOG(INFO)
# + name server_... dir based on run name
# ==============================================================================
# Helper functions
# ==============================================================================
def read_output_by_lines(run_dir):
if FCC:
fname = run_dir + '/fc_compute_server.0.cfg.out' # SHADJIS TODO: Should open each and average (else favors fewer fcc)
else:
fname = run_dir + '/fc_server.cfg.out'
f = open(fname)
lines = f.read().strip().split("\n")
f.close()
return lines
def get_list_of_all_losses(lines, increment, group_size = 0):
# For each line, parse the loss
started_iterations = False
# If this line has a loss, append it to a list
list_of_all_losses = []
list_of_all_times = []
for line in lines:
if line.strip().split():
if not started_iterations:
if line.strip().split()[0] == str(increment):
started_iterations = True
else:
continue
if 'Writing snapshot' in line:
continue
loss_this_iter = float(line.strip().split()[2])
list_of_all_losses.append(loss_this_iter)
# Also check time
time_this_iter = float(line.strip().split()[1])
list_of_all_times.append(time_this_iter)
# Edit: also measure the hardware efficiency for this run
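# e.g. (hypothetical) with increment = 20 and list_of_all_times = [10, 30, 48, 66] seconds,
# burn_in = 2 and last_few_iter = 2, so the stored estimate is (66 - 48) / 1 / 20 = 0.9 s per iteration.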
if group_size > 0 and len(list_of_all_times) > 1:
if group_size not in group_size_to_time.keys():
burn_in = int(len(list_of_all_times) * 0.5)
burn_in = max(burn_in, 1)
last_few_iter = len(list_of_all_times) - burn_in
group_size_to_time[group_size] = (list_of_all_times[-1] - list_of_all_times[-last_few_iter])/float(last_few_iter-1)/float(increment)
assert group_size_to_time[group_size] > 0
return list_of_all_losses
# Wrapper so I can comment out for debugging
def run_cmd(cmd):
if DEBUG:
return
else:
os.system(cmd)
def contains_nan(L):
import math
for l in L:
if math.isnan(l):
return True
return False
# Parse base_lr, momentum and random_seed from the server output log (and sanity-check the group size)
def get_lr_m_s(lines, group_size):
# Do lots of assertions (this can be removed later)
# Read the output log and ensure it matches from above
# E.g.:
# base_lr: 0.1
# momentum: 0.6
base_lr = ''
momentum = ''
random_seed = ''
for line in lines:
if 'base_lr' in line:
base_lr = line.strip().split()[1]
if 'momentum' in line:
momentum = line.strip().split()[1]
if 'random_seed' in line:
random_seed = line.strip().split()[1]
if 'GROUPSIZE' in line:
assert group_size == int(line.strip().split()[-1])
return base_lr, momentum, random_seed
# Read in the solver template and fill it in
# There will be these lines:
# base_lr: ''' + str(LR) + '''
# momentum: ''' + str(momentum) + '''
# random_seed: ''' + str(random_seed) + '''
def make_solver(momentum, LR, fname, random_seed, snapshot_interval):
# Read in the file and swap in the parameters
output_str = ''
f = open(solver_template)
found_LR = False
found_m = False
for line in f:
if 'base_lr' in line and line.strip()[0] != '#':
assert not found_LR
output_str += ( 'base_lr: ' + str(LR) + "\n" )
found_LR = True
elif 'momentum' in line and line.strip()[0] != '#':
assert not found_m
output_str += ( 'momentum: ' + str(momentum) + "\n" )
found_m = True
elif 'random_seed' in line and line.strip()[0] != '#':
# We will insert our own later
pass
elif snapshot_interval > 0 and 'snapshot' in line and line.strip()[0] != '#':
# We will insert our own later
pass
else:
output_str += line
assert found_LR
assert found_m
output_str += ( 'random_seed: ' + str(random_seed) + "\n" )
if snapshot_interval > 0:
output_str += ( 'snapshot: ' + str(int(snapshot_interval)) + "\n" )
f.close()
# Write to a new file
f = open(fname, 'w')
f.write(output_str)
f.close()
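# Usage sketch for make_solver (arguments hypothetical):
#   make_solver(0.9, 0.01, SOLVER_DIR + '/solver_example.prototxt', 1, 0)
# writes a copy of the template with 'base_lr: 0.01' and 'momentum: 0.9' substituted and
# 'random_seed: 1' appended; snapshot settings are left untouched because snapshot_interval is 0.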
# Launch a run
# Currently serial but maybe we will want to do in parallel later
def run(group_size, hw_type, experiment_label, First, momentum, LR, seed, output_dir_base, run_time, print_only = False, snapshot_interval = 0, snapshot_input_dir = 'none', snapshot_input_iter = 'none'):
global total_optimizer_time
total_optimizer_time += run_time # SHADJIS TODO: Can count 30s overhead (15 + 15) or eliminate it in ZeroMQ
# Check if we should make a new lmdb
if First and MAKE_LMDB_FIRST:
skip_string = ''
else:
skip_string = 's'
run_id = '_'+ experiment_label + '.m' + str(momentum) + '.LR' + str(LR)
fname = SOLVER_DIR + '/solver' + run_id + '.prototxt'
# Create the command to run
os.system('mkdir -p logs')
logfile_out = 'logs/log.' + hw_type + run_id
# SHADJIS TODO: should make a config file rather than pass 100 arguments I think
run_experiment_command = 'python ' + script_name + ' ' + fname + ' ' + machine_list + ' ' + str(group_size) + ' ' + hw_type + ' ' + fc_type + ' ' + map_fcc_to_cc + ' ' + output_dir_base + ' ' + snapshot_input_dir + ' ' + str(snapshot_input_iter) + ' ' + skip_string + ' > ' + logfile_out + ' 2>&1'
if print_only:
print ' ' + run_experiment_command
return None
# Make solver
make_solver(momentum, LR, fname, seed, snapshot_interval)
# Extra commands to wait and then kill servers
if First:
run_time += EXTRA_TIME_FIRST
extra_cmds = ['sleep ' + str(run_time),
'echo \' Ending current run\'',
'bash kill_servers' + script_name_suffix + '.sh',
'sleep 10',
'bash kill_servers' + script_name_suffix + '.sh']
# Run commands
print '[' + str(run_time/60) + ' min] ' + run_experiment_command
run_cmd(run_experiment_command)
# Wait for the command to finish and then kill the servers
for extra_cmd in extra_cmds:
run_cmd(extra_cmd)
# Return the directory, which we can get from parsing the output file
# Writing input prototxt files to /path/server_input_files-2016-01-22-21-34-22/
output_dir = ''
f = open(logfile_out)
for line in f:
if 'Writing input prototxt files to' in line:
output_dir = line.strip().split()[-1]
break
f.close()
assert output_dir
return output_dir
def print_estimation_time(random_seeds, group_size, momentum_list, LR_list, time_per_exp):
time_for_1_run = int( time_per_exp + 15 + 15 )
time_estimate = time_for_1_run*len(random_seeds)*len(momentum_list)*len(LR_list)
time_estimate /= 60 # minutes
if time_estimate > 60:
print 'Estimated runtime: ' + str(time_estimate/60) + ' hours and ' + str(time_estimate%60) + ' minutes'
else:
print 'Estimated runtime: ' + str(time_estimate) + ' minutes'
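# e.g. (illustrative) 2 seeds x 4 momenta x 4 LRs with time_per_exp = 60 gives
# 32 runs x (60 + 30) s = 2880 s, i.e. an estimated 48 minutes.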
def grid_search_parameters(EXP_NAME, group_size, momentum_list, LR_list, random_seeds, best_m_last_iteration, best_LR_last_iteration, snapshot_input_dir = 'none', snapshot_input_iter = 'none', exit_early_time_threshold = 100000., buffer = 1.0): # No buffer for cold start since loss is still high for all, and gap is small
global First_Run
for phase in [0,1]:
# Estimate runtime for this phase
print "\n" + '**********************************************************' + "\n" + 'Beginning tuning phase ' + str(phase) + "\n"
if phase == 0:
time_per_exp = time_per_exp_phase1
else:
assert phase == 1
time_per_exp = time_per_exp_phase2
if time_per_exp == 0:
print ' Skipping phase, time set to 0'
break
print_estimation_time(random_seeds, group_size, momentum_list, LR_list, time_per_exp)
print ' Momentum: ' + str(momentum_list)
print ' LR: ' + str(LR_list)
print ' seeds: ' + str(random_seeds)
# Check if any of these have been run already
# This is useful in case there is an interruption and the experiment did not complete fully
#
# Iterate over the existing directories and make a list of the ones already run
m_LR_s_to_output_dir = {} # This one has keys which are strings
# Check if any runs exist
experiment_dir = base_dir + '/' + EXP_NAME + '_PHASE_' + str(phase) + '/'
if os.path.isdir(experiment_dir):
# Check if any of these have already run
for subdir in os.listdir(experiment_dir):
# Read the output file and check for errors
full_experiment_dir = experiment_dir + subdir + '/'
lines = read_output_by_lines(full_experiment_dir)
base_lr, momentum, random_seed = get_lr_m_s(lines, group_size)
list_of_all_losses = get_list_of_all_losses(lines, increment, group_size)
# Check for errors
if not base_lr or not momentum or not random_seed or not list_of_all_losses:
# This one didn't finish properly, so rerun it later
continue
# Otherwise this one ran, so no need to run again
m_LR_s_to_output_dir[(momentum, base_lr, random_seed)] = full_experiment_dir
# Now we have a map (set) of the parameters which finished already
# Run the remaining parameters
# We will map each run to a loss
m_LR_s_to_loss = {} # This one has keys which are float float int (should make consistent with above map which is strings)
# Print some output as well
output_lines = []
best_str = ''
best_s = None
best_m = None
best_LR = None
best_loss = 10000000000
# Now run commands
for s in random_seeds:
print "\n" + 'Running seed ' + str(s)
# Optimization: if we hit NaN for some parameters, no need to search larger parameters
NaN_LR = None
NaN_m = None
# SHADJIS TODO: Optimization:
# Iterate from high to low for LR and m and if it ever gets worse
# (or e.g. only better by a small margin like 5%), then stop.
# This is because we expect the best parameters to be larger than
# the smallest parameters we check, so we can save time
for LR in sorted(LR_list): # Low to high
for m in sorted(momentum_list): # Low to high
# Optimization:
# Skip this iteration if it will be Nan
# The LR check is redundant since it is in the outer loop, i.e. every LR will be >= NaN_LR
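# e.g. (hypothetical) if LR = 0.01, m = 0.6 produced NaN, then LR = 0.01 with m = 0.9,
# and any LR >= 0.01 combined with m >= 0.6, is skipped for this seed.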
if NaN_LR is not None and NaN_m is not None:
if LR >= NaN_LR and m >= NaN_m:
print ' Skipping LR = ' + str(LR) + ', m = ' + str(m) + ', s = ' + str(s) + ' to avoid NaN'
continue
# Also skip this iteration if LR and m are larger than the previous iteration's optimal LR and m
# This is because we run from low to high staleness
# SHADJIS TODO: Remove this heuristic when searching a grid
if best_LR_last_iteration is not None and best_m_last_iteration is not None:
if LR > best_LR_last_iteration or (LR == best_LR_last_iteration and m > best_m_last_iteration):
print ' Skipping LR = ' + str(LR) + ', m = ' + str(m) + ', s = ' + str(s) + ' because of previous iteration (staleness)'
continue
if LR < best_LR_last_iteration and m < best_m_last_iteration: # SHADJIS TODO: Check this
print ' Skipping LR = ' + str(LR) + ', m = ' + str(m) + ', s = ' + str(s) + ' because too low'
continue
# if this seed/momentum/LR ran already, skip it
if (str(m), str(LR), str(s)) in m_LR_s_to_output_dir.keys():
print ' Found m=' + str(m) + ' LR=' + str(LR) + ' s=' + str(s) + ', skipping command:'
run(group_size, hw_type, EXP_NAME + '.PHASE' + str(phase) + '.seed' + str(s), First_Run, m, LR, s, experiment_dir, time_per_exp, print_only = True, snapshot_input_dir = snapshot_input_dir, snapshot_input_iter = snapshot_input_iter)
full_experiment_dir = m_LR_s_to_output_dir[(str(m), str(LR), str(s))]
# otherwise run this command
else:
full_experiment_dir = run(group_size, hw_type, EXP_NAME + '.PHASE' + str(phase) + '.seed' + str(s), First_Run, m, LR, s, experiment_dir, time_per_exp, snapshot_input_dir = snapshot_input_dir, snapshot_input_iter = snapshot_input_iter)
First_Run = False
# Now the command has run, read the output log and parse the final (or average, etc.) loss
lines = read_output_by_lines(full_experiment_dir)
base_lr, momentum, random_seed = get_lr_m_s(lines, group_size)
list_of_all_losses = get_list_of_all_losses(lines, increment, group_size)
# Check for errors
if not base_lr or not momentum or not random_seed or not list_of_all_losses:
print "\t".join([random_seed, momentum, base_lr, 'ERROR ' + full_experiment_dir])
# We could break here because every larger momentum would be skipped for NaN,
# but by continuing instead it will print that it is skipping them
continue
assert base_lr == str(LR)
assert momentum == str(m)
assert random_seed == str(s)
# If this did not speed up, exit early
assert group_size in group_size_to_time.keys() and group_size_to_time[group_size] > 0
if group_size_to_time[group_size] > exit_early_time_threshold:
return 0, 0, 0, m_LR_s_to_loss, True
# Check also if there was a nan in this result
# If so, we know not to run any setting with LR >= this LR and momentum >= this momentum (see the skip above)
if contains_nan(list_of_all_losses):
NaN_LR = LR
NaN_m = m
print ' NaN found for LR = ' + str(LR) + ', m = ' + str(m) + ', s = ' + str(s)
continue
# Calculate average loss for run
# average_loss = sum(list_of_all_losses) / float(len(list_of_all_losses))
last_few_iter = 10 # SHADJIS TODO: Use another heuristic?
assert last_few_iter > 0
average_loss = sum(list_of_all_losses[-last_few_iter:]) / float(last_few_iter)
row = "\t".join([random_seed, momentum, base_lr, str(average_loss)])
m_LR_s_to_loss[(float(momentum), float(base_lr), int(random_seed))] = average_loss
if average_loss < best_loss:
best_loss = average_loss
best_str = row + "\t" + full_experiment_dir
best_s = int(random_seed)
best_m = float(momentum)
best_LR = float(base_lr)
output_lines.append(row)
# Every command has now run
assert best_str
print ''
print "\t".join(['seed', 'momentum', 'LR', 'loss'])
print "\n".join(list(sorted(output_lines)))
print 'Best:'
print best_str
# Now we have m_LR_s_to_loss for each m / LR / s and also the best,
# Just using the best_* parameters works well, but since we have
# m_LR_s_to_loss we can pick parameters which are higher (e.g.
# higher LR, higher m) as long as the final loss is not much
# worse (e.g. within 10%). This works better in the long-run.
#
# First iterate over LR and pick highest LR possible
original_best_LR = best_LR
original_best_m = best_m
for s in random_seeds:
for LR in sorted(LR_list): # Lowest to highest
# only consider LR bigger than or equal to the best one
if LR < original_best_LR:
continue
for m in sorted(momentum_list): # Lowest to highest
# at the same LR, only pick a larger m
if LR == original_best_LR and m <= original_best_m:
continue
# Check if it is within buffer range
if (m,LR,s) in m_LR_s_to_loss.keys() and m_LR_s_to_loss[(m,LR,s)] < best_loss*buffer: # SHADJIS TODO: Use another heuristic?
print 'Adjusting best to m = ' + str(m) + ', LR = ' + str(LR) + ', s = ' + str(s) + ', loss = ' + str(m_LR_s_to_loss[(m,LR,s)])
best_LR = LR
best_m = m
best_s = s
if phase == 0:
# Pick new momentum list:
if best_m == 0.0:
momentum_list = [0.0, 0.1, 0.2]
# SHADJIS TODO: If this function is being called during steady-state optimizer, can force a finer momentum grid here.
# Specifically, add an input argument to this function "steady_state" (default False, or alternatively "cold_start" default true)
# and if we are in steady state, then here set the time for second phase to be > 0 if it is not already
elif best_m == 0.3:
momentum_list = [0.1, 0.2, 0.3, 0.4, 0.5]
elif best_m == 0.6:
momentum_list = [0.4, 0.5, 0.6, 0.7, 0.8]
else:
# assert best_m == 0.9
momentum_list = [0.7, 0.8, 0.9]
# Pick a new LR list
#if best_LR == LR_list[0]:
# LR_list = [LR_list[0]*10., LR_list[0]]
#elif best_LR == LR_list[-1]:
# LR_list = [LR_list[-1], LR_list[-1]/10.]
#else:
# assert best_LR == LR_list[1]
# LR_list = [LR_list[1]]
LR_list = [best_LR]
random_seeds = [best_s] # If we used more than 1 seed for previous phase, only need 1 for next phase
# If running more than 2 phases, we can make similar parameter adjustments for the next phase here
#elif...
print "\n" + '**********************************************************'
print 'Experiment complete, final tuned result for ' + str(group_size) + ' machines per group:'
print ' s* = ' + str(best_s)
print ' m* = ' + str(best_m)
print ' LR* = ' + str(best_LR)
return best_s, best_m, best_LR, m_LR_s_to_loss, False
# ==============================================================================
# Cold Start Phase
# ==============================================================================
print ''
print '========================================================================================================'
print 'Cold Start Optimization'
print '========================================================================================================'
# Only generate LMDB once for this cluster
time_threshold = 0.10 # SHADJIS TODO: Heuristic, this is how much faster we need to be to consider a larger #groups
First_Run = True
best_group_size = None
best_group_size_s = None
best_group_size_m = None
best_group_size_LR = None
best_loss_across_staleness = 10000000000
total_optimizer_time = 0
# Iterate over each group_size setting and optimize each separately
# Iterate in order from low staleness to high staleness (i.e. large to small groups)
# This is because we know that the optimal parameters will be smaller as S increases
best_LR_last_iteration = None
best_m_last_iteration = None
# For the seed, just run multiple seeds with no staleness and then use the best
# one for other staleness values
best_seed_last_iteration = None
# SHADJIS TODO: For now I assume the first iteration is a single group, i.e. I assume
# that when sorting group sizes from largest to smallest the largest group is all machines.
# This might not be true, so it should be verified. But when running 1 group, don't tune m
# Note: it is possible that for S=0, i.e. 1 group, the best momentum is not 0.9, e.g. it is
# possible that LR 0.001, m 0.9 does not diverge, and also that LR 0.01, m 0.3 does not diverge.
# However our contribution is tuning parameters to compensate for staleness, so the optimizer
# can ignore tuning momentum for the case of no staleness to save time.
single_group = True
time_for_prev_group_size = 1000000.
for group_size in reversed(sorted(group_size_list)):
print ''
print '-----------------------------------------------------------------------------------'
print 'Beginning optimization for ' + str(group_size) + ' machines per group'
print '-----------------------------------------------------------------------------------'
EXP_NAME = solver_name + '_' + str(group_size) + 'mpg_COLD' # Parse the name of the solver for log file names
# The optimization procedure consists of a number of iteration phases
# Each phase we will zoom in on the optimal parameters
if momentum_list_phase_0:
momentum_list = momentum_list_phase_0
else:
if single_group:
# Optimization:
# See comment above, if S=0 we can choose to skip momentum tuning
momentum_list = [0.9]
else:
momentum_list = [0.0, 0.3, 0.6, 0.9]
if LR_list_phase_0:
LR_list = LR_list_phase_0
else:
if single_group:
LR_list = [0.1, 0.01, 0.001, 0.0001] # SHADJIS TODO: use finer grid for LR so momentum will not increase again
# LR_list = [initial_LR*100., initial_LR*10., initial_LR, initial_LR/10., initial_LR/100.]
else:
LR_list = [best_LR_last_iteration, best_LR_last_iteration/10.]
# Optimization:
# For the first iteration, run multiple seeds
# Then pick the best one for later runs
if single_group:
random_seeds = random_seeds_phase_0
else:
random_seeds = [best_seed_last_iteration]
best_s, best_m, best_LR, m_LR_s_to_loss, early_exit = grid_search_parameters(EXP_NAME, group_size, momentum_list, LR_list, random_seeds, best_m_last_iteration, best_LR_last_iteration, exit_early_time_threshold = time_for_prev_group_size/(1. + time_threshold))
if early_exit:
print "\n" + 'Skipping remaining group sizes because FC saturation was reached'
break
# Now run a final experiment with these
if time_per_exp_phase3 > 0:
print 'Running for ' + str(time_per_exp_phase3) + ' seconds...'
full_experiment_dir = base_dir + '/' + EXP_NAME + '_FINAL_PHASE/'
# Check if this exists
if os.path.isdir(full_experiment_dir):
print ' Skipping, already ran'
list_of_subdir = os.listdir(full_experiment_dir)
if len(list_of_subdir) != 1:
print 'Error -- please only put 1 directory in ' + full_experiment_dir + ' (to simplify parsing)'
sys.exit(0)
output_dir = full_experiment_dir + list_of_subdir[0]
else:
# Run the experiment
output_dir = run(group_size, hw_type, EXP_NAME + '.FINAL_PHASE.seed' + str(best_s), First_Run, best_m, best_LR, best_s, full_experiment_dir, time_per_exp_phase3)
# Parse the output
lines = read_output_by_lines(output_dir)
if 'SOFTMAX' in lines[-1] or 'my_create_zmq' in lines[-1]:
print ' Run failed, need to rerun!'
continue
list_of_all_losses = get_list_of_all_losses(lines, increment, group_size)
# Calculate the average loss of the last few iterations, e.g. the last 5 or 10
# (but make sure it is consistent across S, otherwise not a fair comparison)
last_few_iter = 10 # SHADJIS TODO: Use another heuristic?
assert last_few_iter > 0
average_loss = sum(list_of_all_losses[-last_few_iter:]) / float(last_few_iter)
print "\n" + 'Final loss for group size ' + str(group_size) + ' = ' + str(average_loss) + "\n"
else:
print 'Not running the best for longer, re-using the best from phase 1/2'
average_loss = m_LR_s_to_loss[(best_m, best_LR, best_s)]
print "\n" + 'Final loss for group size ' + str(group_size) + ' = ' + str(average_loss) + "\n"
if average_loss < best_loss_across_staleness:
best_group_size = group_size
best_loss_across_staleness = average_loss
best_group_size_s = best_s
best_group_size_m = best_m
best_group_size_LR = best_LR
# Done this group. If it was the first iteration, now it is not the first iteration anymore
single_group = False
best_LR_last_iteration = best_LR
best_m_last_iteration = best_m
best_seed_last_iteration = best_s
assert group_size in group_size_to_time.keys() and group_size_to_time[group_size] > 0
time_for_prev_group_size = group_size_to_time[group_size]
print ''
print 'Finished cold-start optimizer, best result is group size ' + str(best_group_size)
print 'Total optimizer time (seconds) was ' + str(total_optimizer_time)
EXP_NAME = solver_name + '_' + str(best_group_size) + 'mpg_COLD'
time_to_run = optimizer_duration #max(total_optimizer_time*optimizer_factor, 600) #max(optimizer_duration-total_optimizer_time, 600)
print 'Running this setting for ' + str(time_to_run) + ' seconds (Ctrl-C will (1) stop the job and (2) run kill script)'
full_experiment_dir = base_dir + '/' + EXP_NAME + '_DECISION/'
# Snapshot: For this run we need to save a snapshot
# We know we will run for time_to_run seconds, so calculate the number of iterations in that time:
assert best_group_size in group_size_to_time.keys() and group_size_to_time[best_group_size] > 0
num_iterations = time_to_run / group_size_to_time[best_group_size]
snapshot_interval = num_iterations*snap_frequency # Could write more frequently as well in case something fails
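# e.g. if time_to_run = 10000 s and each iteration takes 2 s, num_iterations = 5000;
# with snap_frequency = 0.5 (hypothetical value) a snapshot is written every 2500 iterations.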
if os.path.isdir(full_experiment_dir):
print ' Skipping, already ran'
list_of_subdir = os.listdir(full_experiment_dir)
if len(list_of_subdir) != 1:
print 'Error -- please only put 1 directory in ' + full_experiment_dir + ' (to simplify parsing)'
sys.exit(0)
output_dir = full_experiment_dir + list_of_subdir[0]
else:
# Run the experiment
output_dir = run(best_group_size, hw_type, EXP_NAME + '._DECISION.seed' + str(best_group_size_s), First_Run, best_group_size_m, best_group_size_LR, best_group_size_s, full_experiment_dir, time_to_run, snapshot_interval = snapshot_interval)
# ==============================================================================
# Steady-State Optimizer
# ==============================================================================
"""
g = most async until FC saturation
while True (user can stop once sufficiently converged)
load checkpoint from last run
grid search momentum and LR (seed irrelevant since starting from checkpoint)
while momentum = 0 and g > 1
g = g / 2
grid search momentum and LR (seed irrelevant since starting from checkpoint)
train model and save checkpoint at end (set checkpoint based on iteration time)
"""
print ''
print '========================================================================================================'
print 'Steady-State Optimizer'
print '========================================================================================================'
time_per_exp_phase1 = 200 # We do 5 runs, so 1000 seconds in optimizer, then run for 10000 seconds, i.e. 10% overhead
run_number = 1
last_experiment_dir = output_dir
last_m = best_group_size_m
last_LR = best_group_size_LR
last_s = best_group_size_s # Not needed because initialization comes from snapshot
total_optimizer_time = 0
# Use HE results to find the fastest group size (within tolerance)
print 'Iteration time for each group size:'
current_group_size = 0
last_iter_time = 100000000.
for group_size in reversed(sorted(group_size_list)):
if group_size in group_size_to_time.keys():
print ' group size ' + str(group_size) + ': ' + str(group_size_to_time[group_size])
if group_size_to_time[group_size] < last_iter_time/(1. + time_threshold):
last_iter_time = group_size_to_time[group_size]
current_group_size = group_size
print 'Initial choice for group size: ' + str(current_group_size)
assert current_group_size > 0
# Begin iteration
while current_group_size <= max(group_size_list):
# Look through the last experiment directory and find the latest snapshot
assert os.path.isdir(last_experiment_dir)
latest_snapshot_iter = -1
for f in os.listdir(last_experiment_dir):
if 'snapshot_iter' in f:
match = re.search(r'snapshot_iter(\d+)', f, flags=re.IGNORECASE)
if match:
f_snap_iter = int(match.group(1))
if f_snap_iter > latest_snapshot_iter:
latest_snapshot_iter = f_snap_iter
assert latest_snapshot_iter >= 0 # Assert we found a snapshot
# Search momentum and LR
while current_group_size <= max(group_size_list):
print ''
print 'Current group size is ' + str(current_group_size) + ' machines per group'
EXP_NAME = solver_name + '_' + str(current_group_size) + 'mpg_OPT' + str(run_number)
# The optimization procedure consists of a number of iteration phases
# Each phase we will zoom in on the optimal parameters
if momentum_list_phase_0:
momentum_list = momentum_list_phase_0
else:
momentum_list = [0.0, 0.3, 0.6, 0.9]
if LR_list_phase_0:
LR_list = LR_list_phase_0
else:
# SHADJIS TODO: Not sure yet based on theory what to do here. How are LR and m related?
# If m is 0, then LR will go down, and m will go back up, so m might never be 0.
# Should I try negative momentum? Or not decrease LR, but if m goes to 0, then increase # groups?
LR_list = [last_LR, last_LR/10.]
random_seeds = [last_s]
best_s, best_m, best_LR, unused_1, unused_2 = grid_search_parameters(EXP_NAME, current_group_size, momentum_list, LR_list, random_seeds, last_m, last_LR, snapshot_input_dir = last_experiment_dir, snapshot_input_iter = latest_snapshot_iter, buffer = 1.01) # buffer because slope is larger after cold start
if best_m == 0:
last_LR = best_LR
current_group_size = current_group_size * 2
# SHADJIS TODO: Heuristic. Idea is that if we make #groups smaller, maybe momentum can be a bit bigger.
# Choosing 0.6 here puts the 0.0 just chosen in the center of the next search range
# Maybe we can pick an even higher momentum, or even search a higher learning rate (since we are making # groups smaller)
# Or maybe we can keep the #groups same, but use negative momentum. Maybe we can reparameterize and keep the learning rate constant, etc.
best_m = 0.6
last_m = best_m
else:
break
# Run for the next optimizer epoch
print 'Total optimizer time (seconds) was ' + str(total_optimizer_time)
EXP_NAME = solver_name + '_' + str(current_group_size) + 'mpg_OPT' + str(run_number)
time_to_run = optimizer_duration #max(total_optimizer_time*optimizer_factor, 600) #max(optimizer_duration-total_optimizer_time, 600)
print 'Running this setting for ' + str(time_to_run) + ' seconds (Ctrl-C will (1) stop the job and (2) run kill script)'
full_experiment_dir = base_dir + '/' + EXP_NAME + '_DECISION/'
# Snapshot: For this run we need to save a snapshot
# We know we will run for time_to_run seconds, so calculate the number of iterations in that time:
num_iterations = time_to_run / group_size_to_time[current_group_size]
snapshot_interval = num_iterations*snap_frequency # Could write more frequently as well in case something fails
if os.path.isdir(full_experiment_dir):
print ' Skipping, already ran'
list_of_subdir = os.listdir(full_experiment_dir)
if len(list_of_subdir) != 1:
print 'Error -- please only put 1 directory in ' + full_experiment_dir + ' (to simplify parsing)'
sys.exit(0)
output_dir = full_experiment_dir + list_of_subdir[0]
else:
# Run the experiment
output_dir = run(current_group_size, hw_type, EXP_NAME + '.OPTIMIZER_DECISION', First_Run, best_m, best_LR, best_s, full_experiment_dir, time_to_run, snapshot_interval = snapshot_interval, snapshot_input_dir = last_experiment_dir, snapshot_input_iter = latest_snapshot_iter)
# Update for next iter
run_number += 1
last_experiment_dir = output_dir
last_m = best_m
last_LR = best_LR
total_optimizer_time = 0
|
Omnivore-master
|
omnivore.py
|
# ------------------------------------------------------------------------------
# Parameters
# ------------------------------------------------------------------------------
NUM_COMPUTE = 1
dir = str(NUM_COMPUTE) + '_profile'
out_name = dir # No ext needed, automatically will be .png
# ------------------------------------------------------------------------------
# Imports
# ------------------------------------------------------------------------------
import sys, os
import datetime
import re
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# ------------------------------------------------------------------------------
# Helper Functions
# ------------------------------------------------------------------------------
def fsec(s):
hours, minutes, seconds = [float(val) for val in s.split(':')]
return int((hours*3600+minutes*60+seconds)*1000)
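# Note: despite the name, fsec converts an HH:MM:SS.fff timestamp to integer milliseconds,
# e.g. fsec('01:02:03.5') == (1*3600 + 2*60 + 3.5) * 1000 == 3723500.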
# ------------------------------------------------------------------------------
# Read all files to get size of graph
# ------------------------------------------------------------------------------
min_times = []
max_times = [0]*(2+NUM_COMPUTE)
server = 0
# Read each log file to get the min time in each
# Parse conv model server
first = True
for l in open('../' + dir + '/conv_model_server.cfg.out'):
l = l.rstrip()
if first and ('~~~~ ENTER STATE' in l):
min_times.append( fsec(l.split(' ')[1]) )
first = False
elif ('~~~~ EXIT STATE' in l):
max_times[server] = fsec(l.split(' ')[1])
server += 1
# print '.'
# Parse fc server
first = True
for l in open('../' + dir + '/fc_server.cfg.out'):
l = l.rstrip()
if first and ('~~~~ ENTER STATE' in l):
min_times.append( fsec(l.split(' ')[1]) )
first = False
elif ('~~~~ EXIT STATE' in l):
max_times[server] = fsec(l.split(' ')[1])
server += 1
# print '.'
# Parse conv compute servers
for worker in range(NUM_COMPUTE):
first = True
for l in open('../' + dir + '/conv_compute_server.%d.cfg.out' % worker):
l = l.rstrip()
if first and ('~~~~ ENTER STATE' in l):
min_times.append( fsec(l.split(' ')[1]) )
first = False
elif ('~~~~ EXIT STATE' in l):
max_times[server] = fsec(l.split(' ')[1])
server += 1
# print '.'
assert len(min_times) == 2+NUM_COMPUTE
assert len(max_times) == 2+NUM_COMPUTE
MIN_TIME = min(min_times)
MAX_TIME = min(max_times)
END_TIME = MAX_TIME  # alternative: MIN_TIME + 1000
# print MIN_TIME
# print MAX_TIME
# print END_TIME
# ------------------------------------------------------------------------------
# Set up graphics
# ------------------------------------------------------------------------------
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
ax1.set_ylim([MIN_TIME,END_TIME])
ax1.set_xlim([0,0.6+0.3*NUM_COMPUTE])
ax1.yaxis.grid(True)
# ax1.yaxis.set_ticks([i*10.0 for i in range(649810,789810)])
colors = {
# FC
'Read msg': "red",
'Update input layer': "blue",
'FC Get Grad': "blue",
'FC FW': "pink",
'FC BW': "pink",
'ACC': "yellow",
# Conv Model
'Read corpus': "red",
'Update Model': "blue",
'Update gradients': "blue",
'Copy FW': "yellow",
'Copy BW': "yellow",
'Conv FW': "pink",
'Conv BW': "pink",
'Read msg': "black",
# Conv Compute
'Copy Model': "red",
}
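# NOTE: 'Read msg' appears twice in the dict literal above; Python keeps only the last
# value, so both the FC and conv model server 'Read msg' states are drawn in black.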
# ------------------------------------------------------------------------------
# Generate Plots
# ------------------------------------------------------------------------------
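# Column layout used below: conv model server at x = 0.1, fc server at x = 0.3,
# and conv compute worker w at x = 0.3 + 0.3*(w+1); every bar is 0.2 wide and its
# height spans the time spent in the corresponding state.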
# Parse conv model server
lasttime = None
for l in open('../' + dir + '/conv_model_server.cfg.out'):
l = l.rstrip()
if ('~~~~ ENTER STATE' in l) and ('IDLE' not in l):
t = fsec(l.split(' ')[1])
if t >= END_TIME: break
lasttime = t
elif ('~~~~ EXIT STATE' in l) and ('IDLE' not in l) and (lasttime != None):
t = fsec(l.split(' ')[1])
if t >= END_TIME: break
m = re.search('EXIT STATE (.*?)$', l)
# print t-lasttime, m.group(1)
ax1.add_patch(patches.Rectangle((0.1, lasttime), 0.2, t-lasttime, color=colors[m.group(1)]))
# Parse fc server
lasttime = None
for l in open('../' + dir + '/fc_server.cfg.out'):
l = l.rstrip()
if ('~~~~ ENTER STATE' in l) and ('IDLE' not in l):
t = fsec(l.split(' ')[1])
if t >= END_TIME: break
lasttime = t
elif ('~~~~ EXIT STATE' in l) and ('IDLE' not in l) and (lasttime != None):
t = fsec(l.split(' ')[1])
if t >= END_TIME: break
m = re.search('EXIT STATE (.*?)$', l)
print t-lasttime, m.group(1)
ax1.add_patch(patches.Rectangle((0.3, lasttime), 0.2, t-lasttime, color=colors[m.group(1)]))
# Parse conv compute servers
for worker in range(NUM_COMPUTE):
lasttime = None
for l in open('../' + dir + '/conv_compute_server.%d.cfg.out' % worker):
l = l.rstrip()
if ('~~~~ ENTER STATE' in l) and ('IDLE' not in l):
t = fsec(l.split(' ')[1])
if t >= END_TIME: break
lasttime = t
elif ('~~~~ EXIT STATE' in l) and ('IDLE' not in l) and (lasttime != None):
t = fsec(l.split(' ')[1])
if t >= END_TIME: break
m = re.search('EXIT STATE (.*?)$', l)
# print t-lasttime, m.group(1)
ax1.add_patch(patches.Rectangle((0.3 + 0.3*(worker+1), lasttime), 0.2, t-lasttime, color=colors[m.group(1)]))
# Print figure
print "Generating " + out_name + '.png'
fig1.set_size_inches(5, 1000)
fig1.savefig(out_name + '.png', dpi=30, bbox_inches='tight')
|
Omnivore-master
|
tools/profile/analyze.py
|
import os
# ------------------------------------------------------------------------------
# Parameters
# ------------------------------------------------------------------------------
hw_types = ['4GPU']
num_machines_per_group_list = [1,2,4,8]
num_group_list = [32,16,8,4]
num_runs = 2
time_per_exp = 3600*1
NUM_MACHINES = 32
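# Only configurations whose product equals NUM_MACHINES are run below, i.e.
# (machines_per_group, num_groups) in {(1,32), (2,16), (4,8), (8,4)} for 32 machines.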
#CPU
# hw_types = ['CPU']
# num_machines_per_group_list = [2, 16]
# num_group_list = [2, 16]
# num_runs = 2
# time_per_exp = 3600*8
# NUM_MACHINES = 32
#Staleness
# hw_types = ['4GPU']
# num_machines_per_group_list = [2]
# num_group_list = [16]
# num_runs = 1
# time_per_exp = 600
# NUM_MACHINES = 32
# ------------------------------------------------------------------------------
# Helper functions
# ------------------------------------------------------------------------------
# Wrapper so I can comment out for debugging
def run_cmd(cmd):
os.system(cmd)
# return
# Run 30 min plus extra if fewer machines
#def get_run_time(num_convcompute):
# return 3600 # Terminate all at 30 min, and sometimes slow to start
# Unused for now (but more runs should be needed for more groups?)
# We can do analysis of variance later and see
#def get_num_runs(num_groups, num_machine_per_grp, num_convcompute):
# num_runs = 1
# if num_groups <= 2:
# if num_convcompute == 8:
# num_runs = 2
# elif num_convcompute > 8:
# num_runs = 3
# else:
# num_runs = 1
# elif num_groups <= 4: # Run two runs (a and b)
# num_runs = 2
# else: # Run three runs (a,b,c)
# num_runs = 3
# return num_runs
def run(num_convcompute, num_machine_per_grp, hw_type, experiment_label):
# Clear old lmdb
# run_cmd('rm -rf ilsvrc12_train_lmdb_8_p*')
# run_cmd('sleep 20')
# Create the command to run. Will run multiple times if many machines (since variance)
# num_groups = num_convcompute / num_machine_per_grp
base_cmd = 'python run.py example/solver_template.prototxt example/m' + str(num_convcompute) + '.txt ' + str(num_machine_per_grp) + ' ' + hw_type + ' s > logs/log.' + str(num_convcompute) + '.' + str(num_machine_per_grp) + '.' + hw_type
# Extra commands to wait and then kill servers
run_time = time_per_exp  # get_run_time(num_convcompute)
extra_cmds = ['sleep ' + str(run_time),
'bash kill_servers.sh',
'sleep 10',
'bash kill_servers.sh',
'sleep 10']
# Run commands
cmd = base_cmd + '_'+ experiment_label + ' 2>&1'
print '[' + str(run_time/60) + ' min] ' + cmd
run_cmd(cmd)
# Wait for the command to finish and then kill the servers
for extra_cmd in extra_cmds:
print ' ' + extra_cmd
run_cmd(extra_cmd)
# ------------------------------------------------------------------------------
# Main script
# ------------------------------------------------------------------------------
# First estimate runtime
est = 0
for r in range(num_runs):
for hw_type in hw_types:
for num_machines_per_group in num_machines_per_group_list:
for num_group in num_group_list:
if num_group * num_machines_per_group != NUM_MACHINES:
continue
num_conv_compute = num_group*num_machines_per_group
time_for_1_run = time_per_exp  # get_run_time(num_conv_compute) + 2  # 2 min to make lmdb and wait between launching
time_for_1_run /= 60 # minutes
est += time_for_1_run
if est > 60:
print 'Estimated runtime: ' + str(est/60) + ' hours and ' + str(est%60) + ' minutes'
else:
print 'Estimated runtime: ' + str(est) + ' minutes'
# Now run actual commands
for hw_type in hw_types:
for r in range(num_runs):
for num_machines_per_group in num_machines_per_group_list:
print "\n" + 'Running ' + str(num_machines_per_group) + ' machine(s) per group'
for num_group in num_group_list:
if num_group * num_machines_per_group != NUM_MACHINES:
continue
run(num_group * num_machines_per_group, num_machines_per_group, hw_type, 'FCCM' + str(r))
print "\n" + 'Experiment complete'
|
Omnivore-master
|
tools/util_scripts/experiment.py
|
import sys
import numpy as np
def makeColorwheel():
# color encoding scheme
# adapted from the color circle idea described at
# http://members.shaw.ca/quadibloc/other/colint.htm
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
ncols = RY + YG + GC + CB + BM + MR
colorwheel = np.zeros([ncols, 3]) # r g b
col = 0
#RY
colorwheel[0:RY, 0] = 255
colorwheel[0:RY, 1] = np.floor(255*np.arange(0, RY, 1)/RY)
col += RY
#YG
colorwheel[col:YG+col, 0] = 255 - np.floor(255*np.arange(0, YG, 1)/YG)
colorwheel[col:YG+col, 1] = 255
col += YG
#GC
colorwheel[col:GC+col, 1] = 255
colorwheel[col:GC+col, 2] = np.floor(255*np.arange(0, GC, 1)/GC)
col += GC
#CB
colorwheel[col:CB+col, 1] = 255 - np.floor(255*np.arange(0, CB, 1)/CB)
colorwheel[col:CB+col, 2] = 255
col += CB
#BM
colorwheel[col:BM+col, 2] = 255
colorwheel[col:BM+col, 0] = np.floor(255*np.arange(0, BM, 1)/BM)
col += BM
#MR
colorwheel[col:MR+col, 2] = 255 - np.floor(255*np.arange(0, MR, 1)/MR)
colorwheel[col:MR+col, 0] = 255
return colorwheel
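# The colorwheel has RY + YG + GC + CB + BM + MR = 15 + 6 + 4 + 11 + 13 + 6 = 55 rows,
# one RGB triplet per color bin.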
def computeColor(u, v):
colorwheel = makeColorwheel()
nan_u = np.isnan(u)
nan_v = np.isnan(v)
nan_u = np.where(nan_u)
nan_v = np.where(nan_v)
u[nan_u] = 0
u[nan_v] = 0
v[nan_u] = 0
v[nan_v] = 0
ncols = colorwheel.shape[0]
radius = np.sqrt(u**2 + v**2)
a = np.arctan2(-v, -u) / np.pi
fk = (a+1) / 2 * (ncols-1) # -1..1 mapped to 0..ncols-1
k0 = fk.astype(np.uint8) # 0, 1, ..., ncols-1
k1 = k0 + 1
k1[k1 == ncols] = 0
f = fk - k0
img = np.empty([k1.shape[0], k1.shape[1],3])
ncolors = colorwheel.shape[1]
for i in range(ncolors):
tmp = colorwheel[:,i]
col0 = tmp[k0]/255
col1 = tmp[k1]/255
col = (1-f)*col0 + f*col1
idx = radius <= 1
col[idx] = 1 - radius[idx]*(1-col[idx]) # increase saturation with radius
col[~idx] *= 0.75 # out of range
img[:,:,2-i] = np.floor(255*col).astype(np.uint8)
return img.astype(np.uint8)
def computeImg(flow):
eps = sys.float_info.epsilon
UNKNOWN_FLOW_THRESH = 1e9
UNKNOWN_FLOW = 1e10
u = flow[: , : , 0]
v = flow[: , : , 1]
maxu = -999
maxv = -999
minu = 999
minv = 999
maxrad = -1
#fix unknown flow
greater_u = np.where(u > UNKNOWN_FLOW_THRESH)
greater_v = np.where(v > UNKNOWN_FLOW_THRESH)
u[greater_u] = 0
u[greater_v] = 0
v[greater_u] = 0
v[greater_v] = 0
maxu = max([maxu, np.amax(u)])
minu = min([minu, np.amin(u)])
maxv = max([maxv, np.amax(v)])
minv = min([minv, np.amin(v)])
rad = np.sqrt(np.multiply(u,u)+np.multiply(v,v))
maxrad = max([maxrad, np.amax(rad)])
u = u/(maxrad+eps)
v = v/(maxrad+eps)
img = computeColor(u, v)
return img
|
AR-Depth-main
|
flow_color.py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python implementation of BLEU and smooth-BLEU.
This module provides a Python implementation of BLEU and smooth-BLEU.
Smooth BLEU is computed following the method outlined in the paper:
Chin-Yew Lin, Franz Josef Och. ORANGE: a method for evaluating automatic
evaluation metrics for machine translation. COLING 2004.
"""
import collections
import math
def _get_ngrams(segment, max_order):
"""Extracts all n-grams upto a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
methods.
Returns:
The Counter containing all n-grams upto max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in range(1, max_order + 1):
for i in range(0, len(segment) - order + 1):
ngram = tuple(segment[i:i+order])
ngram_counts[ngram] += 1
return ngram_counts
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
6-tuple with the BLEU score, n-gram precisions, brevity penalty, length
ratio, translation length and reference length.
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus,
translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram)-1] += overlap[ngram]
for order in range(1, max_order+1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order-1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
else:
bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
return (bleu, precisions, bp, ratio, translation_length, reference_length)
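# Illustrative usage (not part of the original module): an exact 4-token match
# scores 1.0 even without smoothing, since every n-gram precision is 1 and the
# brevity penalty is 1:
#   refs = [[['the', 'cat', 'sat', 'down']]]
#   hyps = [['the', 'cat', 'sat', 'down']]
#   compute_bleu(refs, hyps)[0]  # -> 1.0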
def _bleu(ref_file, trans_file, subword_option=None):
max_order = 4
smooth = True
ref_files = [ref_file]
reference_text = []
for reference_filename in ref_files:
with open(reference_filename) as fh:
reference_text.append(fh.readlines())
per_segment_references = []
for references in zip(*reference_text):
reference_list = []
for reference in references:
reference_list.append(reference.strip().split())
per_segment_references.append(reference_list)
translations = []
with open(trans_file) as fh:
for line in fh:
translations.append(line.strip().split())
bleu_score, _, _, _, _, _ = compute_bleu(per_segment_references, translations, max_order, smooth)
return round(100 * bleu_score,2)
|
CodeGen-main
|
CodeXGLUE/Text-Code/text-to-code/evaluator/bleu.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import logging
import argparse
from bleu import _bleu
import json
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def main():
parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for code completion (line level).')
parser.add_argument('--answers', '-a', required=True, help="filename of the labels, in json format.")
parser.add_argument('--predictions', '-p', required=True, help="filename of the leaderboard predictions, in txt format.")
args = parser.parse_args()
preds = open(args.predictions, "r").readlines()
gts = open(args.answers, "r").readlines()
assert len(preds) == len(gts), f"Samples of predictions and answers are not equal, {len(preds)}: {len(gts)}"
total = len(gts)
EM = 0.0
wf = open("ground_truth.txt", "w")
for pred, gt in zip(preds, gts):
pred = pred.strip()
gt = json.loads(gt)["code"]
wf.write(gt+"\n")
if pred.split() == gt.split():
EM += 1
bleu_score = round(_bleu("ground_truth.txt", args.predictions), 2)
logger.info(f"BLEU: {bleu_score}, EM: {round(EM/total*100, 2)}")
try:
os.remove("ground_truth.txt")
except Exception:
pass
if __name__ == "__main__":
main()
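# Example invocation (file names are illustrative):
#   python evaluator.py -a answers.json -p predictions.txt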
|
CodeGen-main
|
CodeXGLUE/Text-Code/text-to-code/evaluator/evaluator.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Text to code generation pipeline in CodeXGLUE
"""
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import pickle
import random
import re
import shutil
import json
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
from torch.utils.data.distributed import DistributedSampler
from dataset import concodeDataset
from beam import Beam
try:
from torch.utils.tensorboard import SummaryWriter
except:
from tensorboardX import SummaryWriter
from torch.nn import CrossEntropyLoss
from bleu import _bleu
from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
BertConfig, BertForMaskedLM, BertTokenizer,
GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
RobertaConfig, RobertaForMaskedLM, RobertaTokenizer,
DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
}
def load_and_cache_examples(args, tokenizer, evaluate=False):
dataset = concodeDataset(tokenizer, args, logger, file_type='dev' if evaluate else 'train',
block_size=args.block_size)
return dataset
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def update_config(model, tokenizer):
model.config.bos_token_id = tokenizer.bos_token_id
model.config.eos_token_id = tokenizer.eos_token_id
model.config.pad_token_id = tokenizer.pad_token_id
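# Keep the model config's special-token ids (bos/eos/pad) in sync with the tokenizer
# so that generation and padding use consistent ids.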
def train(args, train_dataset, model, tokenizer, fh, pool):
""" Train the model """
if args.local_rank in [-1, 0]:
args.tensorboard_dir = os.path.join(args.output_dir, 'tensorboard')
if not os.path.exists(args.tensorboard_dir):
os.makedirs(args.tensorboard_dir)
tb_writer = SummaryWriter(args.tensorboard_dir)
args.batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.batch_size, drop_last=True)
total_examples = len(train_dataset) * (
torch.distributed.get_world_size() if args.local_rank != -1 else 1)
batch_size = args.batch_size * args.gradient_accumulation_steps * (
torch.distributed.get_world_size() if args.local_rank != -1 else 1)
# if args.max_steps > 0:
# t_total = args.max_steps
# args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
if args.num_train_epochs > 0:
t_total = total_examples // batch_size * args.num_train_epochs
args.max_steps = t_total
model.to(args.device)
if args.local_rank not in [-1, 0]:
torch.distributed.barrier()
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,
num_training_steps=t_total)
checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt')
optimizer_last = os.path.join(checkpoint_last, 'optimizer.pt')
if os.path.exists(scheduler_last):
scheduler.load_state_dict(torch.load(scheduler_last, map_location="cpu"))
if os.path.exists(optimizer_last):
optimizer.load_state_dict(torch.load(optimizer_last, map_location="cpu"))
if args.local_rank == 0:
torch.distributed.barrier()
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank%args.gpu_per_node],
output_device=args.local_rank%args.gpu_per_node,
find_unused_parameters=True)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", total_examples )
logger.info(" Num epoch = %d", t_total*batch_size//total_examples)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", batch_size)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = args.start_step
tr_loss, logging_loss,avg_loss,tr_nb = 0.0, 0.0,0.0,0
# model.resize_token_embeddings(len(tokenizer))
model.zero_grad()
set_seed(args) # Added here for reproducibility (even between python 2 and 3)
best_bleu = 0.0
for idx in range(args.start_epoch, int(args.num_train_epochs)):
for step, (batch, token_labels) in enumerate(train_dataloader):
inputs = batch.to(args.device)
attn_mask = torch.tensor(token_labels.clone().detach() != 0, dtype=torch.uint8, device=args.device)
loss_mask = torch.tensor(token_labels.clone().detach() == 2, dtype=torch.uint8, device=args.device)
model.train()
# outputs = model(inputs, attention_mask=attn_mask, labels=inputs, loss_mask=loss_mask)
# loss = outputs[0]
outputs = model(inputs, attention_mask=attn_mask)
logits = outputs[0]
labels = inputs
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
flatten_shift_loss_mask = loss_mask[..., :-1].contiguous().view(-1)
ids = torch.nonzero(flatten_shift_loss_mask).view(-1)
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1))[ids], shift_labels.view(-1)[ids])
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
scheduler.step()
global_step += 1
output_flag=True
avg_loss=round(np.exp((tr_loss - logging_loss) /(global_step- tr_nb)),4)
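# avg_loss is the running perplexity, exp(mean loss since the last logging step);
# it is reported as "ppl" in the log line below.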
if global_step % args.logging_steps == 0:
logger.info(" steps: %s ppl: %s", global_step, round(avg_loss,5))
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
tb_writer.add_scalar('lr', scheduler.get_last_lr()[0], global_step)
tb_writer.add_scalar('loss', (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
tr_nb=global_step
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
checkpoint_prefix = "checkpoint"
# Save model checkpoint
if args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
# results = evaluate(args, model, tokenizer, eval_when_training=True)
# for key, value in results.items():
# tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
# logger.info(" %s = %s", key, round(value,4))
# output_dir = os.path.join(args.output_dir, '{}-{}-{}'.format(checkpoint_prefix, global_step, round(results['perplexity'],4)))
dev_bleu, dev_EM = eval_bleu(args, model, tokenizer, file_type='dev', num=100)
logger.info(f"dev bleu: {dev_bleu}, dev EM: {dev_EM}")
output_dir = os.path.join(args.output_dir, '{}-{}-{}'.format(checkpoint_prefix, global_step, round(dev_bleu,2)))
if dev_bleu > best_bleu:
best_bleu = dev_bleu
logger.info(f"best bleu updated. saved in {output_dir}")
logger.info(f"best bleu: {best_bleu}")
else:
output_dir = os.path.join(args.output_dir, "{}-{}".format(checkpoint_prefix, global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
# _rotate_checkpoints(args, checkpoint_prefix)
last_output_dir = os.path.join(args.output_dir, 'checkpoint-last')
if not os.path.exists(last_output_dir):
os.makedirs(last_output_dir)
model_to_save.save_pretrained(last_output_dir)
tokenizer.save_pretrained(last_output_dir)
idx_file = os.path.join(last_output_dir, 'idx_file.txt')
with open(idx_file, 'w', encoding='utf-8') as idxf:
idxf.write(str(0) + '\n')
torch.save(optimizer.state_dict(), os.path.join(last_output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(last_output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", last_output_dir)
step_file = os.path.join(last_output_dir, 'step_file.txt')
with open(step_file, 'w', encoding='utf-8') as stepf:
stepf.write(str(global_step) + '\n')
# torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
# torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
# logger.info("Saving optimizer and scheduler states to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
break
if args.max_steps > 0 and global_step > args.max_steps:
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, prefix="", eval_when_training=False):
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_output_dir = args.output_dir
eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate
if args.n_gpu > 1 and eval_when_training is False:
model = torch.nn.DataParallel(model)
# Eval!
#logger.info("***** Running evaluation {} *****".format(prefix))
#logger.info(" Num examples = %d", len(eval_dataset))
#logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
for step, (batch, token_labels) in enumerate(eval_dataloader):
inputs = batch.to(args.device)
attn_mask = torch.tensor(token_labels.clone().detach() != 0, dtype=torch.uint8, device=args.device)
loss_mask = torch.tensor(token_labels.clone().detach() == 2, dtype=torch.uint8, device=args.device)
with torch.no_grad():
outputs = model(inputs, attention_mask=attn_mask, labels=inputs, loss_mask=loss_mask)
loss = outputs[0]
eval_loss += loss.mean().item()
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
perplexity = torch.exp(torch.tensor(eval_loss))
result = {
"perplexity": float(perplexity)
}
output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
with open(output_eval_file, "w") as writer:
#logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
#logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
return result
def eval_bleu(args, model, tokenizer, file_type='test', num=2000):
dataset = concodeDataset(tokenizer, args, logger, file_type=file_type, block_size=args.block_size, mode='test')
test_sampler = SequentialSampler(dataset)
test_dataloader = DataLoader(dataset, sampler=test_sampler, batch_size=1)
model.to(args.device)
model.zero_grad()
model.eval()
preds = []
for step, (batch, token_labels) in enumerate(test_dataloader):
if step >= num:
break
inputs = batch.to(args.device)
# with torch.no_grad():
# outputs = model.generate(inputs, max_length=args.block_size, num_beams=10, temperature=0.7, early_stopping=False, top_k=70, \
# bos_token_id=tokenizer.bos_token_id, eos_token_id=tokenizer.pad_token_id, pad_token_id=tokenizer.pad_token_id)
# # outputs = model.generate(inputs, max_length=args.block_size, do_sample=True, temperature=0.7, top_k=70, top_p=0.95, \
# # bos_token_id=tokenizer.bos_token_id, eos_token_id=tokenizer.pad_token_id, pad_token_id=tokenizer.pad_token_id)
# # outputs = model.generate(inputs, max_length=args.block_size, num_beams=10, temperature=0.7, early_stopping=False, top_k=70)
# # outputs = model.generate(inputs, max_length=args.block_size, do_sample=True, temperature=0.7, top_k=70, top_p=0.95)
# generation = tokenizer.decode(outputs[0])[len(tokenizer.decode(inputs[0])):]
# preds.append(generation.rstrip("<pad>"))
with torch.no_grad():
beam_size = 10
m = torch.nn.LogSoftmax(dim=-1)
outputs = model(inputs)[1]
p = []
zero = torch.cuda.LongTensor(1).fill_(0)
for i in range(inputs.shape[0]):
past_hidden = [x[:, i:i+1].expand(-1, beam_size, -1, -1, -1) for x in outputs]
# context_mask=source_mask[i:i+1,:].expand(beam_size,-1)
beam = Beam(beam_size, tokenizer.bos_token_id, tokenizer.eos_token_id)
input_ids = None
for _ in range(162):
if beam.done():
break
input_ids = beam.getCurrentState()
# context_mask=torch.cat((context_mask,input_ids*0+1),-1)
# mask=context_mask.unsqueeze(0).unsqueeze(-2).unsqueeze(-2).expand(self.config.n_layer, -1, -1, -1, -1)
transformer_outputs = model(input_ids, past=past_hidden)
out = m(transformer_outputs[0][:, -1, :]).data
# out = self.lsm(self.lm_head(transformer_outputs[0][:,-1,:])).data
beam.advance(out)
past_hidden = [x.data.index_select(1, beam.getCurrentOrigin()) for x in transformer_outputs[1]]
hyp = beam.getHyp(beam.getFinal())
pred =beam.buildTargetTokens(hyp)[:beam_size]
pred = [torch.cat([x.view(-1) for x in p]+[zero]*(162-len(p))).view(1,-1) for p in pred]
p.append(torch.cat(pred, 0).unsqueeze(0))
p = torch.cat(p, 0)
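# Each beam hypothesis above is right-padded with 0s to a fixed length of 162 tokens
# so the tensors can be stacked; the 0s are stripped again before decoding below.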
for pred in p:
t = pred[0].cpu().numpy()
t = list(t)
if 0 in t:
t = t[:t.index(0)]
text = tokenizer.decode(t, clean_up_tokenization_spaces=False)
# print(text)
preds.append(text)
if step % args.logging_steps == 0:
logger.info(f"{step} are done!")
golds = []
datafile = os.path.join(args.data_dir, f"{file_type}.json")
datas = open(datafile).readlines()
for x in datas[:num]:
x = json.loads(x)
golds.append(x["code"])
assert len(preds) == len(golds)
EM = []
with open(os.path.join(args.output_dir, f"{file_type}.output"), 'w') as f, open(os.path.join(args.output_dir, f"{file_type}.gold"), 'w') as f1:
for pred, gold in zip(preds, golds):
f.write(pred+'\n')
f1.write(gold+'\n')
EM.append(pred.split() == gold.split())
if file_type == "test":
return 0, 0
bleu_score = round(_bleu(os.path.join(args.output_dir, f"{file_type}.gold"), os.path.join(args.output_dir, f"{file_type}.output")), 2)
EM = round(np.mean(EM) * 100, 2)
return bleu_score, EM
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir", default=None, type=str, required=True,
help="The input data path.")
parser.add_argument("--langs", default=None, type=str, required=True,
help="Languages to train, if all, train all languages in data_dir")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--model_type", default="gpt2", type=str,
help="The model architecture to be fine-tuned.")
parser.add_argument("--pretrain_dir", default="", type=str,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--config_dir", type=str,
help="config name. Required when training from scratch")
parser.add_argument("--tokenizer_dir", type=str,
help="Pre-trained tokenizer dir. Required when training from scratch")
parser.add_argument("--load_name", type=str, default="pretrained",
help="Load pretrained model name")
parser.add_argument("--mlm", action='store_true',
help="Train with masked-language modeling loss instead of language modeling.")
parser.add_argument("--mlm_probability", type=float, default=0.15,
help="Ratio of tokens to mask for masked language modeling loss")
parser.add_argument("--cache_dir", default="", type=str,
help="Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)")
parser.add_argument("--block_size", default=1024, type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_infer", action='store_true',
help="Whether to run inference on test set.")
parser.add_argument("--evaluate_during_training", action='store_true',
help="Run evaluation during training at each logging step.")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--per_gpu_train_batch_size", default=2, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=4, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=1.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument('--logging_steps', type=int, default=10,
help="Log every X updates steps.")
parser.add_argument('--save_steps', type=int, default=50,
help="Save checkpoint every X updates steps.")
parser.add_argument('--save_total_limit', type=int, default=None,
help='Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default')
parser.add_argument("--eval_all_checkpoints", action='store_true',
help="Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number")
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--overwrite_output_dir', action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument('--overwrite_cache', action='store_true',
help="Overwrite the cached training and evaluation sets")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument("--node_index", type=int, default=-1,
help="node index if multi-node running")
parser.add_argument("--gpu_per_node", type=int, default=-1,
help="num of gpus per node")
parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
parser.add_argument('--log_file', type=str, default='')
parser.add_argument('--tensorboard_dir', type=str)
pool = None
args = parser.parse_args()
# args.output_dir = os.path.join(args.output_dir, args.dataset)
if args.model_type in ["bert", "roberta", "distilbert"] and not args.mlm:
raise ValueError("BERT and RoBERTa do not have LM heads but masked LM heads. They must be run using the --mlm "
"flag (masked language modeling).")
if os.path.exists(args.output_dir) and os.listdir(
args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir))
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
logger.warning("local_rank: %d, node_index: %d, gpu_per_node: %d"%(args.local_rank, args.node_index, args.gpu_per_node))
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.local_rank += args.node_index * args.gpu_per_node
args.n_gpu = 1
args.device = device
# args.batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
# Setup logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s, world size: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16,
torch.distributed.get_world_size() if args.local_rank != -1 else 1)
# Use a FileHandler to also write the log to a file
fh = logging.FileHandler(args.log_file)
logger.addHandler(fh)
# Set seed
set_seed(args)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Barrier to make sure only the first process in distributed training download model & vocab
args.start_epoch = 0
args.start_step = 0
checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
if args.do_train and os.path.exists(checkpoint_last) and os.listdir(checkpoint_last):
args.pretrain_dir = os.path.join(checkpoint_last)
args.config_name = os.path.join(checkpoint_last, 'config.json')
idx_file = os.path.join(checkpoint_last, 'idx_file.txt')
with open(idx_file, encoding='utf-8') as idxf:
args.start_epoch = int(idxf.readlines()[0].strip()) + 1
step_file = os.path.join(checkpoint_last, 'step_file.txt')
if os.path.exists(step_file):
with open(step_file, encoding='utf-8') as stepf:
args.start_step = int(stepf.readlines()[0].strip())
logger.info("reload model from {}, resume from {} epoch".format(checkpoint_last, args.start_epoch))
# Load pre-trained model
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
pretrained = args.pretrain_dir
if pretrained:
tokenizer = tokenizer_class.from_pretrained(pretrained, do_lower_case=args.do_lower_case, bos_token='<s>', eos_token='</s>', pad_token='<pad>', unk_token='<|UNKNOWN|>', sep_token='concode_elem_sep')
logger.info(tokenizer.encode("<s> hello world <pad> </s>"))
model = model_class.from_pretrained(pretrained)
model.resize_token_embeddings(len(tokenizer))
update_config(model, tokenizer)
logger.info(model.config)
else:
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_dir, bos_token='<s>', eos_token='</s>', pad_token='<pad>', unk_token='<|UNKNOWN|>', sep_token='concode_elem_sep')
args.vocab_size = tokenizer.vocab_size
config = config_class.from_pretrained(args.config_dir)
model = model_class(config)
model.resize_token_embeddings(len(tokenizer))
update_config(model, tokenizer)
model_parameters = model.parameters()
num_params = sum([np.prod(p.size()) for p in model_parameters])
logger.info(f"Model has a total of {num_params} trainable parameters")
if args.local_rank == 0:
torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False)
global_step, tr_loss = train(args, train_dataset, model, tokenizer, fh, pool)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
if args.do_eval: # only works on 1 GPU
dev_bleu, dev_EM = eval_bleu(args, model, tokenizer, file_type='dev', num=2000)
logger.info(f"dev bleu: {dev_bleu}, dev EM: {dev_EM}")
if args.do_infer: # only works on 1 GPU
test_bleu, test_EM = eval_bleu(args, model, tokenizer, file_type='test', num=2000)
logger.info(f"test bleu: {test_bleu}, test EM: {test_EM}")
if __name__ == "__main__":
main()
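# Resume-layout sketch (illustrative, not part of the original script): the resume logic
# above expects <output_dir>/checkpoint-last/ to contain the saved model and config plus
# two small text files written by the training loop:
#   checkpoint-last/config.json    - model config used to re-instantiate the model
#   checkpoint-last/idx_file.txt   - last finished epoch index (a single integer)
#   checkpoint-last/step_file.txt  - optional global step to resume from
# A hypothetical helper that would produce these files could look like:
#   def save_resume_state(checkpoint_dir, epoch, step):
#       with open(os.path.join(checkpoint_dir, 'idx_file.txt'), 'w') as f:
#           f.write(str(epoch))
#       with open(os.path.join(checkpoint_dir, 'step_file.txt'), 'w') as f:
#           f.write(str(step))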
|
CodeGen-main
|
CodeXGLUE/Text-Code/text-to-code/code/run.py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python implementation of BLEU and smooth-BLEU.
This module provides a Python implementation of BLEU and smooth-BLEU.
Smooth BLEU is computed following the method outlined in the paper:
Chin-Yew Lin, Franz Josef Och. ORANGE: a method for evaluating automatic
evaluation metrics for machine translation. COLING 2004.
"""
import collections
import math
def _get_ngrams(segment, max_order):
"""Extracts all n-grams upto a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
method.
Returns:
The Counter containing all n-grams up to max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in range(1, max_order + 1):
for i in range(0, len(segment) - order + 1):
ngram = tuple(segment[i:i+order])
ngram_counts[ngram] += 1
return ngram_counts
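# Illustrative example (not part of the original module): for the toy segment
# ['the', 'cat', 'sat'] with max_order=2, _get_ngrams returns a Counter containing
#   {('the',): 1, ('cat',): 1, ('sat',): 1, ('the', 'cat'): 1, ('cat', 'sat'): 1}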
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
6-tuple with the BLEU score, the per-order n-gram precisions, the brevity penalty,
the length ratio, the translation length and the reference length.
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus,
translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram)-1] += overlap[ngram]
for order in range(1, max_order+1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order-1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
else:
bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
return (bleu, precisions, bp, ratio, translation_length, reference_length)
def _bleu(ref_file, trans_file, subword_option=None):
max_order = 4
smooth = True
ref_files = [ref_file]
reference_text = []
for reference_filename in ref_files:
with open(reference_filename) as fh:
reference_text.append(fh.readlines())
per_segment_references = []
for references in zip(*reference_text):
reference_list = []
for reference in references:
reference_list.append(reference.strip().split())
per_segment_references.append(reference_list)
translations = []
with open(trans_file) as fh:
for line in fh:
translations.append(line.strip().split())
bleu_score, _, _, _, _, _ = compute_bleu(per_segment_references, translations, max_order, smooth)
return round(100 * bleu_score,2)
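# Minimal usage sketch (illustrative only, toy data made up for this example):
# compute_bleu takes a list of reference lists per hypothesis and a list of hypotheses,
# each tokenized into lists of tokens.
if __name__ == '__main__':
    refs = [[['the', 'cat', 'sat', 'on', 'the', 'mat']]]
    hyps = [['the', 'cat', 'sat', 'on', 'mat']]
    score, precisions, bp, ratio, hyp_len, ref_len = compute_bleu(refs, hyps, smooth=True)
    print('smoothed BLEU-4: {:.2f}'.format(100 * score))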
|
CodeGen-main
|
CodeXGLUE/Text-Code/text-to-code/code/bleu.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import pickle
import random
import re
import gc
import shutil
import json
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
from torch.utils.data.distributed import DistributedSampler
try:
from torch.utils.tensorboard import SummaryWriter
except:
from tensorboardX import SummaryWriter
from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
BertConfig, BertForMaskedLM, BertTokenizer,
GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
RobertaConfig, RobertaForMaskedLM, RobertaTokenizer,
DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
class concodeDataset(Dataset):
def __init__(self, tokenizer, args, logger, file_type='train', block_size=512, mode='train'):
if args.local_rank==-1:
local_rank=0
world_size=1
else:
local_rank=args.local_rank
world_size=torch.distributed.get_world_size()
self.block_size = block_size
self.mode = mode
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
cached_file = os.path.join(args.output_dir, file_type+"_blocksize_%d"%(block_size)+"_wordsize_%d"%(world_size)+"_rank_%d"%(local_rank))
if mode != 'test' and os.path.exists(cached_file) and not args.overwrite_cache:
if file_type == 'train':
logger.warning("Loading features from cached file %s", cached_file)
with open(cached_file, 'rb') as handle:
data = pickle.load(handle)
self.inputs = data['inputs']
self.token_labels = data['token_labels']
else:
self.inputs = []
self.token_labels = []
datafile = os.path.join(args.data_dir, f"{file_type}.json")
if file_type == 'train':
logger.warning("Creating features from dataset file at %s", datafile)
datas = open(datafile).readlines()
length = len(datas)
logger.info("Data size: %d"%(length))
for idx, x in enumerate(datas):
if idx % (length//10) == 0:
percent = idx / (length//10) * 10
logger.warning("Rank %d, load %d"%(local_rank, percent))
if idx % world_size != local_rank:
continue
x = json.loads(x)
code = tokenizer.encode(x["code"])
nl = tokenizer.encode(x["nl"])
input_ids, input_labels = self.pad_and_get_mask(code, nl, tokenizer)
self.inputs.append(input_ids)
self.token_labels.append(input_labels)
if file_type == 'train':
logger.warning("Rank %d Training %d token, %d samples"%(local_rank, length, len(self.inputs)))
logger.warning("Saving features into cached file %s", cached_file)
if mode != 'test':
with open(cached_file, 'wb') as handle:
pickle.dump({'inputs': self.inputs, 'token_labels': self.token_labels}, handle, protocol=pickle.HIGHEST_PROTOCOL)
def pad_and_get_mask(self, code, nl, tokenizer):
if self.mode == 'test':
code = []
while (len(code) + len(nl) + 2 > self.block_size):
if (len(code) > len(nl)):
code = code[:-1]
else:
nl = nl[:-1]
if self.mode == 'train':
inputs = nl + [tokenizer.bos_token_id] + code + [tokenizer.eos_token_id]
labels = [1] * len(nl) + [2] * (len(code)+1) + [0]
else:
inputs = nl + [tokenizer.bos_token_id]
labels = [1] * len(nl) + [2]
return inputs, labels
assert len(inputs) <= self.block_size
pad_len = self.block_size - len(inputs)
inputs += [tokenizer.pad_token_id] * pad_len
labels += [0] * pad_len
assert len(inputs) == len(labels)
return inputs, labels
def __len__(self):
return len(self.inputs)
def __getitem__(self, item):
return torch.tensor(self.inputs[item]), torch.tensor(self.token_labels[item])
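# Data-format sketch (illustrative): each line of {data_dir}/{train,dev,test}.json is
# assumed to be a JSON object with at least an "nl" field (natural-language description)
# and a "code" field (target code string), e.g. (hypothetical example)
#   {"nl": "return the maximum of two ints", "code": "int max(int a, int b) { return a > b ? a : b; }"}
# In 'train' mode pad_and_get_mask packs a sample as
#   inputs = nl_ids + [bos] + code_ids + [eos] + padding
#   labels = [1] * len(nl_ids) + [2] * (len(code_ids) + 1) + [0] + padding zeros
# so downstream code can distinguish the NL prefix, the code span and padding.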
|
CodeGen-main
|
CodeXGLUE/Text-Code/text-to-code/code/dataset.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
import torch.nn as nn
import torch
from torch.autograd import Variable
import copy
class Seq2Seq(nn.Module):
"""
Build a sequence-to-sequence model.
Parameters:
* `encoder`- encoder of seq2seq model. e.g. roberta
* `decoder`- decoder of seq2seq model. e.g. transformer
* `config`- configuration of encoder model.
* `beam_size`- beam size for beam search.
* `max_length`- max length of target for beam search.
* `sos_id`- start-of-sequence symbol id in the target for beam search.
* `eos_id`- end-of-sequence symbol id in the target for beam search.
"""
def __init__(self, encoder,decoder,config,beam_size=None,max_length=None,sos_id=None,eos_id=None):
super(Seq2Seq, self).__init__()
self.encoder = encoder
self.decoder=decoder
self.config=config
self.register_buffer("bias", torch.tril(torch.ones(2048, 2048)))
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.lsm = nn.LogSoftmax(dim=-1)
self.tie_weights()
self.beam_size=beam_size
self.max_length=max_length
self.sos_id=sos_id
self.eos_id=eos_id
def _tie_or_clone_weights(self, first_module, second_module):
""" Tie or clone module weights depending of weither we are using TorchScript or not
"""
if self.config.torchscript:
first_module.weight = nn.Parameter(second_module.weight.clone())
else:
first_module.weight = second_module.weight
def tie_weights(self):
""" Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
self._tie_or_clone_weights(self.lm_head,
self.encoder.embeddings.word_embeddings)
def forward(self, source_ids=None,source_mask=None,target_ids=None,target_mask=None,args=None):
outputs = self.encoder(source_ids, attention_mask=source_mask)
encoder_output = outputs[0].permute([1,0,2]).contiguous()
if target_ids is not None:
attn_mask=-1e4 *(1-self.bias[:target_ids.shape[1],:target_ids.shape[1]])
tgt_embeddings = self.encoder.embeddings(target_ids).permute([1,0,2]).contiguous()
out = self.decoder(tgt_embeddings,encoder_output,tgt_mask=attn_mask,memory_key_padding_mask=(1-source_mask).bool())
hidden_states = torch.tanh(self.dense(out)).permute([1,0,2]).contiguous()
lm_logits = self.lm_head(hidden_states)
# Shift so that tokens < n predict n
active_loss = target_mask[..., 1:].ne(0).view(-1) == 1
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = target_ids[..., 1:].contiguous()
# Flatten the tokens
loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1))[active_loss],
shift_labels.view(-1)[active_loss])
outputs = loss,loss*active_loss.sum(),active_loss.sum()
return outputs
else:
#Predict
preds=[]
zero=torch.cuda.LongTensor(1).fill_(0)
for i in range(source_ids.shape[0]):
context=encoder_output[:,i:i+1]
context_mask=source_mask[i:i+1,:]
beam = Beam(self.beam_size,self.sos_id,self.eos_id)
input_ids=beam.getCurrentState()
context=context.repeat(1, self.beam_size,1)
context_mask=context_mask.repeat(self.beam_size,1)
for _ in range(self.max_length):
if beam.done():
break
attn_mask=-1e4 *(1-self.bias[:input_ids.shape[1],:input_ids.shape[1]])
tgt_embeddings = self.encoder.embeddings(input_ids).permute([1,0,2]).contiguous()
out = self.decoder(tgt_embeddings,context,tgt_mask=attn_mask,memory_key_padding_mask=(1-context_mask).bool())
out = torch.tanh(self.dense(out))
hidden_states=out.permute([1,0,2]).contiguous()[:,-1,:]
out = self.lsm(self.lm_head(hidden_states)).data
beam.advance(out)
input_ids.data.copy_(input_ids.data.index_select(0, beam.getCurrentOrigin()))
input_ids=torch.cat((input_ids,beam.getCurrentState()),-1)
hyp= beam.getHyp(beam.getFinal())
pred=beam.buildTargetTokens(hyp)[:self.beam_size]
pred=[torch.cat([x.view(-1) for x in p]+[zero]*(self.max_length-len(p))).view(1,-1) for p in pred]
preds.append(torch.cat(pred,0).unsqueeze(0))
preds=torch.cat(preds,0)
return preds
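# Shape sketch (illustrative): with batch size B, source length S, target length T and
# hidden size H, encoder_output is permuted to (S, B, H) to match nn.TransformerDecoder's
# (seq, batch, hidden) convention; the decoder output is permuted back to (B, T, H) before
# lm_head produces (B, T, vocab_size) logits, which are shifted by one position for the
# causal LM loss, and the beam search branch above reuses the same decoder one step at a time.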
class Beam(object):
def __init__(self, size,sos,eos):
self.size = size
self.tt = torch.cuda
# The score for each translation on the beam.
self.scores = self.tt.FloatTensor(size).zero_()
# The backpointers at each time-step.
self.prevKs = []
# The outputs at each time-step.
self.nextYs = [self.tt.LongTensor(size)
.fill_(0)]
self.nextYs[0][0] = sos
# Has EOS topped the beam yet.
self._eos = eos
self.eosTop = False
# Time and k pair for finished.
self.finished = []
def getCurrentState(self):
"Get the outputs for the current timestep."
batch = self.tt.LongTensor(self.nextYs[-1]).view(-1, 1)
return batch
def getCurrentOrigin(self):
"Get the backpointers for the current timestep."
return self.prevKs[-1]
def advance(self, wordLk):
"""
Given prob over words for every last beam `wordLk` and attention
`attnOut`: Compute and update the beam search.
Parameters:
* `wordLk`- probs of advancing from the last step (K x words)
* `attnOut`- attention at the last step
Returns: True if beam search is complete.
"""
numWords = wordLk.size(1)
# Sum the previous scores.
if len(self.prevKs) > 0:
beamLk = wordLk + self.scores.unsqueeze(1).expand_as(wordLk)
# Don't let EOS have children.
for i in range(self.nextYs[-1].size(0)):
if self.nextYs[-1][i] == self._eos:
beamLk[i] = -1e20
else:
beamLk = wordLk[0]
flatBeamLk = beamLk.view(-1)
bestScores, bestScoresId = flatBeamLk.topk(self.size, 0, True, True)
self.scores = bestScores
# bestScoresId is flattened beam x word array, so calculate which
# word and beam each score came from
prevK = bestScoresId // numWords
self.prevKs.append(prevK)
self.nextYs.append((bestScoresId - prevK * numWords))
for i in range(self.nextYs[-1].size(0)):
if self.nextYs[-1][i] == self._eos:
s = self.scores[i]
self.finished.append((s, len(self.nextYs) - 1, i))
# End condition is when top-of-beam is EOS and no global score.
if self.nextYs[-1][0] == self._eos:
self.eosTop = True
def done(self):
return self.eosTop and len(self.finished) >=self.size
def getFinal(self):
if len(self.finished) == 0:
self.finished.append((self.scores[0], len(self.nextYs) - 1, 0))
self.finished.sort(key=lambda a: -a[0])
if len(self.finished) != self.size:
unfinished=[]
for i in range(self.nextYs[-1].size(0)):
if self.nextYs[-1][i] != self._eos:
s = self.scores[i]
unfinished.append((s, len(self.nextYs) - 1, i))
unfinished.sort(key=lambda a: -a[0])
self.finished+=unfinished[:self.size-len(self.finished)]
return self.finished[:self.size]
def getHyp(self, beam_res):
"""
Walk back to construct the full hypothesis.
"""
hyps=[]
for _,timestep, k in beam_res:
hyp = []
for j in range(len(self.prevKs[:timestep]) - 1, -1, -1):
hyp.append(self.nextYs[j+1][k])
k = self.prevKs[j][k]
hyps.append(hyp[::-1])
return hyps
def buildTargetTokens(self, preds):
sentence=[]
for pred in preds:
tokens = []
for tok in pred:
if tok==self._eos:
break
tokens.append(tok)
sentence.append(tokens)
return sentence
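# Driver sketch (illustrative): Beam is consumed by Seq2Seq.forward above; one search
# roughly follows this loop (decoder_step is a placeholder for a single decoding step,
# and Beam allocates torch.cuda tensors, so a CUDA device is assumed):
#   beam = Beam(beam_size, sos_id, eos_id)
#   input_ids = beam.getCurrentState()
#   while not beam.done():
#       log_probs = decoder_step(input_ids)                  # (beam_size, vocab_size) log-probs
#       beam.advance(log_probs)
#       input_ids = input_ids.index_select(0, beam.getCurrentOrigin())
#       input_ids = torch.cat((input_ids, beam.getCurrentState()), -1)
#   hyps = beam.buildTargetTokens(beam.getHyp(beam.getFinal()))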
|
CodeGen-main
|
CodeXGLUE/Text-Code/text-to-code/code/beam.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
import sys,json
import numpy as np
def read_answers(filename):
answers={}
with open(filename) as f:
for line in f:
line=line.strip()
js=json.loads(line)
answers[js['url']]=js['idx']
return answers
def read_predictions(filename):
predictions={}
with open(filename) as f:
for line in f:
line=line.strip()
js=json.loads(line)
predictions[js['url']]=js['answers']
return predictions
def calculate_scores(answers,predictions):
scores=[]
for key in answers:
if key not in predictions:
logging.error("Missing prediction for url {}.".format(key))
sys.exit()
flag=False
for rank,idx in enumerate(predictions[key]):
if idx==answers[key]:
scores.append(1/(rank+1))
flag=True
break
if flag is False:
scores.append(0)
result={}
result['MRR']=round(np.mean(scores),4)
return result
def main():
import argparse
parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for POJ-104 dataset.')
parser.add_argument('--answers', '-a',help="filename of the labels, in txt format.")
parser.add_argument('--predictions', '-p',help="filename of the leaderboard predictions, in txt format.")
args = parser.parse_args()
answers=read_answers(args.answers)
predictions=read_predictions(args.predictions)
scores=calculate_scores(answers,predictions)
print(scores)
if __name__ == '__main__':
main()
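# Input-format sketch (illustrative): both files are JSON lines keyed by 'url', e.g.
#   answers file:      {"url": "example-url-0", "idx": "answer-0"}
#   predictions file:  {"url": "example-url-0", "answers": ["cand-3", "answer-0", "cand-7"]}
# With the gold idx found at rank 2, this query contributes 1/2 to the reported MRR.
# A hypothetical invocation: python evaluator.py -a answers.jsonl -p predictions.jsonl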
|
CodeGen-main
|
CodeXGLUE/Text-Code/NL-code-search-Adv/evaluator/evaluator.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss.
"""
from __future__ import absolute_import, division, print_function
import argparse
import logging
import os
import random
import sys
from pathlib import Path
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler
from torch.utils.data.distributed import DistributedSampler
import json
try:
from torch.utils.tensorboard import SummaryWriter
except:
from tensorboardX import SummaryWriter
import multiprocessing
from model import Model
cpu_cont = multiprocessing.cpu_count()
from transformers import (AdamW, get_linear_schedule_with_warmup,
BertConfig, BertForMaskedLM, BertTokenizer,
GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
RobertaConfig, RobertaModel, RobertaTokenizer,
DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
from codegen_sources.wrappers.models import ModelPython, ModelConfig, ModelPythonFunc
from codegen_sources.wrappers.tokenizer import PythonTokenizer, RobertaPythonTokenizer
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer),
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'xlm_python': (ModelConfig, ModelPython, PythonTokenizer),
'roberta_python': (ModelConfig, ModelPython, RobertaPythonTokenizer),
'xlm_python_func': (ModelConfig, ModelPythonFunc, PythonTokenizer),
'roberta_python_func': (ModelConfig, ModelPythonFunc, RobertaPythonTokenizer),
}
class InputFeatures(object):
"""A single training/test features for a example."""
def __init__(self,
code_tokens,
code_ids,
nl_tokens,
nl_ids,
url,
idx,
):
self.code_tokens = code_tokens
self.code_ids = code_ids
self.nl_tokens = nl_tokens
self.nl_ids = nl_ids
self.url=url
self.idx=idx
def convert_examples_to_features(js,tokenizer,args):
#code
if args.model_type in ['xlm_python', 'xlm_java', 'xlm_java_func', 'xlm_python_func']:
if 'code' in js:
code = js['code']
else:
code = js['function']
code_tokens = tokenizer.tokenize(code, keep_comments=False)
else:
if 'code_tokens' in js:
code=' '.join(js['code_tokens'])
else:
code=' '.join(js['function_tokens'])
code_tokens=tokenizer.tokenize(code)
code_tokens = code_tokens[:args.block_size-2]
if len(code_tokens) == 0:
return None
code_tokens =[tokenizer.cls_token]+code_tokens+[tokenizer.sep_token]
code_ids = tokenizer.convert_tokens_to_ids(code_tokens)
padding_length = args.block_size - len(code_ids)
code_ids+=[tokenizer.pad_token_id]*padding_length
nl=' '.join(js['docstring_tokens'])
nl_tokens=tokenizer.tokenize(nl, is_text=True)[:args.block_size-2]
nl_tokens =[tokenizer.cls_token]+nl_tokens+[tokenizer.sep_token]
nl_ids = tokenizer.convert_tokens_to_ids(nl_tokens)
padding_length = args.block_size - len(nl_ids)
nl_ids+=[tokenizer.pad_token_id]*padding_length
return InputFeatures(code_tokens,code_ids,nl_tokens,nl_ids,js['url'],js['idx'])
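# Input-format sketch (illustrative): each line of the data files is assumed to be a JSON
# object; the token-level model types above read
#   {"code_tokens": [...], "docstring_tokens": [...], "url": "...", "idx": "..."}
# while the 'xlm_*' model types re-tokenize the raw "code" (or "function") string instead.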
class TextDataset(Dataset):
def __init__(self, tokenizer, args, file_path=None):
self.examples = []
data=[]
with open(file_path) as f:
for line in f:
line=line.strip()
js=json.loads(line)
data.append(js)
for i,js in enumerate(data):
features = convert_examples_to_features(js,tokenizer,args)
if features is None:
print(f" rm 1 example could not tokenized")
continue
self.examples.append(features)
if 'train' in file_path:
for idx, example in enumerate(self.examples[:3]):
logger.info("*** Example ***")
logger.info("idx: {}".format(idx))
logger.info("code_tokens: {}".format([x.replace('\u0120','_') for x in example.code_tokens]))
logger.info("code_ids: {}".format(' '.join(map(str, example.code_ids))))
logger.info("nl_tokens: {}".format([x.replace('\u0120','_') for x in example.nl_tokens]))
logger.info("nl_ids: {}".format(' '.join(map(str, example.nl_ids))))
def __len__(self):
return len(self.examples)
def __getitem__(self, i):
return (torch.tensor(self.examples[i].code_ids),torch.tensor(self.examples[i].nl_ids))
def set_seed(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
def train(args, train_dataset, model, tokenizer):
""" Train the model """
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler,
batch_size=args.train_batch_size,num_workers=4,pin_memory=True)
args.max_steps=args.epoch*len( train_dataloader)
args.save_steps=len( train_dataloader)//10
args.warmup_steps=len( train_dataloader)
args.logging_steps=len( train_dataloader)
args.num_train_epochs=args.epoch
model.to(args.device)
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.max_steps*0.1,
num_training_steps=args.max_steps)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt')
optimizer_last = os.path.join(checkpoint_last, 'optimizer.pt')
if os.path.exists(scheduler_last):
scheduler.load_state_dict(torch.load(scheduler_last))
if os.path.exists(optimizer_last):
optimizer.load_state_dict(torch.load(optimizer_last))
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (
torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", args.max_steps)
global_step = args.start_step
tr_loss, logging_loss,avg_loss,tr_nb,tr_num,train_loss = 0.0, 0.0,0.0,0,0,0
best_mrr=0.0
best_acc=0.0
# model.resize_token_embeddings(len(tokenizer))
model.zero_grad()
for idx in range(args.start_epoch, int(args.num_train_epochs)):
bar = train_dataloader
tr_num=0
train_loss=0
for step, batch in enumerate(bar):
code_inputs = batch[0].to(args.device)
nl_inputs = batch[1].to(args.device)
model.train()
loss,code_vec,nl_vec = model(code_inputs,nl_inputs)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
tr_loss += loss.item()
tr_num+=1
train_loss+=loss.item()
if avg_loss==0:
avg_loss=tr_loss
avg_loss=round(train_loss/tr_num,5)
if (step+1)% 100==0:
logger.info("epoch {} step {} loss {}".format(idx,step+1,avg_loss))
#bar.set_description("epoch {} loss {}".format(idx,avg_loss))
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
scheduler.step()
global_step += 1
output_flag=True
avg_loss=round(np.exp((tr_loss - logging_loss) /(global_step- tr_nb)),4)
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
logging_loss = tr_loss
tr_nb=global_step
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer,eval_when_training=True)
for key, value in results.items():
logger.info(" %s = %s", key, round(value,4))
# Save model checkpoint
tr_num=0
train_loss=0
if results['eval_mrr']>best_acc:
best_acc=results['eval_mrr']
logger.info(" "+"*"*20)
logger.info(" Best mrr:%s",round(best_acc,4))
logger.info(" "+"*"*20)
checkpoint_prefix = 'checkpoint-best-mrr'
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model,'module') else model
output_dir = os.path.join(output_dir, '{}'.format('model.bin'))
torch.save(model_to_save.state_dict(), output_dir)
logger.info("Saving model checkpoint to %s", output_dir)
eval_dataset=None
def evaluate(args, model, tokenizer,eval_when_training=False):
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_output_dir = args.output_dir
global eval_dataset
if eval_dataset is None:
eval_dataset = TextDataset(tokenizer, args,args.eval_data_file)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size,num_workers=4,pin_memory=True)
# multi-gpu evaluate
if args.n_gpu > 1 and eval_when_training is False:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
code_vecs=[]
nl_vecs=[]
for batch in eval_dataloader:
code_inputs = batch[0].to(args.device)
nl_inputs = batch[1].to(args.device)
with torch.no_grad():
lm_loss,code_vec,nl_vec = model(code_inputs,nl_inputs)
eval_loss += lm_loss.mean().item()
code_vecs.append(code_vec.cpu().numpy())
nl_vecs.append(nl_vec.cpu().numpy())
nb_eval_steps += 1
code_vecs=np.concatenate(code_vecs,0)
nl_vecs=np.concatenate(nl_vecs,0)
eval_loss = eval_loss / nb_eval_steps
perplexity = torch.tensor(eval_loss)
scores=np.matmul(nl_vecs,code_vecs.T)
ranks=[]
for i in range(len(scores)):
score=scores[i,i]
rank=1
for j in range(len(scores)):
if i!=j and scores[i,j]>=score:
rank+=1
ranks.append(1/rank)
result = {
"eval_loss": float(perplexity),
"eval_mrr":float(np.mean(ranks))
}
return result
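# Ranking sketch (illustrative): scores = nl_vecs @ code_vecs.T is a square matrix whose
# row i holds the similarity of NL query i to every code snippet in the eval set, with the
# ground-truth pair on the diagonal; eval_mrr is the mean reciprocal rank of the diagonal
# entry within its row (ties count against the query via the >= comparison above).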
def test(args, model, tokenizer):
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_dataset = TextDataset(tokenizer, args,args.test_data_file)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running Test *****")
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
code_vecs=[]
nl_vecs=[]
for batch in eval_dataloader:
code_inputs = batch[0].to(args.device)
nl_inputs = batch[1].to(args.device)
with torch.no_grad():
lm_loss,code_vec,nl_vec = model(code_inputs,nl_inputs)
eval_loss += lm_loss.mean().item()
code_vecs.append(code_vec.cpu().numpy())
nl_vecs.append(nl_vec.cpu().numpy())
nb_eval_steps += 1
code_vecs=np.concatenate(code_vecs,0)
nl_vecs=np.concatenate(nl_vecs,0)
eval_loss = eval_loss / nb_eval_steps
perplexity = torch.tensor(eval_loss)
scores=np.matmul(nl_vecs,code_vecs.T)
sort_ids=np.argsort(scores, axis=-1, kind='quicksort', order=None)[:,::-1]
indexs=[]
urls=[]
for example in eval_dataset.examples:
indexs.append(example.idx)
urls.append(example.url)
with open(os.path.join(args.output_dir,"predictions.jsonl"),'w') as f:
for index,url,sort_id in zip(indexs,urls,sort_ids):
js={}
js['url']=url
js['answers']=[]
for idx in sort_id[:100]:
js['answers'].append(indexs[int(idx)])
f.write(json.dumps(js)+'\n')
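# Output sketch (illustrative): each line of predictions.jsonl written above has the form
#   {"url": "<query url>", "answers": ["<idx of rank-1 code>", ..., "<idx of rank-100 code>"]}
# which is the format consumed by the NL-code-search-Adv evaluator's read_predictions().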
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--train_data_file", default=None, type=str, required=True,
help="The input training data file (a text file).")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--eval_data_file", default=None, type=str,
help="An optional input evaluation data file to evaluate the perplexity on (a text file).")
parser.add_argument("--test_data_file", default=None, type=str,
help="An optional input evaluation data file to evaluate the perplexity on (a text file).")
parser.add_argument("--model_type", default="bert", type=str,
help="The model architecture to be fine-tuned.")
parser.add_argument("--model_name_or_path", default=None, type=str,
help="The model checkpoint for weights initialization.")
parser.add_argument("--mlm", action='store_true',
help="Train with masked-language modeling loss instead of language modeling.")
parser.add_argument("--mlm_probability", type=float, default=0.15,
help="Ratio of tokens to mask for masked language modeling loss")
parser.add_argument("--config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
parser.add_argument("--cache_dir", default="", type=str,
help="Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)")
parser.add_argument("--block_size", default=-1, type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_test", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--evaluate_during_training", action='store_true',
help="Run evaluation during training at each logging step.")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--train_batch_size", default=4, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--eval_batch_size", default=4, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=1.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument('--logging_steps', type=int, default=50,
help="Log every X updates steps.")
parser.add_argument('--save_steps', type=int, default=50,
help="Save checkpoint every X updates steps.")
parser.add_argument('--save_total_limit', type=int, default=None,
help='Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default')
parser.add_argument("--eval_all_checkpoints", action='store_true',
help="Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number")
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--overwrite_output_dir', action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument('--overwrite_cache', action='store_true',
help="Overwrite the cached training and evaluation sets")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--epoch', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
args = parser.parse_args()
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
args.per_gpu_train_batch_size=args.train_batch_size//args.n_gpu
args.per_gpu_eval_batch_size=args.eval_batch_size//args.n_gpu
# Setup logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args.seed)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Barrier to make sure only the first process in distributed training download model & vocab
args.start_epoch = 0
args.start_step = 0
checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
if os.path.exists(checkpoint_last) and os.listdir(checkpoint_last):
args.model_name_or_path = os.path.join(checkpoint_last, 'pytorch_model.bin')
args.config_name = os.path.join(checkpoint_last, 'config.json')
idx_file = os.path.join(checkpoint_last, 'idx_file.txt')
with open(idx_file, encoding='utf-8') as idxf:
args.start_epoch = int(idxf.readlines()[0].strip()) + 1
step_file = os.path.join(checkpoint_last, 'step_file.txt')
if os.path.exists(step_file):
with open(step_file, encoding='utf-8') as stepf:
args.start_step = int(stepf.readlines()[0].strip())
logger.info("reload model from {}, resume from {} epoch".format(checkpoint_last, args.start_epoch))
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
cache_dir=args.cache_dir if args.cache_dir else None)
config.num_labels=1
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None)
if args.block_size <= 0:
args.block_size = tokenizer.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer.max_len_single_sentence)
if args.model_name_or_path:
model = model_class.from_pretrained(args.model_name_or_path,
from_tf=bool('.ckpt' in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None)
else:
model = model_class(config)
model=Model(model,config,tokenizer,args)
if args.local_rank == 0:
torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache
train_dataset = TextDataset(tokenizer, args,args.train_data_file)
if args.local_rank == 0:
torch.distributed.barrier()
train(args, train_dataset, model, tokenizer)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
checkpoint_prefix = 'checkpoint-best-mrr/model.bin'
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
model.load_state_dict(torch.load(output_dir))
model.to(args.device)
result=evaluate(args, model, tokenizer)
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(round(result[key],4)))
if args.do_test and args.local_rank in [-1, 0]:
checkpoint_prefix = 'checkpoint-best-mrr/model.bin'
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
model.load_state_dict(torch.load(output_dir))
model.to(args.device)
test(args, model, tokenizer)
return results
if __name__ == "__main__":
main()
|
CodeGen-main
|
CodeXGLUE/Text-Code/NL-code-search-Adv/code/run.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import torch.nn as nn
import torch
from torch.autograd import Variable
import copy
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss, MSELoss
class Model(nn.Module):
def __init__(self, encoder,config,tokenizer,args):
super(Model, self).__init__()
self.encoder = encoder
self.config=config
self.tokenizer=tokenizer
self.args=args
def forward(self, code_inputs,nl_inputs,return_vec=False):
bs=code_inputs.shape[0]
inputs=torch.cat((code_inputs,nl_inputs),0)
outputs=self.encoder(inputs,attention_mask=inputs.ne(1))[1]
code_vec=outputs[:bs]
nl_vec=outputs[bs:]
if return_vec:
return code_vec,nl_vec
scores=(nl_vec[:,None,:]*code_vec[None,:,:]).sum(-1)
loss_fct = CrossEntropyLoss()
loss = loss_fct(scores, torch.arange(bs, device=scores.device))
return loss,code_vec,nl_vec
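# Loss sketch (illustrative, runnable on CPU): the forward pass scores every NL vector
# against every code vector in the batch and treats the diagonal as the positive pair,
# i.e. a cross-entropy over in-batch negatives. The tensor sizes below are made up.
if __name__ == '__main__':
    bs, hidden = 4, 8
    code_vec, nl_vec = torch.randn(bs, hidden), torch.randn(bs, hidden)
    scores = (nl_vec[:, None, :] * code_vec[None, :, :]).sum(-1)   # (bs, bs) similarity matrix
    loss = CrossEntropyLoss()(scores, torch.arange(bs))            # diagonal entries are the targets
    print('toy in-batch-negatives loss:', loss.item())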
|
CodeGen-main
|
CodeXGLUE/Text-Code/NL-code-search-Adv/code/model.py
|
#!/usr/bin/python
'''
This script was adapted from the original version by hieuhoang1972 which is part of MOSES.
'''
# $Id: bleu.py 1307 2007-03-14 22:22:36Z hieuhoang1972 $
'''Provides:
cook_refs(refs, n=4): Transform a list of reference sentences as strings into a form usable by cook_test().
cook_test(test, refs, n=4): Transform a test sentence as a string (together with the cooked reference sentences) into a form usable by score_cooked().
score_cooked(alltest, n=4): Score a list of cooked test sentences.
score_set(s, testid, refids, n=4): Interface with dataset.py; calculate BLEU score of testid against refids.
The reason for breaking the BLEU computation into three phases cook_refs(), cook_test(), and score_cooked() is to allow the caller to calculate BLEU scores for multiple test sets as efficiently as possible.
'''
import sys, math, re, xml.sax.saxutils
import subprocess
import os
# Added to bypass NIST-style pre-processing of hyp and ref files -- wade
nonorm = 0
preserve_case = False
eff_ref_len = "shortest"
normalize1 = [
('<skipped>', ''), # strip "skipped" tags
(r'-\n', ''), # strip end-of-line hyphenation and join lines
(r'\n', ' '), # join lines
# (r'(\d)\s+(?=\d)', r'\1'), # join digits
]
normalize1 = [(re.compile(pattern), replace) for (pattern, replace) in normalize1]
normalize2 = [
(r'([\{-\~\[-\` -\&\(-\+\:-\@\/])',r' \1 '), # tokenize punctuation. apostrophe is missing
(r'([^0-9])([\.,])',r'\1 \2 '), # tokenize period and comma unless preceded by a digit
(r'([\.,])([^0-9])',r' \1 \2'), # tokenize period and comma unless followed by a digit
(r'([0-9])(-)',r'\1 \2 ') # tokenize dash when preceded by a digit
]
normalize2 = [(re.compile(pattern), replace) for (pattern, replace) in normalize2]
def normalize(s):
'''Normalize and tokenize text. This is lifted from NIST mteval-v11a.pl.'''
# Added to bypass NIST-style pre-processing of hyp and ref files -- wade
if (nonorm):
return s.split()
if type(s) is not str:
s = " ".join(s)
# language-independent part:
for (pattern, replace) in normalize1:
s = re.sub(pattern, replace, s)
s = xml.sax.saxutils.unescape(s, {'"':'"'})
# language-dependent part (assuming Western languages):
s = " %s " % s
if not preserve_case:
s = s.lower() # this might not be identical to the original
for (pattern, replace) in normalize2:
s = re.sub(pattern, replace, s)
return s.split()
def count_ngrams(words, n=4):
counts = {}
for k in range(1,n+1):
for i in range(len(words)-k+1):
ngram = tuple(words[i:i+k])
counts[ngram] = counts.get(ngram, 0)+1
return counts
def cook_refs(refs, n=4):
'''Takes a list of reference sentences for a single segment
and returns an object that encapsulates everything that BLEU
needs to know about them.'''
refs = [normalize(ref) for ref in refs]
maxcounts = {}
for ref in refs:
counts = count_ngrams(ref, n)
for (ngram,count) in counts.items():
maxcounts[ngram] = max(maxcounts.get(ngram,0), count)
return ([len(ref) for ref in refs], maxcounts)
def cook_test(test, item, n=4):
'''Takes a test sentence and returns an object that
encapsulates everything that BLEU needs to know about it.'''
(reflens, refmaxcounts)=item
test = normalize(test)
result = {}
result["testlen"] = len(test)
# Calculate effective reference sentence length.
if eff_ref_len == "shortest":
result["reflen"] = min(reflens)
elif eff_ref_len == "average":
result["reflen"] = float(sum(reflens))/len(reflens)
elif eff_ref_len == "closest":
min_diff = None
for reflen in reflens:
if min_diff is None or abs(reflen-len(test)) < min_diff:
min_diff = abs(reflen-len(test))
result['reflen'] = reflen
result["guess"] = [max(len(test)-k+1,0) for k in range(1,n+1)]
result['correct'] = [0]*n
counts = count_ngrams(test, n)
for (ngram, count) in counts.items():
result["correct"][len(ngram)-1] += min(refmaxcounts.get(ngram,0), count)
return result
def score_cooked(allcomps, n=4, ground=0, smooth=1):
totalcomps = {'testlen':0, 'reflen':0, 'guess':[0]*n, 'correct':[0]*n}
for comps in allcomps:
for key in ['testlen','reflen']:
totalcomps[key] += comps[key]
for key in ['guess','correct']:
for k in range(n):
totalcomps[key][k] += comps[key][k]
logbleu = 0.0
all_bleus = []
for k in range(n):
correct = totalcomps['correct'][k]
guess = totalcomps['guess'][k]
addsmooth = 0
if smooth == 1 and k > 0:
addsmooth = 1
logbleu += math.log(correct + addsmooth + sys.float_info.min)-math.log(guess + addsmooth+ sys.float_info.min)
if guess == 0:
all_bleus.append(-10000000)
else:
all_bleus.append(math.log(correct + sys.float_info.min)-math.log( guess ))
logbleu /= float(n)
all_bleus.insert(0, logbleu)
brevPenalty = min(0,1-float(totalcomps['reflen'] + 1)/(totalcomps['testlen'] + 1))
for i in range(len(all_bleus)):
if i ==0:
all_bleus[i] += brevPenalty
all_bleus[i] = math.exp(all_bleus[i])
return all_bleus
def bleu(refs, candidate, ground=0, smooth=1):
refs = cook_refs(refs)
test = cook_test(candidate, refs)
return score_cooked([test], ground=ground, smooth=smooth)
def splitPuncts(line):
return ' '.join(re.findall(r"[\w]+|[^\s\w]", line))
def computeMaps(predictions, goldfile):
predictionMap = {}
goldMap = {}
gf = open(goldfile, 'r')
for row in predictions:
cols = row.strip().split('\t')
if len(cols) == 1:
(rid, pred) = (cols[0], '')
else:
(rid, pred) = (cols[0], cols[1])
predictionMap[rid] = [splitPuncts(pred.strip().lower())]
for row in gf:
(rid, pred) = row.split('\t')
if rid in predictionMap: # Only insert if the id exists for the method
if rid not in goldMap:
goldMap[rid] = []
goldMap[rid].append(splitPuncts(pred.strip().lower()))
sys.stderr.write('Total: ' + str(len(goldMap)) + '\n')
return (goldMap, predictionMap)
#m1 is the reference map
#m2 is the prediction map
def bleuFromMaps(m1, m2):
score = [0] * 5
num = 0.0
for key in m1:
if key in m2:
bl = bleu(m1[key], m2[key][0])
score = [ score[i] + bl[i] for i in range(0, len(bl))]
num += 1
return [s * 100.0 / num for s in score]
if __name__ == '__main__':
reference_file = sys.argv[1]
predictions = []
for row in sys.stdin:
predictions.append(row)
(goldMap, predictionMap) = computeMaps(predictions, reference_file)
print (bleuFromMaps(goldMap, predictionMap)[0])
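# Usage sketch (illustrative): the gold file and the predictions on stdin are both
# tab-separated "<id>\t<text>" lines; ids present in the predictions are matched against
# the gold file and smoothed BLEU-4 (x100) is printed, e.g. (hypothetical file names)
#   python evaluator.py references.txt < predictions.txt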
|
CodeGen-main
|
CodeXGLUE/Code-Text/code-to-text/evaluator/evaluator.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss.
"""
from __future__ import absolute_import
import numpy as np
import os
import sys
from torch import LongTensor
import bleu
import pickle
import torch
import json
import random
import logging
import argparse
from io import open
from itertools import cycle
import torch.nn as nn
from model import Seq2Seq
from tqdm import tqdm, trange
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
from torch.utils.data.distributed import DistributedSampler
from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
RobertaConfig, RobertaModel, RobertaTokenizer)
from pathlib import Path
from codegen_sources.wrappers.models import Model, ModelPython, ModelConfig, ModelPythonFunc, ModelJava
from codegen_sources.wrappers.tokenizer import JavaTokenizer, PythonTokenizer, RobertaPythonTokenizer, RobertaJavaTokenizer, Tokenizer
MODEL_CLASSES = {'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer),
'xlm_python': (ModelConfig, ModelPython, PythonTokenizer),
'xlm_java': (ModelConfig, ModelJava, JavaTokenizer),
'roberta_python': (ModelConfig, ModelPython, RobertaPythonTokenizer),
'roberta_java': (ModelConfig, ModelJava, RobertaJavaTokenizer),
'xlm_python_func': (ModelConfig, ModelPythonFunc, JavaTokenizer),
'roberta_python_func': (ModelConfig, ModelPythonFunc, RobertaPythonTokenizer),
}
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
class Example(object):
"""A single training/test example."""
def __init__(self,
idx,
source,
target,
):
self.idx = idx
self.source = source
self.target = target
def read_examples(filename, args):
"""Read examples from filename."""
examples=[]
with open(filename,encoding="utf-8") as f:
for idx, line in enumerate(f):
line=line.strip()
js=json.loads(line)
if 'idx' not in js:
js['idx']=idx
if 'xlm' in args.model_type:
code=js['code']
docstring = js['docstring']
if "python" in filename:
assert docstring in code
code = code.replace(docstring, "")
else:
code=' '.join(js['code_tokens']).replace('\n',' ')
code=' '.join(code.strip().split())
nl=' '.join(js['docstring_tokens']).replace('\n','')
nl=' '.join(nl.strip().split())
examples.append(
Example(
idx = idx,
source=code,
target = nl,
)
)
return examples
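# Input-format sketch (illustrative): for the default model types each .jsonl line is
# assumed to provide CodeSearchNet-style fields, e.g.
#   {"code_tokens": [...], "docstring_tokens": [...], "idx": 0}
# while the 'xlm_*' model types read the raw "code" and "docstring" strings and strip the
# docstring out of the Python source before tokenizing.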
class InputFeatures(object):
"""A single training/test features for a example."""
def __init__(self,
example_id,
source_ids,
target_ids,
source_mask,
target_mask,
):
self.example_id = example_id
self.source_ids = source_ids
self.target_ids = target_ids
self.source_mask = source_mask
self.target_mask = target_mask
def convert_examples_to_features(examples, tokenizer, args,stage=None):
features = []
for example_index, example in enumerate(examples):
#source
source_tokens = tokenizer.tokenize(example.source)[:args.max_source_length-2]
source_tokens =[tokenizer.cls_token]+source_tokens+[tokenizer.sep_token]
source_ids = tokenizer.convert_tokens_to_ids(source_tokens)
source_mask = [1] * (len(source_tokens))
padding_length = args.max_source_length - len(source_ids)
source_ids+=[tokenizer.pad_token_id]*padding_length
source_mask+=[0]*padding_length
#target
if stage=="test":
target_tokens = tokenizer.tokenize("None", is_text=True) if isinstance(tokenizer, Tokenizer) else tokenizer.tokenize("None")
else:
target_tokens = tokenizer.tokenize(example.target, is_text=True)[:args.max_target_length-2] if isinstance(tokenizer, Tokenizer) else tokenizer.tokenize(example.target)[:args.max_target_length-2]
target_tokens = [tokenizer.cls_token]+target_tokens+[tokenizer.sep_token]
target_ids = tokenizer.convert_tokens_to_ids(target_tokens)
target_mask = [1] *len(target_ids)
padding_length = args.max_target_length - len(target_ids)
target_ids+=[tokenizer.pad_token_id]*padding_length
target_mask+=[0]*padding_length
if example_index < 5:
if stage=='train':
logger.info("*** Example ***")
logger.info("idx: {}".format(example.idx))
logger.info("source_tokens: {}".format([x.replace('\u0120','_') for x in source_tokens]))
logger.info("source_ids: {}".format(' '.join(map(str, source_ids))))
logger.info("source_mask: {}".format(' '.join(map(str, source_mask))))
logger.info("target_tokens: {}".format([x.replace('\u0120','_') for x in target_tokens]))
logger.info("target_ids: {}".format(' '.join(map(str, target_ids))))
logger.info("target_mask: {}".format(' '.join(map(str, target_mask))))
features.append(
InputFeatures(
example_index,
source_ids,
target_ids,
source_mask,
target_mask,
)
)
return features
def set_seed(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--model_type", default=None, type=str, required=True,
help="Model type: e.g. roberta")
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model: e.g. roberta-base" )
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--load_model_path", default=None, type=str,
help="Path to trained model: Should contain the .bin files" )
## Other parameters
parser.add_argument("--train_filename", default=None, type=str,
help="The train filename. Should contain the .jsonl files for this task.")
parser.add_argument("--dev_filename", default=None, type=str,
help="The dev filename. Should contain the .jsonl files for this task.")
parser.add_argument("--test_filename", default=None, type=str,
help="The test filename. Should contain the .jsonl files for this task.")
parser.add_argument("--config_name", default="", type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--max_source_length", default=64, type=int,
help="The maximum total source sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--max_target_length", default=32, type=int,
help="The maximum total target sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_test", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument("--train_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--eval_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--beam_size", default=10, type=int,
help="beam size for beam search")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=3, type=int,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--eval_steps", default=-1, type=int,
                        help="Number of steps between evaluations.")
parser.add_argument("--train_steps", default=-1, type=int,
                        help="Total number of training steps.")
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
# print arguments
args = parser.parse_args()
logger.info(args)
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
    else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1))
args.device = device
# Set seed
set_seed(args.seed)
# make dir if output_dir not exist
    if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path)
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,do_lower_case=args.do_lower_case)
    # build model
encoder = model_class.from_pretrained(args.model_name_or_path,from_tf=bool('.ckpt' in args.model_name_or_path),config=config)
decoder_layer = nn.TransformerDecoderLayer(d_model=config.hidden_size, nhead=8)
decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)
model=Seq2Seq(encoder=encoder,decoder=decoder,config=config,
beam_size=args.beam_size,max_length=args.max_target_length,
sos_id=tokenizer.cls_token_id,eos_id=tokenizer.sep_token_id)
if args.load_model_path is not None:
logger.info("reload model from {}".format(args.load_model_path))
model.load_state_dict(torch.load(args.load_model_path))
model.to(device)
if args.local_rank != -1:
# Distributed training
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
elif args.n_gpu > 1:
# multi-gpu training
model = torch.nn.DataParallel(model)
if args.do_train:
# Prepare training data loader
train_examples = read_examples(args.train_filename, args)
train_features = convert_examples_to_features(train_examples, tokenizer,args,stage='train')
all_source_ids = torch.tensor([f.source_ids for f in train_features], dtype=torch.long)
all_source_mask = torch.tensor([f.source_mask for f in train_features], dtype=torch.long)
all_target_ids = torch.tensor([f.target_ids for f in train_features], dtype=torch.long)
all_target_mask = torch.tensor([f.target_mask for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_source_ids,all_source_mask,all_target_ids,all_target_mask)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size//args.gradient_accumulation_steps)
num_train_optimization_steps = args.train_steps
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps=int(len(train_dataloader)*args.num_train_epochs*0.1),
num_training_steps=len(train_dataloader)*args.num_train_epochs)
#Start training
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num epoch = %d", args.num_train_epochs)
model.train()
dev_dataset={}
nb_tr_examples, nb_tr_steps,tr_loss,global_step,best_bleu,best_loss = 0, 0,0,0,0,1e6
for epoch in range(args.num_train_epochs):
bar = train_dataloader
for step, batch in enumerate(bar):
batch = tuple(t.to(device) for t in batch)
source_ids,source_mask,target_ids,target_mask = batch
loss,_,_ = model(source_ids=source_ids,source_mask=source_mask,target_ids=target_ids,target_mask=target_mask)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
tr_loss += loss.item()
train_loss=round(tr_loss*args.gradient_accumulation_steps/(nb_tr_steps+1),4)
if step % 100 == 0:
print("step {}: epoch {} loss {}".format(step, epoch,train_loss))
nb_tr_examples += source_ids.size(0)
nb_tr_steps += 1
loss.backward()
if (nb_tr_steps + 1) % args.gradient_accumulation_steps == 0:
#Update parameters
optimizer.step()
optimizer.zero_grad()
scheduler.step()
global_step += 1
if args.do_eval:
#Eval model with dev dataset
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
eval_flag=False
if 'dev_loss' in dev_dataset:
eval_examples,eval_data=dev_dataset['dev_loss']
else:
eval_examples = read_examples(args.dev_filename, args)
eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='dev')
all_source_ids = torch.tensor([f.source_ids for f in eval_features], dtype=torch.long)
all_source_mask = torch.tensor([f.source_mask for f in eval_features], dtype=torch.long)
all_target_ids = torch.tensor([f.target_ids for f in eval_features], dtype=torch.long)
all_target_mask = torch.tensor([f.target_mask for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_source_ids,all_source_mask,all_target_ids,all_target_mask)
dev_dataset['dev_loss']=eval_examples,eval_data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
logger.info("\n***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
                # Start evaluating the model
model.eval()
eval_loss,tokens_num = 0,0
for batch in eval_dataloader:
batch = tuple(t.to(device) for t in batch)
source_ids,source_mask,target_ids,target_mask = batch
with torch.no_grad():
_,loss,num = model(source_ids=source_ids,source_mask=source_mask,
target_ids=target_ids,target_mask=target_mask)
eval_loss += loss.sum().item()
tokens_num += num.sum().item()
                # Print loss on the dev dataset
model.train()
eval_loss = eval_loss / tokens_num
result = {'eval_ppl': round(np.exp(eval_loss),5),
'global_step': global_step+1,
'train_loss': round(train_loss,5)}
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
logger.info(" "+"*"*20)
#save last checkpoint
last_output_dir = os.path.join(args.output_dir, 'checkpoint-last')
if not os.path.exists(last_output_dir):
os.makedirs(last_output_dir)
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
output_model_file = os.path.join(last_output_dir, "pytorch_model.bin")
torch.save(model_to_save.state_dict(), output_model_file)
if eval_loss<best_loss:
logger.info(" Best ppl:%s",round(np.exp(eval_loss),5))
logger.info(" "+"*"*20)
best_loss=eval_loss
# Save best checkpoint for best ppl
output_dir = os.path.join(args.output_dir, 'checkpoint-best-ppl')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
output_model_file = os.path.join(output_dir, "pytorch_model.bin")
torch.save(model_to_save.state_dict(), output_model_file)
#Calculate bleu
if 'dev_bleu' in dev_dataset:
eval_examples,eval_data=dev_dataset['dev_bleu']
else:
eval_examples = read_examples(args.dev_filename, args)
eval_examples = random.sample(eval_examples,min(1000,len(eval_examples)))
eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='test')
all_source_ids = torch.tensor([f.source_ids for f in eval_features], dtype=torch.long)
all_source_mask = torch.tensor([f.source_mask for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_source_ids,all_source_mask)
dev_dataset['dev_bleu']=eval_examples,eval_data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
model.eval()
p=[]
for batch in eval_dataloader:
batch = tuple(t.to(device) for t in batch)
source_ids,source_mask= batch
with torch.no_grad():
preds = model(source_ids=source_ids,source_mask=source_mask)
for pred in preds:
t=pred[0].cpu().numpy()
t=list(t)
if 0 in t:
t=t[:t.index(0)]
text = tokenizer.decode(t, clean_up_tokenization_spaces=False, text=True)\
if isinstance(tokenizer, JavaTokenizer) or isinstance(tokenizer, PythonTokenizer)\
else tokenizer.decode(t,clean_up_tokenization_spaces=False)
p.append(text)
model.train()
predictions=[]
with open(os.path.join(args.output_dir,"dev.output"),'w') as f, open(os.path.join(args.output_dir,"dev.gold"),'w') as f1:
for ref,gold in zip(p,eval_examples):
predictions.append(str(gold.idx)+'\t'+ref)
f.write(str(gold.idx)+'\t'+ref+'\n')
f1.write(str(gold.idx)+'\t'+gold.target+'\n')
(goldMap, predictionMap) = bleu.computeMaps(predictions, os.path.join(args.output_dir, "dev.gold"))
dev_bleu=round(bleu.bleuFromMaps(goldMap, predictionMap)[0],2)
logger.info(" %s = %s "%("bleu-4",str(dev_bleu)))
logger.info(" "+"*"*20)
if dev_bleu>best_bleu:
logger.info(" Best bleu:%s",dev_bleu)
logger.info(" "+"*"*20)
best_bleu=dev_bleu
# Save best checkpoint for best bleu
output_dir = os.path.join(args.output_dir, 'checkpoint-best-bleu')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
output_model_file = os.path.join(output_dir, "pytorch_model.bin")
torch.save(model_to_save.state_dict(), output_model_file)
if args.do_test:
files=[]
if args.dev_filename is not None:
files.append(args.dev_filename)
if args.test_filename is not None:
files.append(args.test_filename)
for idx,file in enumerate(files):
logger.info("Test file: {}".format(file))
eval_examples = read_examples(file, args)
eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='test')
all_source_ids = torch.tensor([f.source_ids for f in eval_features], dtype=torch.long)
all_source_mask = torch.tensor([f.source_mask for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_source_ids,all_source_mask)
# Calculate bleu
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
model.eval()
p=[]
for batch in tqdm(eval_dataloader,total=len(eval_dataloader)):
batch = tuple(t.to(device) for t in batch)
source_ids,source_mask= batch
with torch.no_grad():
preds = model(source_ids=source_ids,source_mask=source_mask)
for pred in preds:
t=pred[0].cpu().numpy()
t=list(t)
if 0 in t:
t=t[:t.index(0)]
text = tokenizer.decode(t,clean_up_tokenization_spaces=False, text=True) \
if isinstance(tokenizer, JavaTokenizer) or isinstance(tokenizer, PythonTokenizer) \
else tokenizer.decode(t, clean_up_tokenization_spaces=False)
p.append(text)
model.train()
predictions=[]
with open(os.path.join(args.output_dir,"test_{}.output".format(str(idx))),'w') as f, open(os.path.join(args.output_dir,"test_{}.gold".format(str(idx))),'w') as f1:
for ref,gold in zip(p,eval_examples):
predictions.append(str(gold.idx)+'\t'+ref)
f.write(str(gold.idx)+'\t'+ref+'\n')
f1.write(str(gold.idx)+'\t'+gold.target+'\n')
(goldMap, predictionMap) = bleu.computeMaps(predictions, os.path.join(args.output_dir, "test_{}.gold".format(idx)))
dev_bleu=round(bleu.bleuFromMaps(goldMap, predictionMap)[0],2)
logger.info(" %s = %s "%("bleu-4",str(dev_bleu)))
logger.info(" "+"*"*20)
if __name__ == "__main__":
main()
|
CodeGen-main
|
CodeXGLUE/Code-Text/code-to-text/code/run.py
|
#!/usr/bin/python
'''
This script was adapted from the original version by hieuhoang1972 which is part of MOSES.
'''
# $Id: bleu.py 1307 2007-03-14 22:22:36Z hieuhoang1972 $
'''Provides:
cook_refs(refs, n=4): Transform a list of reference sentences as strings into a form usable by cook_test().
cook_test(test, refs, n=4): Transform a test sentence as a string (together with the cooked reference sentences) into a form usable by score_cooked().
score_cooked(alltest, n=4): Score a list of cooked test sentences.
score_set(s, testid, refids, n=4): Interface with dataset.py; calculate BLEU score of testid against refids.
The reason for breaking the BLEU computation into three phases cook_refs(), cook_test(), and score_cooked() is to allow the caller to calculate BLEU scores for multiple test sets as efficiently as possible.
'''
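# A minimal usage sketch of the three-phase API described above (added for
# illustration only; the sentences are made up, and the code is kept in a comment
# so that importing this module stays side-effect free):
#
#   refs = cook_refs(["the cat sat on the mat"])                   # phase 1: cook the references once
#   cands = ["the cat sat on the mat", "a cat on a mat"]
#   cooked = [cook_test(c, refs) for c in cands]                   # phase 2: cook each candidate
#   bleu_and_precisions = score_cooked(cooked)                     # phase 3: corpus-level score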
import sys, math, re, xml.sax.saxutils
import subprocess
import os
# Added to bypass NIST-style pre-processing of hyp and ref files -- wade
nonorm = 0
preserve_case = False
eff_ref_len = "shortest"
normalize1 = [
('<skipped>', ''), # strip "skipped" tags
(r'-\n', ''), # strip end-of-line hyphenation and join lines
(r'\n', ' '), # join lines
# (r'(\d)\s+(?=\d)', r'\1'), # join digits
]
normalize1 = [(re.compile(pattern), replace) for (pattern, replace) in normalize1]
normalize2 = [
(r'([\{-\~\[-\` -\&\(-\+\:-\@\/])',r' \1 '), # tokenize punctuation. apostrophe is missing
(r'([^0-9])([\.,])',r'\1 \2 '), # tokenize period and comma unless preceded by a digit
(r'([\.,])([^0-9])',r' \1 \2'), # tokenize period and comma unless followed by a digit
(r'([0-9])(-)',r'\1 \2 ') # tokenize dash when preceded by a digit
]
normalize2 = [(re.compile(pattern), replace) for (pattern, replace) in normalize2]
def normalize(s):
'''Normalize and tokenize text. This is lifted from NIST mteval-v11a.pl.'''
# Added to bypass NIST-style pre-processing of hyp and ref files -- wade
if (nonorm):
return s.split()
if type(s) is not str:
s = " ".join(s)
# language-independent part:
for (pattern, replace) in normalize1:
s = re.sub(pattern, replace, s)
s = xml.sax.saxutils.unescape(s, {'"':'"'})
# language-dependent part (assuming Western languages):
s = " %s " % s
if not preserve_case:
s = s.lower() # this might not be identical to the original
for (pattern, replace) in normalize2:
s = re.sub(pattern, replace, s)
return s.split()
def count_ngrams(words, n=4):
counts = {}
for k in range(1,n+1):
for i in range(len(words)-k+1):
ngram = tuple(words[i:i+k])
counts[ngram] = counts.get(ngram, 0)+1
return counts
def cook_refs(refs, n=4):
'''Takes a list of reference sentences for a single segment
and returns an object that encapsulates everything that BLEU
needs to know about them.'''
refs = [normalize(ref) for ref in refs]
maxcounts = {}
for ref in refs:
counts = count_ngrams(ref, n)
for (ngram,count) in counts.items():
maxcounts[ngram] = max(maxcounts.get(ngram,0), count)
return ([len(ref) for ref in refs], maxcounts)
def cook_test(test, item, n=4):
'''Takes a test sentence and returns an object that
encapsulates everything that BLEU needs to know about it.'''
(reflens, refmaxcounts)=item
test = normalize(test)
result = {}
result["testlen"] = len(test)
# Calculate effective reference sentence length.
if eff_ref_len == "shortest":
result["reflen"] = min(reflens)
elif eff_ref_len == "average":
result["reflen"] = float(sum(reflens))/len(reflens)
elif eff_ref_len == "closest":
min_diff = None
for reflen in reflens:
if min_diff is None or abs(reflen-len(test)) < min_diff:
min_diff = abs(reflen-len(test))
result['reflen'] = reflen
result["guess"] = [max(len(test)-k+1,0) for k in range(1,n+1)]
result['correct'] = [0]*n
counts = count_ngrams(test, n)
for (ngram, count) in counts.items():
result["correct"][len(ngram)-1] += min(refmaxcounts.get(ngram,0), count)
return result
def score_cooked(allcomps, n=4, ground=0, smooth=1):
totalcomps = {'testlen':0, 'reflen':0, 'guess':[0]*n, 'correct':[0]*n}
for comps in allcomps:
for key in ['testlen','reflen']:
totalcomps[key] += comps[key]
for key in ['guess','correct']:
for k in range(n):
totalcomps[key][k] += comps[key][k]
logbleu = 0.0
all_bleus = []
for k in range(n):
correct = totalcomps['correct'][k]
guess = totalcomps['guess'][k]
addsmooth = 0
if smooth == 1 and k > 0:
addsmooth = 1
logbleu += math.log(correct + addsmooth + sys.float_info.min)-math.log(guess + addsmooth+ sys.float_info.min)
if guess == 0:
all_bleus.append(-10000000)
else:
all_bleus.append(math.log(correct + sys.float_info.min)-math.log( guess ))
logbleu /= float(n)
all_bleus.insert(0, logbleu)
brevPenalty = min(0,1-float(totalcomps['reflen'] + 1)/(totalcomps['testlen'] + 1))
for i in range(len(all_bleus)):
if i ==0:
all_bleus[i] += brevPenalty
all_bleus[i] = math.exp(all_bleus[i])
return all_bleus
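# For reference, score_cooked above follows the standard smoothed BLEU definition
# (up to the +1 offsets used for the brevity penalty):
#   BLEU = BP * exp( (1/N) * sum_{n=1..N} log p_n ),   BP = exp(min(0, 1 - r/c))
# where p_n are the modified n-gram precisions (add-one smoothed for n > 1 when
# smooth == 1), r the effective reference length and c the candidate length.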
def bleu(refs, candidate, ground=0, smooth=1):
refs = cook_refs(refs)
test = cook_test(candidate, refs)
return score_cooked([test], ground=ground, smooth=smooth)
def splitPuncts(line):
return ' '.join(re.findall(r"[\w]+|[^\s\w]", line))
def computeMaps(predictions, goldfile):
predictionMap = {}
goldMap = {}
gf = open(goldfile, 'r')
for row in predictions:
cols = row.strip().split('\t')
if len(cols) == 1:
(rid, pred) = (cols[0], '')
else:
(rid, pred) = (cols[0], cols[1])
predictionMap[rid] = [splitPuncts(pred.strip().lower())]
for row in gf:
(rid, pred) = row.split('\t')
if rid in predictionMap: # Only insert if the id exists for the method
if rid not in goldMap:
goldMap[rid] = []
goldMap[rid].append(splitPuncts(pred.strip().lower()))
sys.stderr.write('Total: ' + str(len(goldMap)) + '\n')
return (goldMap, predictionMap)
#m1 is the reference map
#m2 is the prediction map
def bleuFromMaps(m1, m2):
score = [0] * 5
num = 0.0
for key in m1:
if key in m2:
bl = bleu(m1[key], m2[key][0])
score = [ score[i] + bl[i] for i in range(0, len(bl))]
num += 1
return [s * 100.0 / num for s in score]
if __name__ == '__main__':
reference_file = sys.argv[1]
predictions = []
for row in sys.stdin:
predictions.append(row)
(goldMap, predictionMap) = computeMaps(predictions, reference_file)
print (bleuFromMaps(goldMap, predictionMap)[0])
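# Example invocation (hypothetical file names, matching the dev.gold / dev.output
# files written by run.py above): references as argv[1], predictions on stdin:
#   python bleu.py dev.gold < dev.output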
|
CodeGen-main
|
CodeXGLUE/Code-Text/code-to-text/code/bleu.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
import torch.nn as nn
import torch
from torch.autograd import Variable
import copy
class Seq2Seq(nn.Module):
"""
        Build Sequence-to-Sequence.
Parameters:
* `encoder`- encoder of seq2seq model. e.g. roberta
* `decoder`- decoder of seq2seq model. e.g. transformer
* `config`- configuration of encoder model.
* `beam_size`- beam size for beam search.
* `max_length`- max length of target for beam search.
        * `sos_id`- start-of-sequence symbol id in target for beam search.
        * `eos_id`- end-of-sequence symbol id in target for beam search.
"""
def __init__(self, encoder,decoder,config,beam_size=None,max_length=None,sos_id=None,eos_id=None):
super(Seq2Seq, self).__init__()
self.encoder = encoder
self.decoder=decoder
self.config=config
self.register_buffer("bias", torch.tril(torch.ones(2048, 2048)))
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.lsm = nn.LogSoftmax(dim=-1)
self.tie_weights()
self.beam_size=beam_size
self.max_length=max_length
self.sos_id=sos_id
self.eos_id=eos_id
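    # A minimal construction sketch (for illustration; it mirrors how run.py above
    # builds the model and assumes `config` and `tokenizer` are an already-loaded
    # RoBERTa config and tokenizer):
    #   encoder = RobertaModel.from_pretrained("roberta-base", config=config)
    #   decoder_layer = nn.TransformerDecoderLayer(d_model=config.hidden_size, nhead=8)
    #   decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)
    #   model = Seq2Seq(encoder, decoder, config, beam_size=10, max_length=32,
    #                   sos_id=tokenizer.cls_token_id, eos_id=tokenizer.sep_token_id)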
    def _tie_or_clone_weights(self, first_module, second_module):
        """ Tie or clone module weights depending on whether we are using TorchScript or not
"""
if self.config.torchscript:
first_module.weight = nn.Parameter(second_module.weight.clone())
else:
first_module.weight = second_module.weight
def tie_weights(self):
""" Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
self._tie_or_clone_weights(self.lm_head,
self.encoder.embeddings.word_embeddings)
def forward(self, source_ids=None,source_mask=None,target_ids=None,target_mask=None,args=None):
outputs = self.encoder(source_ids, attention_mask=source_mask)
encoder_output = outputs[0].permute([1,0,2]).contiguous()
if target_ids is not None:
attn_mask=-1e4 *(1-self.bias[:target_ids.shape[1],:target_ids.shape[1]])
tgt_embeddings = self.encoder.embeddings(target_ids).permute([1,0,2]).contiguous()
out = self.decoder(tgt_embeddings,encoder_output,tgt_mask=attn_mask,memory_key_padding_mask=(1-source_mask).bool())
hidden_states = torch.tanh(self.dense(out)).permute([1,0,2]).contiguous()
lm_logits = self.lm_head(hidden_states)
# Shift so that tokens < n predict n
active_loss = target_mask[..., 1:].ne(0).view(-1) == 1
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = target_ids[..., 1:].contiguous()
# Flatten the tokens
loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1))[active_loss],
shift_labels.view(-1)[active_loss])
outputs = loss,loss*active_loss.sum(),active_loss.sum()
return outputs
else:
#Predict
preds=[]
zero=torch.cuda.LongTensor(1).fill_(0)
for i in range(source_ids.shape[0]):
context=encoder_output[:,i:i+1]
context_mask=source_mask[i:i+1,:]
beam = Beam(self.beam_size,self.sos_id,self.eos_id)
input_ids=beam.getCurrentState()
context=context.repeat(1, self.beam_size,1)
context_mask=context_mask.repeat(self.beam_size,1)
for _ in range(self.max_length):
if beam.done():
break
attn_mask=-1e4 *(1-self.bias[:input_ids.shape[1],:input_ids.shape[1]])
tgt_embeddings = self.encoder.embeddings(input_ids).permute([1,0,2]).contiguous()
out = self.decoder(tgt_embeddings,context,tgt_mask=attn_mask,memory_key_padding_mask=(1-context_mask).bool())
out = torch.tanh(self.dense(out))
hidden_states=out.permute([1,0,2]).contiguous()[:,-1,:]
out = self.lsm(self.lm_head(hidden_states)).data
beam.advance(out)
# print(input_ids)
# print(beam)
beam_origin = beam.getCurrentOrigin()
if input_ids.dtype != torch.int64 or beam_origin.dtype != torch.int64:
print('type error, casting to long')
print(f"input ids {input_ids}")
print(f"beam current origin {beam_origin}")
select = input_ids.data.index_select(0, beam_origin)
input_ids.data.copy_(select)
input_ids=torch.cat((input_ids,beam.getCurrentState()),-1)
hyp= beam.getHyp(beam.getFinal())
pred=beam.buildTargetTokens(hyp)[:self.beam_size]
pred=[torch.cat([x.view(-1) for x in p]+[zero]*(self.max_length-len(p))).view(1,-1) for p in pred]
preds.append(torch.cat(pred,0).unsqueeze(0))
preds=torch.cat(preds,0)
return preds
class Beam(object):
def __init__(self, size,sos,eos):
self.size = size
self.tt = torch.cuda
# The score for each translation on the beam.
self.scores = self.tt.FloatTensor(size).zero_()
# The backpointers at each time-step.
self.prevKs = []
# The outputs at each time-step.
self.nextYs = [self.tt.LongTensor(size)
.fill_(0)]
self.nextYs[0][0] = sos
# Has EOS topped the beam yet.
self._eos = eos
self.eosTop = False
# Time and k pair for finished.
self.finished = []
def getCurrentState(self):
"Get the outputs for the current timestep."
batch = self.tt.LongTensor(self.nextYs[-1]).view(-1, 1)
return batch
def getCurrentOrigin(self):
"Get the backpointers for the current timestep."
return self.prevKs[-1]
def advance(self, wordLk):
"""
        Given the log-probabilities `wordLk` of advancing from the last step for
        every beam entry, compute and update the beam search state.
        Parameters:
        * `wordLk`- probs of advancing from the last step (K x words)
        Completion is checked separately via `done()`.
"""
numWords = wordLk.size(1)
# Sum the previous scores.
if len(self.prevKs) > 0:
beamLk = wordLk + self.scores.unsqueeze(1).expand_as(wordLk)
# Don't let EOS have children.
for i in range(self.nextYs[-1].size(0)):
if self.nextYs[-1][i] == self._eos:
beamLk[i] = -1e20
else:
beamLk = wordLk[0]
flatBeamLk = beamLk.view(-1)
bestScores, bestScoresId = flatBeamLk.topk(self.size, 0, True, True)
self.scores = bestScores
# bestScoresId is flattened beam x word array, so calculate which
# word and beam each score came from
prevK = bestScoresId // numWords
self.prevKs.append(prevK)
self.nextYs.append((bestScoresId - prevK * numWords))
for i in range(self.nextYs[-1].size(0)):
if self.nextYs[-1][i] == self._eos:
s = self.scores[i]
self.finished.append((s, len(self.nextYs) - 1, i))
# End condition is when top-of-beam is EOS and no global score.
if self.nextYs[-1][0] == self._eos:
self.eosTop = True
def done(self):
return self.eosTop and len(self.finished) >=self.size
def getFinal(self):
if len(self.finished) == 0:
self.finished.append((self.scores[0], len(self.nextYs) - 1, 0))
self.finished.sort(key=lambda a: -a[0])
if len(self.finished) != self.size:
unfinished=[]
for i in range(self.nextYs[-1].size(0)):
if self.nextYs[-1][i] != self._eos:
s = self.scores[i]
unfinished.append((s, len(self.nextYs) - 1, i))
unfinished.sort(key=lambda a: -a[0])
self.finished+=unfinished[:self.size-len(self.finished)]
return self.finished[:self.size]
def getHyp(self, beam_res):
"""
Walk back to construct the full hypothesis.
"""
hyps=[]
for _,timestep, k in beam_res:
hyp = []
for j in range(len(self.prevKs[:timestep]) - 1, -1, -1):
hyp.append(self.nextYs[j+1][k])
k = self.prevKs[j][k]
hyps.append(hyp[::-1])
return hyps
def buildTargetTokens(self, preds):
sentence=[]
for pred in preds:
tokens = []
for tok in pred:
if tok==self._eos:
break
tokens.append(tok)
sentence.append(tokens)
return sentence
|
CodeGen-main
|
CodeXGLUE/Code-Text/code-to-text/code/model.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
import sys
from sklearn.metrics import recall_score,precision_score,f1_score
def read_answers(filename):
answers={}
with open(filename) as f:
for line in f:
line=line.strip()
idx1,idx2,label=line.split()
answers[(idx1,idx2)]=label
return answers
def read_predictions(filename):
predictions={}
with open(filename) as f:
for line in f:
line=line.strip()
idx1,idx2,label=line.split()
predictions[(idx1,idx2)]=label
return predictions
def calculate_scores(answers,predictions):
y_trues,y_preds=[],[]
for key in answers:
if key not in predictions:
logging.error("Missing prediction for ({},{}) pair.".format(key[0],key[1]))
sys.exit()
y_trues.append(answers[key])
y_preds.append(predictions[key])
scores={}
scores['Recall']=recall_score(y_trues, y_preds, average='macro')
    scores['Precision']=precision_score(y_trues, y_preds, average='macro')
scores['F1']=f1_score(y_trues, y_preds, average='macro')
return scores
def main():
import argparse
parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for BigCloneBench dataset.')
parser.add_argument('--answers', '-a',help="filename of the labels, in txt format.")
parser.add_argument('--predictions', '-p',help="filename of the leaderboard predictions, in txt format.")
args = parser.parse_args()
answers=read_answers(args.answers)
predictions=read_predictions(args.predictions)
scores=calculate_scores(answers,predictions)
print(scores)
if __name__ == '__main__':
main()
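# Example invocation (hypothetical file names):
#   python evaluator.py --answers answers.txt --predictions predictions.txt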
|
CodeGen-main
|
CodeXGLUE/Code-Code/Clone-detection-BigCloneBench/evaluator/evaluator.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss.
"""
from __future__ import absolute_import, division, print_function
import argparse
import json
import logging
import os
import random
from concurrent.futures.thread import ThreadPoolExecutor
from multiprocessing import cpu_count
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler
from torch.utils.data.distributed import DistributedSampler
import sys
from codegen_sources.wrappers.models import Model, ModelConfig, ModelJava
from codegen_sources.wrappers.tokenizer import JavaTokenizer, RobertaJavaTokenizer
try:
from torch.utils.tensorboard import SummaryWriter
except:
from tensorboardX import SummaryWriter
from tqdm import tqdm
import multiprocessing
from model import Model
cpu_cont = 16
from transformers import (AdamW, get_linear_schedule_with_warmup,
BertConfig, BertForMaskedLM, BertTokenizer,
GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
RobertaConfig, RobertaModel, RobertaTokenizer,
DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer),
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'xlm_java': (ModelConfig, ModelJava, JavaTokenizer),
'roberta_java': (ModelConfig, ModelJava, RobertaJavaTokenizer),
}
def get_example(item):
url1,url2,label,tokenizer,args,cache,url_to_code=item
if url1 in cache:
code1=cache[url1].copy()
else:
try:
code=' '.join(url_to_code[url1].split())
except:
code=""
code1=tokenizer.tokenize(code)
if url2 in cache:
code2=cache[url2].copy()
else:
try:
code=' '.join(url_to_code[url2].split())
except:
code=""
code2=tokenizer.tokenize(code)
return convert_examples_to_features(code1,code2,label,url1,url2,tokenizer,args,cache)
class InputFeatures(object):
"""A single training/test features for a example."""
def __init__(self,
input_tokens,
input_ids,
label,
url1,
url2
):
self.input_tokens = input_tokens
self.input_ids = input_ids
self.label=label
self.url1=url1
self.url2=url2
def convert_examples_to_features(code1_tokens,code2_tokens,label,url1,url2,tokenizer,args,cache):
#source
code1_tokens=code1_tokens[:args.block_size-2]
code1_tokens =[tokenizer.cls_token]+code1_tokens+[tokenizer.sep_token]
code2_tokens=code2_tokens[:args.block_size-2]
code2_tokens =[tokenizer.cls_token]+code2_tokens+[tokenizer.sep_token]
code1_ids=tokenizer.convert_tokens_to_ids(code1_tokens)
padding_length = args.block_size - len(code1_ids)
code1_ids+=[tokenizer.pad_token_id]*padding_length
code2_ids=tokenizer.convert_tokens_to_ids(code2_tokens)
padding_length = args.block_size - len(code2_ids)
code2_ids+=[tokenizer.pad_token_id]*padding_length
source_tokens=code1_tokens+code2_tokens
source_ids=code1_ids+code2_ids
return InputFeatures(source_tokens,source_ids,label,url1,url2)
class TextDataset(Dataset):
def __init__(self, tokenizer, args, file_path='train', block_size=512,pool=None):
postfix=file_path.split('/')[-1].split('.txt')[0]
self.examples = []
index_filename=file_path
logger.info("Creating features from index file at %s ", index_filename)
url_to_code={}
with open('/'.join(index_filename.split('/')[:-1])+'/data.jsonl') as f:
for line in f:
line=line.strip()
js=json.loads(line)
url_to_code[js['idx']]=js['func']
data=[]
cache={}
with open(index_filename) as f:
for line in f:
line=line.strip()
url1,url2,label=line.split('\t')
if url1 not in url_to_code or url2 not in url_to_code:
continue
if label=='0':
label=0
else:
label=1
data.append((url1,url2,label,tokenizer, args,cache,url_to_code))
if 'test' not in postfix:
data=random.sample(data,int(len(data)*0.1))
executor = ThreadPoolExecutor(max_workers=cpu_count())
self.examples=list(executor.map(get_example, data))
if 'train' in postfix:
for idx, example in enumerate(self.examples[:3]):
logger.info("*** Example ***")
logger.info("idx: {}".format(idx))
logger.info("label: {}".format(example.label))
logger.info("input_tokens: {}".format([x.replace('\u0120','_') for x in example.input_tokens]))
logger.info("input_ids: {}".format(' '.join(map(str, example.input_ids))))
def __len__(self):
return len(self.examples)
def __getitem__(self, item):
return torch.tensor(self.examples[item].input_ids),torch.tensor(self.examples[item].label)
def load_and_cache_examples(args, tokenizer, evaluate=False,test=False,pool=None):
dataset = TextDataset(tokenizer, args, file_path=args.test_data_file if test else (args.eval_data_file if evaluate else args.train_data_file),block_size=args.block_size,pool=pool)
return dataset
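# Expected on-disk layout (inferred from TextDataset above; shown only as an
# illustration, the values are made up):
#   data.jsonl    one JSON object per line, e.g. {"idx": "123", "func": "int add(int a, int b) { return a + b; }"}
#   <split>.txt   one pair per line, tab-separated: "<idx1>\t<idx2>\t<label>" with label 0 or 1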
def set_seed(seed=42):
random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
def train(args, train_dataset, model, tokenizer,pool):
""" Train the model """
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
args.max_steps=args.epoch*len( train_dataloader)
args.save_steps=len( train_dataloader)
args.warmup_steps=len( train_dataloader)
args.logging_steps=len( train_dataloader)
args.num_train_epochs=args.epoch
model.to(args.device)
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,
num_training_steps=args.max_steps)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt')
optimizer_last = os.path.join(checkpoint_last, 'optimizer.pt')
if os.path.exists(scheduler_last):
scheduler.load_state_dict(torch.load(scheduler_last))
if os.path.exists(optimizer_last):
optimizer.load_state_dict(torch.load(optimizer_last))
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (
torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", args.max_steps)
global_step = args.start_step
tr_loss, logging_loss,avg_loss,tr_nb,tr_num,train_loss = 0.0, 0.0,0.0,0,0,0
best_mrr=0.0
best_f1=0
# model.resize_token_embeddings(len(tokenizer))
model.zero_grad()
set_seed(args.seed) # Added here for reproducibility (even between python 2 and 3)
for idx in range(args.start_epoch, int(args.num_train_epochs)):
bar = train_dataloader
tr_num=0
train_loss=0
for step, batch in enumerate(bar):
inputs = batch[0].to(args.device)
labels=batch[1].to(args.device)
model.train()
loss,logits = model(inputs,labels)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
tr_loss += loss.item()
tr_num+=1
train_loss+=loss.item()
if avg_loss==0:
avg_loss=tr_loss
avg_loss=round(train_loss/tr_num,5)
if step % 100 == 0:
logger.info("step {}: epoch {} loss {}".format(step, idx,avg_loss))
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
scheduler.step()
global_step += 1
output_flag=True
avg_loss=round(np.exp((tr_loss - logging_loss) /(global_step- tr_nb)),4)
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
logging_loss = tr_loss
tr_nb=global_step
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer,pool=pool,eval_when_training=True)
# Save model checkpoint
if results['eval_f1']>best_f1:
best_f1=results['eval_f1']
logger.info(" "+"*"*20)
logger.info(" Best f1:%s",round(best_f1,4))
logger.info(" "+"*"*20)
checkpoint_prefix = 'checkpoint-best-f1'
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model,'module') else model
output_dir = os.path.join(output_dir, '{}'.format('model.bin'))
torch.save(model_to_save.state_dict(), output_dir)
logger.info("Saving model checkpoint to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
break
return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, prefix="",pool=None,eval_when_training=False):
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_output_dir = args.output_dir
eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True,pool=pool)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size,num_workers=4,pin_memory=True)
# multi-gpu evaluate
if args.n_gpu > 1 and eval_when_training is False:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
logits=[]
y_trues=[]
for batch in eval_dataloader:
inputs = batch[0].to(args.device)
labels=batch[1].to(args.device)
with torch.no_grad():
lm_loss,logit = model(inputs,labels)
eval_loss += lm_loss.mean().item()
logits.append(logit.cpu().numpy())
y_trues.append(labels.cpu().numpy())
nb_eval_steps += 1
logits=np.concatenate(logits,0)
y_trues=np.concatenate(y_trues,0)
best_threshold=0
best_f1=0
for i in range(1,100):
threshold=i/100
y_preds=logits[:,1]>threshold
from sklearn.metrics import recall_score
recall=recall_score(y_trues, y_preds, average='macro')
from sklearn.metrics import precision_score
precision=precision_score(y_trues, y_preds, average='macro')
from sklearn.metrics import f1_score
f1=f1_score(y_trues, y_preds, average='macro')
if f1>best_f1:
best_f1=f1
best_threshold=threshold
y_preds=logits[:,1]>best_threshold
from sklearn.metrics import recall_score
recall=recall_score(y_trues, y_preds, average='macro')
from sklearn.metrics import precision_score
precision=precision_score(y_trues, y_preds, average='macro')
from sklearn.metrics import f1_score
f1=f1_score(y_trues, y_preds, average='macro')
result = {
"eval_recall": float(recall),
"eval_precision": float(precision),
"eval_f1": float(f1),
"eval_threshold":best_threshold,
}
logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(round(result[key],4)))
return result
def test(args, model, tokenizer, prefix="",pool=None,best_threshold=0):
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_dataset = load_and_cache_examples(args, tokenizer, test=True,pool=pool)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size,num_workers=4,pin_memory=True)
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running Test {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
logits=[]
y_trues=[]
for batch in eval_dataloader:
inputs = batch[0].to(args.device)
labels=batch[1].to(args.device)
with torch.no_grad():
lm_loss,logit = model(inputs,labels)
eval_loss += lm_loss.mean().item()
logits.append(logit.cpu().numpy())
y_trues.append(labels.cpu().numpy())
nb_eval_steps += 1
logits=np.concatenate(logits,0)
y_preds=logits[:,1]>best_threshold
with open(os.path.join(args.output_dir,"predictions.txt"),'w') as f:
for example,pred in zip(eval_dataset.examples,y_preds):
if pred:
f.write(example.url1+'\t'+example.url2+'\t'+'1'+'\n')
else:
f.write(example.url1+'\t'+example.url2+'\t'+'0'+'\n')
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--train_data_file", default=None, type=str, required=True,
help="The input training data file (a text file).")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--eval_data_file", default=None, type=str,
                        help="An optional input evaluation data file to evaluate on (a text file).")
parser.add_argument("--test_data_file", default=None, type=str,
                        help="An optional input test data file to evaluate on (a text file).")
parser.add_argument("--model_type", default="bert", type=str,
help="The model architecture to be fine-tuned.")
parser.add_argument("--model_name_or_path", default=None, type=str,
help="The model checkpoint for weights initialization.")
parser.add_argument("--mlm", action='store_true',
help="Train with masked-language modeling loss instead of language modeling.")
parser.add_argument("--mlm_probability", type=float, default=0.15,
help="Ratio of tokens to mask for masked language modeling loss")
parser.add_argument("--config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
parser.add_argument("--cache_dir", default="", type=str,
                        help="Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)")
parser.add_argument("--block_size", default=-1, type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_test", action='store_true',
                        help="Whether to run eval on the test set.")
parser.add_argument("--evaluate_during_training", action='store_true',
help="Run evaluation during training at each logging step.")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--train_batch_size", default=4, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--eval_batch_size", default=4, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float,
                        help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=1.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument('--logging_steps', type=int, default=50,
help="Log every X updates steps.")
parser.add_argument('--save_steps', type=int, default=50,
help="Save checkpoint every X updates steps.")
parser.add_argument('--save_total_limit', type=int, default=None,
                        help='Limit the total number of checkpoints; delete older checkpoints in the output_dir (no deletion by default)')
parser.add_argument("--eval_all_checkpoints", action='store_true',
                        help="Evaluate all checkpoints starting with the same prefix as model_name_or_path and ending with a step number")
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--overwrite_output_dir', action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument('--overwrite_cache', action='store_true',
help="Overwrite the cached training and evaluation sets")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--epoch', type=int, default=42,
                        help="Number of training epochs.")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
pool = multiprocessing.Pool(cpu_cont)
args = parser.parse_args()
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
    else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
args.per_gpu_train_batch_size=args.train_batch_size//args.n_gpu
args.per_gpu_eval_batch_size=args.eval_batch_size//args.n_gpu
# Setup logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args.seed)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Barrier to make sure only the first process in distributed training download model & vocab
args.start_epoch = 0
args.start_step = 0
checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
if os.path.exists(checkpoint_last) and os.listdir(checkpoint_last):
args.model_name_or_path = os.path.join(checkpoint_last, 'pytorch_model.bin')
args.config_name = os.path.join(checkpoint_last, 'config.json')
idx_file = os.path.join(checkpoint_last, 'idx_file.txt')
with open(idx_file, encoding='utf-8') as idxf:
args.start_epoch = int(idxf.readlines()[0].strip()) + 1
step_file = os.path.join(checkpoint_last, 'step_file.txt')
if os.path.exists(step_file):
with open(step_file, encoding='utf-8') as stepf:
args.start_step = int(stepf.readlines()[0].strip())
logger.info("reload model from {}, resume from {} epoch".format(checkpoint_last, args.start_epoch))
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
cache_dir=args.cache_dir if args.cache_dir else None)
config.num_labels=2
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None)
if args.block_size <= 0:
args.block_size = tokenizer.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer.max_len_single_sentence)
if args.model_name_or_path:
model = model_class.from_pretrained(args.model_name_or_path,
from_tf=bool('.ckpt' in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None)
else:
model = model_class(config)
model=Model(model,config,tokenizer,args)
if args.local_rank == 0:
torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache
train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False,pool=pool)
if args.local_rank == 0:
torch.distributed.barrier()
global_step, tr_loss = train(args, train_dataset, model, tokenizer,pool)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
checkpoint_prefix = 'checkpoint-best-f1/model.bin'
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
model.load_state_dict(torch.load(output_dir))
model.to(args.device)
result=evaluate(args, model, tokenizer,pool=pool)
if args.do_test and args.local_rank in [-1, 0]:
checkpoint_prefix = 'checkpoint-best-f1/model.bin'
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
model.load_state_dict(torch.load(output_dir))
model.to(args.device)
test(args, model, tokenizer,pool=pool,best_threshold=0.5)
return results
if __name__ == "__main__":
main()
|
CodeGen-main
|
CodeXGLUE/Code-Code/Clone-detection-BigCloneBench/code/run.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
import torch.nn as nn
import torch
from torch.autograd import Variable
import copy
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss, MSELoss
class RobertaClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size*2, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, 2)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = x.reshape(-1,x.size(-1)*2)
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
class Model(nn.Module):
def __init__(self, encoder,config,tokenizer,args):
super(Model, self).__init__()
self.encoder = encoder
self.config=config
self.tokenizer=tokenizer
self.classifier=RobertaClassificationHead(config)
self.args=args
def forward(self, input_ids=None,labels=None):
input_ids=input_ids.view(-1,self.args.block_size)
outputs = self.encoder(input_ids= input_ids,attention_mask=input_ids.ne(1))[0]
logits=self.classifier(outputs)
        prob=F.softmax(logits, dim=-1)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits, labels)
return loss,prob
else:
return prob
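# Note (added for clarity): input_ids arrives as the concatenation of the two code
# snippets; forward() reshapes it to (2 * batch, block_size) so the encoder encodes
# each snippet separately, and RobertaClassificationHead then concatenates the two
# <s> embeddings of a pair before scoring it as clone / not-clone.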
|
CodeGen-main
|
CodeXGLUE/Code-Code/Clone-detection-BigCloneBench/code/model.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( n ) :
if n <= 1 :
return False
for i in range ( 2 , n ) :
if n % i == 0 :
return False ;
return True
#TOFILL
if __name__ == '__main__':
param = [
(37,),
(39,),
(73,),
(8,),
(28,),
(66,),
(20,),
(36,),
(6,),
(51,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
|
CodeGen-main
|
data/transcoder_evaluation_gfg/python/PRIMALITY_TEST_SET_1_INTRODUCTION_AND_SCHOOL_METHOD.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( S , n ) :
found = False
S.sort ( )
for i in range ( n - 1 , - 1 , - 1 ) :
for j in range ( 0 , n ) :
if ( i == j ) :
continue
for k in range ( j + 1 , n ) :
if ( i == k ) :
continue
for l in range ( k + 1 , n ) :
if ( i == l ) :
continue
if ( S [ i ] == S [ j ] + S [ k ] + S [ l ] ) :
found = True
return S [ i ]
if ( found == False ) :
return - 1
#TOFILL
if __name__ == '__main__':
param = [
([8, 12, 14, 15, 16, 20, 27, 28, 29, 30, 35, 41, 46, 51, 53, 55, 55, 58, 63, 64, 72, 73, 75, 75, 75, 82, 82, 86, 89, 91, 92, 94, 95, 95, 97, 97, 98],24,),
([-62, 48, -22, -44, -58, -50, -82, 34, 26, -2, 86, -44, 92, -96, 42, -20, 10, 74, -56, -12, -28, -40],19,),
([0, 0, 0, 0, 0, 0, 0, 1, 1, 1],8,),
([84, 58, 10, 67, 77, 66, 10, 47, 65, 55, 54],5,),
([-46, -28, -20, -18, 4, 8, 18, 38, 90, 90],6,),
([0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0],35,),
([11, 13, 14, 21, 26, 28, 36, 39, 41, 42, 43, 44, 49, 49, 57, 58, 59, 59, 63, 64, 67, 69, 70, 75, 78, 79, 83, 83, 86, 91, 92, 93, 96, 96, 96, 97],30,),
([74, 52, -16, 34, -88, 62, 54, 46, -82, 76, -48, 54, 50, -66, -18, 78, -48, 38, 96, -32, -82, 0, -76, 46, -56, 4, -30, -70, -62],16,),
([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],17,),
([55, 74, 18, 4, 68, 66, 33, 61, 66, 92, 21, 9, 49, 14, 99, 87, 74, 6, 11, 25, 5, 58, 56, 20],23,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
|
CodeGen-main
|
data/transcoder_evaluation_gfg/python/FIND_LARGEST_D_IN_ARRAY_SUCH_THAT_A_B_C_D.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( s ) :
n = len ( s ) ;
sub_count = ( n * ( n + 1 ) ) // 2 ;
arr = [ 0 ] * sub_count ;
index = 0 ;
for i in range ( n ) :
for j in range ( 1 , n - i + 1 ) :
arr [ index ] = s [ i : i + j ] ;
index += 1 ;
arr.sort ( ) ;
res = "" ;
for i in range ( sub_count ) :
res += arr [ i ] ;
return res ;
#TOFILL
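# Editor-added illustrative fill (a sketch, not the dataset's reference
# solution): enumerate every substring, sort lexicographically, concatenate.
def f_filled(s):
    n = len(s)
    subs = sorted(s[i:j] for i in range(n) for j in range(i + 1, n + 1))
    return "".join(subs)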
if __name__ == '__main__':
param = [
('sqGOi',),
('848580',),
('01001110011001',),
('ZhWXUKmeiI',),
('0917296541285',),
('01101001111100',),
('tjP kR',),
('999907',),
('011100',),
('qJPHNSJOUj',)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
|
CodeGen-main
|
data/transcoder_evaluation_gfg/python/LEXICOGRAPHICAL_CONCATENATION_SUBSTRINGS_STRING.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold(str1, str2):
if (len(str1) > len(str2)):
t = str1
str1 = str2
str2 = t
str = ""
n1 = len(str1)
n2 = len(str2)
str1 = str1[:: - 1]
str2 = str2[:: - 1]
carry = 0
for i in range(n1):
sum = ((ord(str1[i]) - 48) + ((ord(str2[i]) - 48) + carry))
str += chr(sum % 10 + 48)
carry = int(sum / 10)
for i in range(n1, n2):
sum = ((ord(str2[i]) - 48) + carry)
str += chr(sum % 10 + 48)
carry = (int)(sum / 10)
if (carry):
str += chr(carry + 48)
str = str[:: - 1]
return str
#TOFILL
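# Editor-added illustrative fill (a sketch, not the dataset's reference
# solution): digit-wise addition over character codes (ord(c) - 48), mirroring
# f_gold so it behaves identically even on the non-numeric test strings; the
# carry truncates toward zero, like int(sum / 10) in the reference.
def f_filled(str1, str2):
    if len(str1) > len(str2):
        str1, str2 = str2, str1
    r1, r2 = str1[::-1], str2[::-1]
    out = []
    carry = 0
    for i in range(len(r2)):
        d1 = (ord(r1[i]) - 48) if i < len(r1) else 0
        s = d1 + (ord(r2[i]) - 48) + carry
        out.append(chr(s % 10 + 48))
        carry = int(s / 10)
    if carry:
        out.append(chr(carry + 48))
    return "".join(reversed(out))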
if __name__ == '__main__':
param = [
('VkfzrPG', 'rKZ',),
('0526110506447', '903',),
('011010010', '110100000',),
('sPAwZACc ', 'liYMsojPiinOV',),
('3', '611',),
('0101', '01110101011',),
('VTtNu', 'Wsmc',),
('2317170', '898421173423',),
('111111000010', '01100001110111',),
('Ktt', 'CTbbVX wGBkE',)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success += 1
print("#Results: %i, %i" % (n_success, len(param)))
|
CodeGen-main
|
data/transcoder_evaluation_gfg/python/SUM_TWO_LARGE_NUMBERS.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( num , divisor ) :
while ( num >= divisor ) :
num -= divisor ;
return num ;
#TOFILL
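# Editor-added illustrative fill (a sketch, not the dataset's reference
# solution): still avoids % and /, but subtracts doubled copies of the divisor
# instead of one divisor at a time.
def f_filled(num, divisor):
    while num >= divisor:
        d = divisor
        while num >= (d << 1):
            d <<= 1
        num -= d
    return num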
if __name__ == '__main__':
param = [
(70,13,),
(77,3,),
(77,73,),
(88,54,),
(96,39,),
(6,10,),
(79,95,),
(44,32,),
(26,86,),
(82,91,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
|
CodeGen-main
|
data/transcoder_evaluation_gfg/python/PROGRAM_TO_FIND_REMAINDER_WITHOUT_USING_MODULO_OR_OPERATOR_2.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( n ) :
while ( int ( n / 100 ) ) :
last_digit = int ( n % 10 )
n = int ( n / 10 )
n += last_digit * 3
return ( n % 29 == 0 )
#TOFILL
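# Editor-added illustrative fill (a sketch, not the dataset's reference
# solution): a direct check; the digit manipulation in f_gold preserves
# divisibility by 29, since 3 * (10a + b) = 30a + 3b = (a + 3b) mod 29.
def f_filled(n):
    return n % 29 == 0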
if __name__ == '__main__':
param = [
(29,),
(0,),
(65,),
(1419,),
(54,),
(7,),
(44,),
(34,),
(1160,),
(292929002929,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
|
CodeGen-main
|
data/transcoder_evaluation_gfg/python/NUMBER_IS_DIVISIBLE_BY_29_OR_NOT.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( n ) :
num = n ;
dec_value = 0 ;
base1 = 1 ;
len1 = len ( num ) ;
for i in range ( len1 - 1 , - 1 , - 1 ) :
if ( num [ i ] == '1' ) :
dec_value += base1 ;
base1 = base1 * 2 ;
return dec_value ;
#TOFILL
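# Editor-added illustrative fill (a sketch, not the dataset's reference
# solution): left-to-right accumulation; like f_gold, any character other than
# '1' is treated as a zero bit.
def f_filled(n):
    value = 0
    for ch in n:
        value = value * 2 + (1 if ch == '1' else 0)
    return value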
if __name__ == '__main__':
param = [
('uEmIAgF',),
('753310137',),
('010011010',),
('kNi',),
('04562016903312',),
('000111101',),
('bk',),
('9',),
('1',),
('XxT nXLlk',)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
|
CodeGen-main
|
data/transcoder_evaluation_gfg/python/PROGRAM_BINARY_DECIMAL_CONVERSION_1.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( arr , n ) :
found = False
arr.sort ( )
for i in range ( 0 , n - 1 ) :
l = i + 1
r = n - 1
x = arr [ i ]
while ( l < r ) :
if ( x + arr [ l ] + arr [ r ] == 0 ) :
print ( x , arr [ l ] , arr [ r ] )
l += 1
r -= 1
found = True
elif ( x + arr [ l ] + arr [ r ] < 0 ) :
l += 1
else :
r -= 1
if ( found == False ) :
print ( " No Triplet Found" )
#TOFILL
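# Editor-added illustrative fill (a sketch, not the dataset's reference
# solution): the same two-pointer scan as f_gold; it sorts the list in place
# and prints the triplets, because the harness compares the mutated arguments.
def f_filled(arr, n):
    arr.sort()
    found = False
    for i in range(n - 1):
        x = arr[i]
        l, r = i + 1, n - 1
        while l < r:
            s = x + arr[l] + arr[r]
            if s == 0:
                print(x, arr[l], arr[r])
                l += 1
                r -= 1
                found = True
            elif s < 0:
                l += 1
            else:
                r -= 1
    if not found:
        print(" No Triplet Found")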
if __name__ == '__main__':
param = [
([4, 24, 27, 34, 39, 41, 67, 69, 84, 91, 94],7,),
([14, 8, 92, 46, 62, 8, 8, 70, 98, -20, -16, -6, -2, -36, 46, 46, -26, 50, 76, 96, -32, 2, -32, 72, 48, 24, 64, 42, 40, 92],29,),
([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],15,),
([47, 69, 42, 36, 82, 65, 84],3,),
([-98, -74, -62, -60, -60, -32],5,),
([1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0],35,),
([1, 4, 4, 9, 20, 23, 24, 27, 28, 29, 31, 35, 42, 45, 46, 47, 49, 52, 55, 57, 62, 67, 72, 78, 79, 82, 86, 86, 88],26,),
([92, 0, 56, 90, -10, -46, 44, -86, -16, -90, -92, -44, -88, 24, -80, -98, 68, -86, 98, -10, 18, -40, 98, 40, -58, -6, -38, 72, 90],15,),
([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],17,),
([7, 3, 37, 60, 6, 26, 30, 21, 7, 59, 18, 69, 40, 47, 34, 19, 51, 27, 4, 7, 56, 4, 57, 62, 54, 9, 93, 31, 9, 85],28,)
]
filled_function_param = [
([4, 24, 27, 34, 39, 41, 67, 69, 84, 91, 94],7,),
([14, 8, 92, 46, 62, 8, 8, 70, 98, -20, -16, -6, -2, -36, 46, 46, -26, 50, 76, 96, -32, 2, -32, 72, 48, 24, 64, 42, 40, 92],29,),
([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],15,),
([47, 69, 42, 36, 82, 65, 84],3,),
([-98, -74, -62, -60, -60, -32],5,),
([1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0],35,),
([1, 4, 4, 9, 20, 23, 24, 27, 28, 29, 31, 35, 42, 45, 46, 47, 49, 52, 55, 57, 62, 67, 72, 78, 79, 82, 86, 86, 88],26,),
([92, 0, 56, 90, -10, -46, 44, -86, -16, -90, -92, -44, -88, 24, -80, -98, 68, -86, 98, -10, 18, -40, 98, 40, -58, -6, -38, 72, 90],15,),
([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],17,),
([7, 3, 37, 60, 6, 26, 30, 21, 7, 59, 18, 69, 40, 47, 34, 19, 51, 27, 4, 7, 56, 4, 57, 62, 54, 9, 93, 31, 9, 85],28,)
]
n_success = 0
for i, parameters_set in enumerate(param):
f_filled(*(filled_function_param[i]))
f_gold(*parameters_set)
if parameters_set == filled_function_param[i]:
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
|
CodeGen-main
|
data/transcoder_evaluation_gfg/python/FIND_TRIPLETS_ARRAY_WHOSE_SUM_EQUAL_ZERO_2.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( arr , n ) :
inc , dcr = dict ( ) , dict ( )
len_inc , len_dcr = [ 0 ] * n , [ 0 ] * n
longLen = 0
for i in range ( n ) :
len = 0
if inc.get ( arr [ i ] - 1 ) in inc.values ( ) :
len = inc.get ( arr [ i ] - 1 )
inc [ arr [ i ] ] = len_inc [ i ] = len + 1
for i in range ( n - 1 , - 1 , - 1 ) :
len = 0
if dcr.get ( arr [ i ] - 1 ) in dcr.values ( ) :
len = dcr.get ( arr [ i ] - 1 )
dcr [ arr [ i ] ] = len_dcr [ i ] = len + 1
for i in range ( n ) :
if longLen < ( len_inc [ i ] + len_dcr [ i ] - 1 ) :
longLen = len_inc [ i ] + len_dcr [ i ] - 1
return longLen
#TOFILL
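# Editor-added illustrative fill (a sketch, not the dataset's reference
# solution): the same idea as f_gold with the dictionary lookups simplified to
# dict.get(value - 1, 0).
def f_filled(arr, n):
    inc, dcr = {}, {}
    len_inc, len_dcr = [0] * n, [0] * n
    for i in range(n):
        len_inc[i] = inc.get(arr[i] - 1, 0) + 1
        inc[arr[i]] = len_inc[i]
    for i in range(n - 1, -1, -1):
        len_dcr[i] = dcr.get(arr[i] - 1, 0) + 1
        dcr[arr[i]] = len_dcr[i]
    longest = 0
    for i in range(n):
        longest = max(longest, len_inc[i] + len_dcr[i] - 1)
    return longest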
if __name__ == '__main__':
param = [
([78],0,),
([-6, -18, -48, 58, -54, 76, 80, -56, 86, 58, -86, -86, -88, 32, 12, 58, 58, -16, 86, -24, 84, 86, 36, 18, 30, -32, -4, -36, -72, -4, 42, 94],18,),
([0, 1],1,),
([92, 26, 72, 8, 66, 28, 34, 61, 28],5,),
([-86, -82, -76, -68, -66, -64, -62, -56, -48, -42, -38, -30, -22, -18, -10, -10, -4, -2, 4, 28, 42, 44, 50, 50, 56, 58, 60, 76, 82, 86, 86, 98],25,),
([0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0],17,),
([3, 4, 8, 9, 12, 13, 16, 19, 23, 25, 29, 31, 34, 36, 38, 41, 42, 47, 49, 50, 51, 51, 58, 63, 66, 70, 73, 74, 75, 75, 75, 76, 76, 80, 82, 83, 83, 84, 86, 89, 90, 91, 91, 95, 96],44,),
([4, -76, 60, 48, -14, 72],3,),
([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],17,),
([66, 80, 79, 72, 1, 67, 20, 67, 32, 40, 22, 64, 58, 67, 10, 21, 37, 49],15,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
|
CodeGen-main
|
data/transcoder_evaluation_gfg/python/LENGTH_LONGEST_STRICT_BITONIC_SUBSEQUENCE.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( a , b ) :
    neg = ( a < 0 )
    if ( a < 0 ) :
        a = - a
    if ( b < 0 ) :
        b = - b
    mod = a
    while ( mod >= b ) :
        mod = mod - b
    if ( neg ) :
        return - mod
    return mod
#TOFILL
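# Editor-added illustrative fill (a sketch, not the dataset's reference
# solution): math.fmod on the magnitudes; the harness only compares absolute
# values with a relative tolerance, so rounding differences are irrelevant.
import math

def f_filled(a, b):
    return math.fmod(abs(a), abs(b))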
if __name__ == '__main__':
param = [
(3243.229719038493,5659.926861939672,),
(-4362.665881044217,-9196.507113304497,),
(7255.066257575837,2623.200060506935,),
(-6929.554320261099,-3009.0234530313287,),
(3569.942027998315,6920.809419868375,),
(-6513.849053096595,-70.95992406437102,),
(7333.183189243961,580.3500610971768,),
(-2856.1752826258803,-9625.97442825802,),
(9787.228111241662,2419.6844962423256,),
(-1722.873699288031,-8370.700544254058,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if abs(1 - (0.0000001 + abs(f_gold(*parameters_set))) / (abs(f_filled(*parameters_set)) + 0.0000001)) < 0.001:
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
|
CodeGen-main
|
data/transcoder_evaluation_gfg/python/MODULUS_TWO_FLOAT_DOUBLE_NUMBERS.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( str ) :
n = len ( str ) ;
return int ( n * ( n + 1 ) / 2 ) ;
#TOFILL
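# Editor-added illustrative fill (a sketch, not the dataset's reference
# solution): a string of length n has n * (n + 1) / 2 non-empty substrings
# counted by position.
def f_filled(s):
    n = len(s)
    return n * (n + 1) // 2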
if __name__ == '__main__':
param = [
('gZFGZsHCimLf',),
('505357',),
('011011101',),
('ovfwP Osauz',),
('92132238746026',),
('01100',),
('RaOWYQRfiWKSyC',),
('861330202',),
('001100010',),
('uvpKlGUBLOMba',)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
|
CodeGen-main
|
data/transcoder_evaluation_gfg/python/NUMBER_SUBSTRINGS_STRING.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( arr , l , r , x ) :
if ( r >= l ) :
mid = int ( l + ( r - l ) / 2 )
if ( arr [ mid ] == x ) : return mid
if ( mid > l and arr [ mid - 1 ] == x ) :
return ( mid - 1 )
if ( mid < r and arr [ mid + 1 ] == x ) :
return ( mid + 1 )
if ( arr [ mid ] > x ) :
return f_gold ( arr , l , mid - 2 , x )
return f_gold ( arr , mid + 2 , r , x )
return - 1
#TOFILL
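# Editor-added illustrative fill (a sketch, not the dataset's reference
# solution): the same neighbour-checking binary search as f_gold, written
# iteratively instead of recursively.
def f_filled(arr, l, r, x):
    while r >= l:
        mid = l + (r - l) // 2
        if arr[mid] == x:
            return mid
        if mid > l and arr[mid - 1] == x:
            return mid - 1
        if mid < r and arr[mid + 1] == x:
            return mid + 1
        if arr[mid] > x:
            r = mid - 2
        else:
            l = mid + 2
    return -1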
if __name__ == '__main__':
param = [
([6,7,15,42,47,54,56,59,59,64,68,70,71,75,91,93], 0, 15, 71),
([6,7,15,42,47,56,54,59,59,64,68,71,70, 75,91,93], 0, 15, 71),
    ([-92,-96,-68,-40,70], 0, 4, -96),
([-92,-86,-68,-40,70], 0, 4, 20),
([-3,-1,0,30,10,45,70,60], 0, 7, 0),
([-3,-1,0,10,5,45,60,50], 0, 7, 12),
([-3,-1,0,10,30,45,60,70], 0, 7, 18),
([0,0,1], 0, 2, 20),
([1,1,1], 0, 2, 17),
([30,2,30,45], 0, 3, 28)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
|
CodeGen-main
|
data/transcoder_evaluation_gfg/python/SEARCH_ALMOST_SORTED_ARRAY.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( x ) :
next = 0
if ( x ) :
rightOne = x & - ( x )
nextHigherOneBit = x + int ( rightOne )
rightOnesPattern = x ^ int ( nextHigherOneBit )
rightOnesPattern = ( int ( rightOnesPattern ) / int ( rightOne ) )
rightOnesPattern = int ( rightOnesPattern ) >> 2
next = nextHigherOneBit | rightOnesPattern
return next
#TOFILL
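# Editor-added illustrative fill (a sketch, not the dataset's reference
# solution): brute-force scan upwards for the next integer with the same
# popcount; fine for the small test values below.
def f_filled(x):
    if x == 0:
        return 0
    target = bin(x).count("1")
    candidate = x + 1
    while bin(candidate).count("1") != target:
        candidate += 1
    return candidate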
if __name__ == '__main__':
param = [
(42,),
(75,),
(94,),
(5,),
(52,),
(22,),
(77,),
(44,),
(85,),
(59,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
|
CodeGen-main
|
data/transcoder_evaluation_gfg/python/NEXT_HIGHER_NUMBER_WITH_SAME_NUMBER_OF_SET_BITS.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
def f_gold(arr, n, A, B, C):
for i in range(n):
arr[i] = (A * arr[i] * arr[i] + B * arr[i] + C)
index = - (sys.maxsize - 1)
maximum = - (sys.maxsize - 1)
for i in range(n):
if maximum < arr[i]:
index = i
maximum = arr[i]
i = 0
j = n - 1
new_arr = [0] * n
k = 0
while i < index and j > index:
if arr[i] < arr[j]:
new_arr[k] = arr[i]
k += 1
i += 1
else:
new_arr[k] = arr[j]
k += 1
j -= 1
while i < index:
new_arr[k] = arr[i]
k += 1
i += 1
while j > index:
new_arr[k] = arr[j]
k += 1
j -= 1
new_arr[n - 1] = maximum
for i in range(n):
arr[i] = new_arr[i]
#TOFILL
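# Editor-added illustrative fill (a sketch, not the dataset's reference
# solution): applies the quadratic to the first n entries and rearranges them
# in place with the same two-pointer merge around the (first) maximum, since
# the harness compares the mutated argument lists.
def f_filled(arr, n, A, B, C):
    for i in range(n):
        arr[i] = A * arr[i] * arr[i] + B * arr[i] + C
    index, maximum = 0, float("-inf")
    for i in range(n):
        if arr[i] > maximum:
            index, maximum = i, arr[i]
    new_arr = [0] * n
    i, j, k = 0, n - 1, 0
    while i < index and j > index:
        if arr[i] < arr[j]:
            new_arr[k] = arr[i]
            i += 1
        else:
            new_arr[k] = arr[j]
            j -= 1
        k += 1
    while i < index:
        new_arr[k] = arr[i]
        i += 1
        k += 1
    while j > index:
        new_arr[k] = arr[j]
        j -= 1
        k += 1
    new_arr[n - 1] = maximum
    arr[:n] = new_arr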
if __name__ == '__main__':
param = [
([9, 30, 49, 65, 78, 85, 85, 92], 4, 4, 5, 4,),
([-48, 89, -60, 66, 71, -37, 47, -50, 61, 41, -22, -3, 90, -57, 77, -64, 22,
8, -90, -5, -94, -43, 29, -29, 86, -79, -8, 27, -20, -44, 16], 18, 20, 20, 23,),
([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], 25, 26, 15, 18,),
([87, 70, 77, 87, 73, 81, 66, 19, 83, 7, 63, 42, 42, 59, 20, 73, 17, 27, 47, 2, 63, 62, 19, 17, 69, 39,
82, 71, 81, 39, 36, 40, 45, 4, 25, 69, 30, 76, 68, 88, 29, 73, 68, 51, 24, 14, 69, 18], 33, 42, 35, 41,),
([-91, -85, -77, -73, -70, -68, -24, -21, -12, -
1, 9, 29, 48, 52, 56, 63, 88], 8, 12, 8, 8,),
([0, 0, 0, 1, 1, 0, 1, 1, 1, 1], 7, 8, 6, 7,),
([4, 5, 9, 14, 18, 20, 22, 23, 25, 28, 30, 31, 34, 35, 36, 38, 38, 39, 44, 48, 49, 51,
54, 55, 59, 64, 66, 71, 72, 72, 73, 76, 78, 82, 82, 84, 92, 93, 95], 22, 33, 19, 25,),
([40, 6, 33, 8, 78, -58, 2, 24, 40, 3, 46, 94, -26, 8, 22, -83, 96, -29, -
38, -59, 19, 62, 98, -55, -42, 79, 26, 62, -56, -85, -22], 20, 16, 19, 16,),
([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 23, 21, 19, 23,),
([3, 68, 40, 48, 54, 35, 95, 56, 89, 40, 77, 68, 46, 78, 13, 27, 6, 17, 36, 99,
81, 2, 77, 52, 66, 52, 92, 43, 90, 22, 55, 67, 99, 60, 58], 28, 21, 23, 23,)
]
filled_function_param = [
([9, 30, 49, 65, 78, 85, 85, 92], 4, 4, 5, 4,),
([-48, 89, -60, 66, 71, -37, 47, -50, 61, 41, -22, -3, 90, -57, 77, -64, 22,
8, -90, -5, -94, -43, 29, -29, 86, -79, -8, 27, -20, -44, 16], 18, 20, 20, 23,),
([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], 25, 26, 15, 18,),
([87, 70, 77, 87, 73, 81, 66, 19, 83, 7, 63, 42, 42, 59, 20, 73, 17, 27, 47, 2, 63, 62, 19, 17, 69, 39,
82, 71, 81, 39, 36, 40, 45, 4, 25, 69, 30, 76, 68, 88, 29, 73, 68, 51, 24, 14, 69, 18], 33, 42, 35, 41,),
([-91, -85, -77, -73, -70, -68, -24, -21, -12, -
1, 9, 29, 48, 52, 56, 63, 88], 8, 12, 8, 8,),
([0, 0, 0, 1, 1, 0, 1, 1, 1, 1], 7, 8, 6, 7,),
([4, 5, 9, 14, 18, 20, 22, 23, 25, 28, 30, 31, 34, 35, 36, 38, 38, 39, 44, 48, 49, 51,
54, 55, 59, 64, 66, 71, 72, 72, 73, 76, 78, 82, 82, 84, 92, 93, 95], 22, 33, 19, 25,),
([40, 6, 33, 8, 78, -58, 2, 24, 40, 3, 46, 94, -26, 8, 22, -83, 96, -29, -
38, -59, 19, 62, 98, -55, -42, 79, 26, 62, -56, -85, -22], 20, 16, 19, 16,),
([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 23, 21, 19, 23,),
([3, 68, 40, 48, 54, 35, 95, 56, 89, 40, 77, 68, 46, 78, 13, 27, 6, 17, 36, 99,
81, 2, 77, 52, 66, 52, 92, 43, 90, 22, 55, 67, 99, 60, 58], 28, 21, 23, 23,)
]
n_success = 0
for i, parameters_set in enumerate(param):
f_filled(*(filled_function_param[i]))
f_gold(*parameters_set)
if parameters_set == filled_function_param[i]:
n_success += 1
print("#Results: %i, %i" % (n_success, len(param)))
|
CodeGen-main
|
data/transcoder_evaluation_gfg/python/SORT_ARRAY_APPLYING_GIVEN_EQUATION.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( n ) :
if n < 3 :
return n
elif n >= 3 and n < 10 :
return n - 1
po = 1
    while n // po > 9 :
po = po * 10
    msd = n // po
if msd != 3 :
return f_gold ( msd ) * f_gold ( po - 1 ) + f_gold ( msd ) + f_gold ( n % po )
else :
return f_gold ( msd * po - 1 )
#TOFILL
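# Editor-added illustrative fill (a sketch, not the dataset's reference
# solution): brute-force count of the numbers in [1, n] whose decimal
# representation contains no digit 3.
def f_filled(n):
    return sum(1 for k in range(1, n + 1) if '3' not in str(k))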
if __name__ == '__main__':
param = [
(85,),
(86,),
(3,),
(35,),
(59,),
(38,),
(33,),
(15,),
(75,),
(74,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
|
CodeGen-main
|
data/transcoder_evaluation_gfg/python/COUNT_NUMBERS_THAT_DONT_CONTAIN_3.py
|